/* sched.c */

/*
 * The contents of this file are subject to the Mozilla Public
 * License Version 1.1 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain a copy of
 * the License at http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS
 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * rights and limitations under the License.
 *
 * The Original Code is the Netscape Portable Runtime library.
 *
 * The Initial Developer of the Original Code is Netscape
 * Communications Corporation. Portions created by Netscape are
 * Copyright (C) 1994-2000 Netscape Communications Corporation. All
 * Rights Reserved.
 *
 * Contributor(s): Silicon Graphics, Inc.
 *
 * Portions created by SGI are Copyright (C) 2000-2001 Silicon
 * Graphics, Inc. All Rights Reserved.
 *
 * Alternatively, the contents of this file may be used under the
 * terms of the GNU General Public License Version 2 or later (the
 * "GPL"), in which case the provisions of the GPL are applicable
 * instead of those above. If you wish to allow use of your
 * version of this file only under the terms of the GPL and not to
 * allow others to use your version of this file under the MPL,
 * indicate your decision by deleting the provisions above and
 * replace them with the notice and other provisions required by
 * the GPL. If you do not delete the provisions above, a recipient
 * may use your version of this file under either the MPL or the
 * GPL.
 */

/*
 * This file is derived directly from Netscape Communications Corporation,
 * and consists of extensive modifications made during the year(s) 1999-2000.
 */

#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <string.h>
#include <time.h>
#include <errno.h>
#include "common.h"

/* Global data */
_st_vp_t _st_this_vp;            /* This VP */
_st_thread_t *_st_this_thread;   /* Current thread */
int _st_active_count = 0;        /* Active thread count */

time_t _st_curr_time = 0;        /* Current time as returned by time(2) */
st_utime_t _st_last_tset;        /* Last time it was fetched */

int st_poll(struct pollfd *pds, int npds, st_utime_t timeout)
{
    struct pollfd *pd;
    struct pollfd *epd = pds + npds;
    _st_pollq_t pq;
    _st_thread_t *me = _ST_CURRENT_THREAD();
    int n;

    if (me->flags & _ST_FL_INTERRUPT) {
        me->flags &= ~_ST_FL_INTERRUPT;
        errno = EINTR;
        return -1;
    }

    if ((*_st_eventsys->pollset_add)(pds, npds) < 0) {
        return -1;
    }

    pq.pds = pds;
    pq.npds = npds;
    pq.thread = me;
    pq.on_ioq = 1;
    _ST_ADD_IOQ(pq);
    if (timeout != ST_UTIME_NO_TIMEOUT) {
        _ST_ADD_SLEEPQ(me, timeout);
    }
    me->state = _ST_ST_IO_WAIT;

    _ST_SWITCH_CONTEXT(me);

    n = 0;
    if (pq.on_ioq) {
        /* If we timed out, the pollq might still be on the ioq. Remove it */
        _ST_DEL_IOQ(pq);
        (*_st_eventsys->pollset_del)(pds, npds);
    } else {
        /* Count the number of ready descriptors */
        for (pd = pds; pd < epd; pd++) {
            if (pd->revents) {
                n++;
            }
        }
    }

    if (me->flags & _ST_FL_INTERRUPT) {
        me->flags &= ~_ST_FL_INTERRUPT;
        errno = EINTR;
        return -1;
    }

    return n;
}

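/*
 * Usage sketch (illustrative only, not part of the original file): waiting
 * up to one second for a descriptor to become readable. "fd" is a
 * hypothetical OS file descriptor; note that st_poll() timeouts are given
 * in microseconds (st_utime_t), not milliseconds.
 *
 *     struct pollfd pd;
 *     pd.fd = fd;
 *     pd.events = POLLIN;
 *     pd.revents = 0;
 *     if (st_poll(&pd, 1, 1000000) > 0 && (pd.revents & POLLIN)) {
 *         // the descriptor is readable
 *     }
 */
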
void _st_vp_schedule(void)
{
    _st_thread_t *trd;

    if (_ST_RUNQ.next != &_ST_RUNQ) {
        /* Pull thread off of the run queue */
        trd = _ST_THREAD_PTR(_ST_RUNQ.next);
        _ST_DEL_RUNQ(trd);
    } else {
        /* If there are no threads to run, switch to the idle thread */
        trd = _st_this_vp.idle_thread;
    }
    ST_ASSERT(trd->state == _ST_ST_RUNNABLE);

    /* Resume the thread */
    trd->state = _ST_ST_RUNNING;
    _ST_RESTORE_CONTEXT(trd);
}

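/*
 * Orientation note (describes macros defined elsewhere, e.g. in common.h/md.h,
 * not in this file): _ST_SWITCH_CONTEXT() saves the caller's context,
 * essentially a setjmp, and then enters this scheduler, while
 * _ST_RESTORE_CONTEXT() longjmps into the chosen thread. As a result this
 * function never returns to its caller in the usual sense.
 */
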
/*
 * Initialize this Virtual Processor
 */
int st_init(void)
{
    _st_thread_t *trd;

    if (_st_active_count) {
        /* Already initialized */
        return 0;
    }

    /* We can ignore return value here */
    st_set_eventsys(ST_EVENTSYS_DEFAULT);

    if (_st_io_init() < 0) {
        return -1;
    }

    memset(&_st_this_vp, 0, sizeof(_st_vp_t));

    ST_INIT_CLIST(&_ST_RUNQ);
    ST_INIT_CLIST(&_ST_IOQ);
    ST_INIT_CLIST(&_ST_ZOMBIEQ);
#ifdef DEBUG
    ST_INIT_CLIST(&_ST_THREADQ);
#endif

    if ((*_st_eventsys->init)() < 0) {
        return -1;
    }

    _st_this_vp.pagesize = getpagesize();
    _st_this_vp.last_clock = st_utime();

    /*
     * Create idle thread
     */
    _st_this_vp.idle_thread = st_thread_create(_st_idle_thread_start, NULL, 0, 0);
    if (!_st_this_vp.idle_thread) {
        return -1;
    }
    _st_this_vp.idle_thread->flags = _ST_FL_IDLE_THREAD;
    _st_active_count--;
    _ST_DEL_RUNQ(_st_this_vp.idle_thread);

    /*
     * Initialize primordial thread
     */
    trd = (_st_thread_t *) calloc(1, sizeof(_st_thread_t) + (ST_KEYS_MAX * sizeof(void *)));
    if (!trd) {
        return -1;
    }
    trd->private_data = (void **) (trd + 1);
    trd->state = _ST_ST_RUNNING;
    trd->flags = _ST_FL_PRIMORDIAL;
    _ST_SET_CURRENT_THREAD(trd);
    _st_active_count++;
#ifdef DEBUG
    _ST_ADD_THREADQ(trd);
#endif

    return 0;
}

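/*
 * Typical start-up from an application's point of view (a minimal sketch,
 * not part of this file; "do_work" is a hypothetical thread function):
 *
 *     if (st_init() < 0 || st_thread_create(do_work, NULL, 0, 0) == NULL) {
 *         exit(1);
 *     }
 *     st_thread_exit(NULL);
 *
 * Exiting the primordial thread hands control to the scheduler; the process
 * terminates from the idle thread once the last active thread has exited.
 */
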
#ifdef ST_SWITCH_CB
st_switch_cb_t st_set_switch_in_cb(st_switch_cb_t cb)
{
    st_switch_cb_t ocb = _st_this_vp.switch_in_cb;
    _st_this_vp.switch_in_cb = cb;
    return ocb;
}

st_switch_cb_t st_set_switch_out_cb(st_switch_cb_t cb)
{
    st_switch_cb_t ocb = _st_this_vp.switch_out_cb;
    _st_this_vp.switch_out_cb = cb;
    return ocb;
}
#endif

/*
 * Start function for the idle thread
 */
/* ARGSUSED */
void *_st_idle_thread_start(void *arg)
{
    _st_thread_t *me = _ST_CURRENT_THREAD();

    while (_st_active_count > 0) {
        /* Idle the vp until I/O is ready or the nearest timeout expires */
        _ST_VP_IDLE();

        /* Check sleep queue for expired threads */
        _st_vp_check_clock();

        me->state = _ST_ST_RUNNABLE;
        _ST_SWITCH_CONTEXT(me);
    }

    /* No more threads */
    exit(0);

    /* NOTREACHED */
    return NULL;
}

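/*
 * Note on the loop above: the idle thread only runs when the run queue is
 * empty. _ST_VP_IDLE() blocks inside the event system (e.g. its poll or
 * epoll dispatch routine) until I/O becomes ready or the nearest sleep-queue
 * deadline is due, so an idle virtual processor does not busy-wait.
 */
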
void st_thread_exit(void *retval)
{
    _st_thread_t *trd = _ST_CURRENT_THREAD();

    trd->retval = retval;
    _st_thread_cleanup(trd);
    _st_active_count--;
    if (trd->term) {
        /* Put thread on the zombie queue */
        trd->state = _ST_ST_ZOMBIE;
        _ST_ADD_ZOMBIEQ(trd);

        /* Notify on our termination condition variable */
        st_cond_signal(trd->term);

        /* Switch context and come back later */
        _ST_SWITCH_CONTEXT(trd);

        /* Continue the cleanup */
        st_cond_destroy(trd->term);
        trd->term = NULL;
    }

#ifdef DEBUG
    _ST_DEL_THREADQ(trd);
#endif

    if (!(trd->flags & _ST_FL_PRIMORDIAL)) {
        _st_stack_free(trd->stack);
    }

    /* Find another thread to run */
    _ST_SWITCH_CONTEXT(trd);
    /* Not going to land here */
}

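/*
 * Note: a joinable thread parks on the zombie queue here until
 * st_thread_join() below collects its return value and puts it back on the
 * run queue; only then does the code after the first _ST_SWITCH_CONTEXT()
 * above run and finish the cleanup.
 */
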
int st_thread_join(_st_thread_t *trd, void **retvalp)
{
    _st_cond_t *term = trd->term;

    /* Can't join a non-joinable thread */
    if (term == NULL) {
        errno = EINVAL;
        return -1;
    }
    if (_ST_CURRENT_THREAD() == trd) {
        errno = EDEADLK;
        return -1;
    }

    /* Multiple threads can't wait on the same joinable thread */
    if (term->wait_q.next != &term->wait_q) {
        errno = EINVAL;
        return -1;
    }

    while (trd->state != _ST_ST_ZOMBIE) {
        if (st_cond_timedwait(term, ST_UTIME_NO_TIMEOUT) != 0) {
            return -1;
        }
    }

    if (retvalp) {
        *retvalp = trd->retval;
    }

    /*
     * Remove target thread from the zombie queue and make it runnable.
     * When it gets scheduled later, it will do the clean up.
     */
    trd->state = _ST_ST_RUNNABLE;
    _ST_DEL_ZOMBIEQ(trd);
    _ST_ADD_RUNQ(trd);

    return 0;
}

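/*
 * Usage sketch (illustrative only): create a joinable thread (third argument
 * set to 1) and collect its return value through the public st_thread_t
 * handle. "worker" is a hypothetical thread function.
 *
 *     void *ret = NULL;
 *     st_thread_t t = st_thread_create(worker, NULL, 1, 0);
 *     if (t != NULL && st_thread_join(t, &ret) == 0) {
 *         // "ret" holds worker's return value (or its st_thread_exit() arg)
 *     }
 */
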
void _st_thread_main(void)
{
    _st_thread_t *trd = _ST_CURRENT_THREAD();

    /*
     * Cap the stack by zeroing out the saved return address register
     * value. This allows some debugging/profiling tools to know when
     * to stop unwinding the stack. It's a no-op on most platforms.
     */
    MD_CAP_STACK(&trd);

    /* Run thread main */
    trd->retval = (*trd->start)(trd->arg);

    /* All done, time to go away */
    st_thread_exit(trd->retval);
}

/*
 * Insert "trd" into the timeout heap, in the position
 * specified by trd->heap_index. See docs/timeout_heap.txt
 * for details about the timeout heap.
 */
static _st_thread_t **heap_insert(_st_thread_t *trd)
{
    int target = trd->heap_index;
    int s = target;
    _st_thread_t **p = &_ST_SLEEPQ;
    int bits = 0;
    int bit;
    int index = 1;

    while (s) {
        s >>= 1;
        bits++;
    }

    for (bit = bits - 2; bit >= 0; bit--) {
        if (trd->due < (*p)->due) {
            _st_thread_t *t = *p;
            trd->left = t->left;
            trd->right = t->right;
            *p = trd;
            trd->heap_index = index;
            trd = t;
        }
        index <<= 1;
        if (target & (1 << bit)) {
            p = &((*p)->right);
            index |= 1;
        } else {
            p = &((*p)->left);
        }
    }

    trd->heap_index = index;
    *p = trd;
    trd->left = trd->right = NULL;

    return p;
}

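/*
 * Worked example of the index arithmetic above: for a new heap_index of 6
 * (binary 110), "bits" becomes 3 and the loop examines bits 1 and 0 of
 * "target". Bit 1 is set, so descend right; bit 0 is clear, so descend left,
 * which is exactly the root-to-slot path for position 6 of an implicit
 * binary heap. Along the way, any node with a later "due" time is displaced
 * downward, and "index" records the heap position assigned to each element
 * as it settles.
 */
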
/*
 * Delete "trd" from the timeout heap.
 */
static void heap_delete(_st_thread_t *trd)
{
    _st_thread_t *t, **p;
    int bits = 0;
    int s, bit;

    /* First find and unlink the last heap element */
    p = &_ST_SLEEPQ;
    s = _ST_SLEEPQ_SIZE;
    while (s) {
        s >>= 1;
        bits++;
    }

    for (bit = bits - 2; bit >= 0; bit--) {
        if (_ST_SLEEPQ_SIZE & (1 << bit)) {
            p = &((*p)->right);
        } else {
            p = &((*p)->left);
        }
    }

    t = *p;
    *p = NULL;
    --_ST_SLEEPQ_SIZE;

    if (t != trd) {
        /*
         * Insert the unlinked last element in place of the element we are deleting
         */
        t->heap_index = trd->heap_index;
        p = heap_insert(t);
        t = *p;
        t->left = trd->left;
        t->right = trd->right;

        /*
         * Reestablish the heap invariant.
         */
        for (;;) {
            _st_thread_t *y; /* The younger child */
            int index_tmp;

            if (t->left == NULL) {
                break;
            } else if (t->right == NULL) {
                y = t->left;
            } else if (t->left->due < t->right->due) {
                y = t->left;
            } else {
                y = t->right;
            }

            if (t->due > y->due) {
                _st_thread_t *tl = y->left;
                _st_thread_t *tr = y->right;
                *p = y;
                if (y == t->left) {
                    y->left = t;
                    y->right = t->right;
                    p = &y->left;
                } else {
                    y->left = t->left;
                    y->right = t;
                    p = &y->right;
                }
                t->left = tl;
                t->right = tr;
                index_tmp = t->heap_index;
                t->heap_index = y->heap_index;
                y->heap_index = index_tmp;
            } else {
                break;
            }
        }
    }

    trd->left = trd->right = NULL;
}

void _st_add_sleep_q(_st_thread_t *trd, st_utime_t timeout)
{
    trd->due = _ST_LAST_CLOCK + timeout;
    trd->flags |= _ST_FL_ON_SLEEPQ;
    trd->heap_index = ++_ST_SLEEPQ_SIZE;
    heap_insert(trd);
}

void _st_del_sleep_q(_st_thread_t *trd)
{
    heap_delete(trd);
    trd->flags &= ~_ST_FL_ON_SLEEPQ;
}

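/*
 * These two helpers back the _ST_ADD_SLEEPQ()/_ST_DEL_SLEEPQ() macros used
 * throughout this file. A minimal sketch of how a timed wait is layered on
 * top of them (roughly what the library's st_usleep() does; state names as
 * in common.h):
 *
 *     me->state = _ST_ST_SLEEPING;
 *     _ST_ADD_SLEEPQ(me, usecs);    // due = last clock + usecs
 *     _ST_SWITCH_CONTEXT(me);       // resumed by _st_vp_check_clock()
 */
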
void _st_vp_check_clock(void)
{
    _st_thread_t *trd;
    st_utime_t elapsed, now;

    now = st_utime();
    elapsed = now - _ST_LAST_CLOCK;
    _ST_LAST_CLOCK = now;

    if (_st_curr_time && now - _st_last_tset > 999000) {
        _st_curr_time = time(NULL);
        _st_last_tset = now;
    }

    while (_ST_SLEEPQ != NULL) {
        trd = _ST_SLEEPQ;
        ST_ASSERT(trd->flags & _ST_FL_ON_SLEEPQ);
        if (trd->due > now) {
            break;
        }
        _ST_DEL_SLEEPQ(trd);

        /* If thread is waiting on a condition variable, set the timeout flag */
        if (trd->state == _ST_ST_COND_WAIT) {
            trd->flags |= _ST_FL_TIMEDOUT;
        }

        /* Make thread runnable */
        ST_ASSERT(!(trd->flags & _ST_FL_IDLE_THREAD));
        trd->state = _ST_ST_RUNNABLE;
        _ST_ADD_RUNQ(trd);
    }
}

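/*
 * Note: _st_curr_time is refreshed from time(2) at most about once per
 * second (999000 microseconds on the scheduler clock), and only once it has
 * been seeded with a non-zero value, i.e. once the time cache is enabled.
 * Callers that read it (e.g. via st_time()) get a cheap, slightly coarse
 * wall-clock value instead of paying for a system call on every access.
 */
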
void st_thread_interrupt(_st_thread_t *trd)
{
    /* If thread is already dead */
    if (trd->state == _ST_ST_ZOMBIE) {
        return;
    }

    trd->flags |= _ST_FL_INTERRUPT;

    if (trd->state == _ST_ST_RUNNING || trd->state == _ST_ST_RUNNABLE) {
        return;
    }

    if (trd->flags & _ST_FL_ON_SLEEPQ) {
        _ST_DEL_SLEEPQ(trd);
    }

    /* Make thread runnable */
    trd->state = _ST_ST_RUNNABLE;
    _ST_ADD_RUNQ(trd);
}

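/*
 * Usage sketch (illustrative only): interrupting a thread that is blocked in
 * an st_* call makes that call fail with EINTR, as st_poll() above checks
 * after it is resumed. "t" is a hypothetical thread handle.
 *
 *     st_thread_interrupt(t);   // t's pending st_poll()/sleep returns -1, errno == EINTR
 */
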
_st_thread_t *st_thread_create(void *(*start)(void *arg), void *arg, int joinable, int stk_size)
{
    _st_thread_t *trd;
    _st_stack_t *stack;
    void **ptds;
    char *sp;

    /* Adjust stack size */
    if (stk_size == 0) {
        stk_size = ST_DEFAULT_STACK_SIZE;
    }
    stk_size = ((stk_size + _ST_PAGE_SIZE - 1) / _ST_PAGE_SIZE) * _ST_PAGE_SIZE;
    stack = _st_stack_new(stk_size);
    if (!stack) {
        return NULL;
    }

    /* Allocate thread object and per-thread data off the stack */
#if defined (MD_STACK_GROWS_DOWN)
    sp = stack->stk_top;
    /*
     * The stack segment is split in the middle. The upper half is used
     * as backing store for the register stack which grows upward.
     * The lower half is used for the traditional memory stack which
     * grows downward. Both stacks start in the middle and grow outward
     * from each other.
     */
    /**
     * The comment below is by winlin.
     *
     * The stack, as a public structure:
     *     +--------------------------------------------------------------+
     *     |                             stack                            |
     *     +--------------------------------------------------------------+
     *     bottom                                                       top
     *
     * The code below uses the stack as:
     *     +-----------------+-----------------+-------------+------------+
     *     | stack of thread | pad+align(128B+)| thread(336B)| keys(128B) |
     *     +-----------------+-----------------+-------------+------------+
     *     bottom            sp                trd           ptds      top
     *                       (context[0].__jmpbuf.sp)        (private_data)
     */
    sp = sp - (ST_KEYS_MAX * sizeof(void *));
    ptds = (void **) sp;
    sp = sp - sizeof(_st_thread_t);
    trd = (_st_thread_t *) sp;

    /* Make stack 64-byte aligned */
    if ((unsigned long)sp & 0x3f) {
        sp = sp - ((unsigned long)sp & 0x3f);
    }
    stack->sp = sp - _ST_STACK_PAD_SIZE;
#else
#error "Only stacks that grow down are supported"
#endif

    memset(trd, 0, sizeof(_st_thread_t));
    memset(ptds, 0, ST_KEYS_MAX * sizeof(void *));

    /* Initialize thread */
    trd->private_data = ptds;
    trd->stack = stack;
    trd->start = start;
    trd->arg = arg;

    // by winlin: the MD_INIT_CONTEXT macro expanded inline
#if defined(__mips__)
    MD_SETJMP((trd)->context);
    trd->context[0].__jmpbuf[0].__pc = (__ptr_t) _st_thread_main;
    trd->context[0].__jmpbuf[0].__sp = stack->sp;
#else
    if (MD_SETJMP((trd)->context)) {
        _st_thread_main();
    }
    MD_GET_SP(trd) = (long) (stack->sp);
#endif

    /* If thread is joinable, allocate a termination condition variable */
    if (joinable) {
        trd->term = st_cond_new();
        if (trd->term == NULL) {
            _st_stack_free(trd->stack);
            return NULL;
        }
    }

    /* Make thread runnable */
    trd->state = _ST_ST_RUNNABLE;
    _st_active_count++;
    _ST_ADD_RUNQ(trd);
#ifdef DEBUG
    _ST_ADD_THREADQ(trd);
#endif

    return trd;
}

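/*
 * Example of the stack sizing above: with a 4096-byte page, a requested
 * stk_size of 10000 is rounded up to 12288 (three pages); passing 0 selects
 * ST_DEFAULT_STACK_SIZE. The thread descriptor and the per-thread key array
 * are then carved out of the top of that allocation, as the layout comment
 * inside the function shows.
 */
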
_st_thread_t *st_thread_self(void)
{
    return _ST_CURRENT_THREAD();
}

#ifdef DEBUG
/* ARGSUSED */
void _st_show_thread_stack(_st_thread_t *trd, const char *messg)
{
}

/* To be set from debugger */
int _st_iterate_threads_flag = 0;

void _st_iterate_threads(void)
{
    static _st_thread_t *trd = NULL;
    static jmp_buf orig_jb, save_jb;
    _st_clist_t *q;

    if (!_st_iterate_threads_flag) {
        if (trd) {
            memcpy(trd->context, save_jb, sizeof(jmp_buf));
            MD_LONGJMP(orig_jb, 1);
        }
        return;
    }

    if (trd) {
        memcpy(trd->context, save_jb, sizeof(jmp_buf));
        _st_show_thread_stack(trd, NULL);
    } else {
        if (MD_SETJMP(orig_jb)) {
            _st_iterate_threads_flag = 0;
            trd = NULL;
            _st_show_thread_stack(trd, "Iteration completed");
            return;
        }
        trd = _ST_CURRENT_THREAD();
        _st_show_thread_stack(trd, "Iteration started");
    }

    q = trd->tlink.next;
    if (q == &_ST_THREADQ) {
        q = q->next;
    }
    ST_ASSERT(q != &_ST_THREADQ);
    trd = _ST_THREAD_THREADQ_PTR(q);
    if (trd == _ST_CURRENT_THREAD()) {
        MD_LONGJMP(orig_jb, 1);
    }

    memcpy(save_jb, trd->context, sizeof(jmp_buf));
    MD_LONGJMP(trd->context, 1);
}
#endif /* DEBUG */