/* sched.c */
/*
 * The contents of this file are subject to the Mozilla Public
 * License Version 1.1 (the "License"); you may not use this file
 * except in compliance with the License. You may obtain a copy of
 * the License at http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS
 * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * rights and limitations under the License.
 *
 * The Original Code is the Netscape Portable Runtime library.
 *
 * The Initial Developer of the Original Code is Netscape
 * Communications Corporation. Portions created by Netscape are
 * Copyright (C) 1994-2000 Netscape Communications Corporation. All
 * Rights Reserved.
 *
 * Contributor(s): Silicon Graphics, Inc.
 *
 * Portions created by SGI are Copyright (C) 2000-2001 Silicon
 * Graphics, Inc. All Rights Reserved.
 *
 * Alternatively, the contents of this file may be used under the
 * terms of the GNU General Public License Version 2 or later (the
 * "GPL"), in which case the provisions of the GPL are applicable
 * instead of those above. If you wish to allow use of your
 * version of this file only under the terms of the GPL and not to
 * allow others to use your version of this file under the MPL,
 * indicate your decision by deleting the provisions above and
 * replace them with the notice and other provisions required by
 * the GPL. If you do not delete the provisions above, a recipient
 * may use your version of this file under either the MPL or the
 * GPL.
 */

/*
 * This file is derived directly from Netscape Communications Corporation,
 * and consists of extensive modifications made during the year(s) 1999-2000.
 */
  40. #include <stdlib.h>
  41. #include <unistd.h>
  42. #include <fcntl.h>
  43. #include <string.h>
  44. #include <time.h>
  45. #include <errno.h>
  46. #include "common.h"
  47. /* merge from https://github.com/toffaletti/state-threads/commit/7f57fc9acc05e657bca1223f1e5b9b1a45ed929b */
  48. #ifndef NVALGRIND
  49. #include <valgrind/valgrind.h>
  50. #endif
  51. // Global stat.
  52. #if defined(DEBUG) && defined(DEBUG_STATS)
  53. unsigned long long _st_stat_sched_15ms = 0;
  54. unsigned long long _st_stat_sched_20ms = 0;
  55. unsigned long long _st_stat_sched_25ms = 0;
  56. unsigned long long _st_stat_sched_30ms = 0;
  57. unsigned long long _st_stat_sched_35ms = 0;
  58. unsigned long long _st_stat_sched_40ms = 0;
  59. unsigned long long _st_stat_sched_80ms = 0;
  60. unsigned long long _st_stat_sched_160ms = 0;
  61. unsigned long long _st_stat_sched_s = 0;
  62. unsigned long long _st_stat_thread_run = 0;
  63. unsigned long long _st_stat_thread_idle = 0;
  64. unsigned long long _st_stat_thread_yield = 0;
  65. unsigned long long _st_stat_thread_yield2 = 0;
  66. #endif
  67. /* Global data */
  68. _st_vp_t _st_this_vp; /* This VP */
  69. _st_thread_t *_st_this_thread; /* Current thread */
  70. int _st_active_count = 0; /* Active thread count */
  71. time_t _st_curr_time = 0; /* Current time as returned by time(2) */
  72. st_utime_t _st_last_tset; /* Last time it was fetched */
  73. int st_poll(struct pollfd *pds, int npds, st_utime_t timeout)
  74. {
  75. struct pollfd *pd;
  76. struct pollfd *epd = pds + npds;
  77. _st_pollq_t pq;
  78. _st_thread_t *me = _ST_CURRENT_THREAD();
  79. int n;
  80. if (me->flags & _ST_FL_INTERRUPT) {
  81. me->flags &= ~_ST_FL_INTERRUPT;
  82. errno = EINTR;
  83. return -1;
  84. }
  85. if ((*_st_eventsys->pollset_add)(pds, npds) < 0)
  86. return -1;
  87. pq.pds = pds;
  88. pq.npds = npds;
  89. pq.thread = me;
  90. pq.on_ioq = 1;
  91. _ST_ADD_IOQ(pq);
  92. if (timeout != ST_UTIME_NO_TIMEOUT)
  93. _ST_ADD_SLEEPQ(me, timeout);
  94. me->state = _ST_ST_IO_WAIT;
  95. _ST_SWITCH_CONTEXT(me);
  96. n = 0;
  97. if (pq.on_ioq) {
  98. /* If we timed out, the pollq might still be on the ioq. Remove it */
  99. _ST_DEL_IOQ(pq);
  100. (*_st_eventsys->pollset_del)(pds, npds);
  101. } else {
  102. /* Count the number of ready descriptors */
  103. for (pd = pds; pd < epd; pd++) {
  104. if (pd->revents)
  105. n++;
  106. }
  107. }
  108. if (me->flags & _ST_FL_INTERRUPT) {
  109. me->flags &= ~_ST_FL_INTERRUPT;
  110. errno = EINTR;
  111. return -1;
  112. }
  113. return n;
  114. }
  115. void _st_vp_schedule(void)
  116. {
  117. _st_thread_t *thread;
  118. if (_ST_RUNQ.next != &_ST_RUNQ) {
  119. #if defined(DEBUG) && defined(DEBUG_STATS)
  120. ++_st_stat_thread_run;
  121. #endif
  122. /* Pull thread off of the run queue */
  123. thread = _ST_THREAD_PTR(_ST_RUNQ.next);
  124. _ST_DEL_RUNQ(thread);
  125. } else {
  126. #if defined(DEBUG) && defined(DEBUG_STATS)
  127. ++_st_stat_thread_idle;
  128. #endif
  129. /* If there are no threads to run, switch to the idle thread */
  130. thread = _st_this_vp.idle_thread;
  131. }
  132. ST_ASSERT(thread->state == _ST_ST_RUNNABLE);
  133. /* Resume the thread */
  134. thread->state = _ST_ST_RUNNING;
  135. _ST_RESTORE_CONTEXT(thread);
  136. }
  137. /*
  138. * Initialize this Virtual Processor
  139. */
  140. int st_init(void)
  141. {
  142. _st_thread_t *thread;
  143. if (_st_active_count) {
  144. /* Already initialized */
  145. return 0;
  146. }
  147. /* We can ignore return value here */
  148. st_set_eventsys(ST_EVENTSYS_DEFAULT);
  149. if (_st_io_init() < 0)
  150. return -1;
  151. memset(&_st_this_vp, 0, sizeof(_st_vp_t));
  152. ST_INIT_CLIST(&_ST_RUNQ);
  153. ST_INIT_CLIST(&_ST_IOQ);
  154. ST_INIT_CLIST(&_ST_ZOMBIEQ);
  155. #ifdef DEBUG
  156. ST_INIT_CLIST(&_ST_THREADQ);
  157. #endif
  158. if ((*_st_eventsys->init)() < 0)
  159. return -1;
  160. _st_this_vp.pagesize = getpagesize();
  161. _st_this_vp.last_clock = st_utime();
  162. /*
  163. * Create idle thread
  164. */
  165. _st_this_vp.idle_thread = st_thread_create(_st_idle_thread_start, NULL, 0, 0);
  166. if (!_st_this_vp.idle_thread)
  167. return -1;
  168. _st_this_vp.idle_thread->flags = _ST_FL_IDLE_THREAD;
  169. _st_active_count--;
  170. _ST_DEL_RUNQ(_st_this_vp.idle_thread);
  171. /*
  172. * Initialize primordial thread
  173. */
  174. thread = (_st_thread_t *) calloc(1, sizeof(_st_thread_t) + (ST_KEYS_MAX * sizeof(void *)));
  175. if (!thread)
  176. return -1;
  177. thread->private_data = (void **) (thread + 1);
  178. thread->state = _ST_ST_RUNNING;
  179. thread->flags = _ST_FL_PRIMORDIAL;
  180. _ST_SET_CURRENT_THREAD(thread);
  181. _st_active_count++;
  182. #ifdef DEBUG
  183. _ST_ADD_THREADQ(thread);
  184. #endif
  185. return 0;
  186. }
  187. #ifdef ST_SWITCH_CB
  188. st_switch_cb_t st_set_switch_in_cb(st_switch_cb_t cb)
  189. {
  190. st_switch_cb_t ocb = _st_this_vp.switch_in_cb;
  191. _st_this_vp.switch_in_cb = cb;
  192. return ocb;
  193. }
  194. st_switch_cb_t st_set_switch_out_cb(st_switch_cb_t cb)
  195. {
  196. st_switch_cb_t ocb = _st_this_vp.switch_out_cb;
  197. _st_this_vp.switch_out_cb = cb;
  198. return ocb;
  199. }
  200. #endif
  201. /*
  202. * Start function for the idle thread
  203. */
  204. /* ARGSUSED */
  205. void *_st_idle_thread_start(void *arg)
  206. {
  207. _st_thread_t *me = _ST_CURRENT_THREAD();
  208. while (_st_active_count > 0) {
  209. /* Idle vp till I/O is ready or the smallest timeout expired */
  210. _ST_VP_IDLE();
  211. /* Check sleep queue for expired threads */
  212. _st_vp_check_clock();
  213. me->state = _ST_ST_RUNNABLE;
  214. _ST_SWITCH_CONTEXT(me);
  215. }
  216. /* No more threads */
  217. exit(0);
  218. /* NOTREACHED */
  219. return NULL;
  220. }
  221. void st_thread_exit(void *retval)
  222. {
  223. _st_thread_t *thread = _ST_CURRENT_THREAD();
  224. thread->retval = retval;
  225. _st_thread_cleanup(thread);
  226. _st_active_count--;
  227. if (thread->term) {
  228. /* Put thread on the zombie queue */
  229. thread->state = _ST_ST_ZOMBIE;
  230. _ST_ADD_ZOMBIEQ(thread);
  231. /* Notify on our termination condition variable */
  232. st_cond_signal(thread->term);
  233. /* Switch context and come back later */
  234. _ST_SWITCH_CONTEXT(thread);
  235. /* Continue the cleanup */
  236. st_cond_destroy(thread->term);
  237. thread->term = NULL;
  238. }
  239. #ifdef DEBUG
  240. _ST_DEL_THREADQ(thread);
  241. #endif
  242. /* merge from https://github.com/toffaletti/state-threads/commit/7f57fc9acc05e657bca1223f1e5b9b1a45ed929b */
  243. #ifndef NVALGRIND
  244. if (!(thread->flags & _ST_FL_PRIMORDIAL)) {
  245. VALGRIND_STACK_DEREGISTER(thread->stack->valgrind_stack_id);
  246. }
  247. #endif
  248. if (!(thread->flags & _ST_FL_PRIMORDIAL))
  249. _st_stack_free(thread->stack);
  250. /* Find another thread to run */
  251. _ST_SWITCH_CONTEXT(thread);
  252. /* Not going to land here */
  253. }
  254. int st_thread_join(_st_thread_t *thread, void **retvalp)
  255. {
  256. _st_cond_t *term = thread->term;
  257. /* Can't join a non-joinable thread */
  258. if (term == NULL) {
  259. errno = EINVAL;
  260. return -1;
  261. }
  262. if (_ST_CURRENT_THREAD() == thread) {
  263. errno = EDEADLK;
  264. return -1;
  265. }
  266. /* Multiple threads can't wait on the same joinable thread */
  267. if (term->wait_q.next != &term->wait_q) {
  268. errno = EINVAL;
  269. return -1;
  270. }
  271. while (thread->state != _ST_ST_ZOMBIE) {
  272. if (st_cond_timedwait(term, ST_UTIME_NO_TIMEOUT) != 0)
  273. return -1;
  274. }
  275. if (retvalp)
  276. *retvalp = thread->retval;
  277. /*
  278. * Remove target thread from the zombie queue and make it runnable.
  279. * When it gets scheduled later, it will do the clean up.
  280. */
  281. thread->state = _ST_ST_RUNNABLE;
  282. _ST_DEL_ZOMBIEQ(thread);
  283. _ST_ADD_RUNQ(thread);
  284. return 0;
  285. }
  286. void _st_thread_main(void)
  287. {
  288. _st_thread_t *thread = _ST_CURRENT_THREAD();
  289. /*
  290. * Cap the stack by zeroing out the saved return address register
  291. * value. This allows some debugging/profiling tools to know when
  292. * to stop unwinding the stack. It's a no-op on most platforms.
  293. */
  294. MD_CAP_STACK(&thread);
  295. /* Run thread main */
  296. thread->retval = (*thread->start)(thread->arg);
  297. /* All done, time to go away */
  298. st_thread_exit(thread->retval);
  299. }
  300. /*
  301. * Insert "thread" into the timeout heap, in the position
  302. * specified by thread->heap_index. See docs/timeout_heap.txt
  303. * for details about the timeout heap.
  304. */
  305. static _st_thread_t **heap_insert(_st_thread_t *thread) {
  306. int target = thread->heap_index;
  307. int s = target;
  308. _st_thread_t **p = &_ST_SLEEPQ;
  309. int bits = 0;
  310. int bit;
  311. int index = 1;
  312. while (s) {
  313. s >>= 1;
  314. bits++;
  315. }
  316. for (bit = bits - 2; bit >= 0; bit--) {
  317. if (thread->due < (*p)->due) {
  318. _st_thread_t *t = *p;
  319. thread->left = t->left;
  320. thread->right = t->right;
  321. *p = thread;
  322. thread->heap_index = index;
  323. thread = t;
  324. }
  325. index <<= 1;
  326. if (target & (1 << bit)) {
  327. p = &((*p)->right);
  328. index |= 1;
  329. } else {
  330. p = &((*p)->left);
  331. }
  332. }
  333. thread->heap_index = index;
  334. *p = thread;
  335. thread->left = thread->right = NULL;
  336. return p;
  337. }
  338. /*
  339. * Delete "thread" from the timeout heap.
  340. */
  341. static void heap_delete(_st_thread_t *thread) {
  342. _st_thread_t *t, **p;
  343. int bits = 0;
  344. int s, bit;
  345. /* First find and unlink the last heap element */
  346. p = &_ST_SLEEPQ;
  347. s = _ST_SLEEPQ_SIZE;
  348. while (s) {
  349. s >>= 1;
  350. bits++;
  351. }
  352. for (bit = bits - 2; bit >= 0; bit--) {
  353. if (_ST_SLEEPQ_SIZE & (1 << bit)) {
  354. p = &((*p)->right);
  355. } else {
  356. p = &((*p)->left);
  357. }
  358. }
  359. t = *p;
  360. *p = NULL;
  361. --_ST_SLEEPQ_SIZE;
  362. if (t != thread) {
  363. /*
  364. * Insert the unlinked last element in place of the element we are deleting
  365. */
  366. t->heap_index = thread->heap_index;
  367. p = heap_insert(t);
  368. t = *p;
  369. t->left = thread->left;
  370. t->right = thread->right;
  371. /*
  372. * Reestablish the heap invariant.
  373. */
  374. for (;;) {
  375. _st_thread_t *y; /* The younger child */
  376. int index_tmp;
  377. if (t->left == NULL)
  378. break;
  379. else if (t->right == NULL)
  380. y = t->left;
  381. else if (t->left->due < t->right->due)
  382. y = t->left;
  383. else
  384. y = t->right;
  385. if (t->due > y->due) {
  386. _st_thread_t *tl = y->left;
  387. _st_thread_t *tr = y->right;
  388. *p = y;
  389. if (y == t->left) {
  390. y->left = t;
  391. y->right = t->right;
  392. p = &y->left;
  393. } else {
  394. y->left = t->left;
  395. y->right = t;
  396. p = &y->right;
  397. }
  398. t->left = tl;
  399. t->right = tr;
  400. index_tmp = t->heap_index;
  401. t->heap_index = y->heap_index;
  402. y->heap_index = index_tmp;
  403. } else {
  404. break;
  405. }
  406. }
  407. }
  408. thread->left = thread->right = NULL;
  409. }
  410. void _st_add_sleep_q(_st_thread_t *thread, st_utime_t timeout)
  411. {
  412. thread->due = _ST_LAST_CLOCK + timeout;
  413. thread->flags |= _ST_FL_ON_SLEEPQ;
  414. thread->heap_index = ++_ST_SLEEPQ_SIZE;
  415. heap_insert(thread);
  416. }
  417. void _st_del_sleep_q(_st_thread_t *thread)
  418. {
  419. heap_delete(thread);
  420. thread->flags &= ~_ST_FL_ON_SLEEPQ;
  421. }
  422. void _st_vp_check_clock(void)
  423. {
  424. _st_thread_t *thread;
  425. st_utime_t elapsed, now;
  426. now = st_utime();
  427. elapsed = now < _ST_LAST_CLOCK? 0 : now - _ST_LAST_CLOCK; // Might step back.
  428. _ST_LAST_CLOCK = now;
  429. #if defined(DEBUG) && defined(DEBUG_STATS)
  430. if (elapsed <= 10000) {
  431. ++_st_stat_sched_15ms;
  432. } else if (elapsed <= 21000) {
  433. ++_st_stat_sched_20ms;
  434. } else if (elapsed <= 25000) {
  435. ++_st_stat_sched_25ms;
  436. } else if (elapsed <= 30000) {
  437. ++_st_stat_sched_30ms;
  438. } else if (elapsed <= 35000) {
  439. ++_st_stat_sched_35ms;
  440. } else if (elapsed <= 40000) {
  441. ++_st_stat_sched_40ms;
  442. } else if (elapsed <= 80000) {
  443. ++_st_stat_sched_80ms;
  444. } else if (elapsed <= 160000) {
  445. ++_st_stat_sched_160ms;
  446. } else {
  447. ++_st_stat_sched_s;
  448. }
  449. #endif
  450. if (_st_curr_time && now - _st_last_tset > 999000) {
  451. _st_curr_time = time(NULL);
  452. _st_last_tset = now;
  453. }
  454. while (_ST_SLEEPQ != NULL) {
  455. thread = _ST_SLEEPQ;
  456. ST_ASSERT(thread->flags & _ST_FL_ON_SLEEPQ);
  457. if (thread->due > now)
  458. break;
  459. _ST_DEL_SLEEPQ(thread);
  460. /* If thread is waiting on condition variable, set the time out flag */
  461. if (thread->state == _ST_ST_COND_WAIT)
  462. thread->flags |= _ST_FL_TIMEDOUT;
  463. /* Make thread runnable */
  464. ST_ASSERT(!(thread->flags & _ST_FL_IDLE_THREAD));
  465. thread->state = _ST_ST_RUNNABLE;
  466. // Insert at the head of RunQ, to execute timer first.
  467. _ST_INSERT_RUNQ(thread);
  468. }
  469. }
  470. void st_thread_yield()
  471. {
  472. _st_thread_t *me = _ST_CURRENT_THREAD();
  473. #if defined(DEBUG) && defined(DEBUG_STATS)
  474. ++_st_stat_thread_yield;
  475. #endif
  476. /* Check sleep queue for expired threads */
  477. _st_vp_check_clock();
  478. // If not thread in RunQ to yield to, ignore and continue to run.
  479. if (_ST_RUNQ.next == &_ST_RUNQ) {
  480. return;
  481. }
  482. #if defined(DEBUG) && defined(DEBUG_STATS)
  483. ++_st_stat_thread_yield2;
  484. #endif
  485. // Append thread to the tail of RunQ, we will back after all threads executed.
  486. me->state = _ST_ST_RUNNABLE;
  487. _ST_ADD_RUNQ(me);
  488. // Yield to other threads in the RunQ.
  489. _ST_SWITCH_CONTEXT(me);
  490. }
  491. void st_thread_interrupt(_st_thread_t *thread)
  492. {
  493. /* If thread is already dead */
  494. if (thread->state == _ST_ST_ZOMBIE)
  495. return;
  496. thread->flags |= _ST_FL_INTERRUPT;
  497. if (thread->state == _ST_ST_RUNNING || thread->state == _ST_ST_RUNNABLE)
  498. return;
  499. if (thread->flags & _ST_FL_ON_SLEEPQ)
  500. _ST_DEL_SLEEPQ(thread);
  501. /* Make thread runnable */
  502. thread->state = _ST_ST_RUNNABLE;
  503. _ST_ADD_RUNQ(thread);
  504. }
  505. /* Merge from https://github.com/michaeltalyansky/state-threads/commit/cce736426c2320ffec7c9820df49ee7a18ae638c */
  506. #if defined(__arm__) && !defined(MD_USE_BUILTIN_SETJMP) && __GLIBC_MINOR__ >= 19
  507. extern unsigned long __pointer_chk_guard;
  508. #define PTR_MANGLE(var) \
  509. (var) = (__typeof (var)) ((unsigned long) (var) ^ __pointer_chk_guard)
  510. #define PTR_DEMANGLE(var) PTR_MANGLE (var)
  511. #endif
  512. _st_thread_t *st_thread_create(void *(*start)(void *arg), void *arg, int joinable, int stk_size)
  513. {
  514. _st_thread_t *thread;
  515. _st_stack_t *stack;
  516. void **ptds;
  517. char *sp;
  518. #ifdef __ia64__
  519. char *bsp;
  520. #endif
  521. /* Adjust stack size */
  522. if (stk_size == 0)
  523. stk_size = ST_DEFAULT_STACK_SIZE;
  524. stk_size = ((stk_size + _ST_PAGE_SIZE - 1) / _ST_PAGE_SIZE) * _ST_PAGE_SIZE;
  525. stack = _st_stack_new(stk_size);
  526. if (!stack)
  527. return NULL;
  528. /* Allocate thread object and per-thread data off the stack */
  529. #if defined (MD_STACK_GROWS_DOWN)
  530. sp = stack->stk_top;
  531. #ifdef __ia64__
  532. /*
  533. * The stack segment is split in the middle. The upper half is used
  534. * as backing store for the register stack which grows upward.
  535. * The lower half is used for the traditional memory stack which
  536. * grows downward. Both stacks start in the middle and grow outward
  537. * from each other.
  538. */
  539. sp -= (stk_size >> 1);
  540. bsp = sp;
  541. /* Make register stack 64-byte aligned */
  542. if ((unsigned long)bsp & 0x3f)
  543. bsp = bsp + (0x40 - ((unsigned long)bsp & 0x3f));
  544. stack->bsp = bsp + _ST_STACK_PAD_SIZE;
  545. #endif
  546. sp = sp - (ST_KEYS_MAX * sizeof(void *));
  547. ptds = (void **) sp;
  548. sp = sp - sizeof(_st_thread_t);
  549. thread = (_st_thread_t *) sp;
  550. /* Make stack 64-byte aligned */
  551. if ((unsigned long)sp & 0x3f)
  552. sp = sp - ((unsigned long)sp & 0x3f);
  553. stack->sp = sp - _ST_STACK_PAD_SIZE;
  554. #elif defined (MD_STACK_GROWS_UP)
  555. sp = stack->stk_bottom;
  556. thread = (_st_thread_t *) sp;
  557. sp = sp + sizeof(_st_thread_t);
  558. ptds = (void **) sp;
  559. sp = sp + (ST_KEYS_MAX * sizeof(void *));
  560. /* Make stack 64-byte aligned */
  561. if ((unsigned long)sp & 0x3f)
  562. sp = sp + (0x40 - ((unsigned long)sp & 0x3f));
  563. stack->sp = sp + _ST_STACK_PAD_SIZE;
  564. #else
  565. #error Unknown OS
  566. #endif
  567. memset(thread, 0, sizeof(_st_thread_t));
  568. memset(ptds, 0, ST_KEYS_MAX * sizeof(void *));
  569. /* Initialize thread */
  570. thread->private_data = ptds;
  571. thread->stack = stack;
  572. thread->start = start;
  573. thread->arg = arg;
  574. #ifndef __ia64__
  575. /* Merge from https://github.com/michaeltalyansky/state-threads/commit/cce736426c2320ffec7c9820df49ee7a18ae638c */
  576. #if defined(__arm__) && !defined(MD_USE_BUILTIN_SETJMP) && __GLIBC_MINOR__ >= 19
  577. volatile void * lsp = PTR_MANGLE(stack->sp);
  578. if (_setjmp ((thread)->context))
  579. _st_thread_main();
  580. (thread)->context[0].__jmpbuf[8] = (long) (lsp);
  581. #else
  582. _ST_INIT_CONTEXT(thread, stack->sp, _st_thread_main);
  583. #endif
  584. #else
  585. _ST_INIT_CONTEXT(thread, stack->sp, stack->bsp, _st_thread_main);
  586. #endif
  587. /* If thread is joinable, allocate a termination condition variable */
  588. if (joinable) {
  589. thread->term = st_cond_new();
  590. if (thread->term == NULL) {
  591. _st_stack_free(thread->stack);
  592. return NULL;
  593. }
  594. }
  595. /* Make thread runnable */
  596. thread->state = _ST_ST_RUNNABLE;
  597. _st_active_count++;
  598. _ST_ADD_RUNQ(thread);
  599. #ifdef DEBUG
  600. _ST_ADD_THREADQ(thread);
  601. #endif
  602. /* merge from https://github.com/toffaletti/state-threads/commit/7f57fc9acc05e657bca1223f1e5b9b1a45ed929b */
  603. #ifndef NVALGRIND
  604. if (!(thread->flags & _ST_FL_PRIMORDIAL)) {
  605. thread->stack->valgrind_stack_id = VALGRIND_STACK_REGISTER(thread->stack->stk_top, thread->stack->stk_bottom);
  606. }
  607. #endif
  608. return thread;
  609. }
  610. _st_thread_t *st_thread_self(void)
  611. {
  612. return _ST_CURRENT_THREAD();
  613. }
  614. #ifdef DEBUG
  615. /* ARGSUSED */
  616. void _st_show_thread_stack(_st_thread_t *thread, const char *messg)
  617. {
  618. }
  619. /* To be set from debugger */
  620. int _st_iterate_threads_flag = 0;
  621. void _st_iterate_threads(void)
  622. {
  623. static _st_thread_t *thread = NULL;
  624. static jmp_buf orig_jb, save_jb;
  625. _st_clist_t *q;
  626. if (!_st_iterate_threads_flag) {
  627. if (thread) {
  628. memcpy(thread->context, save_jb, sizeof(jmp_buf));
  629. MD_LONGJMP(orig_jb, 1);
  630. }
  631. return;
  632. }
  633. if (thread) {
  634. memcpy(thread->context, save_jb, sizeof(jmp_buf));
  635. _st_show_thread_stack(thread, NULL);
  636. } else {
  637. if (MD_SETJMP(orig_jb)) {
  638. _st_iterate_threads_flag = 0;
  639. thread = NULL;
  640. _st_show_thread_stack(thread, "Iteration completed");
  641. return;
  642. }
  643. thread = _ST_CURRENT_THREAD();
  644. _st_show_thread_stack(thread, "Iteration started");
  645. }
  646. q = thread->tlink.next;
  647. if (q == &_ST_THREADQ)
  648. q = q->next;
  649. ST_ASSERT(q != &_ST_THREADQ);
  650. thread = _ST_THREAD_THREADQ_PTR(q);
  651. if (thread == _ST_CURRENT_THREAD())
  652. MD_LONGJMP(orig_jb, 1);
  653. memcpy(save_jb, thread->context, sizeof(jmp_buf));
  654. MD_LONGJMP(thread->context, 1);
  655. }
  656. #endif /* DEBUG */