/* ks_hash.c */
/*
 * Copyright (c) 2002, Christopher Clark
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * * Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * * Neither the name of the original author; nor the names of any contributors
 * may be used to endorse or promote products derived from this software
 * without specific prior written permission.
 *
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
 * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "libks/ks.h"
#include "libks/ks_hash.h"
/* One hash-table entry: a node in a bucket's singly linked chain. */
struct entry
{
	void *k, *v;                     /* key and value pointers; ownership governed by flags */
	unsigned int h;                  /* cached hash of k, avoids rehashing on lookup/expand */
	ks_hash_flag_t flags;            /* per-entry flags (e.g. free key/value on removal) */
	ks_hash_destructor_t destructor; /* per-entry value destructor, takes precedence over the table's */
	struct entry *next;              /* next entry in the same bucket chain */
};
/* Iterator over all entries of a ks_hash; created by ks_hash_first(). */
struct ks_hash_iterator {
	unsigned int pos;   /* current bucket index */
	ks_locked_t locked; /* KS_READLOCKED when the iterator holds the table's read lock */
	struct entry *e;    /* current entry within bucket pos */
	struct ks_hash *h;  /* table being iterated */
};
/* Chained hash table with prime-sized bucket array and optional locking. */
struct ks_hash {
	unsigned int tablelength;       /* number of buckets (always one of primes[]) */
	struct entry **table;           /* bucket array of entry chains */
	unsigned int entrycount;        /* number of stored entries */
	unsigned int loadlimit;         /* expand when entrycount exceeds this */
	unsigned int primeindex;        /* index into primes[] for tablelength */
	unsigned int (*hashfn) (void *k);     /* key hash function */
	int (*eqfn) (void *k1, void *k2);     /* key equality function */
	ks_hash_flag_t flags;           /* default flags applied to inserts */
	ks_hash_destructor_t destructor;/* table-wide value destructor fallback */
	ks_rwl_t *rwl;                  /* created only with KS_HASH_FLAG_RWLOCK */
	ks_mutex_t *mutex;              /* created unless KS_HASH_FLAG_NOLOCK */
	uint32_t readers;               /* count of outstanding read locks */
	ks_size_t keysize;              /* fixed key size for non-string key modes */
	ks_hash_mode_t mode;            /* keying mode (string, int, ptr, uuid, ...) */
};
  65. /*****************************************************************************/
  66. /*****************************************************************************/
  67. static inline unsigned int hash(ks_hash_t *h, const void *k)
  68. {
  69. unsigned int i;
  70. switch (h->mode)
  71. {
  72. case KS_HASH_MODE_ARBITRARY:
  73. i = ks_hash_default_arbitrary(k, h->keysize, 13);
  74. break;
  75. case KS_HASH_MODE_INT:
  76. case KS_HASH_MODE_INT64:
  77. case KS_HASH_MODE_PTR:
  78. i = h->hashfn((void *)&k);
  79. break;
  80. case KS_HASH_MODE_UUID:
  81. default:
  82. i = h->hashfn((void *)k);
  83. break;
  84. }
  85. /* Aim to protect against poor hash functions by adding logic here
  86. * - logic taken from java 1.4 hash source */
  87. i += ~(i << 9);
  88. i ^= ((i >> 14) | (i << 18)); /* >>> */
  89. i += (i << 4);
  90. i ^= ((i >> 10) | (i << 22)); /* >>> */
  91. return i;
  92. }
  93. /*****************************************************************************/
  94. /* indexFor */
  95. static __inline__ unsigned int
  96. indexFor(unsigned int tablelength, unsigned int hashvalue) {
  97. return (hashvalue % tablelength);
  98. }
/* Only works if tablelength == 2^N */
/*static inline unsigned int
indexFor(unsigned int tablelength, unsigned int hashvalue)
{
	return (hashvalue & (tablelength - 1u));
}
*/

/*****************************************************************************/
//#define freekey(X) free(X)

/*
  Credit for primes table: Aaron Krowne
  http://br.endernet.org/~akrowne/
  http://planetmath.org/encyclopedia/GoodKs_HashPrimes.html
*/
  113. static const unsigned int primes[] = {
  114. 53, 97, 193, 389,
  115. 769, 1543, 3079, 6151,
  116. 12289, 24593, 49157, 98317,
  117. 196613, 393241, 786433, 1572869,
  118. 3145739, 6291469, 12582917, 25165843,
  119. 50331653, 100663319, 201326611, 402653189,
  120. 805306457, 1610612741
  121. };
  122. const unsigned int prime_table_length = sizeof(primes)/sizeof(primes[0]);
  123. const float max_load_factor = 0.65f;
  124. /*****************************************************************************/
  125. static void ks_hash_cleanup(void *ptr, void *arg, ks_pool_cleanup_action_t action, ks_pool_cleanup_type_t type)
  126. {
  127. //ks_hash_t *hash = (ks_hash_t *) ptr;
  128. switch(action) {
  129. case KS_MPCL_ANNOUNCE:
  130. break;
  131. case KS_MPCL_TEARDOWN:
  132. break;
  133. case KS_MPCL_DESTROY:
  134. //ks_hash_destroy(&hash);
  135. break;
  136. }
  137. }
/* Create a hash table with default sizing (minimum 16 buckets); hash and
 * equality functions are derived from the mode by ks_hash_create_ex(). */
KS_DECLARE(ks_status_t) ks_hash_create(ks_hash_t **hp, ks_hash_mode_t mode, ks_hash_flag_t flags, ks_pool_t *pool)
{
	return ks_hash_create_ex(hp, 16, NULL, NULL, mode, flags, NULL, pool);
}
/* Replace the table's default flags, applied to inserts that pass 0 flags. */
KS_DECLARE(void) ks_hash_set_flags(ks_hash_t *h, ks_hash_flag_t flags)
{
	h->flags = flags;
}
/* Set the fixed key size (in bytes) used by KS_HASH_MODE_ARBITRARY hashing
 * and comparison. */
KS_DECLARE(void) ks_hash_set_keysize(ks_hash_t *h, ks_size_t keysize)
{
	h->keysize = keysize;
}
/* Set the table-wide value destructor, used on removal/destroy for entries
 * without their own destructor or KS_HASH_FLAG_FREE_VALUE. */
KS_DECLARE(void) ks_hash_set_destructor(ks_hash_t *h, ks_hash_destructor_t destructor)
{
	h->destructor = destructor;
}
  154. KS_DECLARE(ks_status_t)
  155. ks_hash_create_ex(
  156. ks_hash_t **hp,
  157. unsigned int minsize,
  158. unsigned int (*hashf) (void*),
  159. int (*eqf) (void*,void*),
  160. ks_hash_mode_t mode,
  161. ks_hash_flag_t flags,
  162. ks_hash_destructor_t destructor,
  163. ks_pool_t *pool)
  164. {
  165. ks_hash_t *h;
  166. unsigned int pindex, size = primes[0];
  167. ks_size_t keysize = 0;
  168. switch(mode) {
  169. case KS_HASH_MODE_CASE_INSENSITIVE:
  170. ks_assert(hashf == NULL);
  171. hashf = ks_hash_default_ci;
  172. break;
  173. case KS_HASH_MODE_INT:
  174. ks_assert(hashf == NULL);
  175. ks_assert(eqf == NULL);
  176. hashf = ks_hash_default_int;
  177. eqf = ks_hash_equalkeys_int;
  178. keysize = 4;
  179. break;
  180. case KS_HASH_MODE_INT64:
  181. ks_assert(hashf == NULL);
  182. ks_assert(eqf == NULL);
  183. hashf = ks_hash_default_int64;
  184. eqf = ks_hash_equalkeys_int64;
  185. keysize = 8;
  186. break;
  187. case KS_HASH_MODE_UUID:
  188. ks_assert(hashf == NULL);
  189. ks_assert(eqf == NULL);
  190. hashf = ks_hash_default_uuid;
  191. eqf = ks_hash_equalkeys_uuid;
  192. keysize = sizeof(uuid_t);
  193. break;
  194. case KS_HASH_MODE_PTR:
  195. ks_assert(hashf == NULL);
  196. ks_assert(eqf == NULL);
  197. hashf = ks_hash_default_ptr;
  198. eqf = ks_hash_equalkeys_ptr;
  199. keysize = sizeof(void *);
  200. break;
  201. case KS_HASH_MODE_ARBITRARY:
  202. keysize = sizeof(void *);
  203. break;
  204. default:
  205. break;
  206. }
  207. if ((flags & KS_HASH_FLAG_NOLOCK)) {
  208. flags &= ~KS_HASH_FLAG_RWLOCK;
  209. }
  210. ks_assert(pool);
  211. if (!hashf) hashf = ks_hash_default;
  212. if (!eqf) eqf = ks_hash_equalkeys;
  213. if (!minsize) minsize = 16;
  214. /* Check requested ks_hash isn't too large */
  215. if (minsize > (1u << 30)) {*hp = NULL; return KS_STATUS_FAIL;}
  216. /* Enforce size as prime */
  217. for (pindex=0; pindex < prime_table_length; pindex++) {
  218. if (primes[pindex] > minsize) {
  219. size = primes[pindex];
  220. break;
  221. }
  222. }
  223. h = (ks_hash_t *) ks_pool_alloc(pool, sizeof(ks_hash_t));
  224. h->flags = flags;
  225. h->destructor = destructor;
  226. h->keysize = keysize;
  227. h->mode = mode;
  228. if ((flags & KS_HASH_FLAG_RWLOCK)) {
  229. ks_rwl_create(&h->rwl, pool);
  230. }
  231. if (!(flags & KS_HASH_FLAG_NOLOCK)) {
  232. ks_mutex_create(&h->mutex, KS_MUTEX_FLAG_DEFAULT, pool);
  233. }
  234. if (NULL == h) abort(); /*oom*/
  235. h->table = (struct entry **)ks_pool_alloc(pool, sizeof(struct entry*) * size);
  236. if (NULL == h->table) abort(); /*oom*/
  237. //memset(h->table, 0, size * sizeof(struct entry *));
  238. h->tablelength = size;
  239. h->primeindex = pindex;
  240. h->entrycount = 0;
  241. h->hashfn = hashf;
  242. h->eqfn = eqf;
  243. h->loadlimit = (unsigned int) ceil(size * max_load_factor);
  244. *hp = h;
  245. ks_pool_set_cleanup(h, NULL, ks_hash_cleanup);
  246. return KS_STATUS_SUCCESS;
  247. }
  248. /*****************************************************************************/
  249. static int
  250. ks_hash_expand(ks_hash_t *h)
  251. {
  252. /* Double the size of the table to accomodate more entries */
  253. struct entry **newtable;
  254. struct entry *e;
  255. struct entry **pE;
  256. unsigned int newsize, i, index;
  257. /* Check we're not hitting max capacity */
  258. if (h->primeindex == (prime_table_length - 1)) return 0;
  259. newsize = primes[++(h->primeindex)];
  260. newtable = (struct entry **)ks_pool_alloc(ks_pool_get(h), sizeof(struct entry*) * newsize);
  261. if (NULL != newtable)
  262. {
  263. memset(newtable, 0, newsize * sizeof(struct entry *));
  264. /* This algorithm is not 'stable'. ie. it reverses the list
  265. * when it transfers entries between the tables */
  266. for (i = 0; i < h->tablelength; i++) {
  267. while (NULL != (e = h->table[i])) {
  268. h->table[i] = e->next;
  269. index = indexFor(newsize,e->h);
  270. e->next = newtable[index];
  271. newtable[index] = e;
  272. }
  273. }
  274. ks_pool_free(&h->table);
  275. h->table = newtable;
  276. }
  277. /* Plan B: realloc instead */
  278. else
  279. {
  280. newtable = (struct entry **)
  281. ks_pool_resize(h->table, newsize * sizeof(struct entry *));
  282. if (NULL == newtable) { (h->primeindex)--; return 0; }
  283. h->table = newtable;
  284. memset(newtable[h->tablelength], 0, newsize - h->tablelength);
  285. for (i = 0; i < h->tablelength; i++) {
  286. for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) {
  287. index = indexFor(newsize,e->h);
  288. if (index == i) {
  289. pE = &(e->next);
  290. } else {
  291. *pE = e->next;
  292. e->next = newtable[index];
  293. newtable[index] = e;
  294. }
  295. }
  296. }
  297. }
  298. h->tablelength = newsize;
  299. h->loadlimit = (unsigned int) ceil(newsize * max_load_factor);
  300. return -1;
  301. }
  302. /*****************************************************************************/
/*****************************************************************************/
/* Number of entries currently stored in the table. */
KS_DECLARE(unsigned int)
ks_hash_count(ks_hash_t *h)
{
	return h->entrycount;
}
  308. static int key_equals(ks_hash_t *h, const void * const k1, const void * const k2)
  309. {
  310. switch (h->mode)
  311. {
  312. case KS_HASH_MODE_ARBITRARY:
  313. return !memcmp(k1, k2, h->keysize);
  314. case KS_HASH_MODE_INT:
  315. case KS_HASH_MODE_INT64:
  316. case KS_HASH_MODE_PTR:
  317. return h->eqfn((void *)&k1, (void *)&k2);
  318. case KS_HASH_MODE_UUID:
  319. return h->eqfn((void *)k1, (void *)k2);
  320. default: break;
  321. }
  322. return h->eqfn((void *)k1, (void *)k2);
  323. }
/* Unlink and dispose of the entry matching key k in bucket `index`
 * (hashvalue is k's precomputed mixed hash). Caller must hold the write
 * lock. Returns the entry's value, or NULL when no entry matched or the
 * value was freed/destructed here. */
static void * _ks_hash_remove(ks_hash_t *h, const void * const k, unsigned int hashvalue, unsigned int index) {
	/* TODO: consider compacting the table when the load factor drops enough,
	 * or provide a 'compact' method. */
	struct entry *e;
	struct entry **pE; /* address of the link pointing at e, for O(1) unlink */
	void *v;

	pE = &(h->table[index]);
	e = *pE;
	while (NULL != e) {
		/* Check hash value to short circuit heavier comparison */
		if ((hashvalue == e->h) && (key_equals(h, k, e->k))) {
			*pE = e->next;
			h->entrycount--;
			v = e->v;
			if (e->flags & KS_HASH_FLAG_FREE_KEY) {
				ks_pool_free(&e->k);
			}
			if (e->flags & KS_HASH_FLAG_FREE_VALUE) {
				ks_pool_free(&e->v);
				v = NULL; /* value memory was released; don't return a dangling pointer */
			} else if (e->destructor) {
				/* per-entry destructor wins over the table-wide one */
				e->destructor(e->v);
				v = e->v = NULL;
			} else if (h->destructor) {
				h->destructor(e->v);
				v = e->v = NULL;
			}
			ks_pool_free(&e);
			return v;
		}
		pE = &(e->next);
		e = e->next;
	}
	return NULL;
}
  359. /*****************************************************************************/
  360. KS_DECLARE(ks_status_t) ks_hash_insert_ex(ks_hash_t *h, const void * const k, const void * const v, ks_hash_flag_t flags, ks_hash_destructor_t destructor)
  361. {
  362. struct entry *e;
  363. unsigned int hashvalue = hash(h, k);
  364. unsigned int index = indexFor(h->tablelength, hashvalue);
  365. ks_hash_write_lock(h);
  366. if (!flags) {
  367. flags = h->flags;
  368. }
  369. if (flags & KS_HASH_FLAG_DUP_CHECK) {
  370. _ks_hash_remove(h, k, hashvalue, index);
  371. }
  372. if (++(h->entrycount) > h->loadlimit)
  373. {
  374. /* Ignore the return value. If expand fails, we should
  375. * still try cramming just this value into the existing table
  376. * -- we may not have memory for a larger table, but one more
  377. * element may be ok. Next time we insert, we'll try expanding again.*/
  378. ks_hash_expand(h);
  379. index = indexFor(h->tablelength, hashvalue);
  380. }
  381. e = (struct entry *)ks_pool_alloc(ks_pool_get(h), sizeof(struct entry));
  382. e->h = hashvalue;
  383. e->k = (void *)k;
  384. e->v = (void *)v;
  385. e->flags = flags;
  386. e->destructor = destructor;
  387. e->next = h->table[index];
  388. h->table[index] = e;
  389. ks_hash_write_unlock(h);
  390. return KS_STATUS_SUCCESS;
  391. }
  392. KS_DECLARE(void) ks_hash_write_lock(ks_hash_t *h)
  393. {
  394. if ((h->flags & KS_HASH_FLAG_NOLOCK)) {
  395. return;
  396. } else if ((h->flags & KS_HASH_FLAG_RWLOCK)) {
  397. ks_rwl_write_lock(h->rwl);
  398. } else {
  399. ks_mutex_lock(h->mutex);
  400. }
  401. }
  402. KS_DECLARE(void) ks_hash_write_unlock(ks_hash_t *h)
  403. {
  404. if ((h->flags & KS_HASH_FLAG_NOLOCK)) {
  405. return;
  406. } else if ((h->flags & KS_HASH_FLAG_RWLOCK)) {
  407. ks_rwl_write_unlock(h->rwl);
  408. } else {
  409. ks_mutex_unlock(h->mutex);
  410. }
  411. }
  412. KS_DECLARE(ks_status_t) ks_hash_read_lock(ks_hash_t *h)
  413. {
  414. if (!(h->flags & KS_HASH_FLAG_RWLOCK)) {
  415. return KS_STATUS_INACTIVE;
  416. }
  417. ks_rwl_read_lock(h->rwl);
  418. ks_mutex_lock(h->mutex);
  419. h->readers++;
  420. ks_mutex_unlock(h->mutex);
  421. return KS_STATUS_SUCCESS;
  422. }
  423. KS_DECLARE(ks_status_t) ks_hash_read_unlock(ks_hash_t *h)
  424. {
  425. if (!(h->flags & KS_HASH_FLAG_RWLOCK)) {
  426. return KS_STATUS_INACTIVE;
  427. }
  428. ks_mutex_lock(h->mutex);
  429. ks_assert(h->readers != 0);
  430. h->readers--;
  431. ks_mutex_unlock(h->mutex);
  432. ks_rwl_read_unlock(h->rwl);
  433. return KS_STATUS_SUCCESS;
  434. }
  435. /*****************************************************************************/
  436. KS_DECLARE(void *) /* returns value associated with key */
  437. ks_hash_search(ks_hash_t *h, const void *k, ks_locked_t locked)
  438. {
  439. struct entry *e;
  440. unsigned int hashvalue, index;
  441. void *v = NULL;
  442. ks_assert(locked != KS_READLOCKED || (h->flags & KS_HASH_FLAG_RWLOCK));
  443. hashvalue = hash(h,k);
  444. index = indexFor(h->tablelength,hashvalue);
  445. if (locked == KS_READLOCKED) {
  446. ks_rwl_read_lock(h->rwl);
  447. ks_mutex_lock(h->mutex);
  448. h->readers++;
  449. ks_mutex_unlock(h->mutex);
  450. }
  451. e = h->table[index];
  452. while (NULL != e) {
  453. /* Check hash value to short circuit heavier comparison */
  454. if ((hashvalue == e->h) && (key_equals(h, (void *)k, e->k))) {
  455. v = e->v;
  456. break;
  457. }
  458. e = e->next;
  459. }
  460. return v;
  461. }
  462. /*****************************************************************************/
  463. /* returns value associated with key */
  464. KS_DECLARE(void *) ks_hash_remove(ks_hash_t *h, const void * const k)
  465. {
  466. void *v;
  467. unsigned int hashvalue = hash(h,k);
  468. ks_hash_write_lock(h);
  469. v = _ks_hash_remove(h, k, hashvalue, indexFor(h->tablelength,hashvalue));
  470. ks_hash_write_unlock(h);
  471. return v;
  472. }
  473. /*****************************************************************************/
  474. /* destroy */
/* Destroy the table and every remaining entry. Keys and values are freed
 * or destructed under the same flag/destructor rules as removal.
 * Safe on NULL / already-NULL handles; *h is NULL on return. */
KS_DECLARE(void) ks_hash_destroy(ks_hash_t **h)
{
	unsigned int i;
	struct entry *e, *f;
	struct entry **table;

	if (!h || !*h)
		return;

	table = (*h)->table;
	ks_hash_write_lock(*h);

	for (i = 0; i < (*h)->tablelength; i++) {
		e = table[i];
		while (NULL != e) {
			f = e; e = e->next; /* save next before freeing f */
			if (f->flags & KS_HASH_FLAG_FREE_KEY) {
				ks_pool_free(&f->k);
			}
			if (f->flags & KS_HASH_FLAG_FREE_VALUE) {
				ks_pool_free(&f->v);
			} else if (f->destructor) {
				f->destructor(f->v);
				f->v = NULL;
			} else if ((*h)->destructor) {
				(*h)->destructor(f->v);
				f->v = NULL;
			}
			ks_pool_free(&f);
		}
	}

	ks_pool_free(&(*h)->table);
	ks_hash_write_unlock(*h);
	/* NOTE(review): freeing rwl/mutex only when non-NULL assumes these
	 * fields are zeroed when the locks were never created -- confirm the
	 * pool zeroes allocations (or that create_ex clears them). */
	if ((*h)->rwl) ks_pool_free(&(*h)->rwl);
	if ((*h)->mutex) {
		ks_pool_free(&(*h)->mutex);
	}
	ks_pool_free(&(*h));
	*h = NULL;
}
/* Release an iterator: drop its read lock (when it holds one) and free it.
 * *iP is set to NULL. Called automatically when ks_hash_next() exhausts
 * the table. */
KS_DECLARE(void) ks_hash_last(ks_hash_iterator_t **iP)
{
	ks_hash_iterator_t *i = *iP;

	if (i->locked == KS_READLOCKED) {
		ks_mutex_lock(i->h->mutex);
		i->h->readers--;
		ks_mutex_unlock(i->h->mutex);
		ks_rwl_read_unlock(i->h->rwl);
	}

	ks_pool_free(&i);
	*iP = NULL;
}
/* Advance the iterator to the next entry. Returns the iterator while
 * entries remain; when exhausted it releases the iterator via
 * ks_hash_last() (dropping any read lock, NULLing *iP) and returns NULL. */
KS_DECLARE(ks_hash_iterator_t *) ks_hash_next(ks_hash_iterator_t **iP)
{
	ks_hash_iterator_t *i = *iP;

	if (i->e) {
		/* try the rest of the current bucket's chain first */
		if ((i->e = i->e->next) != 0) {
			return i;
		} else {
			i->pos++;
		}
	}

	/* skip over empty buckets */
	while(i->pos < i->h->tablelength && !i->h->table[i->pos]) {
		i->pos++;
	}

	if (i->pos >= i->h->tablelength) {
		goto end;
	}

	if ((i->e = i->h->table[i->pos]) != 0) {
		return i;
	}

 end:
	ks_hash_last(iP);
	return NULL;
}
  547. KS_DECLARE(ks_hash_iterator_t *) ks_hash_first(ks_hash_t *h, ks_locked_t locked)
  548. {
  549. ks_hash_iterator_t *iterator;
  550. ks_assert(locked != KS_READLOCKED || (h->flags & KS_HASH_FLAG_RWLOCK));
  551. iterator = ks_pool_alloc(ks_pool_get(h), sizeof(*iterator));
  552. ks_assert(iterator);
  553. iterator->pos = 0;
  554. iterator->e = NULL;
  555. iterator->h = h;
  556. if (locked == KS_READLOCKED) {
  557. ks_rwl_read_lock(h->rwl);
  558. iterator->locked = locked;
  559. ks_mutex_lock(h->mutex);
  560. h->readers++;
  561. ks_mutex_unlock(h->mutex);
  562. }
  563. return ks_hash_next(&iterator);
  564. }
/* Replace the value of the iterator's current entry in place (no flags or
 * destructor handling is applied to the old value). */
KS_DECLARE(void) ks_hash_this_val(ks_hash_iterator_t *i, void *val)
{
	if (i->e) {
		i->e->v = val;
	}
}
  571. KS_DECLARE(void) ks_hash_this(ks_hash_iterator_t *i, const void **key, ks_ssize_t *klen, void **val)
  572. {
  573. if (i->e) {
  574. if (key) {
  575. *key = i->e->k;
  576. }
  577. if (klen) {
  578. *klen = (int)strlen(i->e->k);
  579. }
  580. if (val) {
  581. *val = i->e->v;
  582. }
  583. } else {
  584. if (key) {
  585. *key = NULL;
  586. }
  587. if (klen) {
  588. *klen = 0;
  589. }
  590. if (val) {
  591. *val = NULL;
  592. }
  593. }
  594. }
/* For Emacs:
 * Local Variables:
 * mode:c
 * indent-tabs-mode:t
 * tab-width:4
 * c-basic-offset:4
 * End:
 * For VIM:
 * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet:
 */