decay.c
#include "test/jemalloc_test.h"

#include "jemalloc/internal/ticker.h"
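/*
 * Saved originals and mock state for jemalloc's internal clock.  The tests
 * below swap the nstime_monotonic/nstime_update hooks for mocks so that
 * decay-driven purging can be exercised against a controlled time source.
 */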
static nstime_monotonic_t *nstime_monotonic_orig;
static nstime_update_t *nstime_update_orig;

static unsigned nupdates_mock;
static nstime_t time_mock;
static bool monotonic_mock;
static bool
check_background_thread_enabled(void) {
	bool enabled;
	size_t sz = sizeof(bool);
	int ret = mallctl("background_thread", (void *)&enabled, &sz, NULL, 0);
	if (ret == ENOENT) {
		return false;
	}
	assert_d_eq(ret, 0, "Unexpected mallctl error");
	return enabled;
}
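/*
 * Background threads purge asynchronously; the tests below drive decay
 * manually and assert on exact purge counts, so each test skips itself when
 * background purging could interfere.
 */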
static bool
nstime_monotonic_mock(void) {
	return monotonic_mock;
}

static bool
nstime_update_mock(nstime_t *time) {
	nupdates_mock++;
	if (monotonic_mock) {
		nstime_copy(time, &time_mock);
	}
	return !monotonic_mock;
}
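/*
 * nstime_update() reports failure by returning true, in which case the caller
 * keeps its previous time reading.  The mock therefore "fails" whenever the
 * clock is configured as non-monotonic, and otherwise serves time_mock, which
 * the tests advance by hand.
 */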
static unsigned
do_arena_create(ssize_t dirty_decay_ms, ssize_t muzzy_decay_ms) {
	unsigned arena_ind;
	size_t sz = sizeof(unsigned);
	assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
	    0, "Unexpected mallctl() failure");
	size_t mib[3];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	assert_d_eq(mallctlnametomib("arena.0.dirty_decay_ms", mib, &miblen),
	    0, "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
	    (void *)&dirty_decay_ms, sizeof(dirty_decay_ms)), 0,
	    "Unexpected mallctlbymib() failure");
	assert_d_eq(mallctlnametomib("arena.0.muzzy_decay_ms", mib, &miblen),
	    0, "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL,
	    (void *)&muzzy_decay_ms, sizeof(muzzy_decay_ms)), 0,
	    "Unexpected mallctlbymib() failure");
	return arena_ind;
}
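/*
 * Note the pattern used above and in the helpers below: resolve an
 * "arena.0.*" name to a mib once, then overwrite the arena index component
 * (mib[1], or mib[2] for "stats.arenas.0.*") to address an arbitrary arena
 * via mallctlbymib().
 */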
static void
do_arena_destroy(unsigned arena_ind) {
	size_t mib[3];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}
static void
do_epoch(void) {
	uint64_t epoch = 1;
	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch, sizeof(epoch)),
	    0, "Unexpected mallctl() failure");
}
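/*
 * mallctl stats are cached snapshots; writing to "epoch" refreshes them, so
 * the stats readers below call do_epoch() before each query.
 */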
static void
do_purge(unsigned arena_ind) {
	size_t mib[3];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	assert_d_eq(mallctlnametomib("arena.0.purge", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}
static void
do_decay(unsigned arena_ind) {
	size_t mib[3];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	assert_d_eq(mallctlnametomib("arena.0.decay", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}
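/*
 * "arena.<i>.purge" forces all unused dirty/muzzy pages to be purged
 * immediately, whereas "arena.<i>.decay" merely advances decay-based purging
 * according to the arena's configured decay curves.
 */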
static uint64_t
get_arena_npurge_impl(const char *mibname, unsigned arena_ind) {
	size_t mib[4];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	assert_d_eq(mallctlnametomib(mibname, mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[2] = (size_t)arena_ind;
	uint64_t npurge = 0;
	size_t sz = sizeof(npurge);
	assert_d_eq(mallctlbymib(mib, miblen, (void *)&npurge, &sz, NULL, 0),
	    config_stats ? 0 : ENOENT, "Unexpected mallctlbymib() failure");
	return npurge;
}
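/*
 * Purge counters only exist when jemalloc is built with stats support;
 * without it, the mallctlbymib() call above is expected to fail with ENOENT
 * and the zero-initialized counter is returned instead.
 */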
static uint64_t
get_arena_dirty_npurge(unsigned arena_ind) {
	do_epoch();
	return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind);
}

static uint64_t
get_arena_muzzy_npurge(unsigned arena_ind) {
	do_epoch();
	return get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
}

static uint64_t
get_arena_npurge(unsigned arena_ind) {
	do_epoch();
	return get_arena_npurge_impl("stats.arenas.0.dirty_npurge", arena_ind) +
	    get_arena_npurge_impl("stats.arenas.0.muzzy_npurge", arena_ind);
}
static size_t
get_arena_pdirty(unsigned arena_ind) {
	do_epoch();
	size_t mib[4];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	assert_d_eq(mallctlnametomib("stats.arenas.0.pdirty", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[2] = (size_t)arena_ind;
	size_t pdirty;
	size_t sz = sizeof(pdirty);
	assert_d_eq(mallctlbymib(mib, miblen, (void *)&pdirty, &sz, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
	return pdirty;
}

static size_t
get_arena_pmuzzy(unsigned arena_ind) {
	do_epoch();
	size_t mib[4];
	size_t miblen = sizeof(mib)/sizeof(size_t);
	assert_d_eq(mallctlnametomib("stats.arenas.0.pmuzzy", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[2] = (size_t)arena_ind;
	size_t pmuzzy;
	size_t sz = sizeof(pmuzzy);
	assert_d_eq(mallctlbymib(mib, miblen, (void *)&pmuzzy, &sz, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
	return pmuzzy;
}
static void *
do_mallocx(size_t size, int flags) {
	void *p = mallocx(size, flags);
	assert_ptr_not_null(p, "Unexpected mallocx() failure");
	return p;
}

static void
generate_dirty(unsigned arena_ind, size_t size) {
	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
	void *p = do_mallocx(size, flags);
	dallocx(p, flags);
}
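/*
 * test_decay_ticks: verify that every public allocation and deallocation
 * entry point advances the per-arena decay ticker, covering the standard
 * malloc/free APIs, the *allocx() APIs with tcache disabled, and explicit
 * tcache fill/flush.
 */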
TEST_BEGIN(test_decay_ticks) {
	test_skip_if(check_background_thread_enabled());

	ticker_t *decay_ticker;
	unsigned tick0, tick1, arena_ind;
	size_t sz, large0;
	void *p;

	sz = sizeof(size_t);
	assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");

	/* Set up a manually managed arena for test. */
	arena_ind = do_arena_create(0, 0);

	/* Migrate to the new arena, and get the ticker. */
	unsigned old_arena_ind;
	size_t sz_arena_ind = sizeof(old_arena_ind);
	assert_d_eq(mallctl("thread.arena", (void *)&old_arena_ind,
	    &sz_arena_ind, (void *)&arena_ind, sizeof(arena_ind)), 0,
	    "Unexpected mallctl() failure");
	decay_ticker = decay_ticker_get(tsd_fetch(), arena_ind);
	assert_ptr_not_null(decay_ticker,
	    "Unexpected failure getting decay ticker");

	/*
	 * Test the standard APIs using a large size class, since we can't
	 * control tcache interactions for small size classes (except by
	 * completely disabling tcache for the entire test program).
	 */

	/* malloc(). */
	tick0 = ticker_read(decay_ticker);
	p = malloc(large0);
	assert_ptr_not_null(p, "Unexpected malloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during malloc()");
	/* free(). */
	tick0 = ticker_read(decay_ticker);
	free(p);
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during free()");

	/* calloc(). */
	tick0 = ticker_read(decay_ticker);
	p = calloc(1, large0);
	assert_ptr_not_null(p, "Unexpected calloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during calloc()");
	free(p);

	/* posix_memalign(). */
	tick0 = ticker_read(decay_ticker);
	assert_d_eq(posix_memalign(&p, sizeof(size_t), large0), 0,
	    "Unexpected posix_memalign() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0,
	    "Expected ticker to tick during posix_memalign()");
	free(p);

	/* aligned_alloc(). */
	tick0 = ticker_read(decay_ticker);
	p = aligned_alloc(sizeof(size_t), large0);
	assert_ptr_not_null(p, "Unexpected aligned_alloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0,
	    "Expected ticker to tick during aligned_alloc()");
	free(p);

	/* realloc(). */
	/* Allocate. */
	tick0 = ticker_read(decay_ticker);
	p = realloc(NULL, large0);
	assert_ptr_not_null(p, "Unexpected realloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
	/* Reallocate. */
	tick0 = ticker_read(decay_ticker);
	p = realloc(p, large0);
	assert_ptr_not_null(p, "Unexpected realloc() failure");
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");
	/* Deallocate. */
	tick0 = ticker_read(decay_ticker);
	realloc(p, 0);
	tick1 = ticker_read(decay_ticker);
	assert_u32_ne(tick1, tick0, "Expected ticker to tick during realloc()");

	/*
	 * Test the *allocx() APIs using large and small size classes, with
	 * tcache explicitly disabled.
	 */
	{
		unsigned i;
		size_t allocx_sizes[2];
		allocx_sizes[0] = large0;
		allocx_sizes[1] = 1;

		for (i = 0; i < sizeof(allocx_sizes) / sizeof(size_t); i++) {
			sz = allocx_sizes[i];

			/* mallocx(). */
			tick0 = ticker_read(decay_ticker);
			p = mallocx(sz, MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p, "Unexpected mallocx() failure");
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during mallocx() (sz=%zu)",
			    sz);
			/* rallocx(). */
			tick0 = ticker_read(decay_ticker);
			p = rallocx(p, sz, MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p, "Unexpected rallocx() failure");
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during rallocx() (sz=%zu)",
			    sz);
			/* xallocx(). */
			tick0 = ticker_read(decay_ticker);
			xallocx(p, sz, 0, MALLOCX_TCACHE_NONE);
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during xallocx() (sz=%zu)",
			    sz);
			/* dallocx(). */
			tick0 = ticker_read(decay_ticker);
			dallocx(p, MALLOCX_TCACHE_NONE);
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during dallocx() (sz=%zu)",
			    sz);
			/* sdallocx(). */
			p = mallocx(sz, MALLOCX_TCACHE_NONE);
			assert_ptr_not_null(p, "Unexpected mallocx() failure");
			tick0 = ticker_read(decay_ticker);
			sdallocx(p, sz, MALLOCX_TCACHE_NONE);
			tick1 = ticker_read(decay_ticker);
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during sdallocx() "
			    "(sz=%zu)", sz);
		}
	}

	/*
	 * Test tcache fill/flush interactions for large and small size classes,
	 * using an explicit tcache.
	 */
	unsigned tcache_ind, i;
	size_t tcache_sizes[2];
	tcache_sizes[0] = large0;
	tcache_sizes[1] = 1;

	size_t tcache_max, sz_tcache_max;
	sz_tcache_max = sizeof(tcache_max);
	assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max,
	    &sz_tcache_max, NULL, 0), 0, "Unexpected mallctl() failure");

	sz = sizeof(unsigned);
	assert_d_eq(mallctl("tcache.create", (void *)&tcache_ind, &sz,
	    NULL, 0), 0, "Unexpected mallctl failure");

	for (i = 0; i < sizeof(tcache_sizes) / sizeof(size_t); i++) {
		sz = tcache_sizes[i];

		/* tcache fill. */
		tick0 = ticker_read(decay_ticker);
		p = mallocx(sz, MALLOCX_TCACHE(tcache_ind));
		assert_ptr_not_null(p, "Unexpected mallocx() failure");
		tick1 = ticker_read(decay_ticker);
		assert_u32_ne(tick1, tick0,
		    "Expected ticker to tick during tcache fill "
		    "(sz=%zu)", sz);
		/* tcache flush. */
		dallocx(p, MALLOCX_TCACHE(tcache_ind));
		tick0 = ticker_read(decay_ticker);
		assert_d_eq(mallctl("tcache.flush", NULL, NULL,
		    (void *)&tcache_ind, sizeof(unsigned)), 0,
		    "Unexpected mallctl failure");
		tick1 = ticker_read(decay_ticker);

		/* Will only tick if it's in tcache. */
		if (sz <= tcache_max) {
			assert_u32_ne(tick1, tick0,
			    "Expected ticker to tick during tcache "
			    "flush (sz=%zu)", sz);
		} else {
			assert_u32_eq(tick1, tick0,
			    "Unexpected ticker tick during tcache "
			    "flush (sz=%zu)", sz);
		}
	}
}
TEST_END
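/*
 * decay_ticker_helper (used by test_decay_ticker below): churn tiny
 * allocations while advancing the mock clock in dt/NINTERVALS steps, until
 * either purging is observed (when terminate_asap is true) or the full decay
 * deadline of dt seconds has passed.
 */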
static void
decay_ticker_helper(unsigned arena_ind, int flags, bool dirty, ssize_t dt,
    uint64_t dirty_npurge0, uint64_t muzzy_npurge0, bool terminate_asap) {
#define NINTERVALS 101
	nstime_t time, update_interval, decay_ms, deadline;

	nstime_init(&time, 0);
	nstime_update(&time);

	nstime_init2(&decay_ms, dt, 0);
	nstime_copy(&deadline, &time);
	nstime_add(&deadline, &decay_ms);

	nstime_init2(&update_interval, dt, 0);
	nstime_idivide(&update_interval, NINTERVALS);

	/*
	 * Keep q's slab from being deallocated during the looping below.  If a
	 * cached slab were to repeatedly come and go during looping, it could
	 * prevent the decay backlog ever becoming empty.
	 */
	void *p = do_mallocx(1, flags);
	uint64_t dirty_npurge1, muzzy_npurge1;
	do {
		for (unsigned i = 0; i < DECAY_NTICKS_PER_UPDATE / 2;
		    i++) {
			void *q = do_mallocx(1, flags);
			dallocx(q, flags);
		}
		dirty_npurge1 = get_arena_dirty_npurge(arena_ind);
		muzzy_npurge1 = get_arena_muzzy_npurge(arena_ind);

		nstime_add(&time_mock, &update_interval);
		nstime_update(&time);
	} while (nstime_compare(&time, &deadline) <= 0 && ((dirty_npurge1 ==
	    dirty_npurge0 && muzzy_npurge1 == muzzy_npurge0) ||
	    !terminate_asap));
	dallocx(p, flags);

	if (config_stats) {
		assert_u64_gt(dirty_npurge1 + muzzy_npurge1, dirty_npurge0 +
		    muzzy_npurge0, "Expected purging to occur");
	}
#undef NINTERVALS
}
TEST_BEGIN(test_decay_ticker) {
	test_skip_if(check_background_thread_enabled());
#define NPS 2048
	ssize_t ddt = opt_dirty_decay_ms;
	ssize_t mdt = opt_muzzy_decay_ms;
	unsigned arena_ind = do_arena_create(ddt, mdt);
	int flags = (MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE);
	void *ps[NPS];
	size_t large;

	/*
	 * Allocate a bunch of large objects, pause the clock, deallocate every
	 * other object (to fragment virtual memory), restore the clock, then
	 * [md]allocx() in a tight loop while advancing time rapidly to verify
	 * the ticker triggers purging.
	 */

	size_t tcache_max;
	size_t sz = sizeof(size_t);
	assert_d_eq(mallctl("arenas.tcache_max", (void *)&tcache_max, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");
	large = nallocx(tcache_max + 1, flags);

	do_purge(arena_ind);
	uint64_t dirty_npurge0 = get_arena_dirty_npurge(arena_ind);
	uint64_t muzzy_npurge0 = get_arena_muzzy_npurge(arena_ind);

	for (unsigned i = 0; i < NPS; i++) {
		ps[i] = do_mallocx(large, flags);
	}

	nupdates_mock = 0;
	nstime_init(&time_mock, 0);
	nstime_update(&time_mock);
	monotonic_mock = true;

	nstime_monotonic_orig = nstime_monotonic;
	nstime_update_orig = nstime_update;
	nstime_monotonic = nstime_monotonic_mock;
	nstime_update = nstime_update_mock;

	for (unsigned i = 0; i < NPS; i += 2) {
		dallocx(ps[i], flags);
		unsigned nupdates0 = nupdates_mock;
		do_decay(arena_ind);
		assert_u_gt(nupdates_mock, nupdates0,
		    "Expected nstime_update() to be called");
	}

	decay_ticker_helper(arena_ind, flags, true, ddt, dirty_npurge0,
	    muzzy_npurge0, true);
	decay_ticker_helper(arena_ind, flags, false, ddt+mdt, dirty_npurge0,
	    muzzy_npurge0, false);

	do_arena_destroy(arena_ind);

	nstime_monotonic = nstime_monotonic_orig;
	nstime_update = nstime_update_orig;
#undef NPS
}
TEST_END
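/*
 * test_decay_nonmonotonic: with a clock that fails to advance (the mock
 * reports non-monotonic time), decay can make no progress, so no purging
 * should be observed despite repeated "arena.0.decay" calls.
 */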
TEST_BEGIN(test_decay_nonmonotonic) {
	test_skip_if(check_background_thread_enabled());
#define NPS (SMOOTHSTEP_NSTEPS + 1)
	int flags = (MALLOCX_ARENA(0) | MALLOCX_TCACHE_NONE);
	void *ps[NPS];
	uint64_t npurge0 = 0;
	uint64_t npurge1 = 0;
	size_t sz, large0;
	unsigned i, nupdates0;

	sz = sizeof(size_t);
	assert_d_eq(mallctl("arenas.lextent.0.size", (void *)&large0, &sz, NULL,
	    0), 0, "Unexpected mallctl failure");

	assert_d_eq(mallctl("arena.0.purge", NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctl failure");
	do_epoch();
	sz = sizeof(uint64_t);
	npurge0 = get_arena_npurge(0);

	nupdates_mock = 0;
	nstime_init(&time_mock, 0);
	nstime_update(&time_mock);
	monotonic_mock = false;

	nstime_monotonic_orig = nstime_monotonic;
	nstime_update_orig = nstime_update;
	nstime_monotonic = nstime_monotonic_mock;
	nstime_update = nstime_update_mock;

	for (i = 0; i < NPS; i++) {
		ps[i] = mallocx(large0, flags);
		assert_ptr_not_null(ps[i], "Unexpected mallocx() failure");
	}

	for (i = 0; i < NPS; i++) {
		dallocx(ps[i], flags);
		nupdates0 = nupdates_mock;
		assert_d_eq(mallctl("arena.0.decay", NULL, NULL, NULL, 0), 0,
		    "Unexpected arena.0.decay failure");
		assert_u_gt(nupdates_mock, nupdates0,
		    "Expected nstime_update() to be called");
	}

	do_epoch();
	sz = sizeof(uint64_t);
	npurge1 = get_arena_npurge(0);

	if (config_stats) {
		assert_u64_eq(npurge0, npurge1, "Unexpected purging occurred");
	}

	nstime_monotonic = nstime_monotonic_orig;
	nstime_update = nstime_update_orig;
#undef NPS
}
TEST_END
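/*
 * test_decay_now: with both decay times set to 0, pages are purged
 * immediately upon deallocation, so dirty/muzzy page counts stay at zero.
 */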
TEST_BEGIN(test_decay_now) {
	test_skip_if(check_background_thread_enabled());

	unsigned arena_ind = do_arena_create(0, 0);
	assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
	assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
	size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
	/* Verify that dirty/muzzy pages never linger after deallocation. */
	for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
		size_t size = sizes[i];
		generate_dirty(arena_ind, size);
		assert_zu_eq(get_arena_pdirty(arena_ind), 0,
		    "Unexpected dirty pages");
		assert_zu_eq(get_arena_pmuzzy(arena_ind), 0,
		    "Unexpected muzzy pages");
	}
	do_arena_destroy(arena_ind);
}
TEST_END
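/*
 * test_decay_never: with both decay times set to -1, decay-based purging is
 * disabled, so dirty pages accumulate monotonically and are only reclaimed
 * when the arena is destroyed.
 */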
TEST_BEGIN(test_decay_never) {
	test_skip_if(check_background_thread_enabled());

	unsigned arena_ind = do_arena_create(-1, -1);
	int flags = MALLOCX_ARENA(arena_ind) | MALLOCX_TCACHE_NONE;
	assert_zu_eq(get_arena_pdirty(arena_ind), 0, "Unexpected dirty pages");
	assert_zu_eq(get_arena_pmuzzy(arena_ind), 0, "Unexpected muzzy pages");
	size_t sizes[] = {16, PAGE<<2, HUGEPAGE<<2};
	void *ptrs[sizeof(sizes)/sizeof(size_t)];
	for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
		ptrs[i] = do_mallocx(sizes[i], flags);
	}
	/* Verify that each deallocation generates additional dirty pages. */
	size_t pdirty_prev = get_arena_pdirty(arena_ind);
	size_t pmuzzy_prev = get_arena_pmuzzy(arena_ind);
	assert_zu_eq(pdirty_prev, 0, "Unexpected dirty pages");
	assert_zu_eq(pmuzzy_prev, 0, "Unexpected muzzy pages");
	for (unsigned i = 0; i < sizeof(sizes)/sizeof(size_t); i++) {
		dallocx(ptrs[i], flags);
		size_t pdirty = get_arena_pdirty(arena_ind);
		size_t pmuzzy = get_arena_pmuzzy(arena_ind);
		assert_zu_gt(pdirty, pdirty_prev,
		    "Expected dirty pages to increase.");
		assert_zu_eq(pmuzzy, 0, "Unexpected muzzy pages");
		pdirty_prev = pdirty;
	}
	do_arena_destroy(arena_ind);
}
TEST_END
int
main(void) {
	return test(
	    test_decay_ticks,
	    test_decay_ticker,
	    test_decay_nonmonotonic,
	    test_decay_now,
	    test_decay_never);
}