retained.c

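/*
 * Integration test for retained-memory accounting: worker threads allocate
 * huge objects from a dedicated arena in epochs coordinated by the main
 * thread, which then cross-checks the arena's "active" and "mapped" stats
 * against the sizes its extent growth policy could have retained.
 */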
#include "test/jemalloc_test.h"

#include "jemalloc/internal/spin.h"

static unsigned arena_ind;
static size_t sz;
static size_t esz;

#define NEPOCHS 8
#define PER_THD_NALLOCS 1

/*
 * epoch is advanced by the main thread to release the workers for the next
 * allocation round; nfinished counts workers that have completed that round.
 */
static atomic_u_t epoch;
static atomic_u_t nfinished;
static unsigned
do_arena_create(extent_hooks_t *h) {
	unsigned arena_ind;
	size_t sz = sizeof(unsigned);
	assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz,
	    (void *)(h != NULL ? &h : NULL), (h != NULL ? sizeof(h) : 0)), 0,
	    "Unexpected mallctl() failure");
	return arena_ind;
}
static void
do_arena_destroy(unsigned arena_ind) {
	size_t mib[3];
	size_t miblen;

	miblen = sizeof(mib)/sizeof(size_t);
	assert_d_eq(mallctlnametomib("arena.0.destroy", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}
/* Advance the stats epoch so that subsequent stats reads are current. */
static void
do_refresh(void) {
	uint64_t epoch = 1;
	assert_d_eq(mallctl("epoch", NULL, NULL, (void *)&epoch,
	    sizeof(epoch)), 0, "Unexpected mallctl() failure");
}
/* Read a per-arena size statistic via its mallctl MIB. */
static size_t
do_get_size_impl(const char *cmd, unsigned arena_ind) {
	size_t mib[4];
	size_t miblen = sizeof(mib) / sizeof(size_t);
	size_t z = sizeof(size_t);

	assert_d_eq(mallctlnametomib(cmd, mib, &miblen),
	    0, "Unexpected mallctlnametomib(\"%s\", ...) failure", cmd);
	mib[2] = arena_ind;
	size_t size;
	assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &z, NULL, 0),
	    0, "Unexpected mallctlbymib([\"%s\"], ...) failure", cmd);

	return size;
}
/* "stats.arenas.<i>.pactive" is reported in pages; convert to bytes. */
static size_t
do_get_active(unsigned arena_ind) {
	return do_get_size_impl("stats.arenas.0.pactive", arena_ind) * PAGE;
}

static size_t
do_get_mapped(unsigned arena_ind) {
	return do_get_size_impl("stats.arenas.0.mapped", arena_ind);
}
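/*
 * Worker thread: for each epoch published by the main thread, allocate
 * PER_THD_NALLOCS objects of size sz from the test arena (bypassing the
 * tcache), then report completion by incrementing nfinished.
 */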
static void *
thd_start(void *arg) {
	for (unsigned next_epoch = 1; next_epoch < NEPOCHS; next_epoch++) {
		/* Busy-wait for next epoch. */
		unsigned cur_epoch;
		spin_t spinner = SPIN_INITIALIZER;
		while ((cur_epoch = atomic_load_u(&epoch, ATOMIC_ACQUIRE)) !=
		    next_epoch) {
			spin_adaptive(&spinner);
		}
		assert_u_eq(cur_epoch, next_epoch, "Unexpected epoch");

		/*
		 * Allocate.  The main thread will reset the arena, so there's
		 * no need to deallocate.
		 */
		for (unsigned i = 0; i < PER_THD_NALLOCS; i++) {
			void *p = mallocx(sz, MALLOCX_ARENA(arena_ind) |
			    MALLOCX_TCACHE_NONE
			    );
			assert_ptr_not_null(p,
			    "Unexpected mallocx() failure\n");
		}

		/* Let the main thread know we've finished this iteration. */
		atomic_fetch_add_u(&nfinished, 1, ATOMIC_RELEASE);
	}

	return NULL;
}
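/*
 * Drive the workers through NEPOCHS-1 allocation rounds.  After each round,
 * refresh stats and check that allocated <= active <= mapped, and that the
 * usable bytes across the size classes the arena has grown through do not
 * exceed what the workers requested; then destroy and recreate the arena to
 * reset its state for the next round.
 */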
TEST_BEGIN(test_retained) {
	test_skip_if(!config_stats);

	arena_ind = do_arena_create(NULL);
	sz = nallocx(HUGEPAGE, 0);
	esz = sz + sz_large_pad;

	atomic_store_u(&epoch, 0, ATOMIC_RELAXED);

	unsigned nthreads = ncpus * 2;
	VARIABLE_ARRAY(thd_t, threads, nthreads);
	for (unsigned i = 0; i < nthreads; i++) {
		thd_create(&threads[i], thd_start, NULL);
	}

	for (unsigned e = 1; e < NEPOCHS; e++) {
		atomic_store_u(&nfinished, 0, ATOMIC_RELEASE);
		atomic_store_u(&epoch, e, ATOMIC_RELEASE);

		/* Wait for threads to finish allocating. */
		spin_t spinner = SPIN_INITIALIZER;
		while (atomic_load_u(&nfinished, ATOMIC_ACQUIRE) < nthreads) {
			spin_adaptive(&spinner);
		}

		/*
		 * Assert that retained is no more than the sum of size classes
		 * that should have been used to satisfy the worker threads'
		 * requests, discounting per-growth fragmentation.
		 */
		do_refresh();

		size_t allocated = esz * nthreads * PER_THD_NALLOCS;
		size_t active = do_get_active(arena_ind);
		assert_zu_le(allocated, active, "Unexpected active memory");
		size_t mapped = do_get_mapped(arena_ind);
		assert_zu_le(active, mapped, "Unexpected mapped memory");

		arena_t *arena = arena_get(tsdn_fetch(), arena_ind, false);
		size_t usable = 0;
		size_t fragmented = 0;
		for (pszind_t pind = sz_psz2ind(HUGEPAGE); pind <
		    arena->extent_grow_next; pind++) {
			size_t psz = sz_pind2sz(pind);
			size_t psz_fragmented = psz % esz;
			size_t psz_usable = psz - psz_fragmented;
			/*
			 * Only consider size classes that wouldn't be skipped.
			 */
			if (psz_usable > 0) {
				assert_zu_lt(usable, allocated,
				    "Excessive retained memory "
				    "(%#zx[+%#zx] > %#zx)", usable, psz_usable,
				    allocated);
				fragmented += psz_fragmented;
				usable += psz_usable;
			}
		}

		/*
		 * Clean up arena.  Destroying and recreating the arena is
		 * simpler than specifying extent hooks that deallocate
		 * (rather than retaining) during reset.
		 */
		do_arena_destroy(arena_ind);
		assert_u_eq(do_arena_create(NULL), arena_ind,
		    "Unexpected arena index");
	}

	for (unsigned i = 0; i < nthreads; i++) {
		thd_join(threads[i], NULL);
	}

	do_arena_destroy(arena_ind);
}
TEST_END
int
main(void) {
	return test(
	    test_retained);
}