pack.c

#include "test/jemalloc_test.h"

/*
 * Size class that is a divisor of the page size, ideally 4+ regions per run.
 */
#if LG_PAGE <= 14
#define SZ (ZU(1) << (LG_PAGE - 2))
#else
#define SZ ZU(4096)
#endif
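/*
 * For example, with 4 KiB pages (LG_PAGE == 12), SZ is 1024 bytes, giving
 * exactly four regions per page.
 */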
/*
 * Number of slabs to consume at high water mark.  Should be at least 2 so
 * that, if mmap()ed memory grows downward, that downward growth is also
 * exercised.
 */
#define NSLABS 8
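
/*
 * Find the bin index whose size class is exactly SZ by scanning
 * arenas.bin.<i>.size across all arenas.nbins bins.
 */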
static unsigned
binind_compute(void) {
	size_t sz;
	unsigned nbins, i;

	sz = sizeof(nbins);
	assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
	    "Unexpected mallctl failure");

	for (i = 0; i < nbins; i++) {
		size_t mib[4];
		size_t miblen = sizeof(mib)/sizeof(size_t);
		size_t size;

		assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib,
		    &miblen), 0, "Unexpected mallctlnametomib() failure");
		mib[2] = (size_t)i;
		sz = sizeof(size);
		assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
		    0), 0, "Unexpected mallctlbymib() failure");
		if (size == SZ) {
			return i;
		}
	}

	test_fail("Unable to compute binind for SZ");
	return 0;
}
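
/* Read arenas.bin.<binind>.nregs: the number of regions per slab for SZ. */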
static size_t
nregs_per_run_compute(void) {
	uint32_t nregs;
	size_t sz;
	unsigned binind = binind_compute();
	size_t mib[4];
	size_t miblen = sizeof(mib)/sizeof(size_t);

	assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[2] = (size_t)binind;
	sz = sizeof(nregs);
	assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,
	    0), 0, "Unexpected mallctlbymib() failure");
	return nregs;
}
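
/* Create a fresh arena via the arenas.create mallctl and return its index. */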
static unsigned
arenas_create_mallctl(void) {
	unsigned arena_ind;
	size_t sz;

	sz = sizeof(arena_ind);
	assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
	    0, "Error in arenas.create");
	return arena_ind;
}
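
/* Release all of the arena's allocations via the arena.<i>.reset mallctl. */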
static void
arena_reset_mallctl(unsigned arena_ind) {
	size_t mib[3];
	size_t miblen = sizeof(mib)/sizeof(size_t);

	assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
	    "Unexpected mallctlnametomib() failure");
	mib[1] = (size_t)arena_ind;
	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
	    "Unexpected mallctlbymib() failure");
}
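
/*
 * Fill NSLABS slabs of the SZ size class, free all but one region per slab
 * (rotating which region is preserved), then refill and verify that each new
 * allocation reoccupies exactly the region that was freed, exercising the
 * within-run layout policy.
 */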
TEST_BEGIN(test_pack) {
	bool prof_enabled;
	size_t sz = sizeof(prof_enabled);
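	/*
	 * Heap profiling perturbs allocation placement (sampled allocations
	 * are promoted out of the bins under test), so skip if it is enabled.
	 */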
	if (mallctl("opt.prof", (void *)&prof_enabled, &sz, NULL, 0) == 0) {
		test_skip_if(prof_enabled);
	}

	unsigned arena_ind = arenas_create_mallctl();
	size_t nregs_per_run = nregs_per_run_compute();
	size_t nregs = nregs_per_run * NSLABS;
	VARIABLE_ARRAY(void *, ptrs, nregs);
	size_t i, j, offset;

	/* Fill matrix. */
	for (i = offset = 0; i < NSLABS; i++) {
		for (j = 0; j < nregs_per_run; j++) {
			void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
			    MALLOCX_TCACHE_NONE);

			assert_ptr_not_null(p,
			    "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |"
			    " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu",
			    SZ, arena_ind, i, j);

			ptrs[(i * nregs_per_run) + j] = p;
		}
	}

	/*
	 * Free all but one region of each run, but rotate which region is
	 * preserved, so that subsequent allocations exercise the within-run
	 * layout policy.
	 */
	for (i = offset = 0;
	    i < NSLABS;
	    i++, offset = (offset + 1) % nregs_per_run) {
		for (j = 0; j < nregs_per_run; j++) {
			void *p = ptrs[(i * nregs_per_run) + j];

			if (offset == j) {
				continue;
			}
			dallocx(p, MALLOCX_ARENA(arena_ind) |
			    MALLOCX_TCACHE_NONE);
		}
	}

	/*
	 * Logically refill matrix, skipping preserved regions and verifying
	 * that the matrix is unmodified.
	 */
	for (i = offset = 0;
	    i < NSLABS;
	    i++, offset = (offset + 1) % nregs_per_run) {
		for (j = 0; j < nregs_per_run; j++) {
			void *p;

			if (offset == j) {
				continue;
			}
			p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
			    MALLOCX_TCACHE_NONE);
			assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j],
			    "Unexpected refill discrepancy, run=%zu, reg=%zu\n",
			    i, j);
		}
	}

	/* Clean up. */
	arena_reset_mallctl(arena_ind);
}
TEST_END

int
main(void) {
	return test(
	    test_pack);
}