prof_reset.c
#include "test/jemalloc_test.h"

#ifdef JEMALLOC_PROF
const char *malloc_conf =
    "prof:true,prof_active:false,lg_prof_sample:0";
#endif
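
/*
 * Redirect heap profile dumps to /dev/null so the tests exercise the dump
 * path without writing profile files to disk.
 */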
static int
prof_dump_open_intercept(bool propagate_err, const char *filename)
{
    int fd;

    fd = open("/dev/null", O_WRONLY);
    assert_d_ne(fd, -1, "Unexpected open() failure");

    return (fd);
}
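
/* Toggle profiling activation via the "prof.active" mallctl. */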
static void
set_prof_active(bool active)
{
    assert_d_eq(mallctl("prof.active", NULL, NULL, &active, sizeof(active)),
        0, "Unexpected mallctl failure");
}
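
/* Read the current sample rate via the "prof.lg_sample" mallctl. */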
static size_t
get_lg_prof_sample(void)
{
    size_t lg_prof_sample;
    size_t sz = sizeof(size_t);

    assert_d_eq(mallctl("prof.lg_sample", &lg_prof_sample, &sz, NULL, 0), 0,
        "Unexpected mallctl failure while reading profiling sample rate");
    return (lg_prof_sample);
}
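
/* Reset profile data, set a new sample rate, and verify the rate took effect. */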
static void
do_prof_reset(size_t lg_prof_sample)
{
    assert_d_eq(mallctl("prof.reset", NULL, NULL,
        &lg_prof_sample, sizeof(size_t)), 0,
        "Unexpected mallctl failure while resetting profile data");
    assert_zu_eq(lg_prof_sample, get_lg_prof_sample(),
        "Expected profile sample rate change");
}
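
/* Verify that "prof.reset" preserves or updates the sample rate as requested. */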
TEST_BEGIN(test_prof_reset_basic)
{
    size_t lg_prof_sample_orig, lg_prof_sample, lg_prof_sample_next;
    size_t sz;
    unsigned i;

    test_skip_if(!config_prof);

    sz = sizeof(size_t);
    assert_d_eq(mallctl("opt.lg_prof_sample", &lg_prof_sample_orig, &sz,
        NULL, 0), 0,
        "Unexpected mallctl failure while reading profiling sample rate");
    assert_zu_eq(lg_prof_sample_orig, 0,
        "Unexpected profiling sample rate");
    lg_prof_sample = get_lg_prof_sample();
    assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
        "Unexpected disagreement between \"opt.lg_prof_sample\" and "
        "\"prof.lg_sample\"");

    /* Test simple resets. */
    for (i = 0; i < 2; i++) {
        assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
            "Unexpected mallctl failure while resetting profile data");
        lg_prof_sample = get_lg_prof_sample();
        assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
            "Unexpected profile sample rate change");
    }

    /* Test resets with prof.lg_sample changes. */
    lg_prof_sample_next = 1;
    for (i = 0; i < 2; i++) {
        do_prof_reset(lg_prof_sample_next);
        lg_prof_sample = get_lg_prof_sample();
        assert_zu_eq(lg_prof_sample, lg_prof_sample_next,
            "Expected profile sample rate change");
        lg_prof_sample_next = lg_prof_sample_orig;
    }

    /* Make sure the test code restored prof.lg_sample. */
    lg_prof_sample = get_lg_prof_sample();
    assert_zu_eq(lg_prof_sample_orig, lg_prof_sample,
        "Unexpected disagreement between \"opt.lg_prof_sample\" and "
        "\"prof.lg_sample\"");
}
TEST_END
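
/*
 * Intercept prof_dump_header() so the test can capture the aggregate counters
 * reported by each "prof.dump".
 */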
bool prof_dump_header_intercepted = false;
prof_cnt_t cnt_all_copy = {0, 0, 0, 0};
static bool
prof_dump_header_intercept(bool propagate_err, const prof_cnt_t *cnt_all)
{
    prof_dump_header_intercepted = true;
    memcpy(&cnt_all_copy, cnt_all, sizeof(prof_cnt_t));

    return (false);
}
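
/* Verify that "prof.reset" zeroes the live-object counts seen by later dumps. */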
TEST_BEGIN(test_prof_reset_cleanup)
{
    void *p;
    prof_dump_header_t *prof_dump_header_orig;

    test_skip_if(!config_prof);

    set_prof_active(true);

    assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");
    p = mallocx(1, 0);
    assert_ptr_not_null(p, "Unexpected mallocx() failure");
    assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");

    prof_dump_header_orig = prof_dump_header;
    prof_dump_header = prof_dump_header_intercept;
    assert_false(prof_dump_header_intercepted, "Unexpected intercept");

    assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
        0, "Unexpected error while dumping heap profile");
    assert_true(prof_dump_header_intercepted, "Expected intercept");
    assert_u64_eq(cnt_all_copy.curobjs, 1, "Expected 1 allocation");

    assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0), 0,
        "Unexpected error while resetting heap profile data");
    assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
        0, "Unexpected error while dumping heap profile");
    assert_u64_eq(cnt_all_copy.curobjs, 0, "Expected 0 allocations");
    assert_zu_eq(prof_bt_count(), 1, "Expected 1 backtrace");

    prof_dump_header = prof_dump_header_orig;
    dallocx(p, 0);
    assert_zu_eq(prof_bt_count(), 0, "Expected 0 backtraces");

    set_prof_active(false);
}
TEST_END

#define NTHREADS            4
#define NALLOCS_PER_THREAD  (1U << 13)
#define OBJ_RING_BUF_COUNT  1531
#define RESET_INTERVAL      (1U << 10)
#define DUMP_INTERVAL       3677
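
/*
 * Worker for test_prof_reset: allocate through a fixed-size ring buffer while
 * periodically resetting and dumping the heap profile.
 */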
static void *
thd_start(void *varg)
{
    unsigned thd_ind = *(unsigned *)varg;
    unsigned i;
    void *objs[OBJ_RING_BUF_COUNT];

    memset(objs, 0, sizeof(objs));

    for (i = 0; i < NALLOCS_PER_THREAD; i++) {
        if (i % RESET_INTERVAL == 0) {
            assert_d_eq(mallctl("prof.reset", NULL, NULL, NULL, 0),
                0, "Unexpected error while resetting heap profile "
                "data");
        }

        if (i % DUMP_INTERVAL == 0) {
            assert_d_eq(mallctl("prof.dump", NULL, NULL, NULL, 0),
                0, "Unexpected error while dumping heap profile");
        }

        {
            void **pp = &objs[i % OBJ_RING_BUF_COUNT];
            if (*pp != NULL) {
                dallocx(*pp, 0);
                *pp = NULL;
            }
            *pp = btalloc(1, thd_ind*NALLOCS_PER_THREAD + i);
            assert_ptr_not_null(*pp,
                "Unexpected btalloc() failure");
        }
    }

    /* Clean up any remaining objects. */
    for (i = 0; i < OBJ_RING_BUF_COUNT; i++) {
        void **pp = &objs[i % OBJ_RING_BUF_COUNT];
        if (*pp != NULL) {
            dallocx(*pp, 0);
            *pp = NULL;
        }
    }

    return (NULL);
}
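
/*
 * Stress test: run NTHREADS allocator threads that race with profile resets
 * and dumps, then verify that no backtraces or tdata structures leak.
 */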
TEST_BEGIN(test_prof_reset)
{
    size_t lg_prof_sample_orig;
    thd_t thds[NTHREADS];
    unsigned thd_args[NTHREADS];
    unsigned i;
    size_t bt_count, tdata_count;

    test_skip_if(!config_prof);

    bt_count = prof_bt_count();
    assert_zu_eq(bt_count, 0,
        "Unexpected pre-existing backtraces");
    tdata_count = prof_tdata_count();

    lg_prof_sample_orig = get_lg_prof_sample();
    do_prof_reset(5);

    set_prof_active(true);

    for (i = 0; i < NTHREADS; i++) {
        thd_args[i] = i;
        thd_create(&thds[i], thd_start, (void *)&thd_args[i]);
    }
    for (i = 0; i < NTHREADS; i++)
        thd_join(thds[i], NULL);

    assert_zu_eq(prof_bt_count(), bt_count,
        "Unexpected backtrace count change");
    assert_zu_eq(prof_tdata_count(), tdata_count,
        "Unexpected remaining tdata structures");

    set_prof_active(false);

    do_prof_reset(lg_prof_sample_orig);
}
TEST_END
#undef NTHREADS
#undef NALLOCS_PER_THREAD
#undef OBJ_RING_BUF_COUNT
#undef RESET_INTERVAL
#undef DUMP_INTERVAL

/* Test sampling at the same allocation site across resets. */
#define NITER 10
TEST_BEGIN(test_xallocx)
{
    size_t lg_prof_sample_orig;
    unsigned i;
    void *ptrs[NITER];

    test_skip_if(!config_prof);

    lg_prof_sample_orig = get_lg_prof_sample();
    set_prof_active(true);

    /* Reset profiling. */
    do_prof_reset(0);

    for (i = 0; i < NITER; i++) {
        void *p;
        size_t sz, nsz;

        /* Reset profiling. */
        do_prof_reset(0);

        /* Allocate small object (which will be promoted). */
        p = ptrs[i] = mallocx(1, 0);
        assert_ptr_not_null(p, "Unexpected mallocx() failure");

        /* Reset profiling. */
        do_prof_reset(0);

        /* Perform successful xallocx(). */
        sz = sallocx(p, 0);
        assert_zu_eq(xallocx(p, sz, 0, 0), sz,
            "Unexpected xallocx() failure");

        /* Perform unsuccessful xallocx(). */
        nsz = nallocx(sz+1, 0);
        assert_zu_eq(xallocx(p, nsz, 0, 0), sz,
            "Unexpected xallocx() success");
    }

    for (i = 0; i < NITER; i++) {
        /* dallocx. */
        dallocx(ptrs[i], 0);
    }

    set_prof_active(false);
    do_prof_reset(lg_prof_sample_orig);
}
TEST_END
#undef NITER

int
main(void)
{
    /* Intercept dumping prior to running any tests. */
    prof_dump_open = prof_dump_open_intercept;

    return (test(
        test_prof_reset_basic,
        test_prof_reset_cleanup,
        test_prof_reset,
        test_xallocx));
}