prof.h revision 9b0cbf0850b130a9b0a8c58bd10b2926b2083510
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct prof_bt_s prof_bt_t;
typedef struct prof_cnt_s prof_cnt_t;
typedef struct prof_thr_cnt_s prof_thr_cnt_t;
typedef struct prof_ctx_s prof_ctx_t;
typedef struct prof_tdata_s prof_tdata_t;

/* Option defaults. */
#ifdef JEMALLOC_PROF
# define PROF_PREFIX_DEFAULT "jeprof"
#else
# define PROF_PREFIX_DEFAULT ""
#endif
#define LG_PROF_SAMPLE_DEFAULT 19
#define LG_PROF_INTERVAL_DEFAULT -1

/*
 * Hard limit on stack backtrace depth.  The version of prof_backtrace() that
 * is based on __builtin_return_address() necessarily has a hard-coded number
 * of backtrace frame handlers, and should be kept in sync with this setting.
 */
#define PROF_BT_MAX 128

/* Maximum number of backtraces to store in each per thread LRU cache. */
#define PROF_TCMAX 1024

/* Initial hash table size. */
#define PROF_CKH_MINITEMS 64

/* Size of memory buffer to use when writing dump files. */
#define PROF_DUMP_BUFSIZE 65536

/* Size of stack-allocated buffer used by prof_printf(). */
#define PROF_PRINTF_BUFSIZE 128

/*
 * Number of mutexes shared among all ctx's.  No space is allocated for these
 * unless profiling is enabled, so it's okay to over-provision.
 */
#define PROF_NCTX_LOCKS 1024

/*
 * prof_tdata pointers close to NULL are used to encode state information that
 * is used for cleaning up during thread shutdown.
 */
#define PROF_TDATA_STATE_REINCARNATED ((prof_tdata_t *)(uintptr_t)1)
#define PROF_TDATA_STATE_PURGATORY ((prof_tdata_t *)(uintptr_t)2)
#define PROF_TDATA_STATE_MAX PROF_TDATA_STATE_PURGATORY

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct prof_bt_s {
        /* Backtrace, stored as len program counters. */
        void **vec;
        unsigned len;
};

#ifdef JEMALLOC_PROF_LIBGCC
/* Data structure passed to libgcc _Unwind_Backtrace() callback functions. */
typedef struct {
        prof_bt_t *bt;
        unsigned nignore;
        unsigned max;
} prof_unwind_data_t;
#endif

struct prof_cnt_s {
        /*
         * Profiling counters.  An allocation/deallocation pair can operate on
         * different prof_thr_cnt_t objects that are linked into the same
         * prof_ctx_t cnts_ql, so it is possible for the cur* counters to go
         * negative.  In principle it is possible for the *bytes counters to
         * overflow/underflow, but a general solution would require something
         * like 128-bit counters; this implementation doesn't bother to solve
         * that problem.
         */
        int64_t curobjs;
        int64_t curbytes;
        uint64_t accumobjs;
        uint64_t accumbytes;
};

struct prof_thr_cnt_s {
        /* Linkage into prof_ctx_t's cnts_ql. */
        ql_elm(prof_thr_cnt_t) cnts_link;

        /* Linkage into thread's LRU. */
        ql_elm(prof_thr_cnt_t) lru_link;

        /*
         * Associated context.  If a thread frees an object that it did not
         * allocate, it is possible that the context is not cached in the
         * thread's hash table, in which case it must be able to look up the
         * context, insert a new prof_thr_cnt_t into the thread's hash table,
         * and link it into the prof_ctx_t's cnts_ql.
         */
        prof_ctx_t *ctx;

        /*
         * Threads use memory barriers to update the counters.  Since there is
         * only ever one writer, the only challenge is for the reader to get a
         * consistent read of the counters.
         *
         * The writer uses this series of operations:
         *
         * 1) Increment epoch to an odd number.
         * 2) Update counters.
         * 3) Increment epoch to an even number.
         *
         * The reader must assure 1) that the epoch is even while it reads the
         * counters, and 2) that the epoch doesn't change between the time it
         * starts and finishes reading the counters.
         */
        unsigned epoch;

        /* Profiling counters. */
        prof_cnt_t cnts;
};
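
/*
 * Editorial sketch (not part of the original header), illustrating the reader
 * side of the epoch protocol described above.  The reader_snapshot() name is
 * hypothetical, and mb_read() stands in for a read memory barrier analogous
 * to the mb_write() calls used by the writers later in this header; only
 * mb_write() appears here, so mb_read() is an assumption about what a real
 * reader would need for load ordering.
 *
 *      static void
 *      reader_snapshot(const prof_thr_cnt_t *thr_cnt, prof_cnt_t *out)
 *      {
 *              unsigned start;
 *
 *              do {
 *                      start = thr_cnt->epoch;
 *                      mb_read();              // hypothetical read barrier
 *                      *out = thr_cnt->cnts;   // copy all four counters
 *                      mb_read();              // hypothetical read barrier
 *              } while ((start & 1) != 0 || thr_cnt->epoch != start);
 *              // The loop exits only if the epoch was even (no writer was
 *              // mid-update) and did not change while the counters were read.
 *      }
 */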

struct prof_ctx_s {
        /* Associated backtrace. */
        prof_bt_t *bt;

        /* Protects nlimbo, cnt_merged, and cnts_ql. */
        malloc_mutex_t *lock;

        /*
         * Number of threads that currently cause this ctx to be in a state of
         * limbo due to one of:
         *   - Initializing per thread counters associated with this ctx.
         *   - Preparing to destroy this ctx.
         *   - Dumping a heap profile that includes this ctx.
         * nlimbo must be 1 (single destroyer) in order to safely destroy the
         * ctx.
         */
        unsigned nlimbo;

        /* Temporary storage for summation during dump. */
        prof_cnt_t cnt_summed;

        /* When threads exit, they merge their stats into cnt_merged. */
        prof_cnt_t cnt_merged;

        /*
         * List of profile counters, one for each thread that has allocated in
         * this context.
         */
        ql_head(prof_thr_cnt_t) cnts_ql;

        /* Linkage for list of contexts to be dumped. */
        ql_elm(prof_ctx_t) dump_link;
};
typedef ql_head(prof_ctx_t) prof_ctx_list_t;

struct prof_tdata_s {
        /*
         * Hash of (prof_bt_t *)-->(prof_thr_cnt_t *).  Each thread keeps a
         * cache of backtraces, with associated thread-specific prof_thr_cnt_t
         * objects.  Other threads may read the prof_thr_cnt_t contents, but no
         * others will ever write them.
         *
         * Upon thread exit, the thread must merge all the prof_thr_cnt_t
         * counter data into the associated prof_ctx_t objects, and unlink/free
         * the prof_thr_cnt_t objects.
         */
        ckh_t bt2cnt;

        /* LRU for contents of bt2cnt. */
        ql_head(prof_thr_cnt_t) lru_ql;

        /* Backtrace vector, used for calls to prof_backtrace(). */
        void **vec;

        /* Sampling state. */
        uint64_t prng_state;
        uint64_t threshold;
        uint64_t accum;

        /* State used to avoid dumping while operating on prof internals. */
        bool enq;
        bool enq_idump;
        bool enq_gdump;
};

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool opt_prof;
/*
 * Even if opt_prof is true, sampling can be temporarily disabled by setting
 * opt_prof_active to false.  No locking is used when updating opt_prof_active,
 * so there are no guarantees regarding how long it will take for all threads
 * to notice state changes.
 */
extern bool opt_prof_active;
extern size_t opt_lg_prof_sample;       /* Mean bytes between samples. */
extern ssize_t opt_lg_prof_interval;    /* lg(prof_interval). */
extern bool opt_prof_gdump;             /* High-water memory dumping. */
extern bool opt_prof_final;             /* Final profile dumping. */
extern bool opt_prof_leak;              /* Dump leak summary at exit. */
extern bool opt_prof_accum;             /* Report cumulative bytes. */
extern char opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/*
 * Profile dump interval, measured in bytes allocated.  Each arena triggers a
 * profile dump when it reaches this threshold.  The effect is that the
 * interval between profile dumps averages prof_interval, though the actual
 * interval between dumps will tend to be sporadic, and the interval will be a
 * maximum of approximately (prof_interval * narenas).
 */
extern uint64_t prof_interval;
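
/*
 * Editorial note: opt_lg_prof_interval is the base-2 log of prof_interval, so
 * opt_lg_prof_interval == 30, for example, yields prof_interval == 1 GiB.
 * Per the comment above, with narenas == 4 dumps would then occur on average
 * once per 1 GiB of allocation overall, but a particular dump can lag by up
 * to roughly 4 GiB because each arena counts toward the threshold
 * independently.
 */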

void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt, unsigned nignore);
prof_thr_cnt_t *prof_lookup(prof_bt_t *bt);
#ifdef JEMALLOC_JET
size_t prof_bt_count(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
#endif
void prof_idump(void);
bool prof_mdump(const char *filename);
void prof_gdump(void);
prof_tdata_t *prof_tdata_init(void);
void prof_tdata_cleanup(void *arg);
void prof_boot0(void);
void prof_boot1(void);
bool prof_boot2(void);
void prof_prefork(void);
void prof_postfork_parent(void);
void prof_postfork_child(void);

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#define PROF_ALLOC_PREP(nignore, size, ret) do {                        \
        prof_tdata_t *prof_tdata;                                       \
        prof_bt_t bt;                                                   \
                                                                        \
        assert(size == s2u(size));                                      \
                                                                        \
        prof_tdata = prof_tdata_get(true);                              \
        if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX) { \
                if (prof_tdata != NULL)                                 \
                        ret = (prof_thr_cnt_t *)(uintptr_t)1U;          \
                else                                                    \
                        ret = NULL;                                     \
                break;                                                  \
        }                                                               \
                                                                        \
        if (opt_prof_active == false) {                                 \
                /* Sampling is currently inactive, so avoid sampling. */\
                ret = (prof_thr_cnt_t *)(uintptr_t)1U;                  \
        } else if (opt_lg_prof_sample == 0) {                           \
                /* Don't bother with sampling logic, since sampling */  \
                /* interval is 1. */                                    \
                bt_init(&bt, prof_tdata->vec);                          \
                prof_backtrace(&bt, nignore);                           \
                ret = prof_lookup(&bt);                                 \
        } else {                                                        \
                if (prof_tdata->threshold == 0) {                       \
                        /* Initialize.  Seed the prng differently for */\
                        /* each thread. */                              \
                        prof_tdata->prng_state =                        \
                            (uint64_t)(uintptr_t)&size;                 \
                        prof_sample_threshold_update(prof_tdata);       \
                }                                                       \
                                                                        \
                /* Determine whether to capture a backtrace based on */ \
                /* whether size is enough for prof_tdata->accum to */   \
                /* reach prof_tdata->threshold.  However, delay */      \
                /* updating these variables until prof_{m,re}alloc(), */\
                /* because we don't know for sure that the */           \
                /* allocation will succeed. */                          \
                /* */                                                   \
                /* Use subtraction rather than addition to avoid */     \
                /* potential integer overflow. */                       \
                if (size >= prof_tdata->threshold -                     \
                    prof_tdata->accum) {                                \
                        bt_init(&bt, prof_tdata->vec);                  \
                        prof_backtrace(&bt, nignore);                   \
                        ret = prof_lookup(&bt);                         \
                } else                                                  \
                        ret = (prof_thr_cnt_t *)(uintptr_t)1U;          \
        }                                                               \
} while (0)
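
/*
 * Editorial usage sketch (not part of the original header).  A malloc()-like
 * path is expected to run PROF_ALLOC_PREP() on the usable size before
 * allocating, and to report the result via prof_malloc() only once the
 * allocation has succeeded, since the macro deliberately defers the
 * accum/threshold updates to prof_{m,re}alloc().  The do_malloc() and
 * imalloc() names below are placeholders for the caller's actual allocation
 * path, and the first argument (1) is presumably the number of stack frames
 * prof_backtrace() should skip.
 *
 *      static void *
 *      do_malloc(size_t size)
 *      {
 *              size_t usize = s2u(size);
 *              prof_thr_cnt_t *cnt;
 *              void *ret;
 *
 *              PROF_ALLOC_PREP(1, usize, cnt);
 *              if (cnt == NULL)
 *                      return (NULL);  // prof_tdata could not be created
 *              ret = imalloc(usize);   // placeholder allocation call
 *              if (ret == NULL)
 *                      return (NULL);
 *              prof_malloc(ret, usize, cnt);   // commit the counter updates
 *              return (ret);
 *      }
 */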

#ifndef JEMALLOC_ENABLE_INLINE
malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)

prof_tdata_t *prof_tdata_get(bool create);
void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
prof_ctx_t *prof_ctx_get(const void *ptr);
void prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
bool prof_sample_accum_update(size_t size);
void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt);
void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
    size_t old_usize, prof_ctx_t *old_ctx);
void prof_free(const void *ptr, size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_PROF_C_))
/* Thread-specific backtrace cache, used to reduce bt2ctx contention. */
malloc_tsd_externs(prof_tdata, prof_tdata_t *)
malloc_tsd_funcs(JEMALLOC_INLINE, prof_tdata, prof_tdata_t *, NULL,
    prof_tdata_cleanup)

JEMALLOC_INLINE prof_tdata_t *
prof_tdata_get(bool create)
{
        prof_tdata_t *prof_tdata;

        cassert(config_prof);

        prof_tdata = *prof_tdata_tsd_get();
        if (create && prof_tdata == NULL)
                prof_tdata = prof_tdata_init();

        return (prof_tdata);
}

JEMALLOC_INLINE void
prof_sample_threshold_update(prof_tdata_t *prof_tdata)
{
        /*
         * The body of this function is compiled out unless heap profiling is
         * enabled, so that it is possible to compile jemalloc with floating
         * point support completely disabled.  Avoiding floating point code is
         * important on memory-constrained systems, but it also enables a
         * workaround for versions of glibc that don't properly save/restore
         * floating point registers during dynamic lazy symbol loading (which
         * internally calls into whatever malloc implementation happens to be
         * integrated into the application).  Note that some compilers (e.g.
         * gcc 4.8) may use floating point registers for fast memory moves, so
         * jemalloc must be compiled with such optimizations disabled (e.g.
         * -mno-sse) in order for the workaround to be complete.
         */
#ifdef JEMALLOC_PROF
        uint64_t r;
        double u;

        cassert(config_prof);

        /*
         * Compute sample threshold as a geometrically distributed random
         * variable with mean (2^opt_lg_prof_sample).
         *
         *                          __        __
         *                         |  log(u)  |                     1
         * prof_tdata->threshold = | -------- |, where p = -------------------
         *                         | log(1-p) |             opt_lg_prof_sample
         *                                                 2
         *
         * For more information on the math, see:
         *
         *   Non-Uniform Random Variate Generation
         *   Luc Devroye
         *   Springer-Verlag, New York, 1986
         *   pp 500
         *   (http://luc.devroye.org/rnbookindex.html)
         */
        prng64(r, 53, prof_tdata->prng_state,
            UINT64_C(6364136223846793005), UINT64_C(1442695040888963407));
        u = (double)r * (1.0/9007199254740992.0L);
        prof_tdata->threshold = (uint64_t)(log(u) /
            log(1.0 - (1.0 / (double)((uint64_t)1U << opt_lg_prof_sample))))
            + (uint64_t)1U;
#endif
}
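
/*
 * Editorial worked example: with the default opt_lg_prof_sample ==
 * LG_PROF_SAMPLE_DEFAULT == 19, p == 2^-19, so the threshold drawn above is a
 * geometric variate with mean 1/p == 2^19 == 524288 bytes; on average one
 * backtrace is captured per 512 KiB allocated.  Taking ceil(log(u)/log(1-p))
 * for uniform u in (0,1) is the standard inversion method for sampling a
 * geometric distribution, which is why the 53-bit prng output r is first
 * mapped into (0,1) by dividing by 2^53 == 9007199254740992.
 */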

JEMALLOC_INLINE prof_ctx_t *
prof_ctx_get(const void *ptr)
{
        prof_ctx_t *ret;
        arena_chunk_t *chunk;

        cassert(config_prof);
        assert(ptr != NULL);

        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        if (chunk != ptr) {
                /* Region. */
                ret = arena_prof_ctx_get(ptr);
        } else
                ret = huge_prof_ctx_get(ptr);

        return (ret);
}

JEMALLOC_INLINE void
prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
        arena_chunk_t *chunk;

        cassert(config_prof);
        assert(ptr != NULL);

        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
        if (chunk != ptr) {
                /* Region. */
                arena_prof_ctx_set(ptr, ctx);
        } else
                huge_prof_ctx_set(ptr, ctx);
}

JEMALLOC_INLINE bool
prof_sample_accum_update(size_t size)
{
        prof_tdata_t *prof_tdata;

        cassert(config_prof);
        /* Sampling logic is unnecessary if the interval is 1. */
        assert(opt_lg_prof_sample != 0);

        prof_tdata = prof_tdata_get(false);
        if ((uintptr_t)prof_tdata <= (uintptr_t)PROF_TDATA_STATE_MAX)
                return (true);

        /* Take care to avoid integer overflow. */
        if (size >= prof_tdata->threshold - prof_tdata->accum) {
                prof_tdata->accum -= (prof_tdata->threshold - size);
                /* Compute new sample threshold. */
                prof_sample_threshold_update(prof_tdata);
                while (prof_tdata->accum >= prof_tdata->threshold) {
                        prof_tdata->accum -= prof_tdata->threshold;
                        prof_sample_threshold_update(prof_tdata);
                }
                return (false);
        } else {
                prof_tdata->accum += size;
                return (true);
        }
}
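
/*
 * Editorial note: the guard in prof_sample_accum_update() is the overflow-safe
 * form of "prof_tdata->accum + size >= prof_tdata->threshold".  Because accum
 * is always kept below threshold, threshold - accum cannot underflow, whereas
 * accum + size could wrap for very large requests.  For example, with
 * threshold == 1000 and accum == 900, a request of size == 150 satisfies
 * 150 >= 100, so the allocation is sampled and accum becomes
 * 900 - (1000 - 150) == 50 before the next threshold is drawn.
 */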

JEMALLOC_INLINE void
prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt)
{

        cassert(config_prof);
        assert(ptr != NULL);
        assert(usize == isalloc(ptr, true));

        if (opt_lg_prof_sample != 0) {
                if (prof_sample_accum_update(usize)) {
                        /*
                         * Don't sample.  For malloc()-like allocation, it is
                         * always possible to tell in advance how large an
                         * object's usable size will be, so there should never
                         * be a difference between the usize passed to
                         * PROF_ALLOC_PREP() and prof_malloc().
                         */
                        assert((uintptr_t)cnt == (uintptr_t)1U);
                }
        }

        if ((uintptr_t)cnt > (uintptr_t)1U) {
                prof_ctx_set(ptr, cnt->ctx);

                cnt->epoch++;
                /*********/
                mb_write();
                /*********/
                cnt->cnts.curobjs++;
                cnt->cnts.curbytes += usize;
                if (opt_prof_accum) {
                        cnt->cnts.accumobjs++;
                        cnt->cnts.accumbytes += usize;
                }
                /*********/
                mb_write();
                /*********/
                cnt->epoch++;
                /*********/
                mb_write();
                /*********/
        } else
                prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
}

JEMALLOC_INLINE void
prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
    size_t old_usize, prof_ctx_t *old_ctx)
{
        prof_thr_cnt_t *told_cnt;

        cassert(config_prof);
        assert(ptr != NULL || (uintptr_t)cnt <= (uintptr_t)1U);

        if (ptr != NULL) {
                assert(usize == isalloc(ptr, true));
                if (opt_lg_prof_sample != 0) {
                        if (prof_sample_accum_update(usize)) {
                                /*
                                 * Don't sample.  The usize passed to
                                 * PROF_ALLOC_PREP() was larger than what
                                 * actually got allocated, so a backtrace was
                                 * captured for this allocation, even though
                                 * its actual usize was insufficient to cross
                                 * the sample threshold.
                                 */
                                cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
                        }
                }
        }

        if ((uintptr_t)old_ctx > (uintptr_t)1U) {
                told_cnt = prof_lookup(old_ctx->bt);
                if (told_cnt == NULL) {
                        /*
                         * It's too late to propagate OOM for this realloc(),
                         * so operate directly on old_ctx->cnt_merged.
                         */
                        malloc_mutex_lock(old_ctx->lock);
                        old_ctx->cnt_merged.curobjs--;
                        old_ctx->cnt_merged.curbytes -= old_usize;
                        malloc_mutex_unlock(old_ctx->lock);
                        told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;
                }
        } else
                told_cnt = (prof_thr_cnt_t *)(uintptr_t)1U;

        if ((uintptr_t)told_cnt > (uintptr_t)1U)
                told_cnt->epoch++;
        if ((uintptr_t)cnt > (uintptr_t)1U) {
                prof_ctx_set(ptr, cnt->ctx);
                cnt->epoch++;
        } else if (ptr != NULL)
                prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
        /*********/
        mb_write();
        /*********/
        if ((uintptr_t)told_cnt > (uintptr_t)1U) {
                told_cnt->cnts.curobjs--;
                told_cnt->cnts.curbytes -= old_usize;
        }
        if ((uintptr_t)cnt > (uintptr_t)1U) {
                cnt->cnts.curobjs++;
                cnt->cnts.curbytes += usize;
                if (opt_prof_accum) {
                        cnt->cnts.accumobjs++;
                        cnt->cnts.accumbytes += usize;
                }
        }
        /*********/
        mb_write();
        /*********/
        if ((uintptr_t)told_cnt > (uintptr_t)1U)
                told_cnt->epoch++;
        if ((uintptr_t)cnt > (uintptr_t)1U)
                cnt->epoch++;
        /*********/
        mb_write();     /* Not strictly necessary. */
}

JEMALLOC_INLINE void
prof_free(const void *ptr, size_t size)
{
        prof_ctx_t *ctx = prof_ctx_get(ptr);

        cassert(config_prof);

        if ((uintptr_t)ctx > (uintptr_t)1) {
                prof_thr_cnt_t *tcnt;
                assert(size == isalloc(ptr, true));
                tcnt = prof_lookup(ctx->bt);

                if (tcnt != NULL) {
                        tcnt->epoch++;
                        /*********/
                        mb_write();
                        /*********/
                        tcnt->cnts.curobjs--;
                        tcnt->cnts.curbytes -= size;
                        /*********/
                        mb_write();
                        /*********/
                        tcnt->epoch++;
                        /*********/
                        mb_write();
                        /*********/
                } else {
                        /*
                         * OOM during free() cannot be propagated, so operate
                         * directly on ctx->cnt_merged.
                         */
                        malloc_mutex_lock(ctx->lock);
                        ctx->cnt_merged.curobjs--;
                        ctx->cnt_merged.curbytes -= size;
                        malloc_mutex_unlock(ctx->lock);
                }
        }
}
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/