malloc_debug_check.cpp revision c701e5b3357b6484572d46f29c5d1e51063dfcbb
/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <arpa/inet.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdarg.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/param.h>
#include <sys/socket.h>
#include <sys/system_properties.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>
#include <unwind.h>

49#include "debug_mapinfo.h"
50#include "debug_stacktrace.h"
51#include "malloc_debug_common.h"
52#include "malloc_debug_disable.h"
53#include "private/bionic_macros.h"
54#include "private/libc_logging.h"
55#include "private/ScopedPthreadMutexLocker.h"
56
#define MAX_BACKTRACE_DEPTH 16
#define ALLOCATION_TAG      0x1ee7d00d
#define BACKLOG_TAG         0xbabecafe
#define FREE_POISON         0xa5
#define FRONT_GUARD         0xaa
#define FRONT_GUARD_LEN     (1<<5)
#define REAR_GUARD          0xbb
#define REAR_GUARD_LEN      (1<<5)

static void log_message(const char* format, ...) {
  va_list args;
  va_start(args, format);
  __libc_format_log_va_list(ANDROID_LOG_ERROR, "libc", format, args);
  va_end(args);
}

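// Every tracked allocation is laid out as [hdr_t | user data | ftr_t]: the
// header carries the tag, list links, size and allocation backtrace plus a
// pattern of FRONT_GUARD bytes, and the footer carries REAR_GUARD bytes.
// Overruns and underruns are detected by re-checking those patterns, and
// freed blocks are filled with FREE_POISON to catch use after free.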
struct hdr_t {
    uint32_t tag;
    void* base;  // Always points to the memory allocated using malloc.
                 // For memory allocated in chk_memalign, this value will
                 // not be the same as the location of the start of this
                 // structure.
    hdr_t* prev;
    hdr_t* next;
    uintptr_t bt[MAX_BACKTRACE_DEPTH];
    int bt_depth;
    uintptr_t freed_bt[MAX_BACKTRACE_DEPTH];
    int freed_bt_depth;
    size_t size;
    uint8_t front_guard[FRONT_GUARD_LEN];
} __attribute__((packed, aligned(MALLOC_ALIGNMENT)));

struct ftr_t {
    uint8_t rear_guard[REAR_GUARD_LEN];
} __attribute__((packed));

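// Pointer arithmetic helpers. The pointer handed back to callers starts
// immediately after the header, so user(hdr) == hdr + 1, and meta()/const_meta()
// step back over the header to recover the hdr_t from a user pointer. The
// footer sits right after the user data, i.e. at user(hdr) + hdr->size.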
static inline ftr_t* to_ftr(hdr_t* hdr) {
    return reinterpret_cast<ftr_t*>(reinterpret_cast<char*>(hdr + 1) + hdr->size);
}

static inline void* user(hdr_t* hdr) {
    return hdr + 1;
}

static inline hdr_t* meta(void* user) {
    return reinterpret_cast<hdr_t*>(user) - 1;
}

static inline const hdr_t* const_meta(const void* user) {
    return reinterpret_cast<const hdr_t*>(user) - 1;
}

// TODO: introduce a struct for this global state.
// There are basically two lists here, the regular list and the backlog list.
// We should be able to remove the duplication.
static unsigned g_allocated_block_count;
static hdr_t* tail;
static hdr_t* head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned backlog_num;
static hdr_t* backlog_tail;
static hdr_t* backlog_head;
static pthread_mutex_t backlog_lock = PTHREAD_MUTEX_INITIALIZER;

// This variable is set to the value of property libc.debug.malloc.backlog.
// It determines the size of the backlog we use to detect multiple frees.
static unsigned g_malloc_debug_backlog = 100;

__LIBC_HIDDEN__ HashTable* g_hash_table;
__LIBC_HIDDEN__ const MallocDebug* g_malloc_dispatch;

static inline void init_front_guard(hdr_t* hdr) {
    memset(hdr->front_guard, FRONT_GUARD, FRONT_GUARD_LEN);
}

static inline bool is_front_guard_valid(hdr_t* hdr) {
    for (size_t i = 0; i < FRONT_GUARD_LEN; i++) {
        if (hdr->front_guard[i] != FRONT_GUARD) {
            return false;
        }
    }
    return true;
}

static inline void init_rear_guard(hdr_t* hdr) {
    ftr_t* ftr = to_ftr(hdr);
    memset(ftr->rear_guard, REAR_GUARD, REAR_GUARD_LEN);
}

static inline bool is_rear_guard_valid(hdr_t* hdr) {
    unsigned i;
    int valid = 1;
    int first_mismatch = -1;
    ftr_t* ftr = to_ftr(hdr);
    for (i = 0; i < REAR_GUARD_LEN; i++) {
        if (ftr->rear_guard[i] != REAR_GUARD) {
            if (first_mismatch < 0)
                first_mismatch = i;
            valid = 0;
        } else if (first_mismatch >= 0) {
            log_message("+++ REAR GUARD MISMATCH [%d, %d)\n", first_mismatch, i);
            first_mismatch = -1;
        }
    }

    if (first_mismatch >= 0)
        log_message("+++ REAR GUARD MISMATCH [%d, %d)\n", first_mismatch, i);
    return valid;
}

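// Live allocations and backlogged (recently freed) blocks each sit on a
// doubly-linked list: add_locked() pushes at *head (most recent first) and
// del_locked() unlinks a block from anywhere in the list. Callers hold the
// corresponding mutex.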
static inline void add_locked(hdr_t* hdr, hdr_t** tail, hdr_t** head) {
    hdr->prev = NULL;
    hdr->next = *head;
    if (*head)
        (*head)->prev = hdr;
    else
        *tail = hdr;
    *head = hdr;
}

static inline int del_locked(hdr_t* hdr, hdr_t** tail, hdr_t** head) {
    if (hdr->prev) {
        hdr->prev->next = hdr->next;
    } else {
        *head = hdr->next;
    }
    if (hdr->next) {
        hdr->next->prev = hdr->prev;
    } else {
        *tail = hdr->prev;
    }
    return 0;
}

static inline void add(hdr_t* hdr, size_t size) {
    ScopedPthreadMutexLocker locker(&lock);
    hdr->tag = ALLOCATION_TAG;
    hdr->size = size;
    init_front_guard(hdr);
    init_rear_guard(hdr);
    ++g_allocated_block_count;
    add_locked(hdr, &tail, &head);
}

static inline int del(hdr_t* hdr) {
    if (hdr->tag != ALLOCATION_TAG) {
        return -1;
    }

    ScopedPthreadMutexLocker locker(&lock);
    del_locked(hdr, &tail, &head);
    --g_allocated_block_count;
    return 0;
}

static inline void poison(hdr_t* hdr) {
    memset(user(hdr), FREE_POISON, hdr->size);
}

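// A block parked on the backlog has had its user bytes overwritten with
// FREE_POISON; any byte that no longer matches means the caller wrote to the
// memory after freeing it.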
static bool was_used_after_free(hdr_t* hdr) {
    const uint8_t* data = reinterpret_cast<const uint8_t*>(user(hdr));
    for (size_t i = 0; i < hdr->size; i++) {
        if (data[i] != FREE_POISON) {
            return true;
        }
    }
    return false;
}

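// Hypothetical example of the kind of error these checks catch: a one-byte
// heap overrun such as
//   char* p = static_cast<char*>(malloc(10));
//   p[10] = 'x';   // clobbers the first REAR_GUARD byte
//   free(p);
// leaves a stale byte in the rear guard, so the next time this block is
// checked (when it is evicted from the backlog, or when the backlog is
// drained at finalize time) check_guards() below reports
// "HAS A CORRUPTED REAR GUARD" together with the allocation backtrace.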
/* returns 1 if valid, *safe == 1 if safe to dump stack */
static inline int check_guards(hdr_t* hdr, int* safe) {
    *safe = 1;
    if (!is_front_guard_valid(hdr)) {
        if (hdr->front_guard[0] == FRONT_GUARD) {
            log_message("+++ ALLOCATION %p SIZE %d HAS A CORRUPTED FRONT GUARD\n",
                       user(hdr), hdr->size);
        } else {
            log_message("+++ ALLOCATION %p HAS A CORRUPTED FRONT GUARD "\
                      "(NOT DUMPING STACKTRACE)\n", user(hdr));
            /* Allocation header is probably corrupt, do not print stack trace */
            *safe = 0;
        }
        return 0;
    }

    if (!is_rear_guard_valid(hdr)) {
        log_message("+++ ALLOCATION %p SIZE %d HAS A CORRUPTED REAR GUARD\n",
                   user(hdr), hdr->size);
        return 0;
    }

    return 1;
}

/* returns 1 if valid, *safe == 1 if safe to dump stack */
static inline int check_allocation_locked(hdr_t* hdr, int* safe) {
    int valid = 1;
    *safe = 1;

    if (hdr->tag != ALLOCATION_TAG && hdr->tag != BACKLOG_TAG) {
        log_message("+++ ALLOCATION %p HAS INVALID TAG %08x (NOT DUMPING STACKTRACE)\n",
                   user(hdr), hdr->tag);
        // Allocation header is probably corrupt, do not dequeue or dump stack
        // trace.
        *safe = 0;
        return 0;
    }

    if (hdr->tag == BACKLOG_TAG && was_used_after_free(hdr)) {
        log_message("+++ ALLOCATION %p SIZE %d WAS USED AFTER BEING FREED\n",
                   user(hdr), hdr->size);
        valid = 0;
        /* check the guards to see if it's safe to dump a stack trace */
        check_guards(hdr, safe);
    } else {
        valid = check_guards(hdr, safe);
    }

    if (!valid && *safe) {
        log_message("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n",
                        user(hdr), hdr->size);
        log_backtrace(hdr->bt, hdr->bt_depth);
        if (hdr->tag == BACKLOG_TAG) {
            log_message("+++ ALLOCATION %p SIZE %d FREED HERE:\n",
                       user(hdr), hdr->size);
            log_backtrace(hdr->freed_bt, hdr->freed_bt_depth);
        }
    }

    return valid;
}

static inline int del_and_check_locked(hdr_t* hdr,
                                       hdr_t** tail, hdr_t** head, unsigned* cnt,
                                       int* safe) {
    int valid = check_allocation_locked(hdr, safe);
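    // Note: 'safe' here is the out-parameter pointer itself, which is never
    // null at the existing call sites, so this branch always runs and the
    // block is always dequeued; *safe only controls whether backtraces get
    // logged inside check_allocation_locked().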
    if (safe) {
        (*cnt)--;
        del_locked(hdr, tail, head);
    }
    return valid;
}

static inline void del_from_backlog_locked(hdr_t* hdr) {
    int safe;
    del_and_check_locked(hdr,
                         &backlog_tail, &backlog_head, &backlog_num,
                         &safe);
    hdr->tag = 0; /* clear the tag */
}

static inline void del_from_backlog(hdr_t* hdr) {
    ScopedPthreadMutexLocker locker(&backlog_lock);
    del_from_backlog_locked(hdr);
}

static inline int del_leak(hdr_t* hdr, int* safe) {
    ScopedPthreadMutexLocker locker(&lock);
    return del_and_check_locked(hdr, &tail, &head, &g_allocated_block_count, safe);
}

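// Freed blocks are not returned to the underlying allocator right away.
// Instead they are retagged, poisoned, and parked on the backlog list so that
// double frees (the BACKLOG_TAG is still present) and writes after free (the
// poison pattern is disturbed) can be reported. Once the backlog grows beyond
// g_malloc_debug_backlog entries, the oldest blocks are checked and actually
// freed.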
static inline void add_to_backlog(hdr_t* hdr) {
    ScopedPthreadMutexLocker locker(&backlog_lock);
    hdr->tag = BACKLOG_TAG;
    backlog_num++;
    add_locked(hdr, &backlog_tail, &backlog_head);
    poison(hdr);
    /* If we've exceeded the maximum backlog, clear it up */
    while (backlog_num > g_malloc_debug_backlog) {
        hdr_t* gone = backlog_tail;
        del_from_backlog_locked(gone);
        g_malloc_dispatch->free(gone->base);
    }
}

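// The chk_* entry points below wrap the real allocator (g_malloc_dispatch).
// chk_malloc over-allocates by sizeof(hdr_t) + sizeof(ftr_t), fills in the
// guards and the allocation backtrace, links the block onto the live list,
// and hands the caller the user() portion.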
333extern "C" void* chk_malloc(size_t bytes) {
334//  log_message("%s: %s\n", __FILE__, __FUNCTION__);
335    if (DebugCallsDisabled()) {
336        return g_malloc_dispatch->malloc(bytes);
337    }
338
339    size_t size = sizeof(hdr_t) + bytes + sizeof(ftr_t);
340    if (size < bytes) { // Overflow
341        errno = ENOMEM;
342        return NULL;
343    }
344    hdr_t* hdr = static_cast<hdr_t*>(g_malloc_dispatch->malloc(size));
345    if (hdr) {
346        hdr->base = hdr;
347        hdr->bt_depth = get_backtrace(hdr->bt, MAX_BACKTRACE_DEPTH);
348        add(hdr, bytes);
349        return user(hdr);
350    }
351    return NULL;
352}
353
354extern "C" void* chk_memalign(size_t alignment, size_t bytes) {
355    if (DebugCallsDisabled()) {
356        return g_malloc_dispatch->memalign(alignment, bytes);
357    }
358
359    if (alignment <= MALLOC_ALIGNMENT) {
360        return chk_malloc(bytes);
361    }
362
363    // Make the alignment a power of two.
364    if (!powerof2(alignment)) {
365        alignment = BIONIC_ROUND_UP_POWER_OF_2(alignment);
366    }
367
368    // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes
369    // we will align by at least MALLOC_ALIGNMENT bytes
370    // and at most alignment-MALLOC_ALIGNMENT bytes
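    // For example (assuming MALLOC_ALIGNMENT == 8): with alignment == 32 we
    // reserve an extra 24 bytes; user() starts at least 8-byte aligned, so
    // rounding it up to the next 32-byte boundary consumes at most 24 of those
    // bytes, and the header still fits between base and the aligned pointer.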
    size_t size = (alignment-MALLOC_ALIGNMENT) + bytes;
    if (size < bytes) { // Overflow.
        return NULL;
    }

    void* base = g_malloc_dispatch->malloc(sizeof(hdr_t) + size + sizeof(ftr_t));
    if (base != NULL) {
        // Check that the actual pointer that will be returned is aligned
        // properly.
        uintptr_t ptr = reinterpret_cast<uintptr_t>(user(reinterpret_cast<hdr_t*>(base)));
        if ((ptr % alignment) != 0) {
            // Align the pointer.
            ptr += ((-ptr) % alignment);
        }

        hdr_t* hdr = meta(reinterpret_cast<void*>(ptr));
        hdr->base = base;
        hdr->bt_depth = get_backtrace(hdr->bt, MAX_BACKTRACE_DEPTH);
        add(hdr, bytes);
        return user(hdr);
    }
    return base;
}

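// chk_free validates the pointer via del(): a block whose tag is no longer
// ALLOCATION_TAG is either being freed twice (it still carries BACKLOG_TAG)
// or was never allocated through these wrappers, and both cases are logged
// with backtraces. A legitimate free records the freeing backtrace and parks
// the block on the backlog instead of releasing it immediately.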
395extern "C" void chk_free(void* ptr) {
396//  log_message("%s: %s\n", __FILE__, __FUNCTION__);
397    if (DebugCallsDisabled()) {
398        return g_malloc_dispatch->free(ptr);
399    }
400
401    if (!ptr) /* ignore free(NULL) */
402        return;
403
404    hdr_t* hdr = meta(ptr);
405
406    if (del(hdr) < 0) {
407        uintptr_t bt[MAX_BACKTRACE_DEPTH];
408        int depth = get_backtrace(bt, MAX_BACKTRACE_DEPTH);
409        if (hdr->tag == BACKLOG_TAG) {
410            log_message("+++ ALLOCATION %p SIZE %d BYTES MULTIPLY FREED!\n",
411                       user(hdr), hdr->size);
412            log_message("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n",
413                       user(hdr), hdr->size);
414            log_backtrace(hdr->bt, hdr->bt_depth);
415            /* hdr->freed_bt_depth should be nonzero here */
416            log_message("+++ ALLOCATION %p SIZE %d FIRST FREED HERE:\n",
417                       user(hdr), hdr->size);
418            log_backtrace(hdr->freed_bt, hdr->freed_bt_depth);
419            log_message("+++ ALLOCATION %p SIZE %d NOW BEING FREED HERE:\n",
420                       user(hdr), hdr->size);
421            log_backtrace(bt, depth);
422        } else {
423            log_message("+++ ALLOCATION %p IS CORRUPTED OR NOT ALLOCATED VIA TRACKER!\n",
424                       user(hdr));
425            log_backtrace(bt, depth);
426        }
427    } else {
428        hdr->freed_bt_depth = get_backtrace(hdr->freed_bt, MAX_BACKTRACE_DEPTH);
429        add_to_backlog(hdr);
430    }
431}
432
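// chk_realloc follows the same validation path as chk_free. Reallocating a
// block that is sitting in the backlog is reported, but the block is pulled
// back out of the backlog so the reallocation can still proceed. A block that
// came from chk_memalign (hdr->base != hdr) cannot simply be realloc'd in
// place, because the underlying allocation starts at base, so its contents
// are copied into a fresh allocation instead.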
433extern "C" void* chk_realloc(void* ptr, size_t bytes) {
434//  log_message("%s: %s\n", __FILE__, __FUNCTION__);
435    if (DebugCallsDisabled()) {
436        return g_malloc_dispatch->realloc(ptr, bytes);
437    }
438
439    if (!ptr) {
440        return chk_malloc(bytes);
441    }
442
443#ifdef REALLOC_ZERO_BYTES_FREE
444    if (!bytes) {
445        chk_free(ptr);
446        return NULL;
447    }
448#endif
449
450    hdr_t* hdr = meta(ptr);
451
452    if (del(hdr) < 0) {
453        uintptr_t bt[MAX_BACKTRACE_DEPTH];
454        int depth = get_backtrace(bt, MAX_BACKTRACE_DEPTH);
455        if (hdr->tag == BACKLOG_TAG) {
456            log_message("+++ REALLOCATION %p SIZE %d OF FREED MEMORY!\n",
457                       user(hdr), bytes, hdr->size);
458            log_message("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n",
459                       user(hdr), hdr->size);
460            log_backtrace(hdr->bt, hdr->bt_depth);
461            /* hdr->freed_bt_depth should be nonzero here */
462            log_message("+++ ALLOCATION %p SIZE %d FIRST FREED HERE:\n",
463                       user(hdr), hdr->size);
464            log_backtrace(hdr->freed_bt, hdr->freed_bt_depth);
465            log_message("+++ ALLOCATION %p SIZE %d NOW BEING REALLOCATED HERE:\n",
466                       user(hdr), hdr->size);
467            log_backtrace(bt, depth);
468
469             /* We take the memory out of the backlog and fall through so the
470             * reallocation below succeeds.  Since we didn't really free it, we
471             * can default to this behavior.
472             */
473            del_from_backlog(hdr);
474        } else {
475            log_message("+++ REALLOCATION %p SIZE %d IS CORRUPTED OR NOT ALLOCATED VIA TRACKER!\n",
476                       user(hdr), bytes);
477            log_backtrace(bt, depth);
478            // just get a whole new allocation and leak the old one
479            return g_malloc_dispatch->realloc(0, bytes);
480            // return realloc(user(hdr), bytes); // assuming it was allocated externally
481        }
482    }
483
484    size_t size = sizeof(hdr_t) + bytes + sizeof(ftr_t);
485    if (size < bytes) { // Overflow
486        errno = ENOMEM;
487        return NULL;
488    }
489    if (hdr->base != hdr) {
490        // An allocation from memalign, so create another allocation and
491        // copy the data out.
492        void* newMem = g_malloc_dispatch->malloc(size);
493        if (newMem == NULL) {
494            return NULL;
495        }
496        memcpy(newMem, hdr, sizeof(hdr_t) + hdr->size);
497        g_malloc_dispatch->free(hdr->base);
498        hdr = static_cast<hdr_t*>(newMem);
499    } else {
500        hdr = static_cast<hdr_t*>(g_malloc_dispatch->realloc(hdr, size));
501    }
502    if (hdr) {
503        hdr->base = hdr;
504        hdr->bt_depth = get_backtrace(hdr->bt, MAX_BACKTRACE_DEPTH);
505        add(hdr, bytes);
506        return user(hdr);
507    }
508    return NULL;
509}
510
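// chk_calloc computes nmemb * bytes itself (checking for overflow of both the
// multiplication and the added header/footer) and asks the real allocator for
// one zeroed block of the padded size.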
511extern "C" void* chk_calloc(size_t nmemb, size_t bytes) {
512//  log_message("%s: %s\n", __FILE__, __FUNCTION__);
513    if (DebugCallsDisabled()) {
514        return g_malloc_dispatch->calloc(nmemb, bytes);
515    }
516
517    size_t total_bytes = nmemb * bytes;
518    size_t size = sizeof(hdr_t) + total_bytes + sizeof(ftr_t);
519    if (size < total_bytes || (nmemb && SIZE_MAX / nmemb < bytes)) { // Overflow
520        errno = ENOMEM;
521        return NULL;
522    }
523    hdr_t* hdr = static_cast<hdr_t*>(g_malloc_dispatch->calloc(1, size));
524    if (hdr) {
525        hdr->base = hdr;
526        hdr->bt_depth = get_backtrace(hdr->bt, MAX_BACKTRACE_DEPTH);
527        add(hdr, total_bytes);
528        return user(hdr);
529    }
530    return NULL;
531}
532
533extern "C" size_t chk_malloc_usable_size(const void* ptr) {
534    if (DebugCallsDisabled()) {
535        return g_malloc_dispatch->malloc_usable_size(ptr);
536    }
537
538    // malloc_usable_size returns 0 for NULL and unknown blocks.
539    if (ptr == NULL)
540        return 0;
541
542    const hdr_t* hdr = const_meta(ptr);
543
544    // The sentinel tail is written just after the request block bytes
545    // so there is no extra room we can report here.
546    return hdr->size;
547}
548
549extern "C" struct mallinfo chk_mallinfo() {
550  return g_malloc_dispatch->mallinfo();
551}
552
553extern "C" int chk_posix_memalign(void** memptr, size_t alignment, size_t size) {
554  if (DebugCallsDisabled()) {
555    return g_malloc_dispatch->posix_memalign(memptr, alignment, size);
556  }
557
558  if (!powerof2(alignment)) {
559    return EINVAL;
560  }
561  int saved_errno = errno;
562  *memptr = chk_memalign(alignment, size);
563  errno = saved_errno;
564  return (*memptr != NULL) ? 0 : ENOMEM;
565}
566
567#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
568extern "C" void* chk_pvalloc(size_t bytes) {
569  if (DebugCallsDisabled()) {
570    return g_malloc_dispatch->pvalloc(bytes);
571  }
572
573  size_t pagesize = getpagesize();
574  size_t size = BIONIC_ALIGN(bytes, pagesize);
575  if (size < bytes) { // Overflow
576    return NULL;
577  }
578  return chk_memalign(pagesize, size);
579}
580
581extern "C" void* chk_valloc(size_t size) {
582  if (DebugCallsDisabled()) {
583    return g_malloc_dispatch->valloc(size);
584  }
585  return chk_memalign(getpagesize(), size);
586}
587#endif
588
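// Called from malloc_debug_finalize when running at debug level 10: every
// block still on the live list is reported as a leak together with its
// allocation backtrace, and the backlog is then drained so backlogged blocks
// get a final guard/poison check.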
static void ReportMemoryLeaks() {
  ScopedDisableDebugCalls disable;

  // Use /proc/self/exe link to obtain the program name for logging
  // purposes. If it's not available, we set it to "<unknown>".
  char exe[PATH_MAX];
  int count;
  if ((count = readlink("/proc/self/exe", exe, sizeof(exe) - 1)) == -1) {
    strlcpy(exe, "<unknown>", sizeof(exe));
  } else {
    exe[count] = '\0';
  }

  if (g_allocated_block_count == 0) {
    log_message("+++ %s did not leak", exe);
    return;
  }

  size_t index = 1;
  const size_t total = g_allocated_block_count;
  while (head != NULL) {
    int safe;
    hdr_t* block = head;
    log_message("+++ %s leaked block of size %d at %p (leak %d of %d)",
                exe, block->size, user(block), index++, total);
    if (del_leak(block, &safe)) {
      /* safe == 1, because the allocation is valid */
      log_backtrace(block->bt, block->bt_depth);
    }
  }

  while (backlog_head != NULL) {
    del_from_backlog(backlog_tail);
  }
}

pthread_key_t g_debug_calls_disabled;

extern "C" bool malloc_debug_initialize(HashTable* hash_table, const MallocDebug* malloc_dispatch) {
  g_hash_table = hash_table;
  g_malloc_dispatch = malloc_dispatch;

  pthread_key_create(&g_debug_calls_disabled, NULL);

  char debug_backlog[PROP_VALUE_MAX];
  if (__system_property_get("libc.debug.malloc.backlog", debug_backlog)) {
    g_malloc_debug_backlog = atoi(debug_backlog);
    info_log("%s: setting backlog length to %d\n", getprogname(), g_malloc_debug_backlog);
  }

  backtrace_startup();
  return true;
}

extern "C" void malloc_debug_finalize(int malloc_debug_level) {
  // We only track leaks at level 10.
  if (malloc_debug_level == 10) {
    ReportMemoryLeaks();
  }
  backtrace_shutdown();

  pthread_setspecific(g_debug_calls_disabled, NULL);
}