// malloc_debug_leak.cpp revision b5e08542840d5722defae3e750d49a7d5ce6ccc9
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *  * Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 *  * Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *    the documentation and/or other materials provided with the
13 *    distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29#include <arpa/inet.h>
30#include <dlfcn.h>
31#include <errno.h>
32#include <fcntl.h>
33#include <pthread.h>
34#include <stdarg.h>
35#include <stddef.h>
36#include <stdint.h>
37#include <stdio.h>
38#include <stdlib.h>
39#include <string.h>
40#include <sys/param.h>
41#include <sys/select.h>
42#include <sys/socket.h>
43#include <sys/system_properties.h>
44#include <sys/types.h>
45#include <sys/un.h>
46#include <unistd.h>
47#include <unwind.h>
48
49#include "debug_stacktrace.h"
50#include "malloc_debug_backtrace.h"
51#include "malloc_debug_common.h"
52#include "malloc_debug_disable.h"
53
54#include "private/bionic_macros.h"
55#include "private/libc_logging.h"
56#include "private/ScopedPthreadMutexLocker.h"
57
58// This file should be included into the build only when
59// MALLOC_LEAK_CHECK, or MALLOC_QEMU_INSTRUMENT, or both
60// macros are defined.
61#ifndef MALLOC_LEAK_CHECK
62#error MALLOC_LEAK_CHECK is not defined.
63#endif  // !MALLOC_LEAK_CHECK
64
65extern int gMallocLeakZygoteChild;
66extern HashTable* g_hash_table;
67extern const MallocDebug* g_malloc_dispatch;
68
69// =============================================================================
70// stack trace functions
71// =============================================================================
72
73#define GUARD               0x48151642
74#define DEBUG               0
75
76// =============================================================================
77// Structures
78// =============================================================================
79
// Header prepended (by leak_malloc) to every tracked allocation.
// 'entry' points at the shared HashEntry for this allocation's backtrace,
// except for memalign'd blocks where it points at the base allocation
// (see leak_memalign).  'guard' distinguishes the two: GUARD for normal
// blocks, MEMALIGN_GUARD for the memalign shim header.  The struct is
// kept MALLOC_ALIGNMENT-aligned so the user pointer that follows it
// stays correctly aligned.
struct AllocationEntry {
    HashEntry* entry;
    uint32_t guard;
} __attribute__((aligned(MALLOC_ALIGNMENT)));
84
85static inline AllocationEntry* to_header(void* mem) {
86  return reinterpret_cast<AllocationEntry*>(mem) - 1;
87}
88
89static inline const AllocationEntry* const_to_header(const void* mem) {
90  return reinterpret_cast<const AllocationEntry*>(mem) - 1;
91}
92
93// =============================================================================
94// Hash Table functions
95// =============================================================================
96
// Computes a multiplicative hash (djb2-style, factor 33) over a backtrace
// so that identical call stacks land in the same hash-table slot.  The low
// two bits of each frame address are discarded since they carry almost no
// entropy.  Returns 0 for a NULL backtrace.
static uint32_t get_hash(uintptr_t* backtrace, size_t numEntries) {
    if (backtrace == NULL) return 0;

    // Unsigned accumulator: the previous 'int' version invoked signed
    // integer overflow (undefined behavior in C/C++); uint32_t wrap-around
    // is well defined and matches the intended modular arithmetic.
    uint32_t hash = 0;
    for (size_t i = 0; i < numEntries; i++) {
        hash = (hash * 33) + (uint32_t)(backtrace[i] >> 2);
    }

    return hash;
}
108
109static HashEntry* find_entry(HashTable* table, int slot,
110                             uintptr_t* backtrace, size_t numEntries, size_t size) {
111    HashEntry* entry = table->slots[slot];
112    while (entry != NULL) {
113        //debug_log("backtrace: %p, entry: %p entry->backtrace: %p\n",
114        //        backtrace, entry, (entry != NULL) ? entry->backtrace : NULL);
115        /*
116         * See if the entry matches exactly.  We compare the "size" field,
117         * including the flag bits.
118         */
119        if (entry->size == size && entry->numEntries == numEntries &&
120                !memcmp(backtrace, entry->backtrace, numEntries * sizeof(uintptr_t))) {
121            return entry;
122        }
123
124        entry = entry->next;
125    }
126
127    return NULL;
128}
129
130static HashEntry* record_backtrace(uintptr_t* backtrace, size_t numEntries, size_t size) {
131    size_t hash = get_hash(backtrace, numEntries);
132    size_t slot = hash % HASHTABLE_SIZE;
133
134    if (size & SIZE_FLAG_MASK) {
135        debug_log("malloc_debug: allocation %zx exceeds bit width\n", size);
136        abort();
137    }
138
139    if (gMallocLeakZygoteChild) {
140        size |= SIZE_FLAG_ZYGOTE_CHILD;
141    }
142
143    HashEntry* entry = find_entry(g_hash_table, slot, backtrace, numEntries, size);
144
145    if (entry != NULL) {
146        entry->allocations++;
147    } else {
148        // create a new entry
149        entry = static_cast<HashEntry*>(g_malloc_dispatch->malloc(sizeof(HashEntry) + numEntries*sizeof(uintptr_t)));
150        if (!entry) {
151            return NULL;
152        }
153        entry->allocations = 1;
154        entry->slot = slot;
155        entry->prev = NULL;
156        entry->next = g_hash_table->slots[slot];
157        entry->numEntries = numEntries;
158        entry->size = size;
159
160        memcpy(entry->backtrace, backtrace, numEntries * sizeof(uintptr_t));
161
162        g_hash_table->slots[slot] = entry;
163
164        if (entry->next != NULL) {
165            entry->next->prev = entry;
166        }
167
168        // we just added an entry, increase the size of the hashtable
169        g_hash_table->count++;
170    }
171
172    return entry;
173}
174
175static int is_valid_entry(HashEntry* entry) {
176  if (entry != NULL) {
177    for (size_t i = 0; i < HASHTABLE_SIZE; ++i) {
178      HashEntry* e1 = g_hash_table->slots[i];
179      while (e1 != NULL) {
180        if (e1 == entry) {
181          return 1;
182        }
183        e1 = e1->next;
184      }
185    }
186  }
187  return 0;
188}
189
190static void remove_entry(HashEntry* entry) {
191  HashEntry* prev = entry->prev;
192  HashEntry* next = entry->next;
193
194  if (prev != NULL) entry->prev->next = next;
195  if (next != NULL) entry->next->prev = prev;
196
197  if (prev == NULL) {
198    // we are the head of the list. set the head to be next
199    g_hash_table->slots[entry->slot] = entry->next;
200  }
201
202  // we just removed and entry, decrease the size of the hashtable
203  g_hash_table->count--;
204}
205
206// =============================================================================
207// malloc fill functions
208// =============================================================================
209
210#define CHK_FILL_FREE           0xef
211#define CHK_SENTINEL_VALUE      0xeb
212
213extern "C" void* fill_calloc(size_t n_elements, size_t elem_size) {
214    return g_malloc_dispatch->calloc(n_elements, elem_size);
215}
216
217extern "C" void* fill_malloc(size_t bytes) {
218    void* buffer = g_malloc_dispatch->malloc(bytes);
219    if (buffer) {
220        memset(buffer, CHK_SENTINEL_VALUE, bytes);
221    }
222    return buffer;
223}
224
225extern "C" void fill_free(void* mem) {
226    size_t bytes = g_malloc_dispatch->malloc_usable_size(mem);
227    memset(mem, CHK_FILL_FREE, bytes);
228    g_malloc_dispatch->free(mem);
229}
230
231extern "C" void* fill_realloc(void* mem, size_t bytes) {
232    size_t oldSize = g_malloc_dispatch->malloc_usable_size(mem);
233    void* newMem = g_malloc_dispatch->realloc(mem, bytes);
234    if (newMem) {
235        // If this is larger than before, fill the extra with our pattern.
236        size_t newSize = g_malloc_dispatch->malloc_usable_size(newMem);
237        if (newSize > oldSize) {
238            memset(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(newMem)+oldSize), CHK_FILL_FREE, newSize-oldSize);
239        }
240    }
241    return newMem;
242}
243
244extern "C" void* fill_memalign(size_t alignment, size_t bytes) {
245    void* buffer = g_malloc_dispatch->memalign(alignment, bytes);
246    if (buffer) {
247        memset(buffer, CHK_SENTINEL_VALUE, bytes);
248    }
249    return buffer;
250}
251
252extern "C" size_t fill_malloc_usable_size(const void* mem) {
253    // Since we didn't allocate extra bytes before or after, we can
254    // report the normal usable size here.
255    return g_malloc_dispatch->malloc_usable_size(mem);
256}
257
258extern "C" struct mallinfo fill_mallinfo() {
259  return g_malloc_dispatch->mallinfo();
260}
261
262extern "C" int fill_posix_memalign(void** memptr, size_t alignment, size_t size) {
263  if (!powerof2(alignment)) {
264    return EINVAL;
265  }
266  int saved_errno = errno;
267  *memptr = fill_memalign(alignment, size);
268  errno = saved_errno;
269  return (*memptr != NULL) ? 0 : ENOMEM;
270}
271
272#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
273extern "C" void* fill_pvalloc(size_t bytes) {
274  size_t pagesize = getpagesize();
275  size_t size = BIONIC_ALIGN(bytes, pagesize);
276  if (size < bytes) { // Overflow
277    return NULL;
278  }
279  return fill_memalign(pagesize, size);
280}
281
282extern "C" void* fill_valloc(size_t size) {
283  return fill_memalign(getpagesize(), size);
284}
285#endif
286
287// =============================================================================
288// malloc leak functions
289// =============================================================================
290
// Guard word marking the shim header of a memalign'd block (see
// leak_memalign/leak_free).  Only ever read, so declare it const.
static const uint32_t MEMALIGN_GUARD = 0xA1A41520;
292
293extern "C" void* leak_malloc(size_t bytes) {
294    if (DebugCallsDisabled()) {
295        return g_malloc_dispatch->malloc(bytes);
296    }
297
298    // allocate enough space infront of the allocation to store the pointer for
299    // the alloc structure. This will making free'ing the structer really fast!
300
301    // 1. allocate enough memory and include our header
302    // 2. set the base pointer to be right after our header
303
304    size_t size = bytes + sizeof(AllocationEntry);
305    if (size < bytes) { // Overflow.
306        errno = ENOMEM;
307        return NULL;
308    }
309
310    void* base = g_malloc_dispatch->malloc(size);
311    if (base != NULL) {
312        ScopedPthreadMutexLocker locker(&g_hash_table->lock);
313
314        uintptr_t backtrace[BACKTRACE_SIZE];
315        size_t numEntries = GET_BACKTRACE(backtrace, BACKTRACE_SIZE);
316
317        AllocationEntry* header = reinterpret_cast<AllocationEntry*>(base);
318        header->entry = record_backtrace(backtrace, numEntries, bytes);
319        header->guard = GUARD;
320
321        // now increment base to point to after our header.
322        // this should just work since our header is 8 bytes.
323        base = reinterpret_cast<AllocationEntry*>(base) + 1;
324    }
325
326    return base;
327}
328
// Frees a block allocated by leak_malloc/leak_memalign: drops one
// reference on the shared backtrace HashEntry and removes it from the
// table once its allocation count reaches zero, then frees the underlying
// storage (header included).
extern "C" void leak_free(void* mem) {
  if (DebugCallsDisabled()) {
    return g_malloc_dispatch->free(mem);
  }

  if (mem == NULL) {
    return;
  }

  ScopedPthreadMutexLocker locker(&g_hash_table->lock);

  // check the guard to make sure it is valid
  AllocationEntry* header = to_header(mem);

  if (header->guard != GUARD) {
    // could be a memaligned block
    if (header->guard == MEMALIGN_GUARD) {
      // For memaligned blocks, header->entry points to the memory
      // allocated through leak_malloc.
      header = to_header(header->entry);
    }
  }

  // If the guard word is corrupt, fall back to scanning the whole table
  // to confirm 'entry' is at least a live HashEntry before trusting it.
  if (header->guard == GUARD || is_valid_entry(header->entry)) {
    // decrement the allocations
    HashEntry* entry = header->entry;
    entry->allocations--;
    if (entry->allocations <= 0) {
      // Last allocation with this backtrace: unlink and release the entry.
      remove_entry(entry);
      g_malloc_dispatch->free(entry);
    }

    // now free the memory!
    g_malloc_dispatch->free(header);
  } else {
    debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
              header->guard, header->entry);
  }
}
368
369extern "C" void* leak_calloc(size_t n_elements, size_t elem_size) {
370    if (DebugCallsDisabled()) {
371        return g_malloc_dispatch->calloc(n_elements, elem_size);
372    }
373
374    // Fail on overflow - just to be safe even though this code runs only
375    // within the debugging C library, not the production one.
376    if (n_elements && SIZE_MAX / n_elements < elem_size) {
377        errno = ENOMEM;
378        return NULL;
379    }
380    size_t size = n_elements * elem_size;
381    void* ptr  = leak_malloc(size);
382    if (ptr != NULL) {
383        memset(ptr, 0, size);
384    }
385    return ptr;
386}
387
// realloc with leak tracking.  NULL oldMem degenerates to leak_malloc;
// otherwise a fresh tracked block is allocated, the old contents copied
// over, and the old block freed.  A corrupt guard word makes the call
// fail with ENOMEM rather than touch unknown memory.
extern "C" void* leak_realloc(void* oldMem, size_t bytes) {
    if (DebugCallsDisabled()) {
        return g_malloc_dispatch->realloc(oldMem, bytes);
    }

    if (oldMem == NULL) {
        return leak_malloc(bytes);
    }

    void* newMem = NULL;
    AllocationEntry* header = to_header(oldMem);
    if (header->guard == MEMALIGN_GUARD) {
        // Get the real header.
        header = to_header(header->entry);
    } else if (header->guard != GUARD) {
        debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
                   header->guard, header->entry);
        errno = ENOMEM;
        return NULL;
    }

    newMem = leak_malloc(bytes);
    if (newMem != NULL) {
        // NOTE(review): for memalign'd blocks entry->size is the size of
        // the base allocation, not the bytes usable from oldMem (which sits
        // past the alignment padding), so the copy below may read past the
        // end of the old block in that case — TODO confirm against
        // leak_memalign's layout.
        size_t oldSize = header->entry->size & ~SIZE_FLAG_MASK;
        size_t copySize = (oldSize <= bytes) ? oldSize : bytes;
        memcpy(newMem, oldMem, copySize);
        leak_free(oldMem);
    }

    return newMem;
}
419
// memalign with leak tracking.  Alignments <= MALLOC_ALIGNMENT fall back
// to plain leak_malloc; larger ones over-allocate through leak_malloc and
// plant a shim header (guard == MEMALIGN_GUARD, entry == base allocation)
// just before the aligned pointer so leak_free/leak_realloc can recover
// the real block.
extern "C" void* leak_memalign(size_t alignment, size_t bytes) {
    if (DebugCallsDisabled()) {
        return g_malloc_dispatch->memalign(alignment, bytes);
    }

    // we can just use malloc
    if (alignment <= MALLOC_ALIGNMENT) {
        return leak_malloc(bytes);
    }

    // need to make sure it's a power of two
    if (!powerof2(alignment)) {
        alignment = BIONIC_ROUND_UP_POWER_OF_2(alignment);
    }

    // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes
    // we will align by at least MALLOC_ALIGNMENT bytes
    // and at most alignment-MALLOC_ALIGNMENT bytes
    size_t size = (alignment-MALLOC_ALIGNMENT) + bytes;
    if (size < bytes) { // Overflow.
        return NULL;
    }

    void* base = leak_malloc(size);
    if (base != NULL) {
        uintptr_t ptr = reinterpret_cast<uintptr_t>(base);
        if ((ptr % alignment) == 0) {
            // Already aligned; the normal GUARD header from leak_malloc
            // is in place, so return the base pointer unchanged.
            return base;
        }

        // align the pointer
        ptr += ((-ptr) % alignment);

        // Already allocated enough space for the header. This assumes
        // that the malloc alignment is at least 8, otherwise, this is
        // not guaranteed to have the space for the header.
        AllocationEntry* header = to_header(reinterpret_cast<void*>(ptr));
        header->guard = MEMALIGN_GUARD;
        header->entry = reinterpret_cast<HashEntry*>(base);

        return reinterpret_cast<void*>(ptr);
    }
    return base;
}
464
465extern "C" size_t leak_malloc_usable_size(const void* mem) {
466    if (DebugCallsDisabled()) {
467        return g_malloc_dispatch->malloc_usable_size(mem);
468    }
469
470    if (mem != NULL) {
471        // Check the guard to make sure it is valid.
472        const AllocationEntry* header = const_to_header((void*)mem);
473
474        if (header->guard == MEMALIGN_GUARD) {
475            // If this is a memalign'd pointer, then grab the header from
476            // entry.
477            header = const_to_header(header->entry);
478        } else if (header->guard != GUARD) {
479            debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
480                      header->guard, header->entry);
481            return 0;
482        }
483
484        size_t ret = g_malloc_dispatch->malloc_usable_size(header);
485        if (ret != 0) {
486            // The usable area starts at 'mem' and stops at 'header+ret'.
487            return reinterpret_cast<uintptr_t>(header) + ret - reinterpret_cast<uintptr_t>(mem);
488        }
489    }
490    return 0;
491}
492
493extern "C" struct mallinfo leak_mallinfo() {
494  return g_malloc_dispatch->mallinfo();
495}
496
497extern "C" int leak_posix_memalign(void** memptr, size_t alignment, size_t size) {
498  if (DebugCallsDisabled()) {
499    return g_malloc_dispatch->posix_memalign(memptr, alignment, size);
500  }
501
502  if (!powerof2(alignment)) {
503    return EINVAL;
504  }
505  int saved_errno = errno;
506  *memptr = leak_memalign(alignment, size);
507  errno = saved_errno;
508  return (*memptr != NULL) ? 0 : ENOMEM;
509}
510
511#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
512extern "C" void* leak_pvalloc(size_t bytes) {
513  if (DebugCallsDisabled()) {
514    return g_malloc_dispatch->pvalloc(bytes);
515  }
516
517  size_t pagesize = getpagesize();
518  size_t size = BIONIC_ALIGN(bytes, pagesize);
519  if (size < bytes) { // Overflow
520    return NULL;
521  }
522  return leak_memalign(pagesize, size);
523}
524
525extern "C" void* leak_valloc(size_t size) {
526  if (DebugCallsDisabled()) {
527    return g_malloc_dispatch->valloc(size);
528  }
529
530  return leak_memalign(getpagesize(), size);
531}
532#endif
533