malloc_debug_leak.cpp revision 11837629693e520452672e0eae28d3ce71f80ed6
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *  * Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 *  * Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *    the documentation and/or other materials provided with the
13 *    distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29#include <arpa/inet.h>
30#include <dlfcn.h>
31#include <errno.h>
32#include <fcntl.h>
33#include <pthread.h>
34#include <stdarg.h>
35#include <stddef.h>
36#include <stdint.h>
37#include <stdio.h>
38#include <stdlib.h>
39#include <string.h>
40#include <sys/param.h>
41#include <sys/select.h>
42#include <sys/socket.h>
43#include <sys/system_properties.h>
44#include <sys/types.h>
45#include <sys/un.h>
46#include <unistd.h>
47#include <unwind.h>
48
49#include "debug_stacktrace.h"
50#include "malloc_debug_common.h"
51#include "malloc_debug_disable.h"
52
53#include "private/bionic_macros.h"
54#include "private/libc_logging.h"
55#include "private/ScopedPthreadMutexLocker.h"
56
// This file should be included in the build only when the
// MALLOC_LEAK_CHECK macro is defined; the #error below enforces this.
60#ifndef MALLOC_LEAK_CHECK
61#error MALLOC_LEAK_CHECK is not defined.
62#endif  // !MALLOC_LEAK_CHECK
63
64extern int gMallocLeakZygoteChild;
65extern HashTable* g_hash_table;
66extern const MallocDebug* g_malloc_dispatch;
67
68// =============================================================================
69// stack trace functions
70// =============================================================================
71
72#define GUARD               0x48151642
73#define DEBUG               0
74
75// =============================================================================
76// Structures
77// =============================================================================
78
// Header stored immediately in front of every leak-tracked allocation.
// 'entry' points at the shared hash-table record for this allocation's
// backtrace (or, for memaligned blocks, at the underlying allocation —
// see leak_memalign); 'guard' holds GUARD or MEMALIGN_GUARD and is used
// to detect corruption and to distinguish the two layouts. The alignment
// attribute keeps the user pointer that follows the header aligned to
// MALLOC_ALIGNMENT.
struct AllocationEntry {
    HashEntry* entry;
    uint32_t guard;
} __attribute__((aligned(MALLOC_ALIGNMENT)));
83
84static inline AllocationEntry* to_header(void* mem) {
85  return reinterpret_cast<AllocationEntry*>(mem) - 1;
86}
87
88static inline const AllocationEntry* const_to_header(const void* mem) {
89  return reinterpret_cast<const AllocationEntry*>(mem) - 1;
90}
91
92// =============================================================================
93// Hash Table functions
94// =============================================================================
95
// Hash a backtrace with the classic multiply-by-33 scheme over the frame
// addresses. The low two bits of each address are shifted away since they
// carry no entropy. Returns 0 for a NULL backtrace.
static uint32_t get_hash(uintptr_t* backtrace, size_t numEntries) {
    if (backtrace == NULL) return 0;

    // Use an unsigned accumulator: the multiply overflows routinely, and
    // signed-int overflow (the old 'int hash') is undefined behavior.
    uint32_t hash = 0;
    for (size_t i = 0; i < numEntries; i++) {
        hash = (hash * 33) + (uint32_t)(backtrace[i] >> 2);
    }

    return hash;
}
107
108static HashEntry* find_entry(HashTable* table, int slot,
109                             uintptr_t* backtrace, size_t numEntries, size_t size) {
110    HashEntry* entry = table->slots[slot];
111    while (entry != NULL) {
112        //debug_log("backtrace: %p, entry: %p entry->backtrace: %p\n",
113        //        backtrace, entry, (entry != NULL) ? entry->backtrace : NULL);
114        /*
115         * See if the entry matches exactly.  We compare the "size" field,
116         * including the flag bits.
117         */
118        if (entry->size == size && entry->numEntries == numEntries &&
119                !memcmp(backtrace, entry->backtrace, numEntries * sizeof(uintptr_t))) {
120            return entry;
121        }
122
123        entry = entry->next;
124    }
125
126    return NULL;
127}
128
129static HashEntry* record_backtrace(uintptr_t* backtrace, size_t numEntries, size_t size) {
130    size_t hash = get_hash(backtrace, numEntries);
131    size_t slot = hash % HASHTABLE_SIZE;
132
133    if (size & SIZE_FLAG_MASK) {
134        debug_log("malloc_debug: allocation %zx exceeds bit width\n", size);
135        abort();
136    }
137
138    if (gMallocLeakZygoteChild) {
139        size |= SIZE_FLAG_ZYGOTE_CHILD;
140    }
141
142    HashEntry* entry = find_entry(g_hash_table, slot, backtrace, numEntries, size);
143
144    if (entry != NULL) {
145        entry->allocations++;
146    } else {
147        // create a new entry
148        entry = static_cast<HashEntry*>(g_malloc_dispatch->malloc(sizeof(HashEntry) + numEntries*sizeof(uintptr_t)));
149        if (!entry) {
150            return NULL;
151        }
152        entry->allocations = 1;
153        entry->slot = slot;
154        entry->prev = NULL;
155        entry->next = g_hash_table->slots[slot];
156        entry->numEntries = numEntries;
157        entry->size = size;
158
159        memcpy(entry->backtrace, backtrace, numEntries * sizeof(uintptr_t));
160
161        g_hash_table->slots[slot] = entry;
162
163        if (entry->next != NULL) {
164            entry->next->prev = entry;
165        }
166
167        // we just added an entry, increase the size of the hashtable
168        g_hash_table->count++;
169    }
170
171    return entry;
172}
173
174static int is_valid_entry(HashEntry* entry) {
175  if (entry != NULL) {
176    for (size_t i = 0; i < HASHTABLE_SIZE; ++i) {
177      HashEntry* e1 = g_hash_table->slots[i];
178      while (e1 != NULL) {
179        if (e1 == entry) {
180          return 1;
181        }
182        e1 = e1->next;
183      }
184    }
185  }
186  return 0;
187}
188
189static void remove_entry(HashEntry* entry) {
190  HashEntry* prev = entry->prev;
191  HashEntry* next = entry->next;
192
193  if (prev != NULL) entry->prev->next = next;
194  if (next != NULL) entry->next->prev = prev;
195
196  if (prev == NULL) {
197    // we are the head of the list. set the head to be next
198    g_hash_table->slots[entry->slot] = entry->next;
199  }
200
201  // we just removed and entry, decrease the size of the hashtable
202  g_hash_table->count--;
203}
204
205// =============================================================================
206// malloc fill functions
207// =============================================================================
208
209#define CHK_FILL_FREE           0xef
210#define CHK_SENTINEL_VALUE      0xeb
211
212extern "C" void* fill_calloc(size_t n_elements, size_t elem_size) {
213    return g_malloc_dispatch->calloc(n_elements, elem_size);
214}
215
216extern "C" void* fill_malloc(size_t bytes) {
217    void* buffer = g_malloc_dispatch->malloc(bytes);
218    if (buffer) {
219        memset(buffer, CHK_SENTINEL_VALUE, bytes);
220    }
221    return buffer;
222}
223
224extern "C" void fill_free(void* mem) {
225    size_t bytes = g_malloc_dispatch->malloc_usable_size(mem);
226    memset(mem, CHK_FILL_FREE, bytes);
227    g_malloc_dispatch->free(mem);
228}
229
230extern "C" void* fill_realloc(void* mem, size_t bytes) {
231    size_t oldSize = g_malloc_dispatch->malloc_usable_size(mem);
232    void* newMem = g_malloc_dispatch->realloc(mem, bytes);
233    if (newMem) {
234        // If this is larger than before, fill the extra with our pattern.
235        size_t newSize = g_malloc_dispatch->malloc_usable_size(newMem);
236        if (newSize > oldSize) {
237            memset(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(newMem)+oldSize), CHK_FILL_FREE, newSize-oldSize);
238        }
239    }
240    return newMem;
241}
242
243extern "C" void* fill_memalign(size_t alignment, size_t bytes) {
244    void* buffer = g_malloc_dispatch->memalign(alignment, bytes);
245    if (buffer) {
246        memset(buffer, CHK_SENTINEL_VALUE, bytes);
247    }
248    return buffer;
249}
250
251extern "C" size_t fill_malloc_usable_size(const void* mem) {
252    // Since we didn't allocate extra bytes before or after, we can
253    // report the normal usable size here.
254    return g_malloc_dispatch->malloc_usable_size(mem);
255}
256
257extern "C" struct mallinfo fill_mallinfo() {
258  return g_malloc_dispatch->mallinfo();
259}
260
261extern "C" int fill_posix_memalign(void** memptr, size_t alignment, size_t size) {
262  if (!powerof2(alignment)) {
263    return EINVAL;
264  }
265  int saved_errno = errno;
266  *memptr = fill_memalign(alignment, size);
267  errno = saved_errno;
268  return (*memptr != NULL) ? 0 : ENOMEM;
269}
270
271#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
272extern "C" void* fill_pvalloc(size_t bytes) {
273  size_t pagesize = getpagesize();
274  size_t size = BIONIC_ALIGN(bytes, pagesize);
275  if (size < bytes) { // Overflow
276    return NULL;
277  }
278  return fill_memalign(pagesize, size);
279}
280
281extern "C" void* fill_valloc(size_t size) {
282  return fill_memalign(getpagesize(), size);
283}
284#endif
285
286// =============================================================================
287// malloc leak functions
288// =============================================================================
289
290static uint32_t MEMALIGN_GUARD      = 0xA1A41520;
291
292extern "C" void* leak_malloc(size_t bytes) {
293    if (DebugCallsDisabled()) {
294        return g_malloc_dispatch->malloc(bytes);
295    }
296
297    // allocate enough space infront of the allocation to store the pointer for
298    // the alloc structure. This will making free'ing the structer really fast!
299
300    // 1. allocate enough memory and include our header
301    // 2. set the base pointer to be right after our header
302
303    size_t size = bytes + sizeof(AllocationEntry);
304    if (size < bytes) { // Overflow.
305        errno = ENOMEM;
306        return NULL;
307    }
308
309    void* base = g_malloc_dispatch->malloc(size);
310    if (base != NULL) {
311        ScopedPthreadMutexLocker locker(&g_hash_table->lock);
312
313        uintptr_t backtrace[BACKTRACE_SIZE];
314        size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);
315
316        AllocationEntry* header = reinterpret_cast<AllocationEntry*>(base);
317        header->entry = record_backtrace(backtrace, numEntries, bytes);
318        header->guard = GUARD;
319
320        // now increment base to point to after our header.
321        // this should just work since our header is 8 bytes.
322        base = reinterpret_cast<AllocationEntry*>(base) + 1;
323    }
324
325    return base;
326}
327
// free() for the leak-check debug mode. Validates the guard word, follows a
// memalign pseudo-header to the real header if needed, decrements the
// backtrace entry's allocation count (dropping the entry at zero), and then
// releases the underlying allocation. Corrupted pointers are logged, not freed.
extern "C" void leak_free(void* mem) {
  if (DebugCallsDisabled()) {
    return g_malloc_dispatch->free(mem);
  }

  if (mem == NULL) {
    return;
  }

  ScopedPthreadMutexLocker locker(&g_hash_table->lock);

  // check the guard to make sure it is valid
  AllocationEntry* header = to_header(mem);

  if (header->guard != GUARD) {
    // could be a memaligned block
    if (header->guard == MEMALIGN_GUARD) {
      // For memaligned blocks, header->entry points to the memory
      // allocated through leak_malloc.
      header = to_header(header->entry);
    }
  }

  // Even with a trashed guard, the entry pointer may still reference a live
  // hash-table record; is_valid_entry() is a slow full-table scan used only
  // on this recovery path.
  if (header->guard == GUARD || is_valid_entry(header->entry)) {
    // decrement the allocations
    HashEntry* entry = header->entry;
    entry->allocations--;
    if (entry->allocations <= 0) {
      // Last outstanding allocation for this backtrace: remove and free the
      // hash-table entry itself.
      remove_entry(entry);
      g_malloc_dispatch->free(entry);
    }

    // now free the memory!
    g_malloc_dispatch->free(header);
  } else {
    debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
              header->guard, header->entry);
  }
}
367
368extern "C" void* leak_calloc(size_t n_elements, size_t elem_size) {
369    if (DebugCallsDisabled()) {
370        return g_malloc_dispatch->calloc(n_elements, elem_size);
371    }
372
373    // Fail on overflow - just to be safe even though this code runs only
374    // within the debugging C library, not the production one.
375    if (n_elements && SIZE_MAX / n_elements < elem_size) {
376        errno = ENOMEM;
377        return NULL;
378    }
379    size_t size = n_elements * elem_size;
380    void* ptr  = leak_malloc(size);
381    if (ptr != NULL) {
382        memset(ptr, 0, size);
383    }
384    return ptr;
385}
386
387extern "C" void* leak_realloc(void* oldMem, size_t bytes) {
388    if (DebugCallsDisabled()) {
389        return g_malloc_dispatch->realloc(oldMem, bytes);
390    }
391
392    if (oldMem == NULL) {
393        return leak_malloc(bytes);
394    }
395
396    void* newMem = NULL;
397    AllocationEntry* header = to_header(oldMem);
398    if (header->guard == MEMALIGN_GUARD) {
399        // Get the real header.
400        header = to_header(header->entry);
401    } else if (header->guard != GUARD) {
402        debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
403                   header->guard, header->entry);
404        errno = ENOMEM;
405        return NULL;
406    }
407
408    newMem = leak_malloc(bytes);
409    if (newMem != NULL) {
410        size_t oldSize = header->entry->size & ~SIZE_FLAG_MASK;
411        size_t copySize = (oldSize <= bytes) ? oldSize : bytes;
412        memcpy(newMem, oldMem, copySize);
413        leak_free(oldMem);
414    }
415
416    return newMem;
417}
418
// memalign() for the leak-check debug mode. Alignments no larger than
// MALLOC_ALIGNMENT reduce to leak_malloc(); larger ones over-allocate,
// align the pointer by hand, and plant a MEMALIGN_GUARD pseudo-header just
// before the aligned pointer that records the underlying allocation so
// leak_free()/leak_realloc() can recover the real header.
extern "C" void* leak_memalign(size_t alignment, size_t bytes) {
    if (DebugCallsDisabled()) {
        return g_malloc_dispatch->memalign(alignment, bytes);
    }

    // we can just use malloc
    if (alignment <= MALLOC_ALIGNMENT) {
        return leak_malloc(bytes);
    }

    // need to make sure it's a power of two
    if (!powerof2(alignment)) {
        alignment = BIONIC_ROUND_UP_POWER_OF_2(alignment);
    }

    // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes
    // we will align by at least MALLOC_ALIGNMENT bytes
    // and at most alignment-MALLOC_ALIGNMENT bytes
    size_t size = (alignment-MALLOC_ALIGNMENT) + bytes;
    if (size < bytes) { // Overflow.
        return NULL;
    }

    void* base = leak_malloc(size);
    if (base != NULL) {
        uintptr_t ptr = reinterpret_cast<uintptr_t>(base);
        if ((ptr % alignment) == 0) {
            // Already aligned: no pseudo-header needed; leak_free() will see
            // the regular GUARD header in front of 'base'.
            return base;
        }

        // align the pointer: (-ptr) % alignment is the distance up to the
        // next multiple of 'alignment' (unsigned negation, branch-free).
        ptr += ((-ptr) % alignment);

        // Already allocated enough space for the header. This assumes
        // that the malloc alignment is at least 8, otherwise, this is
        // not guaranteed to have the space for the header.
        AllocationEntry* header = to_header(reinterpret_cast<void*>(ptr));
        header->guard = MEMALIGN_GUARD;
        header->entry = reinterpret_cast<HashEntry*>(base);

        return reinterpret_cast<void*>(ptr);
    }
    return base;
}
463
464extern "C" size_t leak_malloc_usable_size(const void* mem) {
465    if (DebugCallsDisabled()) {
466        return g_malloc_dispatch->malloc_usable_size(mem);
467    }
468
469    if (mem != NULL) {
470        // Check the guard to make sure it is valid.
471        const AllocationEntry* header = const_to_header((void*)mem);
472
473        if (header->guard == MEMALIGN_GUARD) {
474            // If this is a memalign'd pointer, then grab the header from
475            // entry.
476            header = const_to_header(header->entry);
477        } else if (header->guard != GUARD) {
478            debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
479                      header->guard, header->entry);
480            return 0;
481        }
482
483        size_t ret = g_malloc_dispatch->malloc_usable_size(header);
484        if (ret != 0) {
485            // The usable area starts at 'mem' and stops at 'header+ret'.
486            return reinterpret_cast<uintptr_t>(header) + ret - reinterpret_cast<uintptr_t>(mem);
487        }
488    }
489    return 0;
490}
491
492extern "C" struct mallinfo leak_mallinfo() {
493  return g_malloc_dispatch->mallinfo();
494}
495
496extern "C" int leak_posix_memalign(void** memptr, size_t alignment, size_t size) {
497  if (DebugCallsDisabled()) {
498    return g_malloc_dispatch->posix_memalign(memptr, alignment, size);
499  }
500
501  if (!powerof2(alignment)) {
502    return EINVAL;
503  }
504  int saved_errno = errno;
505  *memptr = leak_memalign(alignment, size);
506  errno = saved_errno;
507  return (*memptr != NULL) ? 0 : ENOMEM;
508}
509
510#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
511extern "C" void* leak_pvalloc(size_t bytes) {
512  if (DebugCallsDisabled()) {
513    return g_malloc_dispatch->pvalloc(bytes);
514  }
515
516  size_t pagesize = getpagesize();
517  size_t size = BIONIC_ALIGN(bytes, pagesize);
518  if (size < bytes) { // Overflow
519    return NULL;
520  }
521  return leak_memalign(pagesize, size);
522}
523
524extern "C" void* leak_valloc(size_t size) {
525  if (DebugCallsDisabled()) {
526    return g_malloc_dispatch->valloc(size);
527  }
528
529  return leak_memalign(getpagesize(), size);
530}
531#endif
532