// malloc_debug_leak.cpp revision 91570ce987ef93f9ba2fa663a5fee1bd2525a2ba
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *  * Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 *  * Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *    the documentation and/or other materials provided with the
13 *    distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29#include <arpa/inet.h>
30#include <dlfcn.h>
31#include <errno.h>
32#include <fcntl.h>
33#include <pthread.h>
34#include <stdarg.h>
35#include <stddef.h>
36#include <stdint.h>
37#include <stdio.h>
38#include <stdlib.h>
39#include <string.h>
40#include <sys/param.h>
41#include <sys/select.h>
42#include <sys/socket.h>
43#include <sys/system_properties.h>
44#include <sys/types.h>
45#include <sys/un.h>
46#include <unistd.h>
47#include <unwind.h>
48
49#include "debug_stacktrace.h"
50#include "malloc_debug_common.h"
51
52#include "private/bionic_macros.h"
53#include "private/libc_logging.h"
54#include "private/ScopedPthreadMutexLocker.h"
55
56// This file should be included into the build only when
57// MALLOC_LEAK_CHECK, or MALLOC_QEMU_INSTRUMENT, or both
58// macros are defined.
59#ifndef MALLOC_LEAK_CHECK
60#error MALLOC_LEAK_CHECK is not defined.
61#endif  // !MALLOC_LEAK_CHECK
62
63extern int gMallocLeakZygoteChild;
64extern HashTable* g_hash_table;
65extern const MallocDebug* g_malloc_dispatch;
66
67// =============================================================================
68// stack trace functions
69// =============================================================================
70
71#define GUARD               0x48151642
72#define DEBUG               0
73
74// =============================================================================
75// Structures
76// =============================================================================
77
// Header stored immediately in front of every leak-tracked allocation;
// to_header()/const_to_header() back up from the user pointer to reach it.
struct AllocationEntry {
    HashEntry* entry;   // hash-table entry for this allocation's backtrace;
                        // for MEMALIGN_GUARD blocks it instead holds the base
                        // pointer returned by leak_malloc (see leak_memalign)
    uint32_t guard;     // GUARD for normal blocks, MEMALIGN_GUARD for aligned ones
} __attribute__((aligned(MALLOC_ALIGNMENT)));
82
83static inline AllocationEntry* to_header(void* mem) {
84  return reinterpret_cast<AllocationEntry*>(mem) - 1;
85}
86
87static inline const AllocationEntry* const_to_header(const void* mem) {
88  return reinterpret_cast<const AllocationEntry*>(mem) - 1;
89}
90
91// =============================================================================
92// Hash Table functions
93// =============================================================================
94
// Hash a backtrace (array of return addresses) into a 32-bit value.
// Returns 0 when no backtrace is supplied.
//
// Fix: the accumulator was declared 'int', so 'hash * 33' could overflow
// signed arithmetic (undefined behavior) and the final value was narrowed
// through a signed type before being returned as uint32_t. Using uint32_t
// gives well-defined modular arithmetic with identical results on the
// common two's-complement targets.
static uint32_t get_hash(uintptr_t* backtrace, size_t numEntries) {
    if (backtrace == NULL) return 0;

    uint32_t hash = 0;
    for (size_t i = 0; i < numEntries; i++) {
        // Drop the low 2 bits: code addresses are at least 4-byte aligned,
        // so those bits carry no information.
        hash = (hash * 33) + (backtrace[i] >> 2);
    }

    return hash;
}
106
107static HashEntry* find_entry(HashTable* table, int slot,
108                             uintptr_t* backtrace, size_t numEntries, size_t size) {
109    HashEntry* entry = table->slots[slot];
110    while (entry != NULL) {
111        //debug_log("backtrace: %p, entry: %p entry->backtrace: %p\n",
112        //        backtrace, entry, (entry != NULL) ? entry->backtrace : NULL);
113        /*
114         * See if the entry matches exactly.  We compare the "size" field,
115         * including the flag bits.
116         */
117        if (entry->size == size && entry->numEntries == numEntries &&
118                !memcmp(backtrace, entry->backtrace, numEntries * sizeof(uintptr_t))) {
119            return entry;
120        }
121
122        entry = entry->next;
123    }
124
125    return NULL;
126}
127
128static HashEntry* record_backtrace(uintptr_t* backtrace, size_t numEntries, size_t size) {
129    size_t hash = get_hash(backtrace, numEntries);
130    size_t slot = hash % HASHTABLE_SIZE;
131
132    if (size & SIZE_FLAG_MASK) {
133        debug_log("malloc_debug: allocation %zx exceeds bit width\n", size);
134        abort();
135    }
136
137    if (gMallocLeakZygoteChild) {
138        size |= SIZE_FLAG_ZYGOTE_CHILD;
139    }
140
141    HashEntry* entry = find_entry(g_hash_table, slot, backtrace, numEntries, size);
142
143    if (entry != NULL) {
144        entry->allocations++;
145    } else {
146        // create a new entry
147        entry = static_cast<HashEntry*>(g_malloc_dispatch->malloc(sizeof(HashEntry) + numEntries*sizeof(uintptr_t)));
148        if (!entry) {
149            return NULL;
150        }
151        entry->allocations = 1;
152        entry->slot = slot;
153        entry->prev = NULL;
154        entry->next = g_hash_table->slots[slot];
155        entry->numEntries = numEntries;
156        entry->size = size;
157
158        memcpy(entry->backtrace, backtrace, numEntries * sizeof(uintptr_t));
159
160        g_hash_table->slots[slot] = entry;
161
162        if (entry->next != NULL) {
163            entry->next->prev = entry;
164        }
165
166        // we just added an entry, increase the size of the hashtable
167        g_hash_table->count++;
168    }
169
170    return entry;
171}
172
173static int is_valid_entry(HashEntry* entry) {
174  if (entry != NULL) {
175    for (size_t i = 0; i < HASHTABLE_SIZE; ++i) {
176      HashEntry* e1 = g_hash_table->slots[i];
177      while (e1 != NULL) {
178        if (e1 == entry) {
179          return 1;
180        }
181        e1 = e1->next;
182      }
183    }
184  }
185  return 0;
186}
187
188static void remove_entry(HashEntry* entry) {
189  HashEntry* prev = entry->prev;
190  HashEntry* next = entry->next;
191
192  if (prev != NULL) entry->prev->next = next;
193  if (next != NULL) entry->next->prev = prev;
194
195  if (prev == NULL) {
196    // we are the head of the list. set the head to be next
197    g_hash_table->slots[entry->slot] = entry->next;
198  }
199
200  // we just removed and entry, decrease the size of the hashtable
201  g_hash_table->count--;
202}
203
204// =============================================================================
205// malloc fill functions
206// =============================================================================
207
208#define CHK_FILL_FREE           0xef
209#define CHK_SENTINEL_VALUE      0xeb
210
211extern "C" void* fill_calloc(size_t n_elements, size_t elem_size) {
212    return g_malloc_dispatch->calloc(n_elements, elem_size);
213}
214
215extern "C" void* fill_malloc(size_t bytes) {
216    void* buffer = g_malloc_dispatch->malloc(bytes);
217    if (buffer) {
218        memset(buffer, CHK_SENTINEL_VALUE, bytes);
219    }
220    return buffer;
221}
222
223extern "C" void fill_free(void* mem) {
224    size_t bytes = g_malloc_dispatch->malloc_usable_size(mem);
225    memset(mem, CHK_FILL_FREE, bytes);
226    g_malloc_dispatch->free(mem);
227}
228
229extern "C" void* fill_realloc(void* mem, size_t bytes) {
230    size_t oldSize = g_malloc_dispatch->malloc_usable_size(mem);
231    void* newMem = g_malloc_dispatch->realloc(mem, bytes);
232    if (newMem) {
233        // If this is larger than before, fill the extra with our pattern.
234        size_t newSize = g_malloc_dispatch->malloc_usable_size(newMem);
235        if (newSize > oldSize) {
236            memset(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(newMem)+oldSize), CHK_FILL_FREE, newSize-oldSize);
237        }
238    }
239    return newMem;
240}
241
242extern "C" void* fill_memalign(size_t alignment, size_t bytes) {
243    void* buffer = g_malloc_dispatch->memalign(alignment, bytes);
244    if (buffer) {
245        memset(buffer, CHK_SENTINEL_VALUE, bytes);
246    }
247    return buffer;
248}
249
250extern "C" size_t fill_malloc_usable_size(const void* mem) {
251    // Since we didn't allocate extra bytes before or after, we can
252    // report the normal usable size here.
253    return g_malloc_dispatch->malloc_usable_size(mem);
254}
255
256extern "C" struct mallinfo fill_mallinfo() {
257  return g_malloc_dispatch->mallinfo();
258}
259
260extern "C" int fill_posix_memalign(void** memptr, size_t alignment, size_t size) {
261  if (!powerof2(alignment)) {
262    return EINVAL;
263  }
264  int saved_errno = errno;
265  *memptr = fill_memalign(alignment, size);
266  errno = saved_errno;
267  return (*memptr != NULL) ? 0 : ENOMEM;
268}
269
270extern "C" void* fill_pvalloc(size_t bytes) {
271  size_t pagesize = getpagesize();
272  size_t size = BIONIC_ALIGN(bytes, pagesize);
273  if (size < bytes) { // Overflow
274    return NULL;
275  }
276  return fill_memalign(pagesize, size);
277}
278
279extern "C" void* fill_valloc(size_t size) {
280  return fill_memalign(getpagesize(), size);
281}
282
283// =============================================================================
284// malloc leak functions
285// =============================================================================
286
// Guard value marking a block returned by leak_memalign (vs. GUARD for
// plain leak_malloc blocks). Only ever read, so make it const.
static const uint32_t MEMALIGN_GUARD = 0xA1A41520;
288
289extern "C" void* leak_malloc(size_t bytes) {
290    // allocate enough space infront of the allocation to store the pointer for
291    // the alloc structure. This will making free'ing the structer really fast!
292
293    // 1. allocate enough memory and include our header
294    // 2. set the base pointer to be right after our header
295
296    size_t size = bytes + sizeof(AllocationEntry);
297    if (size < bytes) { // Overflow.
298        errno = ENOMEM;
299        return NULL;
300    }
301
302    void* base = g_malloc_dispatch->malloc(size);
303    if (base != NULL) {
304        ScopedPthreadMutexLocker locker(&g_hash_table->lock);
305
306        uintptr_t backtrace[BACKTRACE_SIZE];
307        size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);
308
309        AllocationEntry* header = reinterpret_cast<AllocationEntry*>(base);
310        header->entry = record_backtrace(backtrace, numEntries, bytes);
311        header->guard = GUARD;
312
313        // now increment base to point to after our header.
314        // this should just work since our header is 8 bytes.
315        base = reinterpret_cast<AllocationEntry*>(base) + 1;
316    }
317
318    return base;
319}
320
// Free a pointer returned by leak_malloc or leak_memalign: validate the
// guard word, decrement the allocation count on the site's hash entry
// (releasing the entry when the count drops to zero), then free the
// underlying block. Bad guards are logged and the pointer is leaked
// rather than risking corruption.
extern "C" void leak_free(void* mem) {
  if (mem == NULL) {
    return;
  }

  ScopedPthreadMutexLocker locker(&g_hash_table->lock);

  // check the guard to make sure it is valid
  AllocationEntry* header = to_header(mem);

  if (header->guard != GUARD) {
    // could be a memaligned block
    if (header->guard == MEMALIGN_GUARD) {
      // For memaligned blocks, header->entry points to the memory
      // allocated through leak_malloc.
      header = to_header(header->entry);
    }
  }

  // Accept the block if the (possibly re-derived) guard matches, or —
  // as a fallback for a corrupted guard — if the entry pointer is still
  // present in the hash table.
  if (header->guard == GUARD || is_valid_entry(header->entry)) {
    // decrement the allocations
    HashEntry* entry = header->entry;
    entry->allocations--;
    if (entry->allocations <= 0) {
      // Last outstanding allocation from this site: drop its entry.
      remove_entry(entry);
      g_malloc_dispatch->free(entry);
    }

    // now free the memory!
    g_malloc_dispatch->free(header);
  } else {
    debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
              header->guard, header->entry);
  }
}
356
357extern "C" void* leak_calloc(size_t n_elements, size_t elem_size) {
358    // Fail on overflow - just to be safe even though this code runs only
359    // within the debugging C library, not the production one.
360    if (n_elements && SIZE_MAX / n_elements < elem_size) {
361        errno = ENOMEM;
362        return NULL;
363    }
364    size_t size = n_elements * elem_size;
365    void* ptr  = leak_malloc(size);
366    if (ptr != NULL) {
367        memset(ptr, 0, size);
368    }
369    return ptr;
370}
371
// realloc for leak tracking: allocate a fresh tracked block, copy
// min(old recorded size, bytes) bytes across, and free the old block.
// A pointer with an unrecognized guard is rejected with NULL/ENOMEM.
extern "C" void* leak_realloc(void* oldMem, size_t bytes) {
    if (oldMem == NULL) {
        // realloc(NULL, n) behaves like malloc(n).
        return leak_malloc(bytes);
    }

    void* newMem = NULL;
    AllocationEntry* header = to_header(oldMem);
    if (header->guard == MEMALIGN_GUARD) {
        // Get the real header.
        header = to_header(header->entry);
    } else if (header->guard != GUARD) {
        debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
                   header->guard, header->entry);
        errno = ENOMEM;
        return NULL;
    }

    newMem = leak_malloc(bytes);
    if (newMem != NULL) {
        // Recorded size with the flag bits masked off. NOTE(review): for a
        // memalign'd block this size includes the alignment slack added by
        // leak_memalign, so copySize can exceed the caller-visible size —
        // the copy stays inside the old allocation but may read padding.
        size_t oldSize = header->entry->size & ~SIZE_FLAG_MASK;
        size_t copySize = (oldSize <= bytes) ? oldSize : bytes;
        memcpy(newMem, oldMem, copySize);
        leak_free(oldMem);
    }

    return newMem;
}
399
// memalign for leak tracking. Over-allocates through leak_malloc, slides
// the returned pointer forward to the requested alignment, and stamps a
// MEMALIGN_GUARD header whose 'entry' field records the base pointer so
// leak_free can recover the real allocation.
extern "C" void* leak_memalign(size_t alignment, size_t bytes) {
    // we can just use malloc
    if (alignment <= MALLOC_ALIGNMENT) {
        return leak_malloc(bytes);
    }

    // need to make sure it's a power of two
    if (!powerof2(alignment)) {
        alignment = BIONIC_ROUND_UP_POWER_OF_2(alignment);
    }

    // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes
    // we will align by at least MALLOC_ALIGNMENT bytes
    // and at most alignment-MALLOC_ALIGNMENT bytes
    size_t size = (alignment-MALLOC_ALIGNMENT) + bytes;
    if (size < bytes) { // Overflow.
        return NULL;
    }

    void* base = leak_malloc(size);
    if (base != NULL) {
        uintptr_t ptr = reinterpret_cast<uintptr_t>(base);
        if ((ptr % alignment) == 0) {
            // Already aligned; keep the ordinary GUARD header as-is.
            return base;
        }

        // align the pointer: advance by the distance to the next multiple
        // of 'alignment' (unsigned arithmetic, so (-ptr) % alignment is
        // well-defined).
        ptr += ((-ptr) % alignment);

        // Already allocated enough space for the header. This assumes
        // that the malloc alignment is at least 8, otherwise, this is
        // not guaranteed to have the space for the header.
        AllocationEntry* header = to_header(reinterpret_cast<void*>(ptr));
        header->guard = MEMALIGN_GUARD;
        header->entry = reinterpret_cast<HashEntry*>(base);

        return reinterpret_cast<void*>(ptr);
    }
    return base;
}
440
441extern "C" size_t leak_malloc_usable_size(const void* mem) {
442    if (mem != NULL) {
443        // Check the guard to make sure it is valid.
444        const AllocationEntry* header = const_to_header((void*)mem);
445
446        if (header->guard == MEMALIGN_GUARD) {
447            // If this is a memalign'd pointer, then grab the header from
448            // entry.
449            header = const_to_header(header->entry);
450        } else if (header->guard != GUARD) {
451            debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
452                      header->guard, header->entry);
453            return 0;
454        }
455
456        size_t ret = g_malloc_dispatch->malloc_usable_size(header);
457        if (ret != 0) {
458            // The usable area starts at 'mem' and stops at 'header+ret'.
459            return reinterpret_cast<uintptr_t>(header) + ret - reinterpret_cast<uintptr_t>(mem);
460        }
461    }
462    return 0;
463}
464
465extern "C" struct mallinfo leak_mallinfo() {
466  return g_malloc_dispatch->mallinfo();
467}
468
469extern "C" int leak_posix_memalign(void** memptr, size_t alignment, size_t size) {
470  if (!powerof2(alignment)) {
471    return EINVAL;
472  }
473  int saved_errno = errno;
474  *memptr = leak_memalign(alignment, size);
475  errno = saved_errno;
476  return (*memptr != NULL) ? 0 : ENOMEM;
477}
478
479extern "C" void* leak_pvalloc(size_t bytes) {
480  size_t pagesize = getpagesize();
481  size_t size = BIONIC_ALIGN(bytes, pagesize);
482  if (size < bytes) { // Overflow
483    return NULL;
484  }
485  return leak_memalign(pagesize, size);
486}
487
488extern "C" void* leak_valloc(size_t size) {
489  return leak_memalign(getpagesize(), size);
490}
491