malloc_debug_leak.cpp revision 8e52e8fe83632c667521c1c8e4f640e94c09baed
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *  * Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 *  * Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *    the documentation and/or other materials provided with the
13 *    distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29#include <arpa/inet.h>
30#include <dlfcn.h>
31#include <errno.h>
32#include <fcntl.h>
33#include <pthread.h>
34#include <stdarg.h>
35#include <stddef.h>
36#include <stdint.h>
37#include <stdio.h>
38#include <stdlib.h>
39#include <string.h>
40#include <sys/select.h>
41#include <sys/socket.h>
42#include <sys/system_properties.h>
43#include <sys/types.h>
44#include <sys/un.h>
45#include <unistd.h>
46#include <unwind.h>
47
48#include "debug_stacktrace.h"
49#include "malloc_debug_common.h"
50
51#include "private/libc_logging.h"
52#include "private/ScopedPthreadMutexLocker.h"
53
54// This file should be included into the build only when
55// MALLOC_LEAK_CHECK, or MALLOC_QEMU_INSTRUMENT, or both
56// macros are defined.
57#ifndef MALLOC_LEAK_CHECK
58#error MALLOC_LEAK_CHECK is not defined.
59#endif  // !MALLOC_LEAK_CHECK
60
61extern int gMallocLeakZygoteChild;
62extern HashTable* g_hash_table;
63
64// =============================================================================
65// stack trace functions
66// =============================================================================
67
68#define GUARD               0x48151642
69#define DEBUG               0
70
71// =============================================================================
72// Structures
73// =============================================================================
74
// Header prepended to every allocation tracked by the leak checker.
// 'entry' points at the shared HashEntry recording this allocation's
// backtrace; 'guard' holds GUARD (or MEMALIGN_GUARD for blocks returned by
// leak_memalign) so leak_free/leak_realloc can validate pointers handed back.
struct AllocationEntry {
    HashEntry* entry;  // backtrace bookkeeping record for this allocation
    uint32_t guard;    // sentinel: GUARD or MEMALIGN_GUARD
} __attribute__((aligned(MALLOC_ALIGNMENT)));
79
// Steps back from a user pointer to the AllocationEntry header that
// leak_malloc placed immediately before it.
static inline AllocationEntry* to_header(void* mem) {
  return reinterpret_cast<AllocationEntry*>(mem) - 1;
}
83
// Const variant of to_header(): recovers the AllocationEntry header that
// precedes a user pointer without dropping const-ness.
static inline const AllocationEntry* const_to_header(const void* mem) {
  return reinterpret_cast<const AllocationEntry*>(mem) - 1;
}
87
88// =============================================================================
89// Hash Table functions
90// =============================================================================
91
// Computes a multiplicative (djb2-style, factor 33) hash over a backtrace.
// The low two bits of each frame address are discarded: code addresses are
// at least 4-byte aligned, so those bits carry no entropy.
// Returns 0 for a NULL backtrace.
static uint32_t get_hash(uintptr_t* backtrace, size_t numEntries) {
    if (backtrace == NULL) return 0;

    // Accumulate in an unsigned type: the previous 'int' accumulator could
    // overflow, which is undefined behavior for signed arithmetic. Unsigned
    // wrap-around is well-defined and matches the function's return type.
    uint32_t hash = 0;
    for (size_t i = 0; i < numEntries; i++) {
        hash = (hash * 33) + (backtrace[i] >> 2);
    }

    return hash;
}
103
104static HashEntry* find_entry(HashTable* table, int slot,
105                             uintptr_t* backtrace, size_t numEntries, size_t size) {
106    HashEntry* entry = table->slots[slot];
107    while (entry != NULL) {
108        //debug_log("backtrace: %p, entry: %p entry->backtrace: %p\n",
109        //        backtrace, entry, (entry != NULL) ? entry->backtrace : NULL);
110        /*
111         * See if the entry matches exactly.  We compare the "size" field,
112         * including the flag bits.
113         */
114        if (entry->size == size && entry->numEntries == numEntries &&
115                !memcmp(backtrace, entry->backtrace, numEntries * sizeof(uintptr_t))) {
116            return entry;
117        }
118
119        entry = entry->next;
120    }
121
122    return NULL;
123}
124
125static HashEntry* record_backtrace(uintptr_t* backtrace, size_t numEntries, size_t size) {
126    size_t hash = get_hash(backtrace, numEntries);
127    size_t slot = hash % HASHTABLE_SIZE;
128
129    if (size & SIZE_FLAG_MASK) {
130        debug_log("malloc_debug: allocation %zx exceeds bit width\n", size);
131        abort();
132    }
133
134    if (gMallocLeakZygoteChild) {
135        size |= SIZE_FLAG_ZYGOTE_CHILD;
136    }
137
138    HashEntry* entry = find_entry(g_hash_table, slot, backtrace, numEntries, size);
139
140    if (entry != NULL) {
141        entry->allocations++;
142    } else {
143        // create a new entry
144        entry = static_cast<HashEntry*>(Malloc(malloc)(sizeof(HashEntry) + numEntries*sizeof(uintptr_t)));
145        if (!entry) {
146            return NULL;
147        }
148        entry->allocations = 1;
149        entry->slot = slot;
150        entry->prev = NULL;
151        entry->next = g_hash_table->slots[slot];
152        entry->numEntries = numEntries;
153        entry->size = size;
154
155        memcpy(entry->backtrace, backtrace, numEntries * sizeof(uintptr_t));
156
157        g_hash_table->slots[slot] = entry;
158
159        if (entry->next != NULL) {
160            entry->next->prev = entry;
161        }
162
163        // we just added an entry, increase the size of the hashtable
164        g_hash_table->count++;
165    }
166
167    return entry;
168}
169
170static int is_valid_entry(HashEntry* entry) {
171  if (entry != NULL) {
172    for (size_t i = 0; i < HASHTABLE_SIZE; ++i) {
173      HashEntry* e1 = g_hash_table->slots[i];
174      while (e1 != NULL) {
175        if (e1 == entry) {
176          return 1;
177        }
178        e1 = e1->next;
179      }
180    }
181  }
182  return 0;
183}
184
185static void remove_entry(HashEntry* entry) {
186  HashEntry* prev = entry->prev;
187  HashEntry* next = entry->next;
188
189  if (prev != NULL) entry->prev->next = next;
190  if (next != NULL) entry->next->prev = prev;
191
192  if (prev == NULL) {
193    // we are the head of the list. set the head to be next
194    g_hash_table->slots[entry->slot] = entry->next;
195  }
196
197  // we just removed and entry, decrease the size of the hashtable
198  g_hash_table->count--;
199}
200
201// =============================================================================
202// malloc fill functions
203// =============================================================================
204
205#define CHK_FILL_FREE           0xef
206#define CHK_SENTINEL_VALUE      0xeb
207
208extern "C" void* fill_calloc(size_t n_elements, size_t elem_size) {
209    return Malloc(calloc)(n_elements, elem_size);
210}
211
212extern "C" void* fill_malloc(size_t bytes) {
213    void* buffer = Malloc(malloc)(bytes);
214    if (buffer) {
215        memset(buffer, CHK_SENTINEL_VALUE, bytes);
216    }
217    return buffer;
218}
219
220extern "C" void fill_free(void* mem) {
221    size_t bytes = Malloc(malloc_usable_size)(mem);
222    memset(mem, CHK_FILL_FREE, bytes);
223    Malloc(free)(mem);
224}
225
226extern "C" void* fill_realloc(void* mem, size_t bytes) {
227    size_t oldSize = Malloc(malloc_usable_size)(mem);
228    void* newMem = Malloc(realloc)(mem, bytes);
229    if (newMem) {
230        // If this is larger than before, fill the extra with our pattern.
231        size_t newSize = Malloc(malloc_usable_size)(newMem);
232        if (newSize > oldSize) {
233            memset(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(newMem)+oldSize), CHK_FILL_FREE, newSize-oldSize);
234        }
235    }
236    return newMem;
237}
238
239extern "C" void* fill_memalign(size_t alignment, size_t bytes) {
240    void* buffer = Malloc(memalign)(alignment, bytes);
241    if (buffer) {
242        memset(buffer, CHK_SENTINEL_VALUE, bytes);
243    }
244    return buffer;
245}
246
247extern "C" size_t fill_malloc_usable_size(const void* mem) {
248    // Since we didn't allocate extra bytes before or after, we can
249    // report the normal usable size here.
250    return Malloc(malloc_usable_size)(mem);
251}
252
253// =============================================================================
254// malloc leak functions
255// =============================================================================
256
257static uint32_t MEMALIGN_GUARD      = 0xA1A41520;
258
// malloc() replacement for the leak checker: records the caller's backtrace
// in the global hash table and prepends an AllocationEntry header so that
// leak_free can find the record again in O(1). Returns the address just
// past the header, or NULL on overflow / allocation failure.
extern "C" void* leak_malloc(size_t bytes) {
    // Allocate enough space in front of the allocation to store the pointer
    // to the hash-table entry. This makes freeing the structure really fast.

    // 1. allocate enough memory and include our header
    // 2. set the base pointer to be right after our header

    size_t size = bytes + sizeof(AllocationEntry);
    if (size < bytes) { // Overflow.
        return NULL;
    }

    void* base = Malloc(malloc)(size);
    if (base != NULL) {
        // The hash table is shared across threads; record_backtrace must run
        // under its lock.
        ScopedPthreadMutexLocker locker(&g_hash_table->lock);

        uintptr_t backtrace[BACKTRACE_SIZE];
        size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);

        AllocationEntry* header = reinterpret_cast<AllocationEntry*>(base);
        // NOTE(review): record_backtrace returns NULL on OOM; header->entry
        // is stored unchecked here and later dereferenced in leak_free --
        // confirm this is acceptable for the debug build.
        header->entry = record_backtrace(backtrace, numEntries, bytes);
        header->guard = GUARD;

        // now increment base to point to after our header.
        // this should just work since our header is 8 bytes.
        base = reinterpret_cast<AllocationEntry*>(base) + 1;
    }

    return base;
}
289
// free() replacement for the leak checker: validates the header guard
// (following the indirection for memalign'd blocks), decrements the
// allocation count for the recorded backtrace, and frees both the hash
// entry (when its count reaches zero) and the underlying block.
extern "C" void leak_free(void* mem) {
  if (mem == NULL) {
    return;
  }

  ScopedPthreadMutexLocker locker(&g_hash_table->lock);

  // check the guard to make sure it is valid
  AllocationEntry* header = to_header(mem);

  if (header->guard != GUARD) {
    // could be a memaligned block
    if (header->guard == MEMALIGN_GUARD) {
      // For memaligned blocks, header->entry points to the memory
      // allocated through leak_malloc; recover the real header from it.
      header = to_header(header->entry);
    }
  }

  // Accept the block if the (possibly re-resolved) guard matches, or --
  // as a fallback for a corrupted guard -- if the entry pointer is still
  // linked into the global hash table.
  if (header->guard == GUARD || is_valid_entry(header->entry)) {
    // decrement the allocations
    HashEntry* entry = header->entry;
    entry->allocations--;
    if (entry->allocations <= 0) {
      // Last live allocation with this backtrace: drop the record.
      remove_entry(entry);
      Malloc(free)(entry);
    }

    // now free the memory!
    Malloc(free)(header);
  } else {
    debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
              header->guard, header->entry);
  }
}
325
326extern "C" void* leak_calloc(size_t n_elements, size_t elem_size) {
327    // Fail on overflow - just to be safe even though this code runs only
328    // within the debugging C library, not the production one.
329    if (n_elements && SIZE_MAX / n_elements < elem_size) {
330        return NULL;
331    }
332    size_t size = n_elements * elem_size;
333    void* ptr  = leak_malloc(size);
334    if (ptr != NULL) {
335        memset(ptr, 0, size);
336    }
337    return ptr;
338}
339
// realloc() replacement for the leak checker. Implemented as
// allocate-copy-free so the new block gets its own backtrace record.
// Returns NULL (without freeing oldMem) if the header guard is invalid.
extern "C" void* leak_realloc(void* oldMem, size_t bytes) {
    if (oldMem == NULL) {
        return leak_malloc(bytes);
    }

    // NOTE(review): the header is inspected here without taking
    // g_hash_table->lock (leak_malloc/leak_free below do lock) -- confirm
    // this read is safe against concurrent frees.
    void* newMem = NULL;
    AllocationEntry* header = to_header(oldMem);
    if (header->guard == MEMALIGN_GUARD) {
        // Get the real header; for memalign'd blocks header->entry points
        // at the original leak_malloc block.
        header = to_header(header->entry);
    } else if (header->guard != GUARD) {
        debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
                   header->guard, header->entry);
        return NULL;
    }

    newMem = leak_malloc(bytes);
    if (newMem != NULL) {
        // Strip the flag bits to recover the size originally requested.
        // NOTE(review): for a memalign'd block this size includes the
        // alignment padding, while the copy source is the aligned pointer;
        // verify the copy cannot read past the end of the old block.
        size_t oldSize = header->entry->size & ~SIZE_FLAG_MASK;
        size_t copySize = (oldSize <= bytes) ? oldSize : bytes;
        memcpy(newMem, oldMem, copySize);
    }
    leak_free(oldMem);

    return newMem;
}
366
// memalign() replacement for the leak checker. Alignments up to
// MALLOC_ALIGNMENT fall through to leak_malloc; larger ones over-allocate
// and slide the returned pointer forward, stashing a MEMALIGN_GUARD header
// just before it that points back at the real leak_malloc block.
extern "C" void* leak_memalign(size_t alignment, size_t bytes) {
    // we can just use malloc
    if (alignment <= MALLOC_ALIGNMENT) {
        return leak_malloc(bytes);
    }

    // need to make sure it's a power of two
    // NOTE(review): this rounds a non-power-of-two alignment DOWN to the
    // next lower power of two, so the caller may get weaker alignment than
    // requested; __builtin_clz also takes an int-width argument, so this
    // assumes alignment fits in 32 bits -- confirm for 64-bit builds.
    if (alignment & (alignment-1)) {
        alignment = 1L << (31 - __builtin_clz(alignment));
    }

    // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes
    // we will align by at least MALLOC_ALIGNMENT bytes
    // and at most alignment-MALLOC_ALIGNMENT bytes
    size_t size = (alignment-MALLOC_ALIGNMENT) + bytes;
    if (size < bytes) { // Overflow.
        return NULL;
    }

    void* base = leak_malloc(size);
    if (base != NULL) {
        uintptr_t ptr = reinterpret_cast<uintptr_t>(base);
        if ((ptr % alignment) == 0) {
            // Already aligned; no forwarding header needed.
            return base;
        }

        // align the pointer (round up to the next multiple of 'alignment')
        ptr += ((-ptr) % alignment);

        // Already allocated enough space for the header. This assumes
        // that the malloc alignment is at least 8, otherwise, this is
        // not guaranteed to have the space for the header.
        AllocationEntry* header = to_header(reinterpret_cast<void*>(ptr));
        header->guard = MEMALIGN_GUARD;
        // Store the original leak_malloc pointer so leak_free can recover
        // the real header.
        header->entry = reinterpret_cast<HashEntry*>(base);

        return reinterpret_cast<void*>(ptr);
    }
    return base;
}
407
// malloc_usable_size() replacement for the leak checker: validates the
// guard (following the indirection for memalign'd blocks) and subtracts
// the header/alignment overhead from the allocator's raw usable size.
// Returns 0 for NULL, a bad guard, or a zero raw size.
extern "C" size_t leak_malloc_usable_size(const void* mem) {
    if (mem != NULL) {
        // Check the guard to make sure it is valid.
        const AllocationEntry* header = const_to_header((void*)mem);

        if (header->guard == MEMALIGN_GUARD) {
            // If this is a memalign'd pointer, then grab the real header
            // from entry (it points at the original leak_malloc block).
            header = const_to_header(header->entry);
        } else if (header->guard != GUARD) {
            debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
                      header->guard, header->entry);
            return 0;
        }

        size_t ret = Malloc(malloc_usable_size)(header);
        if (ret != 0) {
            // The usable area starts at 'mem' and stops at 'header+ret';
            // everything before 'mem' is header and/or alignment padding.
            return reinterpret_cast<uintptr_t>(header) + ret - reinterpret_cast<uintptr_t>(mem);
        }
    }
    return 0;
}
431