1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 *  * Redistributions of source code must retain the above copyright
9 *    notice, this list of conditions and the following disclaimer.
10 *  * Redistributions in binary form must reproduce the above copyright
11 *    notice, this list of conditions and the following disclaimer in
12 *    the documentation and/or other materials provided with the
13 *    distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28
29#include <arpa/inet.h>
30#include <dlfcn.h>
31#include <errno.h>
32#include <fcntl.h>
33#include <pthread.h>
34#include <stdarg.h>
35#include <stddef.h>
36#include <stdint.h>
37#include <stdio.h>
38#include <stdlib.h>
39#include <string.h>
40#include <sys/select.h>
41#include <sys/socket.h>
42#include <sys/system_properties.h>
43#include <sys/types.h>
44#include <sys/un.h>
45#include <unistd.h>
46#include <unwind.h>
47
48#include "debug_stacktrace.h"
49#include "dlmalloc.h"
50#include "libc_logging.h"
51#include "malloc_debug_common.h"
52#include "ScopedPthreadMutexLocker.h"
53
54// This file should be included into the build only when
55// MALLOC_LEAK_CHECK, or MALLOC_QEMU_INSTRUMENT, or both
56// macros are defined.
57#ifndef MALLOC_LEAK_CHECK
58#error MALLOC_LEAK_CHECK is not defined.
59#endif  // !MALLOC_LEAK_CHECK
60
61// Global variables defined in malloc_debug_common.c
62extern int gMallocLeakZygoteChild;
63extern pthread_mutex_t gAllocationsMutex;
64extern HashTable gHashTable;
65
66// =============================================================================
67// stack trace functions
68// =============================================================================
69
70#define GUARD               0x48151642
71#define DEBUG               0
72
73// =============================================================================
74// Structures
75// =============================================================================
76
// Header prepended by leak_malloc to every tracked allocation.  'entry'
// points at the HashEntry recording the allocation backtrace, and 'guard'
// holds GUARD.  For memalign'd blocks (see leak_memalign) 'guard' is
// MEMALIGN_GUARD instead and 'entry' stores the original leak_malloc
// pointer.  Aligned to MALLOC_ALIGNMENT so the user pointer that follows
// the header keeps malloc's alignment guarantee.
struct AllocationEntry {
    HashEntry* entry;
    uint32_t guard;
} __attribute__((aligned(MALLOC_ALIGNMENT)));
81
82static inline AllocationEntry* to_header(void* mem) {
83  return reinterpret_cast<AllocationEntry*>(mem) - 1;
84}
85
86static inline const AllocationEntry* const_to_header(const void* mem) {
87  return reinterpret_cast<const AllocationEntry*>(mem) - 1;
88}
89
90// =============================================================================
91// Hash Table functions
92// =============================================================================
93
// Hash a backtrace: classic multiply-by-33 accumulation over the frame
// addresses.  Each address is shifted right by two since the low bits of
// instruction addresses carry little entropy.  A NULL backtrace hashes
// to 0.
//
// The accumulator is uint32_t rather than int: unsigned arithmetic wraps
// with defined behavior, whereas signed overflow in 'hash * 33' would be
// undefined behavior, and the function returns uint32_t anyway.
static uint32_t get_hash(uintptr_t* backtrace, size_t numEntries) {
    if (backtrace == NULL) return 0;

    uint32_t hash = 0;
    for (size_t i = 0; i < numEntries; i++) {
        hash = (hash * 33) + (backtrace[i] >> 2);
    }

    return hash;
}
105
106static HashEntry* find_entry(HashTable* table, int slot,
107                             uintptr_t* backtrace, size_t numEntries, size_t size) {
108    HashEntry* entry = table->slots[slot];
109    while (entry != NULL) {
110        //debug_log("backtrace: %p, entry: %p entry->backtrace: %p\n",
111        //        backtrace, entry, (entry != NULL) ? entry->backtrace : NULL);
112        /*
113         * See if the entry matches exactly.  We compare the "size" field,
114         * including the flag bits.
115         */
116        if (entry->size == size && entry->numEntries == numEntries &&
117                !memcmp(backtrace, entry->backtrace, numEntries * sizeof(uintptr_t))) {
118            return entry;
119        }
120
121        entry = entry->next;
122    }
123
124    return NULL;
125}
126
// Look up (or create) the HashEntry for this backtrace/size pair and
// bump its allocation count.  Callers serialize access through
// gAllocationsMutex (see leak_malloc).  Returns NULL only when
// allocating a brand-new entry fails.
static HashEntry* record_backtrace(uintptr_t* backtrace, size_t numEntries, size_t size) {
    size_t hash = get_hash(backtrace, numEntries);
    size_t slot = hash % HASHTABLE_SIZE;

    // The top bits of 'size' are reserved for flags; an allocation big
    // enough to touch them cannot be recorded faithfully, so bail hard.
    if (size & SIZE_FLAG_MASK) {
        debug_log("malloc_debug: allocation %zx exceeds bit width\n", size);
        abort();
    }

    // Tag allocations made after forking from the zygote so reports can
    // separate them from memory inherited from the zygote itself.
    if (gMallocLeakZygoteChild) {
        size |= SIZE_FLAG_ZYGOTE_CHILD;
    }

    HashEntry* entry = find_entry(&gHashTable, slot, backtrace, numEntries, size);

    if (entry != NULL) {
        entry->allocations++;
    } else {
        // Create a new entry; HashEntry carries a trailing array of
        // 'numEntries' backtrace frames, allocated in one chunk.
        entry = static_cast<HashEntry*>(dlmalloc(sizeof(HashEntry) + numEntries*sizeof(uintptr_t)));
        if (!entry) {
            return NULL;
        }
        entry->allocations = 1;
        entry->slot = slot;
        entry->prev = NULL;
        entry->next = gHashTable.slots[slot];
        entry->numEntries = numEntries;
        entry->size = size;

        memcpy(entry->backtrace, backtrace, numEntries * sizeof(uintptr_t));

        // Push onto the head of the slot's doubly-linked chain.
        gHashTable.slots[slot] = entry;

        if (entry->next != NULL) {
            entry->next->prev = entry;
        }

        // we just added an entry, increase the size of the hashtable
        gHashTable.count++;
    }

    return entry;
}
171
172static int is_valid_entry(HashEntry* entry) {
173    if (entry != NULL) {
174        int i;
175        for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
176            HashEntry* e1 = gHashTable.slots[i];
177
178            while (e1 != NULL) {
179                if (e1 == entry) {
180                    return 1;
181                }
182
183                e1 = e1->next;
184            }
185        }
186    }
187
188    return 0;
189}
190
191static void remove_entry(HashEntry* entry) {
192    HashEntry* prev = entry->prev;
193    HashEntry* next = entry->next;
194
195    if (prev != NULL) entry->prev->next = next;
196    if (next != NULL) entry->next->prev = prev;
197
198    if (prev == NULL) {
199        // we are the head of the list. set the head to be next
200        gHashTable.slots[entry->slot] = entry->next;
201    }
202
203    // we just removed and entry, decrease the size of the hashtable
204    gHashTable.count--;
205}
206
207// =============================================================================
208// malloc fill functions
209// =============================================================================
210
211#define CHK_FILL_FREE           0xef
212#define CHK_SENTINEL_VALUE      0xeb
213
214extern "C" void* fill_calloc(size_t n_elements, size_t elem_size) {
215    return dlcalloc(n_elements, elem_size);
216}
217
218extern "C" void* fill_malloc(size_t bytes) {
219    void* buffer = dlmalloc(bytes);
220    if (buffer) {
221        memset(buffer, CHK_SENTINEL_VALUE, bytes);
222    }
223    return buffer;
224}
225
226extern "C" void fill_free(void* mem) {
227    size_t bytes = dlmalloc_usable_size(mem);
228    memset(mem, CHK_FILL_FREE, bytes);
229    dlfree(mem);
230}
231
232extern "C" void* fill_realloc(void* mem, size_t bytes) {
233    size_t oldSize = dlmalloc_usable_size(mem);
234    void* newMem = dlrealloc(mem, bytes);
235    if (newMem) {
236        // If this is larger than before, fill the extra with our pattern.
237        size_t newSize = dlmalloc_usable_size(newMem);
238        if (newSize > oldSize) {
239            memset(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(newMem)+oldSize), CHK_FILL_FREE, newSize-oldSize);
240        }
241    }
242    return newMem;
243}
244
245extern "C" void* fill_memalign(size_t alignment, size_t bytes) {
246    void* buffer = dlmemalign(alignment, bytes);
247    if (buffer) {
248        memset(buffer, CHK_SENTINEL_VALUE, bytes);
249    }
250    return buffer;
251}
252
253extern "C" size_t fill_malloc_usable_size(const void* mem) {
254    // Since we didn't allocate extra bytes before or after, we can
255    // report the normal usable size here.
256    return dlmalloc_usable_size(mem);
257}
258
259// =============================================================================
260// malloc leak functions
261// =============================================================================
262
// Guard word stored in AllocationEntry::guard for memalign'd blocks
// (see leak_memalign); distinguishes them from plain leak_malloc blocks,
// whose guard is GUARD.  It is never written at runtime, so declare it
// const to keep it out of writable data.
static const uint32_t MEMALIGN_GUARD = 0xA1A41520;
264
// Tracked malloc: allocates 'bytes' plus an AllocationEntry header placed
// immediately before the pointer handed to the caller.  The header links
// the block to the HashEntry recording its allocation backtrace, which
// makes the bookkeeping in leak_free O(1).
extern "C" void* leak_malloc(size_t bytes) {
    // Allocate enough space in front of the allocation to store a pointer
    // to the hash-table entry.  This makes freeing the structure fast.

    // 1. allocate enough memory, including room for our header
    // 2. return the address just past the header

    size_t size = bytes + sizeof(AllocationEntry);
    if (size < bytes) { // Overflow of bytes + header size.
        return NULL;
    }

    void* base = dlmalloc(size);
    if (base != NULL) {
        // The hash table is shared across threads; lock while recording.
        ScopedPthreadMutexLocker locker(&gAllocationsMutex);

        // Capture the current call stack and record it in the hash table.
        uintptr_t backtrace[BACKTRACE_SIZE];
        size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);

        AllocationEntry* header = reinterpret_cast<AllocationEntry*>(base);
        header->entry = record_backtrace(backtrace, numEntries, bytes);
        header->guard = GUARD;

        // Advance base past our header; the header struct is aligned to
        // MALLOC_ALIGNMENT, so the returned pointer keeps that alignment.
        base = reinterpret_cast<AllocationEntry*>(base) + 1;
    }

    return base;
}
295
// Tracked free: validates the header guard, decrements (and possibly
// removes) the shared backtrace entry, then frees the underlying block.
// A block whose guard is unrecognized and whose entry pointer is not in
// the hash table is logged and deliberately NOT freed, since its true
// base address is unknown.
extern "C" void leak_free(void* mem) {
    if (mem != NULL) {
        ScopedPthreadMutexLocker locker(&gAllocationsMutex);

        // check the guard to make sure it is valid
        AllocationEntry* header = to_header(mem);

        if (header->guard != GUARD) {
            // could be a memaligned block
            if (header->guard == MEMALIGN_GUARD) {
                // For memaligned blocks, header->entry points to the memory
                // allocated through leak_malloc, so step back to that
                // allocation's real header.
                header = to_header(header->entry);
            }
        }

        // Accept the block if the (possibly redirected) guard checks out,
        // or — as a fallback for corrupted guards — if the entry pointer
        // is still present in the hash table.
        if (header->guard == GUARD || is_valid_entry(header->entry)) {
            // decrement the allocations
            HashEntry* entry = header->entry;
            entry->allocations--;
            if (entry->allocations <= 0) {
                remove_entry(entry);
                dlfree(entry);
            }

            // now free the memory!
            dlfree(header);
        } else {
            debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
                    header->guard, header->entry);
        }
    }
}
329
330extern "C" void* leak_calloc(size_t n_elements, size_t elem_size) {
331    /* Fail on overflow - just to be safe even though this code runs only
332     * within the debugging C library, not the production one */
333    if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
334        return NULL;
335    }
336    size_t size = n_elements * elem_size;
337    void* ptr  = leak_malloc(size);
338    if (ptr != NULL) {
339        memset(ptr, 0, size);
340    }
341    return ptr;
342}
343
344extern "C" void* leak_realloc(void* oldMem, size_t bytes) {
345    if (oldMem == NULL) {
346        return leak_malloc(bytes);
347    }
348
349    void* newMem = NULL;
350    AllocationEntry* header = to_header(oldMem);
351    if (header->guard == MEMALIGN_GUARD) {
352        // Get the real header.
353        header = to_header(header->entry);
354    } else if (header->guard != GUARD) {
355        debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
356                   header->guard, header->entry);
357        return NULL;
358    }
359
360    newMem = leak_malloc(bytes);
361    if (newMem != NULL) {
362        size_t oldSize = header->entry->size & ~SIZE_FLAG_MASK;
363        size_t copySize = (oldSize <= bytes) ? oldSize : bytes;
364        memcpy(newMem, oldMem, copySize);
365    }
366    leak_free(oldMem);
367
368    return newMem;
369}
370
// Tracked memalign.  For alignments above MALLOC_ALIGNMENT this
// over-allocates through leak_malloc, advances the pointer to the next
// aligned address, and stamps a MEMALIGN_GUARD header immediately before
// it whose 'entry' field stores the original leak_malloc pointer (which
// leak_free and leak_realloc use to recover the real header).
extern "C" void* leak_memalign(size_t alignment, size_t bytes) {
    // we can just use malloc
    if (alignment <= MALLOC_ALIGNMENT) {
        return leak_malloc(bytes);
    }

    // need to make sure it's a power of two
    // NOTE(review): a non-power-of-two alignment is rounded DOWN to the
    // previous power of two here, which yields WEAKER alignment than the
    // caller asked for — confirm this is intended (rounding up would be
    // the safer interpretation).
    if (alignment & (alignment-1)) {
        alignment = 1L << (31 - __builtin_clz(alignment));
    }

    // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes
    // we will align by at least MALLOC_ALIGNMENT bytes
    // and at most alignment-MALLOC_ALIGNMENT bytes
    size_t size = (alignment-MALLOC_ALIGNMENT) + bytes;
    if (size < bytes) { // Overflow of padding + bytes.
        return NULL;
    }

    void* base = leak_malloc(size);
    if (base != NULL) {
        uintptr_t ptr = reinterpret_cast<uintptr_t>(base);
        if ((ptr % alignment) == 0) {
            // Already suitably aligned; no memalign header is needed and
            // the block looks like a plain leak_malloc allocation.
            return base;
        }

        // align the pointer up to the next multiple of 'alignment'
        ptr += ((-ptr) % alignment);

        // Already allocated enough space for the header. This assumes
        // that the malloc alignment is at least 8, otherwise, this is
        // not guaranteed to have the space for the header.
        AllocationEntry* header = to_header(reinterpret_cast<void*>(ptr));
        header->guard = MEMALIGN_GUARD;
        // Stash the original leak_malloc pointer so free can find it.
        header->entry = reinterpret_cast<HashEntry*>(base);

        return reinterpret_cast<void*>(ptr);
    }
    return base;
}
411
// Tracked malloc_usable_size: resolves the real header (following the
// memalign indirection if present), asks the underlying allocator for
// the block's usable size, and subtracts the header/alignment prefix so
// the answer is measured from the caller's pointer.  Returns 0 for NULL
// input or an unrecognized guard.
extern "C" size_t leak_malloc_usable_size(const void* mem) {
    if (mem != NULL) {
        // Check the guard to make sure it is valid.
        const AllocationEntry* header = const_to_header((void*)mem);

        if (header->guard == MEMALIGN_GUARD) {
            // If this is a memalign'd pointer, then grab the header from
            // entry.
            header = const_to_header(header->entry);
        } else if (header->guard != GUARD) {
            debug_log("WARNING bad header guard: '0x%x'! and invalid entry: %p\n",
                      header->guard, header->entry);
            return 0;
        }

        size_t ret = dlmalloc_usable_size(header);
        if (ret != 0) {
            // The usable area starts at 'mem' and stops at 'header+ret'.
            return reinterpret_cast<uintptr_t>(header) + ret - reinterpret_cast<uintptr_t>(mem);
        }
    }
    return 0;
}
435