/*
 * Copyright (C) 2008 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <arpa/inet.h>
#include <sys/socket.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <stddef.h>
#include <stdarg.h>
#include <fcntl.h>
#include <unwind.h>
#include <dlfcn.h>

#include <sys/un.h>
#include <sys/select.h>
#include <sys/types.h>
#include <sys/system_properties.h>

#include "dlmalloc.h"
#include "logd.h"
#include "malloc_debug_common.h"

// This file should be included in the build only when MALLOC_LEAK_CHECK,
// MALLOC_QEMU_INSTRUMENT, or both macros are defined.
#ifndef MALLOC_LEAK_CHECK
#error MALLOC_LEAK_CHECK is not defined.
#endif  // !MALLOC_LEAK_CHECK

// Global variables defined in malloc_debug_common.c
extern int gMallocLeakZygoteChild;
extern pthread_mutex_t gAllocationsMutex;
extern HashTable gHashTable;
extern const MallocDebug __libc_malloc_default_dispatch;
extern const MallocDebug* __libc_malloc_dispatch;
// =============================================================================
// log functions
// =============================================================================

#define debug_log(format, ...)  \
    __libc_android_log_print(ANDROID_LOG_DEBUG, "malloc_leak_check", (format), ##__VA_ARGS__ )
#define error_log(format, ...)  \
    __libc_android_log_print(ANDROID_LOG_ERROR, "malloc_leak_check", (format), ##__VA_ARGS__ )
#define info_log(format, ...)  \
    __libc_android_log_print(ANDROID_LOG_INFO, "malloc_leak_check", (format), ##__VA_ARGS__ )

static int gTrapOnError = 1;

#define MALLOC_ALIGNMENT    8
#define GUARD               0x48151642
#define DEBUG               0

// =============================================================================
// Structures
// =============================================================================
typedef struct AllocationEntry AllocationEntry;
struct AllocationEntry {
    HashEntry* entry;
    uint32_t guard;
};
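// Note: leak_malloc() below prepends one AllocationEntry to every block it
// hands out, so a pointer returned to the caller always sits
// sizeof(AllocationEntry) bytes past the real dlmalloc() block. 'guard'
// holds GUARD for ordinary allocations, which lets leak_free() recognize
// and sanity-check tracked blocks.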


// =============================================================================
// Hash Table functions
// =============================================================================
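// get_hash() below is a simple multiply-by-33 (djb2-style) hash over the
// return addresses of a backtrace; each address is shifted right by 2 first,
// presumably to drop the low alignment bits of the return addresses so they
// do not dilute the hash.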
static uint32_t get_hash(intptr_t* backtrace, size_t numEntries)
{
    if (backtrace == NULL) return 0;

    uint32_t hash = 0;
    size_t i;
    for (i = 0 ; i < numEntries ; i++) {
        hash = (hash * 33) + (backtrace[i] >> 2);
    }

    return hash;
}

static HashEntry* find_entry(HashTable* table, int slot,
        intptr_t* backtrace, size_t numEntries, size_t size)
{
    HashEntry* entry = table->slots[slot];
    while (entry != NULL) {
        //debug_log("backtrace: %p, entry: %p entry->backtrace: %p\n",
        //        backtrace, entry, (entry != NULL) ? entry->backtrace : NULL);
        /*
         * See if the entry matches exactly.  We compare the "size" field,
         * including the flag bits.
         */
        if (entry->size == size && entry->numEntries == numEntries &&
                !memcmp(backtrace, entry->backtrace, numEntries * sizeof(intptr_t))) {
            return entry;
        }

        entry = entry->next;
    }

    return NULL;
}

static HashEntry* record_backtrace(intptr_t* backtrace, size_t numEntries, size_t size)
{
    size_t hash = get_hash(backtrace, numEntries);
    size_t slot = hash % HASHTABLE_SIZE;

    if (size & SIZE_FLAG_MASK) {
        debug_log("malloc_debug: allocation %zx exceeds bit width\n", size);
        abort();
    }

    if (gMallocLeakZygoteChild)
        size |= SIZE_FLAG_ZYGOTE_CHILD;

    HashEntry* entry = find_entry(&gHashTable, slot, backtrace, numEntries, size);

    if (entry != NULL) {
        entry->allocations++;
    } else {
        // create a new entry
        entry = (HashEntry*)dlmalloc(sizeof(HashEntry) + numEntries*sizeof(intptr_t));
        if (!entry)
            return NULL;
        entry->allocations = 1;
        entry->slot = slot;
        entry->prev = NULL;
        entry->next = gHashTable.slots[slot];
        entry->numEntries = numEntries;
        entry->size = size;

        memcpy(entry->backtrace, backtrace, numEntries * sizeof(intptr_t));

        gHashTable.slots[slot] = entry;

        if (entry->next != NULL) {
            entry->next->prev = entry;
        }

        // we just added an entry, so bump the table's entry count
        gHashTable.count++;
    }

    return entry;
}

static int is_valid_entry(HashEntry* entry)
{
    if (entry != NULL) {
        int i;
        for (i = 0 ; i < HASHTABLE_SIZE ; i++) {
            HashEntry* e1 = gHashTable.slots[i];

            while (e1 != NULL) {
                if (e1 == entry) {
                    return 1;
                }

                e1 = e1->next;
            }
        }
    }

    return 0;
}

static void remove_entry(HashEntry* entry)
{
    HashEntry* prev = entry->prev;
    HashEntry* next = entry->next;

    if (prev != NULL) entry->prev->next = next;
    if (next != NULL) entry->next->prev = prev;

    if (prev == NULL) {
        // we are the head of the list. set the head to be next
        gHashTable.slots[entry->slot] = entry->next;
    }

    // we just removed an entry, so drop the table's entry count
    gHashTable.count--;
}


// =============================================================================
// stack trace functions
// =============================================================================

typedef struct
{
    size_t count;
    intptr_t* addrs;
} stack_crawl_state_t;
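
// This state is shared between get_backtrace() and trace_function():
// _Unwind_Backtrace() calls trace_function() once per stack frame, which
// stores the frame's IP in *addrs, advances addrs, and decrements count
// until the buffer is full or a zero IP is seen.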


/* depends on how the system includes define this */
#ifdef HAVE_UNWIND_CONTEXT_STRUCT
typedef struct _Unwind_Context __unwind_context;
#else
typedef _Unwind_Context __unwind_context;
#endif

static _Unwind_Reason_Code trace_function(__unwind_context *context, void *arg)
{
    stack_crawl_state_t* state = (stack_crawl_state_t*)arg;
    if (state->count) {
        intptr_t ip = (intptr_t)_Unwind_GetIP(context);
        if (ip) {
            state->addrs[0] = ip;
            state->addrs++;
            state->count--;
            return _URC_NO_REASON;
        }
    }
    /*
     * If we have run out of space to record addresses, or a 0 IP has been
     * seen, stop unwinding the stack.
     */
    return _URC_END_OF_STACK;
}

static inline
int get_backtrace(intptr_t* addrs, size_t max_entries)
{
    stack_crawl_state_t state;
    state.count = max_entries;
    state.addrs = (intptr_t*)addrs;
    _Unwind_Backtrace(trace_function, (void*)&state);
    return max_entries - state.count;
}

// =============================================================================
// malloc check functions
// =============================================================================

#define CHK_FILL_FREE           0xef
#define CHK_SENTINEL_VALUE      0xeb
#define CHK_SENTINEL_HEAD_SIZE  16
#define CHK_SENTINEL_TAIL_SIZE  16
#define CHK_OVERHEAD_SIZE       (   CHK_SENTINEL_HEAD_SIZE +    \
                                    CHK_SENTINEL_TAIL_SIZE +    \
                                    sizeof(size_t) )
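
/*
 * Rough layout of a guarded block, as produced by chk_malloc() below
 * (a sketch; the amount of slack depends on dlmalloc's size rounding):
 *
 *   buffer
 *   | head sentinel (16) | user bytes | tail sentinel (16) | slack | size_t 'bytes' |
 *                        ^
 *                        pointer returned to the caller
 *
 * The requested size is stashed in the last size_t of dlmalloc's usable
 * region, so chk_mem_check() can recover it and locate the tail sentinel.
 */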

static void dump_stack_trace(void)
{
    intptr_t addrs[20];
    int c = get_backtrace(addrs, 20);
    char buf[16];
    char tmp[16*20];
    int i;

    tmp[0] = 0; // Need to initialize tmp[0] for the first strlcat
    for (i=0 ; i<c; i++) {
        snprintf(buf, sizeof buf, "%2d: %08x\n", i, (unsigned int)addrs[i]);
        strlcat(tmp, buf, sizeof tmp);
    }
    __libc_android_log_print(ANDROID_LOG_ERROR, "libc", "call stack:\n%s", tmp);
}

static int is_valid_malloc_pointer(void* addr)
{
    return 1;
}

static void assert_log_message(const char* format, ...)
{
    va_list  args;

    pthread_mutex_lock(&gAllocationsMutex);
    {
        const MallocDebug* current_dispatch = __libc_malloc_dispatch;
        __libc_malloc_dispatch = &__libc_malloc_default_dispatch;
        va_start(args, format);
        __libc_android_log_vprint(ANDROID_LOG_ERROR, "libc",
                                format, args);
        va_end(args);
        dump_stack_trace();
        if (gTrapOnError) {
            __builtin_trap();
        }
        __libc_malloc_dispatch = current_dispatch;
    }
    pthread_mutex_unlock(&gAllocationsMutex);
}

static void assert_valid_malloc_pointer(void* mem)
{
    if (mem && !is_valid_malloc_pointer(mem)) {
        assert_log_message(
            "*** MALLOC CHECK: buffer %p is not a valid "
            "malloc pointer (are you mixing up new/delete "
            "and malloc/free?)", mem);
    }
}

/* Checks that a given address corresponds to a guarded block,
 * and returns its original allocation size in '*allocated'.
 * 'func' is the capitalized name of the caller function.
 * Returns 0 on success, or -1 on failure.
 * NOTE: Does not return if gTrapOnError is set.
 */
static int chk_mem_check(void*       mem,
                         size_t*     allocated,
                         const char* func)
{
    char*  buffer;
    size_t offset, bytes;
    int    i;
    char*  buf;

    /* first check the bytes in the sentinel header */
    buf = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
    for (i=0 ; i<CHK_SENTINEL_HEAD_SIZE ; i++) {
        if (buf[i] != CHK_SENTINEL_VALUE) {
            assert_log_message(
                "*** %s CHECK: buffer %p "
                "corrupted %d bytes before allocation",
                func, mem, CHK_SENTINEL_HEAD_SIZE-i);
            return -1;
        }
    }

    /* then the ones in the sentinel trailer */
    buffer = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
    offset = dlmalloc_usable_size(buffer) - sizeof(size_t);
    bytes  = *(size_t *)(buffer + offset);

    buf = (char*)mem + bytes;
    for (i=CHK_SENTINEL_TAIL_SIZE-1 ; i>=0 ; i--) {
        if (buf[i] != CHK_SENTINEL_VALUE) {
            assert_log_message(
                "*** %s CHECK: buffer %p, size=%zu, "
                "corrupted %d bytes after allocation",
                func, buffer, bytes, i+1);
            return -1;
        }
    }

    *allocated = bytes;
    return 0;
}


void* chk_malloc(size_t bytes)
{
    char* buffer = (char*)dlmalloc(bytes + CHK_OVERHEAD_SIZE);
    if (buffer) {
        memset(buffer, CHK_SENTINEL_VALUE, bytes + CHK_OVERHEAD_SIZE);
        size_t offset = dlmalloc_usable_size(buffer) - sizeof(size_t);
        *(size_t *)(buffer + offset) = bytes;
        buffer += CHK_SENTINEL_HEAD_SIZE;
    }
    return buffer;
}

void  chk_free(void* mem)
{
    assert_valid_malloc_pointer(mem);
    if (mem) {
        size_t  size;
        char*   buffer;

        if (chk_mem_check(mem, &size, "FREE") == 0) {
            buffer = (char*)mem - CHK_SENTINEL_HEAD_SIZE;
            memset(buffer, CHK_FILL_FREE, size + CHK_OVERHEAD_SIZE);
            dlfree(buffer);
        }
    }
}

void* chk_calloc(size_t n_elements, size_t elem_size)
{
    size_t  size;
    void*   ptr;

    /* Fail on overflow - just to be safe even though this code runs only
     * within the debugging C library, not the production one */
    if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
        return NULL;
    }
    size = n_elements * elem_size;
    ptr  = chk_malloc(size);
    if (ptr != NULL) {
        memset(ptr, 0, size);
    }
    return ptr;
}

void* chk_realloc(void* mem, size_t bytes)
{
    size_t  old_bytes = 0;

    assert_valid_malloc_pointer(mem);

    if (mem != NULL && chk_mem_check(mem, &old_bytes, "REALLOC") < 0)
        return NULL;

    char* new_buffer = chk_malloc(bytes);
    if (mem == NULL) {
        return new_buffer;
    }

    if (new_buffer) {
        if (bytes > old_bytes)
            bytes = old_bytes;
        memcpy(new_buffer, mem, bytes);
        chk_free(mem);
    }

    return new_buffer;
}

void* chk_memalign(size_t alignment, size_t bytes)
{
    // XXX: ignoring the alignment request and falling back to chk_malloc()
    // is better than getting the sentinel bookkeeping wrong.
    return chk_malloc(bytes);
}

// =============================================================================
// malloc fill functions
// =============================================================================

void* fill_malloc(size_t bytes)
{
    void* buffer = dlmalloc(bytes);
    if (buffer) {
        memset(buffer, CHK_SENTINEL_VALUE, bytes);
    }
    return buffer;
}

void  fill_free(void* mem)
{
    size_t bytes = dlmalloc_usable_size(mem);
    memset(mem, CHK_FILL_FREE, bytes);
    dlfree(mem);
}

void* fill_realloc(void* mem, size_t bytes)
{
    void* buffer = fill_malloc(bytes);
    if (mem == NULL) {
        return buffer;
    }
    if (buffer) {
        size_t old_size = dlmalloc_usable_size(mem);
        size_t size = (bytes < old_size)?(bytes):(old_size);
        memcpy(buffer, mem, size);
        fill_free(mem);
    }
    return buffer;
}

void* fill_memalign(size_t alignment, size_t bytes)
{
    void* buffer = dlmemalign(alignment, bytes);
    if (buffer) {
        memset(buffer, CHK_SENTINEL_VALUE, bytes);
    }
    return buffer;
}

// =============================================================================
// malloc leak functions
// =============================================================================

#define MEMALIGN_GUARD  ((void*)0xA1A41520)
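
// When leak_memalign() has to slide the returned pointer forward to satisfy
// the alignment, it records the original leak_malloc() pointer just below the
// returned address: ((void**)ptr)[-2] holds the real base and
// ((void**)ptr)[-1] holds MEMALIGN_GUARD. leak_free() checks for this marker
// so it can find the true AllocationEntry header before freeing.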

void* leak_malloc(size_t bytes)
{
    // allocate enough space in front of the allocation to store the pointer
    // for the alloc structure. This makes freeing the structure really fast!

    // 1. allocate enough memory and include our header
    // 2. set the base pointer to be right after our header

    void* base = dlmalloc(bytes + sizeof(AllocationEntry));
    if (base != NULL) {
        pthread_mutex_lock(&gAllocationsMutex);

            intptr_t backtrace[BACKTRACE_SIZE];
            size_t numEntries = get_backtrace(backtrace, BACKTRACE_SIZE);

            AllocationEntry* header = (AllocationEntry*)base;
            header->entry = record_backtrace(backtrace, numEntries, bytes);
            header->guard = GUARD;

            // now increment base to point to after our header.
            // this should just work since our header is 8 bytes.
            base = (AllocationEntry*)base + 1;

        pthread_mutex_unlock(&gAllocationsMutex);
    }

    return base;
}

void leak_free(void* mem)
{
    if (mem != NULL) {
        pthread_mutex_lock(&gAllocationsMutex);

        // check the guard to make sure it is valid
        AllocationEntry* header = (AllocationEntry*)mem - 1;

        if (header->guard != GUARD) {
            // could be a memaligned block
            if (((void**)mem)[-1] == MEMALIGN_GUARD) {
                mem = ((void**)mem)[-2];
                header = (AllocationEntry*)mem - 1;
            }
        }

        if (header->guard == GUARD || is_valid_entry(header->entry)) {
            // decrement the allocations
            HashEntry* entry = header->entry;
            entry->allocations--;
            if (entry->allocations <= 0) {
                remove_entry(entry);
                dlfree(entry);
            }

            // now free the memory!
            dlfree(header);
        } else {
            debug_log("WARNING: bad header guard 0x%x and invalid entry %p\n",
                    header->guard, header->entry);
        }

        pthread_mutex_unlock(&gAllocationsMutex);
    }
}

void* leak_calloc(size_t n_elements, size_t elem_size)
{
    size_t  size;
    void*   ptr;

    /* Fail on overflow - just to be safe even though this code runs only
     * within the debugging C library, not the production one */
    if (n_elements && MAX_SIZE_T / n_elements < elem_size) {
        return NULL;
    }
    size = n_elements * elem_size;
    ptr  = leak_malloc(size);
    if (ptr != NULL) {
        memset(ptr, 0, size);
    }
    return ptr;
}

void* leak_realloc(void* oldMem, size_t bytes)
{
    if (oldMem == NULL) {
        return leak_malloc(bytes);
    }
    void* newMem = NULL;
    AllocationEntry* header = (AllocationEntry*)oldMem - 1;
    if (header && header->guard == GUARD) {
        size_t oldSize = header->entry->size & ~SIZE_FLAG_MASK;
        newMem = leak_malloc(bytes);
        if (newMem != NULL) {
            size_t copySize = (oldSize <= bytes) ? oldSize : bytes;
            memcpy(newMem, oldMem, copySize);
            leak_free(oldMem);
        }
    } else {
        newMem = dlrealloc(oldMem, bytes);
    }
    return newMem;
}

void* leak_memalign(size_t alignment, size_t bytes)
{
    // we can just use malloc
    if (alignment <= MALLOC_ALIGNMENT)
        return leak_malloc(bytes);

    // need to make sure it's a power of two
    if (alignment & (alignment-1))
        alignment = 1L << (31 - __builtin_clz(alignment));

    // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes
    // we will align by at least MALLOC_ALIGNMENT bytes
    // and at most alignment-MALLOC_ALIGNMENT bytes
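    //
    // For example (assuming MALLOC_ALIGNMENT is 8): with alignment == 32 we
    // over-allocate by 24 bytes. leak_malloc() returns an 8-byte-aligned base,
    // so the aligned pointer is at most 24 bytes past it, and when it does
    // move it moves by at least 8 bytes, which leaves room for the two words
    // stored at ptr[-1] and ptr[-2] below on a 32-bit build.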
    size_t size = (alignment-MALLOC_ALIGNMENT) + bytes;
    void* base = leak_malloc(size);
    if (base != NULL) {
        uintptr_t ptr = (uintptr_t)base;
        if ((ptr % alignment) == 0)
            return base;

        // align the pointer (unsigned arithmetic rounds it up to the next
        // multiple of 'alignment')
        ptr += ((-ptr) % alignment);

        // there is always enough space for the base pointer and the guard
        ((void**)ptr)[-1] = MEMALIGN_GUARD;
        ((void**)ptr)[-2] = base;

        return (void*)ptr;
    }
    return base;
}

/* Initializes the malloc debugging framework.
 * See comments on MallocDebugInit in malloc_debug_common.h
 */
int malloc_debug_initialize(void)
{
    // We don't really have anything that requires initialization here.
    return 0;
}