// malloc_debug_check.cpp revision 8e52e8fe83632c667521c1c8e4f640e94c09baed
/*
 * Copyright (C) 2012 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
27 */ 28 29#include <arpa/inet.h> 30#include <dlfcn.h> 31#include <errno.h> 32#include <errno.h> 33#include <fcntl.h> 34#include <pthread.h> 35#include <stdarg.h> 36#include <stdbool.h> 37#include <stddef.h> 38#include <stdio.h> 39#include <stdlib.h> 40#include <string.h> 41#include <sys/socket.h> 42#include <sys/system_properties.h> 43#include <sys/types.h> 44#include <time.h> 45#include <unistd.h> 46#include <unwind.h> 47 48#include "debug_mapinfo.h" 49#include "debug_stacktrace.h" 50#include "private/libc_logging.h" 51#include "malloc_debug_common.h" 52#include "private/ScopedPthreadMutexLocker.h" 53 54#define MAX_BACKTRACE_DEPTH 16 55#define ALLOCATION_TAG 0x1ee7d00d 56#define BACKLOG_TAG 0xbabecafe 57#define FREE_POISON 0xa5 58#define FRONT_GUARD 0xaa 59#define FRONT_GUARD_LEN (1<<5) 60#define REAR_GUARD 0xbb 61#define REAR_GUARD_LEN (1<<5) 62 63static void log_message(const char* format, ...) { 64 va_list args; 65 va_start(args, format); 66 __libc_format_log_va_list(ANDROID_LOG_ERROR, "libc", format, args); 67 va_end(args); 68} 69 70struct hdr_t { 71 uint32_t tag; 72 void* base; // Always points to the memory allocated using malloc. 73 // For memory allocated in chk_memalign, this value will 74 // not be the same as the location of the start of this 75 // structure. 
76 hdr_t* prev; 77 hdr_t* next; 78 uintptr_t bt[MAX_BACKTRACE_DEPTH]; 79 int bt_depth; 80 uintptr_t freed_bt[MAX_BACKTRACE_DEPTH]; 81 int freed_bt_depth; 82 size_t size; 83 uint8_t front_guard[FRONT_GUARD_LEN]; 84} __attribute__((packed, aligned(MALLOC_ALIGNMENT))); 85 86struct ftr_t { 87 uint8_t rear_guard[REAR_GUARD_LEN]; 88} __attribute__((packed)); 89 90static inline ftr_t* to_ftr(hdr_t* hdr) { 91 return reinterpret_cast<ftr_t*>(reinterpret_cast<char*>(hdr + 1) + hdr->size); 92} 93 94static inline void* user(hdr_t* hdr) { 95 return hdr + 1; 96} 97 98static inline hdr_t* meta(void* user) { 99 return reinterpret_cast<hdr_t*>(user) - 1; 100} 101 102static inline const hdr_t* const_meta(const void* user) { 103 return reinterpret_cast<const hdr_t*>(user) - 1; 104} 105 106// TODO: introduce a struct for this global state. 107// There are basically two lists here, the regular list and the backlog list. 108// We should be able to remove the duplication. 109static unsigned g_allocated_block_count; 110static hdr_t* tail; 111static hdr_t* head; 112static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; 113 114static unsigned backlog_num; 115static hdr_t* backlog_tail; 116static hdr_t* backlog_head; 117static pthread_mutex_t backlog_lock = PTHREAD_MUTEX_INITIALIZER; 118 119// This variable is set to the value of property libc.debug.malloc.backlog. 120// It determines the size of the backlog we use to detect multiple frees. 
121static unsigned g_malloc_debug_backlog = 100; 122 123__LIBC_HIDDEN__ HashTable* g_hash_table; 124 125static inline void init_front_guard(hdr_t* hdr) { 126 memset(hdr->front_guard, FRONT_GUARD, FRONT_GUARD_LEN); 127} 128 129static inline bool is_front_guard_valid(hdr_t* hdr) { 130 for (size_t i = 0; i < FRONT_GUARD_LEN; i++) { 131 if (hdr->front_guard[i] != FRONT_GUARD) { 132 return false; 133 } 134 } 135 return true; 136} 137 138static inline void init_rear_guard(hdr_t* hdr) { 139 ftr_t* ftr = to_ftr(hdr); 140 memset(ftr->rear_guard, REAR_GUARD, REAR_GUARD_LEN); 141} 142 143static inline bool is_rear_guard_valid(hdr_t* hdr) { 144 unsigned i; 145 int valid = 1; 146 int first_mismatch = -1; 147 ftr_t* ftr = to_ftr(hdr); 148 for (i = 0; i < REAR_GUARD_LEN; i++) { 149 if (ftr->rear_guard[i] != REAR_GUARD) { 150 if (first_mismatch < 0) 151 first_mismatch = i; 152 valid = 0; 153 } else if (first_mismatch >= 0) { 154 log_message("+++ REAR GUARD MISMATCH [%d, %d)\n", first_mismatch, i); 155 first_mismatch = -1; 156 } 157 } 158 159 if (first_mismatch >= 0) 160 log_message("+++ REAR GUARD MISMATCH [%d, %d)\n", first_mismatch, i); 161 return valid; 162} 163 164static inline void add_locked(hdr_t* hdr, hdr_t** tail, hdr_t** head) { 165 hdr->prev = NULL; 166 hdr->next = *head; 167 if (*head) 168 (*head)->prev = hdr; 169 else 170 *tail = hdr; 171 *head = hdr; 172} 173 174static inline int del_locked(hdr_t* hdr, hdr_t** tail, hdr_t** head) { 175 if (hdr->prev) { 176 hdr->prev->next = hdr->next; 177 } else { 178 *head = hdr->next; 179 } 180 if (hdr->next) { 181 hdr->next->prev = hdr->prev; 182 } else { 183 *tail = hdr->prev; 184 } 185 return 0; 186} 187 188static inline void add(hdr_t* hdr, size_t size) { 189 ScopedPthreadMutexLocker locker(&lock); 190 hdr->tag = ALLOCATION_TAG; 191 hdr->size = size; 192 init_front_guard(hdr); 193 init_rear_guard(hdr); 194 ++g_allocated_block_count; 195 add_locked(hdr, &tail, &head); 196} 197 198static inline int del(hdr_t* hdr) { 199 if 
(hdr->tag != ALLOCATION_TAG) { 200 return -1; 201 } 202 203 ScopedPthreadMutexLocker locker(&lock); 204 del_locked(hdr, &tail, &head); 205 --g_allocated_block_count; 206 return 0; 207} 208 209static inline void poison(hdr_t* hdr) { 210 memset(user(hdr), FREE_POISON, hdr->size); 211} 212 213static bool was_used_after_free(hdr_t* hdr) { 214 const uint8_t* data = reinterpret_cast<const uint8_t*>(user(hdr)); 215 for (size_t i = 0; i < hdr->size; i++) { 216 if (data[i] != FREE_POISON) { 217 return true; 218 } 219 } 220 return false; 221} 222 223/* returns 1 if valid, *safe == 1 if safe to dump stack */ 224static inline int check_guards(hdr_t* hdr, int* safe) { 225 *safe = 1; 226 if (!is_front_guard_valid(hdr)) { 227 if (hdr->front_guard[0] == FRONT_GUARD) { 228 log_message("+++ ALLOCATION %p SIZE %d HAS A CORRUPTED FRONT GUARD\n", 229 user(hdr), hdr->size); 230 } else { 231 log_message("+++ ALLOCATION %p HAS A CORRUPTED FRONT GUARD "\ 232 "(NOT DUMPING STACKTRACE)\n", user(hdr)); 233 /* Allocation header is probably corrupt, do not print stack trace */ 234 *safe = 0; 235 } 236 return 0; 237 } 238 239 if (!is_rear_guard_valid(hdr)) { 240 log_message("+++ ALLOCATION %p SIZE %d HAS A CORRUPTED REAR GUARD\n", 241 user(hdr), hdr->size); 242 return 0; 243 } 244 245 return 1; 246} 247 248/* returns 1 if valid, *safe == 1 if safe to dump stack */ 249static inline int check_allocation_locked(hdr_t* hdr, int* safe) { 250 int valid = 1; 251 *safe = 1; 252 253 if (hdr->tag != ALLOCATION_TAG && hdr->tag != BACKLOG_TAG) { 254 log_message("+++ ALLOCATION %p HAS INVALID TAG %08x (NOT DUMPING STACKTRACE)\n", 255 user(hdr), hdr->tag); 256 // Allocation header is probably corrupt, do not dequeue or dump stack 257 // trace. 
258 *safe = 0; 259 return 0; 260 } 261 262 if (hdr->tag == BACKLOG_TAG && was_used_after_free(hdr)) { 263 log_message("+++ ALLOCATION %p SIZE %d WAS USED AFTER BEING FREED\n", 264 user(hdr), hdr->size); 265 valid = 0; 266 /* check the guards to see if it's safe to dump a stack trace */ 267 check_guards(hdr, safe); 268 } else { 269 valid = check_guards(hdr, safe); 270 } 271 272 if (!valid && *safe) { 273 log_message("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n", 274 user(hdr), hdr->size); 275 log_backtrace(hdr->bt, hdr->bt_depth); 276 if (hdr->tag == BACKLOG_TAG) { 277 log_message("+++ ALLOCATION %p SIZE %d FREED HERE:\n", 278 user(hdr), hdr->size); 279 log_backtrace(hdr->freed_bt, hdr->freed_bt_depth); 280 } 281 } 282 283 return valid; 284} 285 286static inline int del_and_check_locked(hdr_t* hdr, 287 hdr_t** tail, hdr_t** head, unsigned* cnt, 288 int* safe) { 289 int valid = check_allocation_locked(hdr, safe); 290 if (safe) { 291 (*cnt)--; 292 del_locked(hdr, tail, head); 293 } 294 return valid; 295} 296 297static inline void del_from_backlog_locked(hdr_t* hdr) { 298 int safe; 299 del_and_check_locked(hdr, 300 &backlog_tail, &backlog_head, &backlog_num, 301 &safe); 302 hdr->tag = 0; /* clear the tag */ 303} 304 305static inline void del_from_backlog(hdr_t* hdr) { 306 ScopedPthreadMutexLocker locker(&backlog_lock); 307 del_from_backlog_locked(hdr); 308} 309 310static inline int del_leak(hdr_t* hdr, int* safe) { 311 ScopedPthreadMutexLocker locker(&lock); 312 return del_and_check_locked(hdr, &tail, &head, &g_allocated_block_count, safe); 313} 314 315static inline void add_to_backlog(hdr_t* hdr) { 316 ScopedPthreadMutexLocker locker(&backlog_lock); 317 hdr->tag = BACKLOG_TAG; 318 backlog_num++; 319 add_locked(hdr, &backlog_tail, &backlog_head); 320 poison(hdr); 321 /* If we've exceeded the maximum backlog, clear it up */ 322 while (backlog_num > g_malloc_debug_backlog) { 323 hdr_t* gone = backlog_tail; 324 del_from_backlog_locked(gone); 325 Malloc(free)(gone->base); 
326 } 327} 328 329extern "C" void* chk_malloc(size_t size) { 330// log_message("%s: %s\n", __FILE__, __FUNCTION__); 331 332 hdr_t* hdr = static_cast<hdr_t*>(Malloc(malloc)(sizeof(hdr_t) + size + sizeof(ftr_t))); 333 if (hdr) { 334 hdr->base = hdr; 335 hdr->bt_depth = get_backtrace(hdr->bt, MAX_BACKTRACE_DEPTH); 336 add(hdr, size); 337 return user(hdr); 338 } 339 return NULL; 340} 341 342extern "C" void* chk_memalign(size_t alignment, size_t bytes) { 343 if (alignment <= MALLOC_ALIGNMENT) { 344 return chk_malloc(bytes); 345 } 346 347 // Make the alignment a power of two. 348 if (alignment & (alignment-1)) { 349 alignment = 1L << (31 - __builtin_clz(alignment)); 350 } 351 352 // here, alignment is at least MALLOC_ALIGNMENT<<1 bytes 353 // we will align by at least MALLOC_ALIGNMENT bytes 354 // and at most alignment-MALLOC_ALIGNMENT bytes 355 size_t size = (alignment-MALLOC_ALIGNMENT) + bytes; 356 if (size < bytes) { // Overflow. 357 return NULL; 358 } 359 360 void* base = Malloc(malloc)(sizeof(hdr_t) + size + sizeof(ftr_t)); 361 if (base != NULL) { 362 // Check that the actual pointer that will be returned is aligned 363 // properly. 364 uintptr_t ptr = reinterpret_cast<uintptr_t>(user(reinterpret_cast<hdr_t*>(base))); 365 if ((ptr % alignment) != 0) { 366 // Align the pointer. 
367 ptr += ((-ptr) % alignment); 368 } 369 370 hdr_t* hdr = meta(reinterpret_cast<void*>(ptr)); 371 hdr->base = base; 372 hdr->bt_depth = get_backtrace(hdr->bt, MAX_BACKTRACE_DEPTH); 373 add(hdr, bytes); 374 return user(hdr); 375 } 376 return base; 377} 378 379extern "C" void chk_free(void* ptr) { 380// log_message("%s: %s\n", __FILE__, __FUNCTION__); 381 382 if (!ptr) /* ignore free(NULL) */ 383 return; 384 385 hdr_t* hdr = meta(ptr); 386 387 if (del(hdr) < 0) { 388 uintptr_t bt[MAX_BACKTRACE_DEPTH]; 389 int depth = get_backtrace(bt, MAX_BACKTRACE_DEPTH); 390 if (hdr->tag == BACKLOG_TAG) { 391 log_message("+++ ALLOCATION %p SIZE %d BYTES MULTIPLY FREED!\n", 392 user(hdr), hdr->size); 393 log_message("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n", 394 user(hdr), hdr->size); 395 log_backtrace(hdr->bt, hdr->bt_depth); 396 /* hdr->freed_bt_depth should be nonzero here */ 397 log_message("+++ ALLOCATION %p SIZE %d FIRST FREED HERE:\n", 398 user(hdr), hdr->size); 399 log_backtrace(hdr->freed_bt, hdr->freed_bt_depth); 400 log_message("+++ ALLOCATION %p SIZE %d NOW BEING FREED HERE:\n", 401 user(hdr), hdr->size); 402 log_backtrace(bt, depth); 403 } else { 404 log_message("+++ ALLOCATION %p IS CORRUPTED OR NOT ALLOCATED VIA TRACKER!\n", 405 user(hdr)); 406 log_backtrace(bt, depth); 407 } 408 } else { 409 hdr->freed_bt_depth = get_backtrace(hdr->freed_bt, MAX_BACKTRACE_DEPTH); 410 add_to_backlog(hdr); 411 } 412} 413 414extern "C" void* chk_realloc(void* ptr, size_t size) { 415// log_message("%s: %s\n", __FILE__, __FUNCTION__); 416 417 if (!ptr) { 418 return chk_malloc(size); 419 } 420 421#ifdef REALLOC_ZERO_BYTES_FREE 422 if (!size) { 423 chk_free(ptr); 424 return NULL; 425 } 426#endif 427 428 hdr_t* hdr = meta(ptr); 429 430 if (del(hdr) < 0) { 431 uintptr_t bt[MAX_BACKTRACE_DEPTH]; 432 int depth = get_backtrace(bt, MAX_BACKTRACE_DEPTH); 433 if (hdr->tag == BACKLOG_TAG) { 434 log_message("+++ REALLOCATION %p SIZE %d OF FREED MEMORY!\n", 435 user(hdr), size, hdr->size); 436 
log_message("+++ ALLOCATION %p SIZE %d ALLOCATED HERE:\n", 437 user(hdr), hdr->size); 438 log_backtrace(hdr->bt, hdr->bt_depth); 439 /* hdr->freed_bt_depth should be nonzero here */ 440 log_message("+++ ALLOCATION %p SIZE %d FIRST FREED HERE:\n", 441 user(hdr), hdr->size); 442 log_backtrace(hdr->freed_bt, hdr->freed_bt_depth); 443 log_message("+++ ALLOCATION %p SIZE %d NOW BEING REALLOCATED HERE:\n", 444 user(hdr), hdr->size); 445 log_backtrace(bt, depth); 446 447 /* We take the memory out of the backlog and fall through so the 448 * reallocation below succeeds. Since we didn't really free it, we 449 * can default to this behavior. 450 */ 451 del_from_backlog(hdr); 452 } else { 453 log_message("+++ REALLOCATION %p SIZE %d IS CORRUPTED OR NOT ALLOCATED VIA TRACKER!\n", 454 user(hdr), size); 455 log_backtrace(bt, depth); 456 // just get a whole new allocation and leak the old one 457 return Malloc(realloc)(0, size); 458 // return realloc(user(hdr), size); // assuming it was allocated externally 459 } 460 } 461 462 if (hdr->base != hdr) { 463 // An allocation from memalign, so create another allocation and 464 // copy the data out. 
465 void* newMem = Malloc(malloc)(sizeof(hdr_t) + size + sizeof(ftr_t)); 466 if (newMem) { 467 memcpy(newMem, hdr, sizeof(hdr_t) + hdr->size); 468 Malloc(free)(hdr->base); 469 hdr = static_cast<hdr_t*>(newMem); 470 } else { 471 Malloc(free)(hdr->base); 472 hdr = NULL; 473 } 474 } else { 475 hdr = static_cast<hdr_t*>(Malloc(realloc)(hdr, sizeof(hdr_t) + size + sizeof(ftr_t))); 476 } 477 if (hdr) { 478 hdr->base = hdr; 479 hdr->bt_depth = get_backtrace(hdr->bt, MAX_BACKTRACE_DEPTH); 480 add(hdr, size); 481 return user(hdr); 482 } 483 484 return NULL; 485} 486 487extern "C" void* chk_calloc(int nmemb, size_t size) { 488// log_message("%s: %s\n", __FILE__, __FUNCTION__); 489 size_t total_size = nmemb * size; 490 hdr_t* hdr = static_cast<hdr_t*>(Malloc(calloc)(1, sizeof(hdr_t) + total_size + sizeof(ftr_t))); 491 if (hdr) { 492 hdr->base = hdr; 493 hdr->bt_depth = get_backtrace(hdr->bt, MAX_BACKTRACE_DEPTH); 494 add(hdr, total_size); 495 return user(hdr); 496 } 497 return NULL; 498} 499 500extern "C" size_t chk_malloc_usable_size(const void* ptr) { 501 // malloc_usable_size returns 0 for NULL and unknown blocks. 502 if (ptr == NULL) 503 return 0; 504 505 const hdr_t* hdr = const_meta(ptr); 506 507 // The sentinel tail is written just after the request block bytes 508 // so there is no extra room we can report here. 509 return hdr->size; 510} 511 512static void ReportMemoryLeaks() { 513 // Use /proc/self/exe link to obtain the program name for logging 514 // purposes. If it's not available, we set it to "<unknown>". 
515 char exe[PATH_MAX]; 516 int count; 517 if ((count = readlink("/proc/self/exe", exe, sizeof(exe) - 1)) == -1) { 518 strlcpy(exe, "<unknown>", sizeof(exe)); 519 } else { 520 exe[count] = '\0'; 521 } 522 523 if (g_allocated_block_count == 0) { 524 log_message("+++ %s did not leak", exe); 525 return; 526 } 527 528 size_t index = 1; 529 const size_t total = g_allocated_block_count; 530 while (head != NULL) { 531 int safe; 532 hdr_t* block = head; 533 log_message("+++ %s leaked block of size %d at %p (leak %d of %d)", 534 exe, block->size, user(block), index++, total); 535 if (del_leak(block, &safe)) { 536 /* safe == 1, because the allocation is valid */ 537 log_backtrace(block->bt, block->bt_depth); 538 } 539 } 540 541 while (backlog_head != NULL) { 542 del_from_backlog(backlog_tail); 543 } 544} 545 546extern "C" bool malloc_debug_initialize(HashTable* hash_table) { 547 g_hash_table = hash_table; 548 549 char debug_backlog[PROP_VALUE_MAX]; 550 if (__system_property_get("libc.debug.malloc.backlog", debug_backlog)) { 551 g_malloc_debug_backlog = atoi(debug_backlog); 552 info_log("%s: setting backlog length to %d\n", getprogname(), g_malloc_debug_backlog); 553 } 554 555 backtrace_startup(); 556 return true; 557} 558 559extern "C" void malloc_debug_finalize(int malloc_debug_level) { 560 // We only track leaks at level 10. 561 if (malloc_debug_level == 10) { 562 ReportMemoryLeaks(); 563 } 564 backtrace_shutdown(); 565} 566