asan_malloc_mac.cc revision 24e13723f8477d8c42ab8b2a7f4f69fc089842f1
1//===-- asan_malloc_mac.cc ------------------------------------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file is a part of AddressSanitizer, an address sanity checker. 11// 12// Mac-specific malloc interception. 13//===----------------------------------------------------------------------===// 14 15#include "sanitizer_common/sanitizer_platform.h" 16#ifdef __APPLE__ 17 18#include <AvailabilityMacros.h> 19#include <CoreFoundation/CFBase.h> 20#include <dlfcn.h> 21#include <malloc/malloc.h> 22 23#include "asan_allocator.h" 24#include "asan_interceptors.h" 25#include "asan_internal.h" 26#include "asan_mac.h" 27#include "asan_report.h" 28#include "asan_stack.h" 29#include "asan_stats.h" 30#include "asan_thread_registry.h" 31 32// Similar code is used in Google Perftools, 33// http://code.google.com/p/google-perftools. 34 35// ---------------------- Replacement functions ---------------- {{{1 36using namespace __asan; // NOLINT 37 38// TODO(glider): do we need both zones? 39static malloc_zone_t *system_malloc_zone = 0; 40static malloc_zone_t asan_zone; 41 42INTERCEPTOR(malloc_zone_t *, malloc_create_zone, 43 vm_size_t start_size, unsigned zone_flags) { 44 if (!asan_inited) __asan_init(); 45 GET_STACK_TRACE_MALLOC; 46 malloc_zone_t *new_zone = 47 (malloc_zone_t*)asan_malloc(sizeof(asan_zone), &stack); 48 internal_memcpy(new_zone, &asan_zone, sizeof(asan_zone)); 49 new_zone->zone_name = NULL; // The name will be changed anyway. 50 return new_zone; 51} 52 53INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) { 54 if (!asan_inited) __asan_init(); 55 return &asan_zone; 56} 57 58INTERCEPTOR(malloc_zone_t *, malloc_default_purgeable_zone, void) { 59 // FIXME: ASan should support purgeable allocations. 
60 // https://code.google.com/p/address-sanitizer/issues/detail?id=139 61 if (!asan_inited) __asan_init(); 62 return &asan_zone; 63} 64 65INTERCEPTOR(void, malloc_make_purgeable, void *ptr) { 66 // FIXME: ASan should support purgeable allocations. Ignoring them is fine 67 // for now. 68 if (!asan_inited) __asan_init(); 69} 70 71INTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) { 72 // FIXME: ASan should support purgeable allocations. Ignoring them is fine 73 // for now. 74 if (!asan_inited) __asan_init(); 75 // Must return 0 if the contents were not purged since the last call to 76 // malloc_make_purgeable(). 77 return 0; 78} 79 80INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) { 81 if (!asan_inited) __asan_init(); 82 // Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes. 83 size_t buflen = 6 + (name ? internal_strlen(name) : 0); 84 InternalScopedBuffer<char> new_name(buflen); 85 if (name && zone->introspect == asan_zone.introspect) { 86 internal_snprintf(new_name.data(), buflen, "asan-%s", name); 87 name = new_name.data(); 88 } 89 90 // Call the system malloc's implementation for both external and our zones, 91 // since that appropriately changes VM region protections on the zone. 
92 REAL(malloc_set_zone_name)(zone, name); 93} 94 95INTERCEPTOR(void *, malloc, size_t size) { 96 if (!asan_inited) __asan_init(); 97 GET_STACK_TRACE_MALLOC; 98 void *res = asan_malloc(size, &stack); 99 return res; 100} 101 102INTERCEPTOR(void, free, void *ptr) { 103 if (!asan_inited) __asan_init(); 104 if (!ptr) return; 105 GET_STACK_TRACE_FREE; 106 asan_free(ptr, &stack, FROM_MALLOC); 107} 108 109INTERCEPTOR(void *, realloc, void *ptr, size_t size) { 110 if (!asan_inited) __asan_init(); 111 GET_STACK_TRACE_MALLOC; 112 return asan_realloc(ptr, size, &stack); 113} 114 115INTERCEPTOR(void *, calloc, size_t nmemb, size_t size) { 116 if (!asan_inited) __asan_init(); 117 GET_STACK_TRACE_MALLOC; 118 return asan_calloc(nmemb, size, &stack); 119} 120 121INTERCEPTOR(void *, valloc, size_t size) { 122 if (!asan_inited) __asan_init(); 123 GET_STACK_TRACE_MALLOC; 124 return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC); 125} 126 127INTERCEPTOR(size_t, malloc_good_size, size_t size) { 128 if (!asan_inited) __asan_init(); 129 return asan_zone.introspect->good_size(&asan_zone, size); 130} 131 132INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) { 133 if (!asan_inited) __asan_init(); 134 CHECK(memptr); 135 GET_STACK_TRACE_MALLOC; 136 void *result = asan_memalign(alignment, size, &stack, FROM_MALLOC); 137 if (result) { 138 *memptr = result; 139 return 0; 140 } 141 return -1; 142} 143 144namespace { 145 146// TODO(glider): the mz_* functions should be united with the Linux wrappers, 147// as they are basically copied from there. 
// Callbacks installed into the malloc_zone_t function table of |asan_zone|.
// Each one forwards to the ASan allocator once __asan_init() has run; before
// that, the calls that can legally happen early are forwarded to the original
// system zone (or served from a static pool, see mz_calloc).

// Returns the allocated size of |ptr| if it belongs to ASan, 0 otherwise.
size_t mz_size(malloc_zone_t* zone, const void* ptr) {
  return asan_mz_size(ptr);
}

void *mz_malloc(malloc_zone_t *zone, size_t size) {
  if (!asan_inited) {
    // Pre-init allocations go to the saved system zone.
    CHECK(system_malloc_zone);
    return malloc_zone_malloc(system_malloc_zone, size);
  }
  GET_STACK_TRACE_MALLOC;
  return asan_malloc(size, &stack);
}

void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
  if (!asan_inited) {
    // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
    // Serve those early requests from a static pool; the pool is static
    // storage, so it is zero-initialized as calloc requires, and it is never
    // freed (the few dlsym allocations are intentionally leaked).
    // NOTE(review): |nmemb * size| is not checked for overflow here, unlike
    // the post-init path (asan_calloc). Presumably safe because dlsym only
    // requests tiny sizes -- confirm.
    const size_t kCallocPoolSize = 1024;
    static uptr calloc_memory_for_dlsym[kCallocPoolSize];
    static size_t allocated;
    size_t size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
    void *mem = (void*)&calloc_memory_for_dlsym[allocated];
    allocated += size_in_words;
    // Aborts (before |mem| is handed out) if the pool is exhausted.
    CHECK(allocated < kCallocPoolSize);
    return mem;
  }
  GET_STACK_TRACE_MALLOC;
  return asan_calloc(nmemb, size, &stack);
}

void *mz_valloc(malloc_zone_t *zone, size_t size) {
  if (!asan_inited) {
    CHECK(system_malloc_zone);
    return malloc_zone_valloc(system_malloc_zone, size);
  }
  GET_STACK_TRACE_MALLOC;
  // valloc == page-aligned malloc.
  return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
}

// Declares |zone_ptr| (owning zone of |ptr|, or 0) and |zone_name| in the
// current scope, for use in diagnostics.
#define GET_ZONE_FOR_PTR(ptr) \
    malloc_zone_t *zone_ptr = malloc_zone_from_ptr(ptr); \
    const char *zone_name = (zone_ptr == 0) ? 0 : zone_ptr->zone_name

// Shared free path for mz_free() (and, historically, the CFAllocator hooks).
void ALWAYS_INLINE free_common(void *context, void *ptr) {
  if (!ptr) return;
  GET_STACK_TRACE_FREE;
  // FIXME: need to retire this flag.
  if (!flags()->mac_ignore_invalid_free) {
    asan_free(ptr, &stack, FROM_MALLOC);
  } else {
    // NOTE(review): with mac_ignore_invalid_free set, *every* free is skipped
    // (not just invalid ones), so all memory is deliberately leaked after a
    // warning.
    GET_ZONE_FOR_PTR(ptr);
    WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
    return;
  }
}

// TODO(glider): the allocation callbacks need to be refactored.
void mz_free(malloc_zone_t *zone, void *ptr) {
  free_common(zone, ptr);
}

void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
  if (!ptr) {
    // realloc(NULL, size) behaves like malloc(size).
    GET_STACK_TRACE_MALLOC;
    return asan_malloc(size, &stack);
  } else {
    if (asan_mz_size(ptr)) {
      // |ptr| is a live ASan allocation: do a real reallocation.
      GET_STACK_TRACE_MALLOC;
      return asan_realloc(ptr, size, &stack);
    } else {
      // We can't recover from reallocating an unknown address, because
      // this would require reading at most |size| bytes from
      // potentially unaccessible memory.
      GET_STACK_TRACE_FREE;
      GET_ZONE_FOR_PTR(ptr);
      // NOTE(review): assumed to report a fatal error and not return
      // (otherwise this function falls off its end) -- confirm it is
      // declared noreturn.
      ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
    }
  }
}

void mz_destroy(malloc_zone_t* zone) {
  // A no-op -- we will not be destroyed!
  Report("mz_destroy() called -- ignoring\n");
}

  // from AvailabilityMacros.h
#if defined(MAC_OS_X_VERSION_10_6) && \
    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
// memalign entry is only present in version >= 5 zones (OS X 10.6+).
void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
  if (!asan_inited) {
    CHECK(system_malloc_zone);
    return malloc_zone_memalign(system_malloc_zone, align, size);
  }
  GET_STACK_TRACE_MALLOC;
  return asan_memalign(align, size, &stack, FROM_MALLOC);
}

// This function is currently unused, and we build with -Werror.
#if 0
void mz_free_definite_size(malloc_zone_t* zone, void *ptr, size_t size) {
  // TODO(glider): check that |size| is valid.
  UNIMPLEMENTED();
}
#endif
#endif

// --- malloc_introspection_t callbacks (zone debugging/inspection API). ---

kern_return_t mi_enumerator(task_t task, void *,
                            unsigned type_mask, vm_address_t zone_address,
                            memory_reader_t reader,
                            vm_range_recorder_t recorder) {
  // Should enumerate all the pointers we have.  Seems like a lot of work.
  return KERN_FAILURE;
}

size_t mi_good_size(malloc_zone_t *zone, size_t size) {
  // I think it's always safe to return size, but we maybe could do better.
  return size;
}

boolean_t mi_check(malloc_zone_t *zone) {
  UNIMPLEMENTED();
}

void mi_print(malloc_zone_t *zone, boolean_t verbose) {
  UNIMPLEMENTED();
}

void mi_log(malloc_zone_t *zone, void *address) {
  // I don't think we support anything like this
}

// Called by libmalloc around fork() -- lock/unlock the allocator so the
// child doesn't inherit a held lock.
void mi_force_lock(malloc_zone_t *zone) {
  asan_mz_force_lock();
}

void mi_force_unlock(malloc_zone_t *zone) {
  asan_mz_force_unlock();
}

void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
  AsanMallocStats malloc_stats;
  asanThreadRegistry().FillMallocStatistics(&malloc_stats);
  // AsanMallocStats is expected to be layout-compatible with
  // malloc_statistics_t; the CHECK guards the memcpy below.
  CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats));
  internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
}

#if defined(MAC_OS_X_VERSION_10_6) && \
    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
boolean_t mi_zone_locked(malloc_zone_t *zone) {
  // UNIMPLEMENTED();
  return false;
}
#endif

}  // unnamed namespace

namespace __asan {

// Builds the ASan malloc zone (function table + introspection table) and
// registers it with libmalloc, so that zone allocations get sanitized.
void ReplaceSystemMalloc() {
  static malloc_introspection_t asan_introspection;
  // Ok to use internal_memset, these places are not performance-critical.
  internal_memset(&asan_introspection, 0, sizeof(asan_introspection));

  asan_introspection.enumerator = &mi_enumerator;
  asan_introspection.good_size = &mi_good_size;
  asan_introspection.check = &mi_check;
  asan_introspection.print = &mi_print;
  asan_introspection.log = &mi_log;
  asan_introspection.force_lock = &mi_force_lock;
  asan_introspection.force_unlock = &mi_force_unlock;
  asan_introspection.statistics = &mi_statistics;

  internal_memset(&asan_zone, 0, sizeof(malloc_zone_t));

  // Start with a version 4 zone which is used for OS X 10.4 and 10.5.
  asan_zone.version = 4;
  asan_zone.zone_name = "asan";
  asan_zone.size = &mz_size;
  asan_zone.malloc = &mz_malloc;
  asan_zone.calloc = &mz_calloc;
  asan_zone.valloc = &mz_valloc;
  asan_zone.free = &mz_free;
  asan_zone.realloc = &mz_realloc;
  asan_zone.destroy = &mz_destroy;
  asan_zone.batch_malloc = 0;
  asan_zone.batch_free = 0;
  asan_zone.introspect = &asan_introspection;

  // from AvailabilityMacros.h
#if defined(MAC_OS_X_VERSION_10_6) && \
    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
  // Switch to version 6 on OSX 10.6 to support memalign.
  asan_zone.version = 6;
  asan_zone.free_definite_size = 0;
  asan_zone.memalign = &mz_memalign;
  asan_introspection.zone_locked = &mi_zone_locked;
#endif

  // Register the ASan zone.
  malloc_zone_register(&asan_zone);
}
}  // namespace __asan

#endif  // __APPLE__