asan_malloc_mac.cc revision 6b233edfdb741fa75faec815ff3940724e8126a5
//===-- asan_malloc_mac.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Mac-specific malloc interception.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC

#include <AvailabilityMacros.h>
#include <CoreFoundation/CFBase.h>
#include <dlfcn.h>
#include <malloc/malloc.h>
#include <sys/mman.h>

#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mac.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"

// Similar code is used in Google Perftools,
// http://code.google.com/p/google-perftools.

// ---------------------- Replacement functions ---------------- {{{1
using namespace __asan;  // NOLINT

// TODO(glider): do we need both zones?
// |system_malloc_zone| serves as a fallback for allocations requested
// before ASan is fully initialized (see the mz_* callbacks below).
static malloc_zone_t *system_malloc_zone = 0;
// The zone ASan presents to the program as the default malloc zone; its
// callback table is filled in by ReplaceSystemMalloc().
static malloc_zone_t asan_zone;

// Intercepts malloc_create_zone(). Rather than creating an independent
// zone, returns a page-aligned copy of |asan_zone| so allocations made
// through the "new" zone still go to the ASan allocator.
// |start_size| and |zone_flags| are intentionally ignored.
INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
            vm_size_t start_size, unsigned zone_flags) {
  if (!asan_inited) __asan_init();
  GET_STACK_TRACE_MALLOC;
  uptr page_size = GetPageSizeCached();
  // Round up to whole pages so the zone can be mprotect()ed below.
  uptr allocated_size = RoundUpTo(sizeof(asan_zone), page_size);
  malloc_zone_t *new_zone =
      (malloc_zone_t*)asan_memalign(page_size, allocated_size,
                                    &stack, FROM_MALLOC);
  internal_memcpy(new_zone, &asan_zone, sizeof(asan_zone));
  new_zone->zone_name = NULL;  // The name will be changed anyway.
  // Prevent the client app from overwriting the zone contents.
  // Library functions that need to modify the zone will set PROT_WRITE on it.
  mprotect(new_zone, allocated_size, PROT_READ);
  return new_zone;
}

// Always report |asan_zone| as the default zone, so zone-based allocation
// APIs that start from malloc_default_zone() are sanitized too.
INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) {
  if (!asan_inited) __asan_init();
  return &asan_zone;
}

INTERCEPTOR(malloc_zone_t *, malloc_default_purgeable_zone, void) {
  // FIXME: ASan should support purgeable allocations.
  // https://code.google.com/p/address-sanitizer/issues/detail?id=139
  if (!asan_inited) __asan_init();
  return &asan_zone;
}

INTERCEPTOR(void, malloc_make_purgeable, void *ptr) {
  // FIXME: ASan should support purgeable allocations. Ignoring them is fine
  // for now.
  if (!asan_inited) __asan_init();
}

INTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) {
  // FIXME: ASan should support purgeable allocations. Ignoring them is fine
  // for now.
  if (!asan_inited) __asan_init();
  // Must return 0 if the contents were not purged since the last call to
  // malloc_make_purgeable().
  return 0;
}

// Prefixes the requested name with "asan-" for zones owned by ASan before
// delegating to the real malloc_set_zone_name().
INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
  if (!asan_inited) __asan_init();
  // Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes.
  size_t buflen = 6 + (name ? internal_strlen(name) : 0);
  InternalScopedBuffer<char> new_name(buflen);
  // A matching introspection pointer identifies zones cloned from
  // |asan_zone| by malloc_create_zone() above (and |asan_zone| itself).
  if (name && zone->introspect == asan_zone.introspect) {
    internal_snprintf(new_name.data(), buflen, "asan-%s", name);
    name = new_name.data();
  }

  // Call the system malloc's implementation for both external and our zones,
  // since that appropriately changes VM region protections on the zone.
  REAL(malloc_set_zone_name)(zone, name);
}

INTERCEPTOR(void *, malloc, size_t size) {
  if (!asan_inited) __asan_init();
  GET_STACK_TRACE_MALLOC;
  void *res = asan_malloc(size, &stack);
  return res;
}

INTERCEPTOR(void, free, void *ptr) {
  if (!asan_inited) __asan_init();
  if (!ptr) return;
  GET_STACK_TRACE_FREE;
  asan_free(ptr, &stack, FROM_MALLOC);
}

INTERCEPTOR(void *, realloc, void *ptr, size_t size) {
  if (!asan_inited) __asan_init();
  GET_STACK_TRACE_MALLOC;
  return asan_realloc(ptr, size, &stack);
}

INTERCEPTOR(void *, calloc, size_t nmemb, size_t size) {
  if (!asan_inited) __asan_init();
  GET_STACK_TRACE_MALLOC;
  return asan_calloc(nmemb, size, &stack);
}

// valloc() is page-aligned malloc(), hence the memalign call.
INTERCEPTOR(void *, valloc, size_t size) {
  if (!asan_inited) __asan_init();
  GET_STACK_TRACE_MALLOC;
  return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
}

INTERCEPTOR(size_t, malloc_good_size, size_t size) {
  if (!asan_inited) __asan_init();
  return asan_zone.introspect->good_size(&asan_zone, size);
}

INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) {
  if (!asan_inited) __asan_init();
  CHECK(memptr);
  GET_STACK_TRACE_MALLOC;
  void *result = asan_memalign(alignment, size, &stack, FROM_MALLOC);
  if (result) {
    *memptr = result;
    return 0;
  }
  // NOTE(review): POSIX specifies that failure is signalled with an errno
  // value (ENOMEM/EINVAL) rather than -1 -- confirm callers only test for
  // non-zero before changing this.
  return -1;
}

namespace {

// TODO(glider): the mz_* functions should be united with the Linux wrappers,
// as they are basically copied from there.
// Returns the size of an allocation owned by the ASan allocator, or 0 if
// |ptr| does not belong to it. The |zone| argument is ignored.
size_t mz_size(malloc_zone_t* zone, const void* ptr) {
  return asan_mz_size(ptr);
}

void *mz_malloc(malloc_zone_t *zone, size_t size) {
  if (!asan_inited) {
    // Too early for the ASan allocator -- fall back to the system zone.
    CHECK(system_malloc_zone);
    return malloc_zone_malloc(system_malloc_zone, size);
  }
  GET_STACK_TRACE_MALLOC;
  return asan_malloc(size, &stack);
}

void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
  if (!asan_inited) {
    // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
    // Serve such bootstrap requests from a small static pool. This memory
    // is never reclaimed; static storage is zero-initialized, which
    // satisfies the calloc() contract.
    const size_t kCallocPoolSize = 1024;
    static uptr calloc_memory_for_dlsym[kCallocPoolSize];
    static size_t allocated;
    // Round the byte count up to whole machine words.
    size_t size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
    void *mem = (void*)&calloc_memory_for_dlsym[allocated];
    allocated += size_in_words;
    CHECK(allocated < kCallocPoolSize);
    return mem;
  }
  GET_STACK_TRACE_MALLOC;
  return asan_calloc(nmemb, size, &stack);
}

void *mz_valloc(malloc_zone_t *zone, size_t size) {
  if (!asan_inited) {
    CHECK(system_malloc_zone);
    return malloc_zone_valloc(system_malloc_zone, size);
  }
  GET_STACK_TRACE_MALLOC;
  // valloc() is page-aligned malloc().
  return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
}

// Looks up the zone owning |ptr| and binds the |zone_ptr|/|zone_name|
// locals used by the error-reporting calls below.
#define GET_ZONE_FOR_PTR(ptr) \
  malloc_zone_t *zone_ptr = malloc_zone_from_ptr(ptr); \
  const char *zone_name = (zone_ptr == 0) ? 0 : zone_ptr->zone_name

// Common implementation behind the free() entry points. |context| is unused.
void ALWAYS_INLINE free_common(void *context, void *ptr) {
  if (!ptr) return;
  GET_STACK_TRACE_FREE;
  // FIXME: need to retire this flag.
  if (!flags()->mac_ignore_invalid_free) {
    asan_free(ptr, &stack, FROM_MALLOC);
  } else {
    // With mac_ignore_invalid_free set, do not free at all: report via
    // WarnMacFreeUnallocated() and deliberately leak the memory.
    GET_ZONE_FOR_PTR(ptr);
    WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
    return;
  }
}

// TODO(glider): the allocation callbacks need to be refactored.
void mz_free(malloc_zone_t *zone, void *ptr) {
  free_common(zone, ptr);
}

void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
  if (!ptr) {
    // realloc(NULL, size) behaves like malloc(size).
    GET_STACK_TRACE_MALLOC;
    return asan_malloc(size, &stack);
  } else {
    if (asan_mz_size(ptr)) {
      // |ptr| is ours: a regular sanitized realloc.
      GET_STACK_TRACE_MALLOC;
      return asan_realloc(ptr, size, &stack);
    } else {
      // We can't recover from reallocating an unknown address, because
      // this would require reading at most |size| bytes from
      // potentially unaccessible memory.
      GET_STACK_TRACE_FREE;
      GET_ZONE_FOR_PTR(ptr);
      ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
    }
  }
}

void mz_destroy(malloc_zone_t* zone) {
  // A no-op -- we will not be destroyed!
  Report("mz_destroy() called -- ignoring\n");
}

  // from AvailabilityMacros.h
#if defined(MAC_OS_X_VERSION_10_6) && \
    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
// memalign support exists only in version >= 5 zones (OS X 10.6+).
void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
  if (!asan_inited) {
    CHECK(system_malloc_zone);
    return malloc_zone_memalign(system_malloc_zone, align, size);
  }
  GET_STACK_TRACE_MALLOC;
  return asan_memalign(align, size, &stack, FROM_MALLOC);
}

// This function is currently unused, and we build with -Werror.
#if 0
void mz_free_definite_size(malloc_zone_t* zone, void *ptr, size_t size) {
  // TODO(glider): check that |size| is valid.
  UNIMPLEMENTED();
}
#endif
#endif

// ---- malloc_introspection_t callbacks. ----

kern_return_t mi_enumerator(task_t task, void *,
                            unsigned type_mask, vm_address_t zone_address,
                            memory_reader_t reader,
                            vm_range_recorder_t recorder) {
  // Should enumerate all the pointers we have. Seems like a lot of work.
  return KERN_FAILURE;
}

size_t mi_good_size(malloc_zone_t *zone, size_t size) {
  // I think it's always safe to return size, but we maybe could do better.
  return size;
}

boolean_t mi_check(malloc_zone_t *zone) {
  UNIMPLEMENTED();
}

void mi_print(malloc_zone_t *zone, boolean_t verbose) {
  UNIMPLEMENTED();
}

void mi_log(malloc_zone_t *zone, void *address) {
  // I don't think we support anything like this
}

void mi_force_lock(malloc_zone_t *zone) {
  asan_mz_force_lock();
}

void mi_force_unlock(malloc_zone_t *zone) {
  asan_mz_force_unlock();
}

void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
  AsanMallocStats malloc_stats;
  FillMallocStatistics(&malloc_stats);
  // The memcpy below relies on the two stats structs being layout-identical.
  CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats));
  internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
}

#if defined(MAC_OS_X_VERSION_10_6) && \
    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
boolean_t mi_zone_locked(malloc_zone_t *zone) {
  // UNIMPLEMENTED();
  return false;
}
#endif

}  // unnamed namespace

namespace __asan {

// Builds the ASan malloc zone (callback table + introspection table) and
// registers it with the system so zone-based allocation APIs see it.
// The plain malloc/free interceptors above handle the non-zone entry
// points independently of this registration.
void ReplaceSystemMalloc() {
  static malloc_introspection_t asan_introspection;
  // Ok to use internal_memset, these places are not performance-critical.
  internal_memset(&asan_introspection, 0, sizeof(asan_introspection));

  asan_introspection.enumerator = &mi_enumerator;
  asan_introspection.good_size = &mi_good_size;
  asan_introspection.check = &mi_check;
  asan_introspection.print = &mi_print;
  asan_introspection.log = &mi_log;
  asan_introspection.force_lock = &mi_force_lock;
  asan_introspection.force_unlock = &mi_force_unlock;
  asan_introspection.statistics = &mi_statistics;

  internal_memset(&asan_zone, 0, sizeof(malloc_zone_t));

  // Start with a version 4 zone which is used for OS X 10.4 and 10.5.
  asan_zone.version = 4;
  asan_zone.zone_name = "asan";
  asan_zone.size = &mz_size;
  asan_zone.malloc = &mz_malloc;
  asan_zone.calloc = &mz_calloc;
  asan_zone.valloc = &mz_valloc;
  asan_zone.free = &mz_free;
  asan_zone.realloc = &mz_realloc;
  asan_zone.destroy = &mz_destroy;
  asan_zone.batch_malloc = 0;
  asan_zone.batch_free = 0;
  asan_zone.introspect = &asan_introspection;

  // from AvailabilityMacros.h
#if defined(MAC_OS_X_VERSION_10_6) && \
    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
  // Switch to version 6 on OSX 10.6 to support memalign.
  asan_zone.version = 6;
  asan_zone.free_definite_size = 0;
  asan_zone.memalign = &mz_memalign;
  asan_introspection.zone_locked = &mi_zone_locked;
#endif

  // Register the ASan zone.
  malloc_zone_register(&asan_zone);
}
}  // namespace __asan

#endif  // SANITIZER_MAC