asan_mac.cc revision 47657ce6cbac2fa93d0fd765c5d2872443b50e87
1//===-- asan_mac.cc -------------------------------------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of AddressSanitizer, an address sanity checker.
11//
12// Mac-specific details.
13//===----------------------------------------------------------------------===//
14
15#ifdef __APPLE__
16
17#include "asan_interceptors.h"
18#include "asan_internal.h"
19#include "asan_mapping.h"
20#include "asan_procmaps.h"
21#include "asan_stack.h"
22#include "asan_thread.h"
23#include "asan_thread_registry.h"
24#include "sanitizer_common/sanitizer_libc.h"
25
26#include <crt_externs.h>  // for _NSGetEnviron
27#include <mach-o/dyld.h>
28#include <mach-o/loader.h>
29#include <sys/mman.h>
30#include <sys/resource.h>
31#include <sys/sysctl.h>
32#include <sys/ucontext.h>
33#include <pthread.h>
34#include <fcntl.h>
35#include <unistd.h>
36#include <libkern/OSAtomic.h>
37#include <CoreFoundation/CFString.h>
38
39namespace __asan {
40
// Extracts the program counter, stack pointer and frame pointer from the
// machine context delivered to a signal handler. |context| is really a
// ucontext_t*, passed as void* to keep the cross-platform interface free of
// OS-specific headers.
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
  ucontext_t *ucontext = (ucontext_t*)context;
# if __WORDSIZE == 64
  // x86-64 register names.
  *pc = ucontext->uc_mcontext->__ss.__rip;
  *bp = ucontext->uc_mcontext->__ss.__rbp;
  *sp = ucontext->uc_mcontext->__ss.__rsp;
# else
  // i386 register names.
  *pc = ucontext->uc_mcontext->__ss.__eip;
  *bp = ucontext->uc_mcontext->__ss.__ebp;
  *sp = ucontext->uc_mcontext->__ss.__esp;
# endif  // __WORDSIZE
}
53
// OS X releases, decoded from the Darwin kernel major version:
// Darwin 9.x = Leopard (10.5), 10.x = Snow Leopard (10.6), 11.x = Lion (10.7).
enum {
  MACOS_VERSION_UNKNOWN = 0,
  MACOS_VERSION_LEOPARD,
  MACOS_VERSION_SNOW_LEOPARD,
  MACOS_VERSION_LION,
};
60
61static int GetMacosVersion() {
62  int mib[2] = { CTL_KERN, KERN_OSRELEASE };
63  char version[100];
64  uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]);
65  for (int i = 0; i < maxlen; i++) version[i] = '\0';
66  // Get the version length.
67  CHECK(sysctl(mib, 2, 0, &len, 0, 0) != -1);
68  CHECK(len < maxlen);
69  CHECK(sysctl(mib, 2, version, &len, 0, 0) != -1);
70  switch (version[0]) {
71    case '9': return MACOS_VERSION_LEOPARD;
72    case '1': {
73      switch (version[1]) {
74        case '0': return MACOS_VERSION_SNOW_LEOPARD;
75        case '1': return MACOS_VERSION_LION;
76        default: return MACOS_VERSION_UNKNOWN;
77      }
78    }
79    default: return MACOS_VERSION_UNKNOWN;
80  }
81}
82
83bool PlatformHasDifferentMemcpyAndMemmove() {
84  // On OS X 10.7 memcpy() and memmove() are both resolved
85  // into memmove$VARIANT$sse42.
86  // See also http://code.google.com/p/address-sanitizer/issues/detail?id=34.
87  // TODO(glider): need to check dynamically that memcpy() and memmove() are
88  // actually the same function.
89  return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
90}
91
// No-op. Mac does not support static linkage anyway.
// (On Linux this returns the address of a symbol that only exists in
// dynamically linked binaries; here there is nothing to check.)
void *AsanDoesNotSupportStaticLinkage() {
  return 0;
}
96
97bool AsanInterceptsSignal(int signum) {
98  return (signum == SIGSEGV || signum == SIGBUS) && FLAG_handle_segv;
99}
100
101void *AsanMmapSomewhereOrDie(uptr size, const char *mem_type) {
102  size = RoundUpTo(size, kPageSize);
103  void *res = internal_mmap(0, size,
104                            PROT_READ | PROT_WRITE,
105                            MAP_PRIVATE | MAP_ANON, -1, 0);
106  if (res == (void*)-1) {
107    OutOfMemoryMessageAndDie(mem_type, size);
108  }
109  return res;
110}
111
112void *AsanMmapFixedNoReserve(uptr fixed_addr, uptr size) {
113  return internal_mmap((void*)fixed_addr, size,
114                      PROT_READ | PROT_WRITE,
115                      MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
116                      0, 0);
117}
118
119void *AsanMprotect(uptr fixed_addr, uptr size) {
120  return internal_mmap((void*)fixed_addr, size,
121                       PROT_NONE,
122                       MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
123                       0, 0);
124}
125
126void AsanUnmapOrDie(void *addr, uptr size) {
127  if (!addr || !size) return;
128  int res = internal_munmap(addr, size);
129  if (res != 0) {
130    Report("Failed to unmap\n");
131    Die();
132  }
133}
134
135const char *AsanGetEnv(const char *name) {
136  char ***env_ptr = _NSGetEnviron();
137  CHECK(env_ptr);
138  char **environ = *env_ptr;
139  CHECK(environ);
140  uptr name_len = internal_strlen(name);
141  while (*environ != 0) {
142    uptr len = internal_strlen(*environ);
143    if (len > name_len) {
144      const char *p = *environ;
145      if (!internal_memcmp(p, name, name_len) &&
146          p[name_len] == '=') {  // Match.
147        return *environ + name_len + 1;  // String starting after =.
148      }
149    }
150    environ++;
151  }
152  return 0;
153}
154
// Starts a fresh iteration over the dyld image list.
AsanProcMaps::AsanProcMaps() {
  Reset();
}
158
// Nothing to release: the iterator owns no resources.
AsanProcMaps::~AsanProcMaps() {
}
161
162// More information about Mach-O headers can be found in mach-o/loader.h
163// Each Mach-O image has a header (mach_header or mach_header_64) starting with
164// a magic number, and a list of linker load commands directly following the
165// header.
166// A load command is at least two 32-bit words: the command type and the
167// command size in bytes. We're interested only in segment load commands
168// (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped
169// into the task's address space.
170// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
171// segment_command_64 correspond to the memory address, memory size and the
172// file offset of the current memory segment.
173// Because these fields are taken from the images as is, one needs to add
174// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
175
void AsanProcMaps::Reset() {
  // Count down from the top.
  // TODO(glider): as per man 3 dyld, iterating over the headers with
  // _dyld_image_count is thread-unsafe. We need to register callbacks for
  // adding and removing images which will invalidate the AsanProcMaps state.
  // Note: _dyld_image_count() is one past the last valid index; Next()
  // copes because _dyld_get_image_header() returns 0 for bad indices.
  current_image_ = _dyld_image_count();
  // -1 is a sentinel meaning "per-image state not set up yet"; Next()
  // re-initializes the fields below when it starts a new image.
  current_load_cmd_count_ = -1;
  current_load_cmd_addr_ = 0;
  current_magic_ = 0;
}
186
187// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
188// Google Perftools, http://code.google.com/p/google-perftools.
189
190// NextSegmentLoad scans the current image for the next segment load command
191// and returns the start and end addresses and file offset of the corresponding
192// segment.
193// Note that the segment addresses are not necessarily sorted.
194template<u32 kLCSegment, typename SegmentCommand>
195bool AsanProcMaps::NextSegmentLoad(
196    uptr *start, uptr *end, uptr *offset,
197    char filename[], uptr filename_size) {
198  const char* lc = current_load_cmd_addr_;
199  current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
200  if (((const load_command *)lc)->cmd == kLCSegment) {
201    const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
202    const SegmentCommand* sc = (const SegmentCommand *)lc;
203    if (start) *start = sc->vmaddr + dlloff;
204    if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
205    if (offset) *offset = sc->fileoff;
206    if (filename) {
207      REAL(strncpy)(filename, _dyld_get_image_name(current_image_),
208                    filename_size);
209    }
210    if (FLAG_v >= 4)
211      Report("LC_SEGMENT: %p--%p %s+%p\n", *start, *end, filename, *offset);
212    return true;
213  }
214  return false;
215}
216
217bool AsanProcMaps::Next(uptr *start, uptr *end,
218                        uptr *offset, char filename[],
219                        uptr filename_size) {
220  for (; current_image_ >= 0; current_image_--) {
221    const mach_header* hdr = _dyld_get_image_header(current_image_);
222    if (!hdr) continue;
223    if (current_load_cmd_count_ < 0) {
224      // Set up for this image;
225      current_load_cmd_count_ = hdr->ncmds;
226      current_magic_ = hdr->magic;
227      switch (current_magic_) {
228#ifdef MH_MAGIC_64
229        case MH_MAGIC_64: {
230          current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
231          break;
232        }
233#endif
234        case MH_MAGIC: {
235          current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
236          break;
237        }
238        default: {
239          continue;
240        }
241      }
242    }
243
244    for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
245      switch (current_magic_) {
246        // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
247#ifdef MH_MAGIC_64
248        case MH_MAGIC_64: {
249          if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
250                  start, end, offset, filename, filename_size))
251            return true;
252          break;
253        }
254#endif
255        case MH_MAGIC: {
256          if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
257                  start, end, offset, filename, filename_size))
258            return true;
259          break;
260        }
261      }
262    }
263    // If we get here, no more load_cmd's in this image talk about
264    // segments.  Go on to the next image.
265  }
266  return false;
267}
268
// Maps |addr| back to the image containing it: fills |filename| with the
// image path and |offset| with the offset of |addr| inside it. Delegates to
// the generic iteration helper, which walks the segments via Next().
bool AsanProcMaps::GetObjectNameAndOffset(uptr addr, uptr *offset,
                                          char filename[],
                                          uptr filename_size) {
  return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
}
274
275void AsanThread::SetThreadStackTopAndBottom() {
276  uptr stacksize = pthread_get_stacksize_np(pthread_self());
277  void *stackaddr = pthread_get_stackaddr_np(pthread_self());
278  stack_top_ = (uptr)stackaddr;
279  stack_bottom_ = stack_top_ - stacksize;
280  int local;
281  CHECK(AddrIsInStack((uptr)&local));
282}
283
// Constructor for linker-initialized (global) locks: the storage is zeroed
// by the loader, which must equal OS_SPINLOCK_INIT (CHECKed in Lock()).
AsanLock::AsanLock(LinkerInitialized) {
  // We assume that OS_SPINLOCK_INIT is zero
}
287
// Acquires the spinlock and records the owning thread.
// The statement order matters: owner_ is checked before the lock is taken
// and written only after, so owner_ is always consistent for the holder.
void AsanLock::Lock() {
  // Verify the layout assumptions made by the LinkerInitialized constructor.
  CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
  CHECK(OS_SPINLOCK_INIT == 0);
  // The lock is non-recursive: relocking from the owner would deadlock,
  // so catch that early.
  CHECK(owner_ != (uptr)pthread_self());
  OSSpinLockLock((OSSpinLock*)&opaque_storage_);
  CHECK(!owner_);
  owner_ = (uptr)pthread_self();
}
296
// Releases the spinlock. owner_ is cleared *before* the unlock so that no
// other thread can ever observe the lock held with a stale owner.
void AsanLock::Unlock() {
  // Only the owning thread may unlock.
  CHECK(owner_ == (uptr)pthread_self());
  owner_ = 0;
  OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
302
303void AsanStackTrace::GetStackTrace(uptr max_s, uptr pc, uptr bp) {
304  size = 0;
305  trace[0] = pc;
306  if ((max_s) > 1) {
307    max_size = max_s;
308    FastUnwindStack(pc, bp);
309  }
310}
311
// The range of pages to be used for escape islands.
// TODO(glider): instead of mapping a fixed range we must find a range of
// unmapped pages in vmmap and take them.
// These constants were chosen empirically and may not work if the shadow
// memory layout changes. Unfortunately they do necessarily depend on
// kHighMemBeg or kHighMemEnd.
// Bump pointer into the island range; stays 0 until the range is lazily
// mapped by __interception_allocate_island() below.
static void *island_allocator_pos = 0;

#if __WORDSIZE == 32
# define kIslandEnd (0xffdf0000 - kPageSize)
# define kIslandBeg (kIslandEnd - 256 * kPageSize)
#else
# define kIslandEnd (0x7fffffdf0000 - kPageSize)
# define kIslandBeg (kIslandEnd - 256 * kPageSize)
#endif
327
328extern "C"
329mach_error_t __interception_allocate_island(void **ptr,
330                                            uptr unused_size,
331                                            void *unused_hint) {
332  if (!island_allocator_pos) {
333    island_allocator_pos =
334        internal_mmap((void*)kIslandBeg, kIslandEnd - kIslandBeg,
335                      PROT_READ | PROT_WRITE | PROT_EXEC,
336                      MAP_PRIVATE | MAP_ANON | MAP_FIXED,
337                      -1, 0);
338    if (island_allocator_pos != (void*)kIslandBeg) {
339      return KERN_NO_SPACE;
340    }
341    if (FLAG_v) {
342      Report("Mapped pages %p--%p for branch islands.\n",
343             kIslandBeg, kIslandEnd);
344    }
345    // Should not be very performance-critical.
346    internal_memset(island_allocator_pos, 0xCC, kIslandEnd - kIslandBeg);
347  };
348  *ptr = island_allocator_pos;
349  island_allocator_pos = (char*)island_allocator_pos + kPageSize;
350  if (FLAG_v) {
351    Report("Branch island allocated at %p\n", *ptr);
352  }
353  return err_none;
354}
355
// Callback invoked when the interception machinery is done with an island.
// Islands are currently never reclaimed.
extern "C"
mach_error_t __interception_deallocate_island(void *ptr) {
  // Do nothing.
  // TODO(glider): allow to free and reuse the island memory.
  return err_none;
}
362
363// Support for the following functions from libdispatch on Mac OS:
364//   dispatch_async_f()
365//   dispatch_async()
366//   dispatch_sync_f()
367//   dispatch_sync()
368//   dispatch_after_f()
369//   dispatch_after()
370//   dispatch_group_async_f()
371//   dispatch_group_async()
372// TODO(glider): libdispatch API contains other functions that we don't support
373// yet.
374//
375// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are
376// they can cause jobs to run on a thread different from the current one.
377// TODO(glider): if so, we need a test for this (otherwise we should remove
378// them).
379//
380// The following functions use dispatch_barrier_async_f() (which isn't a library
381// function but is exported) and are thus supported:
382//   dispatch_source_set_cancel_handler_f()
383//   dispatch_source_set_cancel_handler()
384//   dispatch_source_set_event_handler_f()
385//   dispatch_source_set_event_handler()
386//
387// The reference manual for Grand Central Dispatch is available at
388//   http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
389// The implementation details are at
390//   http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
391
// Opaque handles for libdispatch/pthread-workqueue objects that we pass
// through to the real implementation without inspecting.
typedef void* pthread_workqueue_t;
typedef void* pthread_workitem_handle_t;

typedef void* dispatch_group_t;
typedef void* dispatch_queue_t;
typedef u64 dispatch_time_t;
typedef void (*dispatch_function_t)(void *block);
typedef void* (*worker_t)(void *block);

// A wrapper for the ObjC blocks used to support libdispatch.
typedef struct {
  void *block;               // Original context argument of the job.
  dispatch_function_t func;  // Original function to run on the worker.
  int parent_tid;            // Tid of the thread that submitted the job.
} asan_block_context_t;
407
408// We use extern declarations of libdispatch functions here instead
409// of including <dispatch/dispatch.h>. This header is not present on
410// Mac OS X Leopard and eariler, and although we don't expect ASan to
411// work on legacy systems, it's bad to break the build of
412// LLVM compiler-rt there.
413extern "C" {
414void dispatch_async_f(dispatch_queue_t dq, void *ctxt,
415                      dispatch_function_t func);
416void dispatch_sync_f(dispatch_queue_t dq, void *ctxt,
417                     dispatch_function_t func);
418void dispatch_after_f(dispatch_time_t when, dispatch_queue_t dq, void *ctxt,
419                      dispatch_function_t func);
420void dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt,
421                              dispatch_function_t func);
422void dispatch_group_async_f(dispatch_group_t group, dispatch_queue_t dq,
423                            void *ctxt, dispatch_function_t func);
424int pthread_workqueue_additem_np(pthread_workqueue_t workq,
425    void *(*workitem_func)(void *), void * workitem_arg,
426    pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp);
427}  // extern "C"
428
// Trampoline run on the GCD worker thread in place of the original job.
// GCD spawns worker threads behind our back (bypassing the pthread_create
// interceptor), so if the current thread is unknown to ASan we create and
// register an AsanThread for it, parented to the submitting thread's tid.
// Then the original function is invoked and the wrapper context is freed,
// so each job runs exactly once.
extern "C"
void asan_dispatch_call_block_and_release(void *block) {
  // |stack| is needed by AsanThread::Create() and asan_free() below.
  GET_STACK_TRACE_HERE(kStackTraceMax);
  asan_block_context_t *context = (asan_block_context_t*)block;
  if (FLAG_v >= 2) {
    Report("asan_dispatch_call_block_and_release(): "
           "context: %p, pthread_self: %p\n",
           block, pthread_self());
  }
  AsanThread *t = asanThreadRegistry().GetCurrent();
  if (!t) {
    t = AsanThread::Create(context->parent_tid, 0, 0, &stack);
    asanThreadRegistry().RegisterThread(t);
    t->Init();
    asanThreadRegistry().SetCurrent(t);
  }
  // Call the original dispatcher for the block.
  context->func(context->block);
  asan_free(context, &stack);
}
449
450}  // namespace __asan
451
452using namespace __asan;  // NOLINT
453
454// Wrap |ctxt| and |func| into an asan_block_context_t.
455// The caller retains control of the allocated context.
456extern "C"
457asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
458                                         AsanStackTrace *stack) {
459  asan_block_context_t *asan_ctxt =
460      (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack);
461  asan_ctxt->block = ctxt;
462  asan_ctxt->func = func;
463  asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrMinusOne();
464  return asan_ctxt;
465}
466
467// TODO(glider): can we reduce code duplication by introducing a macro?
468INTERCEPTOR(void, dispatch_async_f, dispatch_queue_t dq, void *ctxt,
469                                    dispatch_function_t func) {
470  GET_STACK_TRACE_HERE(kStackTraceMax);
471  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
472  if (FLAG_v >= 2) {
473    Report("dispatch_async_f(): context: %p, pthread_self: %p\n",
474        asan_ctxt, pthread_self());
475    PRINT_CURRENT_STACK();
476  }
477  return REAL(dispatch_async_f)(dq, (void*)asan_ctxt,
478                                asan_dispatch_call_block_and_release);
479}
480
481INTERCEPTOR(void, dispatch_sync_f, dispatch_queue_t dq, void *ctxt,
482                                   dispatch_function_t func) {
483  GET_STACK_TRACE_HERE(kStackTraceMax);
484  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
485  if (FLAG_v >= 2) {
486    Report("dispatch_sync_f(): context: %p, pthread_self: %p\n",
487        asan_ctxt, pthread_self());
488    PRINT_CURRENT_STACK();
489  }
490  return REAL(dispatch_sync_f)(dq, (void*)asan_ctxt,
491                               asan_dispatch_call_block_and_release);
492}
493
494INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
495                                    dispatch_queue_t dq, void *ctxt,
496                                    dispatch_function_t func) {
497  GET_STACK_TRACE_HERE(kStackTraceMax);
498  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
499  if (FLAG_v >= 2) {
500    Report("dispatch_after_f: %p\n", asan_ctxt);
501    PRINT_CURRENT_STACK();
502  }
503  return REAL(dispatch_after_f)(when, dq, (void*)asan_ctxt,
504                                asan_dispatch_call_block_and_release);
505}
506
507INTERCEPTOR(void, dispatch_barrier_async_f, dispatch_queue_t dq, void *ctxt,
508                                            dispatch_function_t func) {
509  GET_STACK_TRACE_HERE(kStackTraceMax);
510  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
511  if (FLAG_v >= 2) {
512    Report("dispatch_barrier_async_f(): context: %p, pthread_self: %p\n",
513           asan_ctxt, pthread_self());
514    PRINT_CURRENT_STACK();
515  }
516  REAL(dispatch_barrier_async_f)(dq, (void*)asan_ctxt,
517                                 asan_dispatch_call_block_and_release);
518}
519
520INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
521                                          dispatch_queue_t dq, void *ctxt,
522                                          dispatch_function_t func) {
523  GET_STACK_TRACE_HERE(kStackTraceMax);
524  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
525  if (FLAG_v >= 2) {
526    Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
527           asan_ctxt, pthread_self());
528    PRINT_CURRENT_STACK();
529  }
530  REAL(dispatch_group_async_f)(group, dq, (void*)asan_ctxt,
531                               asan_dispatch_call_block_and_release);
532}
533
534// The following stuff has been extremely helpful while looking for the
535// unhandled functions that spawned jobs on Chromium shutdown. If the verbosity
536// level is 2 or greater, we wrap pthread_workqueue_additem_np() in order to
537// find the points of worker thread creation (each of such threads may be used
538// to run several tasks, that's why this is not enough to support the whole
539// libdispatch API.
540extern "C"
541void *wrap_workitem_func(void *arg) {
542  if (FLAG_v >= 2) {
543    Report("wrap_workitem_func: %p, pthread_self: %p\n", arg, pthread_self());
544  }
545  asan_block_context_t *ctxt = (asan_block_context_t*)arg;
546  worker_t fn = (worker_t)(ctxt->func);
547  void *result =  fn(ctxt->block);
548  GET_STACK_TRACE_HERE(kStackTraceMax);
549  asan_free(arg, &stack);
550  return result;
551}
552
553INTERCEPTOR(int, pthread_workqueue_additem_np, pthread_workqueue_t workq,
554    void *(*workitem_func)(void *), void * workitem_arg,
555    pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp) {
556  GET_STACK_TRACE_HERE(kStackTraceMax);
557  asan_block_context_t *asan_ctxt =
558      (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), &stack);
559  asan_ctxt->block = workitem_arg;
560  asan_ctxt->func = (dispatch_function_t)workitem_func;
561  asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrMinusOne();
562  if (FLAG_v >= 2) {
563    Report("pthread_workqueue_additem_np: %p\n", asan_ctxt);
564    PRINT_CURRENT_STACK();
565  }
566  return REAL(pthread_workqueue_additem_np)(workq, wrap_workitem_func,
567                                            asan_ctxt, itemhandlep,
568                                            gencountp);
569}
570
// CF_RC_BITS, the layout of CFRuntimeBase and __CFStrIsConstant are internal
// and subject to change in further CoreFoundation versions. Apple does not
// guarantee any binary compatibility from release to release.

// See http://opensource.apple.com/source/CF/CF-635.15/CFInternal.h
// Index of the retain-count byte within |_cfinfo|; depends on endianness.
#if defined(__BIG_ENDIAN__)
#define CF_RC_BITS 0
#endif

#if defined(__LITTLE_ENDIAN__)
#define CF_RC_BITS 3
#endif

// See http://opensource.apple.com/source/CF/CF-635.15/CFRuntime.h
// Mirror of the private base header shared by all CoreFoundation objects.
typedef struct __CFRuntimeBase {
  uptr _cfisa;
  u8 _cfinfo[4];
#if __LP64__
  u32 _rc;  // On LP64 the retain count has its own field (see below).
#endif
} CFRuntimeBase;
592
593// See http://opensource.apple.com/source/CF/CF-635.15/CFString.c
594int __CFStrIsConstant(CFStringRef str) {
595  CFRuntimeBase *base = (CFRuntimeBase*)str;
596#if __LP64__
597  return base->_rc == 0;
598#else
599  return (base->_cfinfo[CF_RC_BITS]) == 0;
600#endif
601}
602
603INTERCEPTOR(CFStringRef, CFStringCreateCopy, CFAllocatorRef alloc,
604                                             CFStringRef str) {
605  if (__CFStrIsConstant(str)) {
606    return str;
607  } else {
608    return REAL(CFStringCreateCopy)(alloc, str);
609  }
610}
611
612namespace __asan {
613
// Installs all Mac-specific interceptors (libdispatch and CoreFoundation).
// CHECKs that each hook was actually attached.
void InitializeMacInterceptors() {
  CHECK(INTERCEPT_FUNCTION(dispatch_async_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_sync_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_after_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_barrier_async_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_group_async_f));
  // We don't need to intercept pthread_workqueue_additem_np() to support the
  // libdispatch API, but it helps us to debug the unsupported functions. Let's
  // intercept it only during verbose runs.
  if (FLAG_v >= 2) {
    CHECK(INTERCEPT_FUNCTION(pthread_workqueue_additem_np));
  }
  // Normally CFStringCreateCopy should not copy constant CF strings.
  // Replacing the default CFAllocator causes constant strings to be copied
  // rather than just returned, which leads to bugs in big applications like
  // Chromium and WebKit, see
  // http://code.google.com/p/address-sanitizer/issues/detail?id=10
  // Until this problem is fixed we need to check that the string is
  // non-constant before calling CFStringCreateCopy.
  CHECK(INTERCEPT_FUNCTION(CFStringCreateCopy));
}
635
636}  // namespace __asan
637
638#endif  // __APPLE__
639