asan_mac.cc revision 2221f553886c37401b5d84923634ebf04bc482f1
1//===-- asan_mac.cc -------------------------------------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of AddressSanitizer, an address sanity checker.
11//
12// Mac-specific details.
13//===----------------------------------------------------------------------===//
14
15#ifdef __APPLE__
16
17#include "asan_interceptors.h"
18#include "asan_internal.h"
19#include "asan_mapping.h"
20#include "asan_procmaps.h"
21#include "asan_stack.h"
22#include "asan_thread.h"
23#include "asan_thread_registry.h"
24#include "sanitizer_common/sanitizer_libc.h"
25
26#include <crt_externs.h>  // for _NSGetEnviron
27#include <mach-o/dyld.h>
28#include <mach-o/loader.h>
29#include <sys/mman.h>
30#include <sys/resource.h>
31#include <sys/sysctl.h>
32#include <sys/ucontext.h>
33#include <pthread.h>
34#include <fcntl.h>
35#include <unistd.h>
36#include <libkern/OSAtomic.h>
37#include <CoreFoundation/CFString.h>
38
39using namespace __sanitizer;  // NOLINT
40
41namespace __asan {
42
43void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
44  ucontext_t *ucontext = (ucontext_t*)context;
45# if __WORDSIZE == 64
46  *pc = ucontext->uc_mcontext->__ss.__rip;
47  *bp = ucontext->uc_mcontext->__ss.__rbp;
48  *sp = ucontext->uc_mcontext->__ss.__rsp;
49# else
50  *pc = ucontext->uc_mcontext->__ss.__eip;
51  *bp = ucontext->uc_mcontext->__ss.__ebp;
52  *sp = ucontext->uc_mcontext->__ss.__esp;
53# endif  // __WORDSIZE
54}
55
// Known Darwin kernel generations, as reported by the kern.osrelease sysctl.
enum {
  MACOS_VERSION_UNKNOWN = 0,
  MACOS_VERSION_LEOPARD,       // Darwin 9.x
  MACOS_VERSION_SNOW_LEOPARD,  // Darwin 10.x
  MACOS_VERSION_LION,          // Darwin 11.x
};

// Returns one of the MACOS_VERSION_* constants by reading the major version
// digits of the kern.osrelease string.
static int GetMacosVersion() {
  int mib[2] = { CTL_KERN, KERN_OSRELEASE };
  char version[100];
  size_t len = 0;
  const size_t maxlen = sizeof(version);
  // Zero the buffer with internal_memset instead of the original's manual
  // int-indexed loop (which compared a signed index against a size_t bound).
  internal_memset(version, 0, maxlen);
  // First query the length of the version string, then fetch it.
  CHECK(sysctl(mib, 2, 0, &len, 0, 0) != -1);
  CHECK(len < maxlen);
  CHECK(sysctl(mib, 2, version, &len, 0, 0) != -1);
  switch (version[0]) {
    case '9': return MACOS_VERSION_LEOPARD;
    case '1': {
      switch (version[1]) {
        case '0': return MACOS_VERSION_SNOW_LEOPARD;
        case '1': return MACOS_VERSION_LION;
        default: return MACOS_VERSION_UNKNOWN;
      }
    }
    default: return MACOS_VERSION_UNKNOWN;
  }
}
84
85bool PlatformHasDifferentMemcpyAndMemmove() {
86  // On OS X 10.7 memcpy() and memmove() are both resolved
87  // into memmove$VARIANT$sse42.
88  // See also http://code.google.com/p/address-sanitizer/issues/detail?id=34.
89  // TODO(glider): need to check dynamically that memcpy() and memmove() are
90  // actually the same function.
91  return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
92}
93
// No-op. Mac does not support static linkage anyway.
void *AsanDoesNotSupportStaticLinkage() {
  return NULL;
}
98
99bool AsanInterceptsSignal(int signum) {
100  return (signum == SIGSEGV || signum == SIGBUS) && FLAG_handle_segv;
101}
102
103void *AsanMmapSomewhereOrDie(size_t size, const char *mem_type) {
104  size = RoundUpTo(size, kPageSize);
105  void *res = internal_mmap(0, size,
106                            PROT_READ | PROT_WRITE,
107                            MAP_PRIVATE | MAP_ANON, -1, 0);
108  if (res == (void*)-1) {
109    OutOfMemoryMessageAndDie(mem_type, size);
110  }
111  return res;
112}
113
114void *AsanMmapFixedNoReserve(uptr fixed_addr, size_t size) {
115  return internal_mmap((void*)fixed_addr, size,
116                      PROT_READ | PROT_WRITE,
117                      MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
118                      0, 0);
119}
120
121void *AsanMprotect(uptr fixed_addr, size_t size) {
122  return internal_mmap((void*)fixed_addr, size,
123                       PROT_NONE,
124                       MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
125                       0, 0);
126}
127
128void AsanUnmapOrDie(void *addr, size_t size) {
129  if (!addr || !size) return;
130  int res = munmap(addr, size);
131  if (res != 0) {
132    Report("Failed to unmap\n");
133    AsanDie();
134  }
135}
136
137const char *AsanGetEnv(const char *name) {
138  char ***env_ptr = _NSGetEnviron();
139  CHECK(env_ptr);
140  char **environ = *env_ptr;
141  CHECK(environ);
142  size_t name_len = internal_strlen(name);
143  while (*environ != 0) {
144    size_t len = internal_strlen(*environ);
145    if (len > name_len) {
146      const char *p = *environ;
147      if (!internal_memcmp(p, name, name_len) &&
148          p[name_len] == '=') {  // Match.
149        return *environ + name_len + 1;  // String starting after =.
150      }
151    }
152    environ++;
153  }
154  return 0;
155}
156
// Positions the iterator at the first (topmost) image.
AsanProcMaps::AsanProcMaps() {
  Reset();
}
160
// Nothing to release: the iterator owns no resources.
AsanProcMaps::~AsanProcMaps() {
}
163
164// More information about Mach-O headers can be found in mach-o/loader.h
165// Each Mach-O image has a header (mach_header or mach_header_64) starting with
166// a magic number, and a list of linker load commands directly following the
167// header.
168// A load command is at least two 32-bit words: the command type and the
169// command size in bytes. We're interested only in segment load commands
170// (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped
171// into the task's address space.
172// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
173// segment_command_64 correspond to the memory address, memory size and the
174// file offset of the current memory segment.
175// Because these fields are taken from the images as is, one needs to add
176// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
177
// Rewinds the iterator: start from the highest image index and force
// per-image load-command setup on the next call to Next().
void AsanProcMaps::Reset() {
  // Count down from the top.
  // TODO(glider): as per man 3 dyld, iterating over the headers with
  // _dyld_image_count is thread-unsafe. We need to register callbacks for
  // adding and removing images which will invalidate the AsanProcMaps state.
  current_image_ = _dyld_image_count();
  current_load_cmd_count_ = -1;  // < 0 makes Next() (re)initialize the image.
  current_load_cmd_addr_ = 0;
  current_magic_ = 0;
}
188
189// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
190// Google Perftools, http://code.google.com/p/google-perftools.
191
192// NextSegmentLoad scans the current image for the next segment load command
193// and returns the start and end addresses and file offset of the corresponding
194// segment.
195// Note that the segment addresses are not necessarily sorted.
196template<u32 kLCSegment, typename SegmentCommand>
197bool AsanProcMaps::NextSegmentLoad(
198    uptr *start, uptr *end, uptr *offset,
199    char filename[], size_t filename_size) {
200  const char* lc = current_load_cmd_addr_;
201  current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
202  if (((const load_command *)lc)->cmd == kLCSegment) {
203    const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
204    const SegmentCommand* sc = (const SegmentCommand *)lc;
205    if (start) *start = sc->vmaddr + dlloff;
206    if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
207    if (offset) *offset = sc->fileoff;
208    if (filename) {
209      REAL(strncpy)(filename, _dyld_get_image_name(current_image_),
210                    filename_size);
211    }
212    if (FLAG_v >= 4)
213      Report("LC_SEGMENT: %p--%p %s+%p\n", *start, *end, filename, *offset);
214    return true;
215  }
216  return false;
217}
218
// Iterates over the segment load commands of all mapped images, returning
// one segment per call (via NextSegmentLoad). Iteration state lives in
// current_image_ / current_load_cmd_count_ / current_load_cmd_addr_.
// Returns false once all images are exhausted.
bool AsanProcMaps::Next(uptr *start, uptr *end,
                        uptr *offset, char filename[],
                        size_t filename_size) {
  for (; current_image_ >= 0; current_image_--) {
    const mach_header* hdr = _dyld_get_image_header(current_image_);
    if (!hdr) continue;
    if (current_load_cmd_count_ < 0) {
      // Set up for this image;
      current_load_cmd_count_ = hdr->ncmds;
      current_magic_ = hdr->magic;
      switch (current_magic_) {
#ifdef MH_MAGIC_64
        case MH_MAGIC_64: {
          // 64-bit image: load commands follow a mach_header_64.
          current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
          break;
        }
#endif
        case MH_MAGIC: {
          // 32-bit image: load commands follow a mach_header.
          current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
          break;
        }
        default: {
          // Unrecognized magic: skip this image entirely.
          continue;
        }
      }
    }

    // NOTE(review): the counter starts at hdr->ncmds but the condition is
    // ">= 0", so this loop appears to examine ncmds + 1 load commands per
    // image -- verify against the Mach-O command-list layout whether the
    // final iteration can read past the last command.
    for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
      switch (current_magic_) {
        // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
#ifdef MH_MAGIC_64
        case MH_MAGIC_64: {
          if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
                  start, end, offset, filename, filename_size))
            return true;
          break;
        }
#endif
        case MH_MAGIC: {
          if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
                  start, end, offset, filename, filename_size))
            return true;
          break;
        }
      }
    }
    // If we get here, no more load_cmd's in this image talk about
    // segments.  Go on to the next image.
  }
  return false;
}
270
// Resolves |addr| to the name of the containing image and the offset within
// it, delegating to the generic segment-iteration helper.
bool AsanProcMaps::GetObjectNameAndOffset(uptr addr, uptr *offset,
                                          char filename[],
                                          size_t filename_size) {
  return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
}
276
277void AsanThread::SetThreadStackTopAndBottom() {
278  size_t stacksize = pthread_get_stacksize_np(pthread_self());
279  void *stackaddr = pthread_get_stackaddr_np(pthread_self());
280  stack_top_ = (uptr)stackaddr;
281  stack_bottom_ = stack_top_ - stacksize;
282  int local;
283  CHECK(AddrIsInStack((uptr)&local));
284}
285
// Linker-initialized lock: the storage is zero-filled by the loader, which
// is a valid unlocked state because OS_SPINLOCK_INIT is assumed to be zero
// (checked in Lock()).
AsanLock::AsanLock(LinkerInitialized) {
  // We assume that OS_SPINLOCK_INIT is zero
}
289
// Acquires the spinlock and records the owning thread.
void AsanLock::Lock() {
  // opaque_storage_ must be large enough to hold the OSSpinLock, and its
  // zero-initialized state must equal OS_SPINLOCK_INIT (see constructor).
  CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
  CHECK(OS_SPINLOCK_INIT == 0);
  // The lock is non-reentrant: relocking from the owner would deadlock.
  CHECK(owner_ != (uptr)pthread_self());
  OSSpinLockLock((OSSpinLock*)&opaque_storage_);
  CHECK(!owner_);
  owner_ = (uptr)pthread_self();
}
298
// Releases the spinlock; must be called by the owning thread.
void AsanLock::Unlock() {
  CHECK(owner_ == (uptr)pthread_self());
  // Clear the owner before unlocking: after the unlock another thread may
  // immediately acquire the lock and set owner_ itself.
  owner_ = 0;
  OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
304
305void AsanStackTrace::GetStackTrace(size_t max_s, uptr pc, uptr bp) {
306  size = 0;
307  trace[0] = pc;
308  if ((max_s) > 1) {
309    max_size = max_s;
310    FastUnwindStack(pc, bp);
311  }
312}
313
314// The range of pages to be used for escape islands.
315// TODO(glider): instead of mapping a fixed range we must find a range of
316// unmapped pages in vmmap and take them.
317// These constants were chosen empirically and may not work if the shadow
// memory layout changes. Unfortunately they necessarily depend on
// kHighMemBeg or kHighMemEnd.
320static void *island_allocator_pos = 0;
321
322#if __WORDSIZE == 32
323# define kIslandEnd (0xffdf0000 - kPageSize)
324# define kIslandBeg (kIslandEnd - 256 * kPageSize)
325#else
326# define kIslandEnd (0x7fffffdf0000 - kPageSize)
327# define kIslandBeg (kIslandEnd - 256 * kPageSize)
328#endif
329
330extern "C"
331mach_error_t __interception_allocate_island(void **ptr,
332                                            size_t unused_size,
333                                            void *unused_hint) {
334  if (!island_allocator_pos) {
335    island_allocator_pos =
336        internal_mmap((void*)kIslandBeg, kIslandEnd - kIslandBeg,
337                      PROT_READ | PROT_WRITE | PROT_EXEC,
338                      MAP_PRIVATE | MAP_ANON | MAP_FIXED,
339                      -1, 0);
340    if (island_allocator_pos != (void*)kIslandBeg) {
341      return KERN_NO_SPACE;
342    }
343    if (FLAG_v) {
344      Report("Mapped pages %p--%p for branch islands.\n",
345             kIslandBeg, kIslandEnd);
346    }
347    // Should not be very performance-critical.
348    internal_memset(island_allocator_pos, 0xCC, kIslandEnd - kIslandBeg);
349  };
350  *ptr = island_allocator_pos;
351  island_allocator_pos = (char*)island_allocator_pos + kPageSize;
352  if (FLAG_v) {
353    Report("Branch island allocated at %p\n", *ptr);
354  }
355  return err_none;
356}
357
358extern "C"
359mach_error_t __interception_deallocate_island(void *ptr) {
360  // Do nothing.
361  // TODO(glider): allow to free and reuse the island memory.
362  return err_none;
363}
364
365// Support for the following functions from libdispatch on Mac OS:
366//   dispatch_async_f()
367//   dispatch_async()
368//   dispatch_sync_f()
369//   dispatch_sync()
370//   dispatch_after_f()
371//   dispatch_after()
372//   dispatch_group_async_f()
373//   dispatch_group_async()
374// TODO(glider): libdispatch API contains other functions that we don't support
375// yet.
376//
377// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are
378// they can cause jobs to run on a thread different from the current one.
379// TODO(glider): if so, we need a test for this (otherwise we should remove
380// them).
381//
382// The following functions use dispatch_barrier_async_f() (which isn't a library
383// function but is exported) and are thus supported:
384//   dispatch_source_set_cancel_handler_f()
385//   dispatch_source_set_cancel_handler()
386//   dispatch_source_set_event_handler_f()
387//   dispatch_source_set_event_handler()
388//
389// The reference manual for Grand Central Dispatch is available at
390//   http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
391// The implementation details are at
392//   http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
393
// Minimal local declarations of the libdispatch / pthread-workqueue types.
// The real headers are deliberately not included (see the comment below
// about pre-Snow-Leopard systems); all handle types are treated as opaque
// pointers here.
typedef void* pthread_workqueue_t;
typedef void* pthread_workitem_handle_t;

typedef void* dispatch_group_t;
typedef void* dispatch_queue_t;
typedef u64 dispatch_time_t;
typedef void (*dispatch_function_t)(void *block);
typedef void* (*worker_t)(void *block);

// A wrapper for the ObjC blocks used to support libdispatch.
typedef struct {
  void *block;               // The client's original context argument.
  dispatch_function_t func;  // The client's original work function.
  int parent_tid;            // Tid of the thread that scheduled the work.
} asan_block_context_t;
409
410// We use extern declarations of libdispatch functions here instead
411// of including <dispatch/dispatch.h>. This header is not present on
// Mac OS X Leopard and earlier, and although we don't expect ASan to
413// work on legacy systems, it's bad to break the build of
414// LLVM compiler-rt there.
415extern "C" {
416void dispatch_async_f(dispatch_queue_t dq, void *ctxt,
417                      dispatch_function_t func);
418void dispatch_sync_f(dispatch_queue_t dq, void *ctxt,
419                     dispatch_function_t func);
420void dispatch_after_f(dispatch_time_t when, dispatch_queue_t dq, void *ctxt,
421                      dispatch_function_t func);
422void dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt,
423                              dispatch_function_t func);
424void dispatch_group_async_f(dispatch_group_t group, dispatch_queue_t dq,
425                            void *ctxt, dispatch_function_t func);
426int pthread_workqueue_additem_np(pthread_workqueue_t workq,
427    void *(*workitem_func)(void *), void * workitem_arg,
428    pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp);
429}  // extern "C"
430
431extern "C"
432void asan_dispatch_call_block_and_release(void *block) {
433  GET_STACK_TRACE_HERE(kStackTraceMax);
434  asan_block_context_t *context = (asan_block_context_t*)block;
435  if (FLAG_v >= 2) {
436    Report("asan_dispatch_call_block_and_release(): "
437           "context: %p, pthread_self: %p\n",
438           block, pthread_self());
439  }
440  AsanThread *t = asanThreadRegistry().GetCurrent();
441  if (!t) {
442    t = AsanThread::Create(context->parent_tid, 0, 0, &stack);
443    asanThreadRegistry().RegisterThread(t);
444    t->Init();
445    asanThreadRegistry().SetCurrent(t);
446  }
447  // Call the original dispatcher for the block.
448  context->func(context->block);
449  asan_free(context, &stack);
450}
451
452}  // namespace __asan
453
454using namespace __asan;  // NOLINT
455
456// Wrap |ctxt| and |func| into an asan_block_context_t.
457// The caller retains control of the allocated context.
458extern "C"
459asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
460                                         AsanStackTrace *stack) {
461  asan_block_context_t *asan_ctxt =
462      (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack);
463  asan_ctxt->block = ctxt;
464  asan_ctxt->func = func;
465  asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrMinusOne();
466  return asan_ctxt;
467}
468
469// TODO(glider): can we reduce code duplication by introducing a macro?
470INTERCEPTOR(void, dispatch_async_f, dispatch_queue_t dq, void *ctxt,
471                                    dispatch_function_t func) {
472  GET_STACK_TRACE_HERE(kStackTraceMax);
473  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
474  if (FLAG_v >= 2) {
475    Report("dispatch_async_f(): context: %p, pthread_self: %p\n",
476        asan_ctxt, pthread_self());
477    PRINT_CURRENT_STACK();
478  }
479  return REAL(dispatch_async_f)(dq, (void*)asan_ctxt,
480                                asan_dispatch_call_block_and_release);
481}
482
483INTERCEPTOR(void, dispatch_sync_f, dispatch_queue_t dq, void *ctxt,
484                                   dispatch_function_t func) {
485  GET_STACK_TRACE_HERE(kStackTraceMax);
486  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
487  if (FLAG_v >= 2) {
488    Report("dispatch_sync_f(): context: %p, pthread_self: %p\n",
489        asan_ctxt, pthread_self());
490    PRINT_CURRENT_STACK();
491  }
492  return REAL(dispatch_sync_f)(dq, (void*)asan_ctxt,
493                               asan_dispatch_call_block_and_release);
494}
495
496INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
497                                    dispatch_queue_t dq, void *ctxt,
498                                    dispatch_function_t func) {
499  GET_STACK_TRACE_HERE(kStackTraceMax);
500  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
501  if (FLAG_v >= 2) {
502    Report("dispatch_after_f: %p\n", asan_ctxt);
503    PRINT_CURRENT_STACK();
504  }
505  return REAL(dispatch_after_f)(when, dq, (void*)asan_ctxt,
506                                asan_dispatch_call_block_and_release);
507}
508
509INTERCEPTOR(void, dispatch_barrier_async_f, dispatch_queue_t dq, void *ctxt,
510                                            dispatch_function_t func) {
511  GET_STACK_TRACE_HERE(kStackTraceMax);
512  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
513  if (FLAG_v >= 2) {
514    Report("dispatch_barrier_async_f(): context: %p, pthread_self: %p\n",
515           asan_ctxt, pthread_self());
516    PRINT_CURRENT_STACK();
517  }
518  REAL(dispatch_barrier_async_f)(dq, (void*)asan_ctxt,
519                                 asan_dispatch_call_block_and_release);
520}
521
522INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
523                                          dispatch_queue_t dq, void *ctxt,
524                                          dispatch_function_t func) {
525  GET_STACK_TRACE_HERE(kStackTraceMax);
526  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
527  if (FLAG_v >= 2) {
528    Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
529           asan_ctxt, pthread_self());
530    PRINT_CURRENT_STACK();
531  }
532  REAL(dispatch_group_async_f)(group, dq, (void*)asan_ctxt,
533                               asan_dispatch_call_block_and_release);
534}
535
536// The following stuff has been extremely helpful while looking for the
537// unhandled functions that spawned jobs on Chromium shutdown. If the verbosity
538// level is 2 or greater, we wrap pthread_workqueue_additem_np() in order to
// find the points of worker thread creation (each of these threads may be
// used to run several tasks, which is why this is not enough to support the
// whole libdispatch API).
542extern "C"
543void *wrap_workitem_func(void *arg) {
544  if (FLAG_v >= 2) {
545    Report("wrap_workitem_func: %p, pthread_self: %p\n", arg, pthread_self());
546  }
547  asan_block_context_t *ctxt = (asan_block_context_t*)arg;
548  worker_t fn = (worker_t)(ctxt->func);
549  void *result =  fn(ctxt->block);
550  GET_STACK_TRACE_HERE(kStackTraceMax);
551  asan_free(arg, &stack);
552  return result;
553}
554
555INTERCEPTOR(int, pthread_workqueue_additem_np, pthread_workqueue_t workq,
556    void *(*workitem_func)(void *), void * workitem_arg,
557    pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp) {
558  GET_STACK_TRACE_HERE(kStackTraceMax);
559  asan_block_context_t *asan_ctxt =
560      (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), &stack);
561  asan_ctxt->block = workitem_arg;
562  asan_ctxt->func = (dispatch_function_t)workitem_func;
563  asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrMinusOne();
564  if (FLAG_v >= 2) {
565    Report("pthread_workqueue_additem_np: %p\n", asan_ctxt);
566    PRINT_CURRENT_STACK();
567  }
568  return REAL(pthread_workqueue_additem_np)(workq, wrap_workitem_func,
569                                            asan_ctxt, itemhandlep,
570                                            gencountp);
571}
572
573// CF_RC_BITS, the layout of CFRuntimeBase and __CFStrIsConstant are internal
574// and subject to change in further CoreFoundation versions. Apple does not
575// guarantee any binary compatibility from release to release.
576
577// See http://opensource.apple.com/source/CF/CF-635.15/CFInternal.h
578#if defined(__BIG_ENDIAN__)
579#define CF_RC_BITS 0
580#endif
581
582#if defined(__LITTLE_ENDIAN__)
583#define CF_RC_BITS 3
584#endif
585
// See http://opensource.apple.com/source/CF/CF-635.15/CFRuntime.h
// Mirror of the opaque CFRuntimeBase header that prefixes every CF object.
// The layout is CoreFoundation-internal and may change between releases
// (see the warning above); only the fields read by __CFStrIsConstant() are
// relied upon.
typedef struct __CFRuntimeBase {
  uptr _cfisa;
  u8 _cfinfo[4];
#if __LP64__
  u32 _rc;  // Presumably the retain count (64-bit layout only) -- see CFRuntime.h.
#endif
} CFRuntimeBase;
594
// See http://opensource.apple.com/source/CF/CF-635.15/CFString.c
// Returns nonzero iff |str| is a compile-time constant CFString, detected by
// a zero reference-count field (per the CF sources referenced above).
int __CFStrIsConstant(CFStringRef str) {
  CFRuntimeBase *base = (CFRuntimeBase*)str;
#if __LP64__
  return base->_rc == 0;
#else
  // On 32-bit platforms the RC byte lives inside _cfinfo; its index depends
  // on endianness (see CF_RC_BITS above).
  return (base->_cfinfo[CF_RC_BITS]) == 0;
#endif
}
604
605INTERCEPTOR(CFStringRef, CFStringCreateCopy, CFAllocatorRef alloc,
606                                             CFStringRef str) {
607  if (__CFStrIsConstant(str)) {
608    return str;
609  } else {
610    return REAL(CFStringCreateCopy)(alloc, str);
611  }
612}
613
614namespace __asan {
615
// Installs the Mac-specific interceptors: the libdispatch entry points and
// CFStringCreateCopy(). Any failure to intercept is fatal (CHECK).
void InitializeMacInterceptors() {
  CHECK(INTERCEPT_FUNCTION(dispatch_async_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_sync_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_after_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_barrier_async_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_group_async_f));
  // We don't need to intercept pthread_workqueue_additem_np() to support the
  // libdispatch API, but it helps us to debug the unsupported functions. Let's
  // intercept it only during verbose runs.
  if (FLAG_v >= 2) {
    CHECK(INTERCEPT_FUNCTION(pthread_workqueue_additem_np));
  }
  // Normally CFStringCreateCopy should not copy constant CF strings.
  // Replacing the default CFAllocator causes constant strings to be copied
  // rather than just returned, which leads to bugs in big applications like
  // Chromium and WebKit, see
  // http://code.google.com/p/address-sanitizer/issues/detail?id=10
  // Until this problem is fixed we need to check that the string is
  // non-constant before calling CFStringCreateCopy.
  CHECK(INTERCEPT_FUNCTION(CFStringCreateCopy));
}
637
638}  // namespace __asan
639
640#endif  // __APPLE__
641