// asan_mac.cc revision ee3925515e4c7966f3ef489f687aa7e5692806a9
1//===-- asan_mac.cc -------------------------------------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of AddressSanitizer, an address sanity checker.
11//
12// Mac-specific details.
13//===----------------------------------------------------------------------===//
14
15#ifdef __APPLE__
16
17#include "asan_interceptors.h"
18#include "asan_internal.h"
19#include "asan_mapping.h"
20#include "asan_procmaps.h"
21#include "asan_stack.h"
22#include "asan_thread.h"
23#include "asan_thread_registry.h"
24
25#include <crt_externs.h>  // for _NSGetEnviron
26#include <mach-o/dyld.h>
27#include <mach-o/loader.h>
28#include <sys/mman.h>
29#include <sys/resource.h>
30#include <sys/sysctl.h>
31#include <sys/ucontext.h>
32#include <pthread.h>
33#include <fcntl.h>
34#include <unistd.h>
35#include <libkern/OSAtomic.h>
36#include <CoreFoundation/CFString.h>
37
38namespace __asan {
39
// Extracts the program counter, stack pointer and frame pointer from a
// signal handler's ucontext.  Mac-specific: reads the Mach thread state
// (__ss) through the mcontext pointer.
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
  ucontext_t *ucontext = (ucontext_t*)context;
# if __WORDSIZE == 64
  // x86-64 register names.
  *pc = ucontext->uc_mcontext->__ss.__rip;
  *bp = ucontext->uc_mcontext->__ss.__rbp;
  *sp = ucontext->uc_mcontext->__ss.__rsp;
# else
  // i386 register names.
  *pc = ucontext->uc_mcontext->__ss.__eip;
  *bp = ucontext->uc_mcontext->__ss.__ebp;
  *sp = ucontext->uc_mcontext->__ss.__esp;
# endif  // __WORDSIZE
}
52
// Known Darwin kernel major versions, as reported by kern.osrelease
// (see GetMacosVersion() below).
enum {
  MACOS_VERSION_UNKNOWN = 0,
  MACOS_VERSION_LEOPARD,       // Darwin 9.x  / OS X 10.5
  MACOS_VERSION_SNOW_LEOPARD,  // Darwin 10.x / OS X 10.6
  MACOS_VERSION_LION,          // Darwin 11.x / OS X 10.7
};
59
// Maps the Darwin kernel release string (sysctl kern.osrelease, e.g.
// "11.0.0") to one of the MACOS_VERSION_* constants above.  Only the
// leading major-version digits are inspected.
static int GetMacosVersion() {
  int mib[2] = { CTL_KERN, KERN_OSRELEASE };
  // Zero-initialize so that version[1] is '\0' even for a 1-char string.
  // (The original zeroed the buffer with a loop whose signed index was
  // compared against a size_t bound; aggregate initialization avoids the
  // loop and the signed/unsigned comparison.)
  char version[100] = {};
  size_t len = 0, maxlen = sizeof(version) / sizeof(version[0]);
  // First query the required length, then fetch the string itself.
  CHECK(sysctl(mib, 2, 0, &len, 0, 0) != -1);
  CHECK(len < maxlen);
  CHECK(sysctl(mib, 2, version, &len, 0, 0) != -1);
  switch (version[0]) {
    case '9': return MACOS_VERSION_LEOPARD;  // Darwin 9 -> 10.5.
    case '1': {
      switch (version[1]) {
        case '0': return MACOS_VERSION_SNOW_LEOPARD;  // Darwin 10 -> 10.6.
        case '1': return MACOS_VERSION_LION;          // Darwin 11 -> 10.7.
        default: return MACOS_VERSION_UNKNOWN;
      }
    }
    default: return MACOS_VERSION_UNKNOWN;
  }
}
81
82bool PlatformHasDifferentMemcpyAndMemmove() {
83  // On OS X 10.7 memcpy() and memmove() are both resolved
84  // into memmove$VARIANT$sse42.
85  // See also http://code.google.com/p/address-sanitizer/issues/detail?id=34.
86  // TODO(glider): need to check dynamically that memcpy() and memmove() are
87  // actually the same function.
88  return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
89}
90
// No-op. Mac does not support static linkage anyway, so there is no
// marker symbol to look up; callers only need a null result.
void *AsanDoesNotSupportStaticLinkage() {
  void *no_marker = 0;
  return no_marker;
}
95
96bool AsanInterceptsSignal(int signum) {
97  return (signum == SIGSEGV || signum == SIGBUS) && FLAG_handle_segv;
98}
99
// Thin wrapper so that all ASan-internal mappings go through a single,
// non-intercepted entry point.  |offset| is taken as u64 and narrowed by
// the underlying mmap() call.
static void *asan_mmap(void *addr, size_t length, int prot, int flags,
                int fd, u64 offset) {
  return mmap(addr, length, prot, flags, fd, offset);
}
104
// write(2) wrapper used by ASan's own output routines.
// Returns the number of bytes written, or (size_t)-1 on error.
size_t AsanWrite(int fd, const void *buf, size_t count) {
  const size_t written = write(fd, buf, count);
  return written;
}
108
109void *AsanMmapSomewhereOrDie(size_t size, const char *mem_type) {
110  size = RoundUpTo(size, kPageSize);
111  void *res = asan_mmap(0, size,
112                        PROT_READ | PROT_WRITE,
113                        MAP_PRIVATE | MAP_ANON, -1, 0);
114  if (res == (void*)-1) {
115    OutOfMemoryMessageAndDie(mem_type, size);
116  }
117  return res;
118}
119
120void *AsanMmapFixedNoReserve(uptr fixed_addr, size_t size) {
121  return asan_mmap((void*)fixed_addr, size,
122                   PROT_READ | PROT_WRITE,
123                   MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
124                   0, 0);
125}
126
127void *AsanMprotect(uptr fixed_addr, size_t size) {
128  return asan_mmap((void*)fixed_addr, size,
129                   PROT_NONE,
130                   MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
131                   0, 0);
132}
133
134void AsanUnmapOrDie(void *addr, size_t size) {
135  if (!addr || !size) return;
136  int res = munmap(addr, size);
137  if (res != 0) {
138    Report("Failed to unmap\n");
139    AsanDie();
140  }
141}
142
// open(2) wrapper: opens |filename| read-only; returns the fd or -1.
int AsanOpenReadonly(const char* filename) {
  const int fd = open(filename, O_RDONLY);
  return fd;
}
146
147const char *AsanGetEnv(const char *name) {
148  char ***env_ptr = _NSGetEnviron();
149  CHECK(env_ptr);
150  char **environ = *env_ptr;
151  CHECK(environ);
152  size_t name_len = internal_strlen(name);
153  while (*environ != 0) {
154    size_t len = internal_strlen(*environ);
155    if (len > name_len) {
156      const char *p = *environ;
157      if (!internal_memcmp(p, name, name_len) &&
158          p[name_len] == '=') {  // Match.
159        return *environ + name_len + 1;  // String starting after =.
160      }
161    }
162    environ++;
163  }
164  return 0;
165}
166
// read(2) wrapper.  Returns the number of bytes read, or (size_t)-1 on
// error.
size_t AsanRead(int fd, void *buf, size_t count) {
  const size_t n_read = read(fd, buf, count);
  return n_read;
}
170
// close(2) wrapper.  Returns 0 on success, -1 on error.
int AsanClose(int fd) {
  const int res = close(fd);
  return res;
}
174
// Positions the iterator before the first segment (iteration over images
// is top-down; see Reset()).
AsanProcMaps::AsanProcMaps() {
  Reset();
}
178
// Nothing to release: the iterator owns no resources.
AsanProcMaps::~AsanProcMaps() {
}
181
182// More information about Mach-O headers can be found in mach-o/loader.h
183// Each Mach-O image has a header (mach_header or mach_header_64) starting with
184// a magic number, and a list of linker load commands directly following the
185// header.
186// A load command is at least two 32-bit words: the command type and the
187// command size in bytes. We're interested only in segment load commands
188// (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped
189// into the task's address space.
190// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
191// segment_command_64 correspond to the memory address, memory size and the
192// file offset of the current memory segment.
193// Because these fields are taken from the images as is, one needs to add
194// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
195
void AsanProcMaps::Reset() {
  // Count down from the top.
  // TODO(glider): as per man 3 dyld, iterating over the headers with
  // _dyld_image_count is thread-unsafe. We need to register callbacks for
  // adding and removing images which will invalidate the AsanProcMaps state.
  // Note: the initial index equals the image count (one past the last
  // valid index); Next() skips it because the header lookup returns 0.
  current_image_ = _dyld_image_count();
  current_load_cmd_count_ = -1;  // -1 means "current image not set up yet".
  current_load_cmd_addr_ = 0;
  current_magic_ = 0;
}
206
207// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
208// Google Perftools, http://code.google.com/p/google-perftools.
209
210// NextSegmentLoad scans the current image for the next segment load command
211// and returns the start and end addresses and file offset of the corresponding
212// segment.
213// Note that the segment addresses are not necessarily sorted.
214template<u32 kLCSegment, typename SegmentCommand>
215bool AsanProcMaps::NextSegmentLoad(
216    uptr *start, uptr *end, uptr *offset,
217    char filename[], size_t filename_size) {
218  const char* lc = current_load_cmd_addr_;
219  current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
220  if (((const load_command *)lc)->cmd == kLCSegment) {
221    const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
222    const SegmentCommand* sc = (const SegmentCommand *)lc;
223    if (start) *start = sc->vmaddr + dlloff;
224    if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
225    if (offset) *offset = sc->fileoff;
226    if (filename) {
227      REAL(strncpy)(filename, _dyld_get_image_name(current_image_),
228                    filename_size);
229    }
230    if (FLAG_v >= 4)
231      Report("LC_SEGMENT: %p--%p %s+%p\n", *start, *end, filename, *offset);
232    return true;
233  }
234  return false;
235}
236
// Returns the next mapped segment of any loaded Mach-O image, one segment
// per call.  Iterator state (current image index, load-command countdown,
// pointer to the next load command) lives in the data members initialized
// by Reset() and persists across calls.
bool AsanProcMaps::Next(uptr *start, uptr *end,
                        uptr *offset, char filename[],
                        size_t filename_size) {
  for (; current_image_ >= 0; current_image_--) {
    const mach_header* hdr = _dyld_get_image_header(current_image_);
    if (!hdr) continue;  // Also skips the initial out-of-range index.
    if (current_load_cmd_count_ < 0) {
      // Set up for this image.
      // NOTE(review): starting the countdown at ncmds while the loop
      // below runs while count >= 0 appears to visit ncmds + 1 load
      // commands -- verify against the Mach-O headers whether this reads
      // one command past the end.
      current_load_cmd_count_ = hdr->ncmds;
      current_magic_ = hdr->magic;
      switch (current_magic_) {
#ifdef MH_MAGIC_64
        case MH_MAGIC_64: {
          // 64-bit image: load commands follow the 64-bit header.
          current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
          break;
        }
#endif
        case MH_MAGIC: {
          current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
          break;
        }
        default: {
          // Unknown magic: skip this image entirely.
          continue;
        }
      }
    }

    for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
      switch (current_magic_) {
        // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
#ifdef MH_MAGIC_64
        case MH_MAGIC_64: {
          if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
                  start, end, offset, filename, filename_size))
            return true;
          break;
        }
#endif
        case MH_MAGIC: {
          if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
                  start, end, offset, filename, filename_size))
            return true;
          break;
        }
      }
    }
    // If we get here, no more load_cmd's in this image talk about
    // segments.  Go on to the next image.
  }
  return false;
}
288
// Resolves |addr| to the containing module name and the offset within it
// by delegating to the generic segment-iteration helper.
bool AsanProcMaps::GetObjectNameAndOffset(uptr addr, uptr *offset,
                                          char filename[],
                                          size_t filename_size) {
  return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
}
294
// Computes the stack bounds of the current thread.  On Mac,
// pthread_get_stackaddr_np() returns the *top* (highest address) of the
// stack, so the bottom is top minus the stack size.
void AsanThread::SetThreadStackTopAndBottom() {
  size_t stacksize = pthread_get_stacksize_np(pthread_self());
  void *stackaddr = pthread_get_stackaddr_np(pthread_self());
  stack_top_ = (uptr)stackaddr;
  stack_bottom_ = stack_top_ - stacksize;
  int local;
  // Sanity check: the address of a local variable must fall in the range.
  CHECK(AddrIsInStack((uptr)&local));
}
303
// Linker-initialized lock: relies on opaque_storage_ being zero-filled by
// static initialization (OS_SPINLOCK_INIT is verified to be 0 in Lock()).
AsanLock::AsanLock(LinkerInitialized) {
  // We assume that OS_SPINLOCK_INIT is zero
}
307
// Acquires the spin lock.  Not reentrant: an attempt to re-lock from the
// owning thread fails a CHECK instead of spinning forever.
void AsanLock::Lock() {
  CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
  CHECK(OS_SPINLOCK_INIT == 0);
  CHECK(owner_ != (uptr)pthread_self());  // Catch self-deadlock early.
  OSSpinLockLock((OSSpinLock*)&opaque_storage_);
  CHECK(!owner_);  // We just acquired the lock, so nobody may own it.
  owner_ = (uptr)pthread_self();
}
316
// Releases the spin lock; must be called by the owning thread.
// owner_ is cleared before the actual unlock so the next acquirer
// observes an unowned lock.
void AsanLock::Unlock() {
  CHECK(owner_ == (uptr)pthread_self());
  owner_ = 0;
  OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
322
// Captures a stack trace of at most |max_s| frames starting from pc/bp
// via frame-pointer-based unwinding.
void AsanStackTrace::GetStackTrace(size_t max_s, uptr pc, uptr bp) {
  size = 0;
  trace[0] = pc;
  if ((max_s) > 1) {
    max_size = max_s;
    // FastUnwindStack() fills trace[] and updates size.
    FastUnwindStack(pc, bp);
  }
  // NOTE(review): when max_s <= 1, size stays 0 even though trace[0] was
  // written -- confirm callers handle pc-only traces as intended.
}
331
332// The range of pages to be used for escape islands.
333// TODO(glider): instead of mapping a fixed range we must find a range of
334// unmapped pages in vmmap and take them.
// These constants were chosen empirically and may not work if the shadow
// memory layout changes.  Unfortunately they do depend on kHighMemBeg
// and kHighMemEnd, and must be revisited if the shadow layout changes.
338static void *island_allocator_pos = 0;
339
340#if __WORDSIZE == 32
341# define kIslandEnd (0xffdf0000 - kPageSize)
342# define kIslandBeg (kIslandEnd - 256 * kPageSize)
343#else
344# define kIslandEnd (0x7fffffdf0000 - kPageSize)
345# define kIslandBeg (kIslandEnd - 256 * kPageSize)
346#endif
347
348extern "C"
349mach_error_t __interception_allocate_island(void **ptr,
350                                            size_t unused_size,
351                                            void *unused_hint) {
352  if (!island_allocator_pos) {
353    island_allocator_pos =
354        asan_mmap((void*)kIslandBeg, kIslandEnd - kIslandBeg,
355                  PROT_READ | PROT_WRITE | PROT_EXEC,
356                  MAP_PRIVATE | MAP_ANON | MAP_FIXED,
357                 -1, 0);
358    if (island_allocator_pos != (void*)kIslandBeg) {
359      return KERN_NO_SPACE;
360    }
361    if (FLAG_v) {
362      Report("Mapped pages %p--%p for branch islands.\n",
363             kIslandBeg, kIslandEnd);
364    }
365    // Should not be very performance-critical.
366    internal_memset(island_allocator_pos, 0xCC, kIslandEnd - kIslandBeg);
367  };
368  *ptr = island_allocator_pos;
369  island_allocator_pos = (char*)island_allocator_pos + kPageSize;
370  if (FLAG_v) {
371    Report("Branch island allocated at %p\n", *ptr);
372  }
373  return err_none;
374}
375
// Island deallocation callback for mach_override; islands are never
// reclaimed at the moment.
extern "C"
mach_error_t __interception_deallocate_island(void *ptr) {
  // Do nothing.
  // TODO(glider): allow to free and reuse the island memory.
  return err_none;
}
382
383// Support for the following functions from libdispatch on Mac OS:
384//   dispatch_async_f()
385//   dispatch_async()
386//   dispatch_sync_f()
387//   dispatch_sync()
388//   dispatch_after_f()
389//   dispatch_after()
390//   dispatch_group_async_f()
391//   dispatch_group_async()
392// TODO(glider): libdispatch API contains other functions that we don't support
393// yet.
394//
395// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are
396// they can cause jobs to run on a thread different from the current one.
397// TODO(glider): if so, we need a test for this (otherwise we should remove
398// them).
399//
400// The following functions use dispatch_barrier_async_f() (which isn't a library
401// function but is exported) and are thus supported:
402//   dispatch_source_set_cancel_handler_f()
403//   dispatch_source_set_cancel_handler()
404//   dispatch_source_set_event_handler_f()
405//   dispatch_source_set_event_handler()
406//
407// The reference manual for Grand Central Dispatch is available at
408//   http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
409// The implementation details are at
410//   http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
411
// Minimal local stand-ins for the libdispatch / pthread-workqueue types
// used below (we deliberately avoid <dispatch/dispatch.h>; all of these
// are passed through opaquely).
typedef void* pthread_workqueue_t;
typedef void* pthread_workitem_handle_t;

typedef void* dispatch_group_t;
typedef void* dispatch_queue_t;
typedef u64 dispatch_time_t;
typedef void (*dispatch_function_t)(void *block);
typedef void* (*worker_t)(void *block);

// A wrapper for the ObjC blocks used to support libdispatch.
typedef struct {
  void *block;               // Original context of the dispatched call.
  dispatch_function_t func;  // Original function to invoke on |block|.
  int parent_tid;            // Tid of the thread that submitted the work.
} asan_block_context_t;
427
428// We use extern declarations of libdispatch functions here instead
429// of including <dispatch/dispatch.h>. This header is not present on
// Mac OS X Leopard and earlier, and although we don't expect ASan to
431// work on legacy systems, it's bad to break the build of
432// LLVM compiler-rt there.
// Forward declarations of the real libdispatch / workqueue entry points
// (see the comment above about not including <dispatch/dispatch.h>).
extern "C" {
void dispatch_async_f(dispatch_queue_t dq, void *ctxt,
                      dispatch_function_t func);
void dispatch_sync_f(dispatch_queue_t dq, void *ctxt,
                     dispatch_function_t func);
void dispatch_after_f(dispatch_time_t when, dispatch_queue_t dq, void *ctxt,
                      dispatch_function_t func);
void dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt,
                              dispatch_function_t func);
void dispatch_group_async_f(dispatch_group_t group, dispatch_queue_t dq,
                            void *ctxt, dispatch_function_t func);
int pthread_workqueue_additem_np(pthread_workqueue_t workq,
    void *(*workitem_func)(void *), void * workitem_arg,
    pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp);
}  // extern "C"
448
// Trampoline executed on the GCD worker thread in place of the user
// callback.  Registers the worker thread with ASan's thread registry if
// it has never been seen before (GCD workers are spawned by the system,
// not via our intercepted pthread_create()), then runs the original
// callback and frees the wrapper context.
extern "C"
void asan_dispatch_call_block_and_release(void *block) {
  GET_STACK_TRACE_HERE(kStackTraceMax);
  asan_block_context_t *context = (asan_block_context_t*)block;
  if (FLAG_v >= 2) {
    Report("asan_dispatch_call_block_and_release(): "
           "context: %p, pthread_self: %p\n",
           block, pthread_self());
  }
  AsanThread *t = asanThreadRegistry().GetCurrent();
  if (!t) {
    // Brand-new worker thread: create an AsanThread for it, attributed
    // to the thread that submitted the block.
    t = AsanThread::Create(context->parent_tid, 0, 0, &stack);
    asanThreadRegistry().RegisterThread(t);
    t->Init();
    asanThreadRegistry().SetCurrent(t);
  }
  // Call the original dispatcher for the block.
  context->func(context->block);
  asan_free(context, &stack);
}
469
470}  // namespace __asan
471
472using namespace __asan;  // NOLINT
473
// Wrap |ctxt| and |func| into an asan_block_context_t.
// The caller retains control of the allocated context.
extern "C"
asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
                                         AsanStackTrace *stack) {
  asan_block_context_t *asan_ctxt =
      (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack);
  asan_ctxt->block = ctxt;
  asan_ctxt->func = func;
  // Remember the submitting thread so the worker can report its ancestry.
  asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrMinusOne();
  return asan_ctxt;
}
486
487// TODO(glider): can we reduce code duplication by introducing a macro?
// Intercepts dispatch_async_f(): wraps the user context so the worker
// thread is registered with ASan before the user callback runs.
INTERCEPTOR(void, dispatch_async_f, dispatch_queue_t dq, void *ctxt,
                                    dispatch_function_t func) {
  GET_STACK_TRACE_HERE(kStackTraceMax);
  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
  if (FLAG_v >= 2) {
    Report("dispatch_async_f(): context: %p, pthread_self: %p\n",
        asan_ctxt, pthread_self());
    PRINT_CURRENT_STACK();
  }
  return REAL(dispatch_async_f)(dq, (void*)asan_ctxt,
                                asan_dispatch_call_block_and_release);
}
500
// Intercepts dispatch_sync_f(): same wrapping as dispatch_async_f(),
// though the call itself blocks until the work item completes.
INTERCEPTOR(void, dispatch_sync_f, dispatch_queue_t dq, void *ctxt,
                                   dispatch_function_t func) {
  GET_STACK_TRACE_HERE(kStackTraceMax);
  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
  if (FLAG_v >= 2) {
    Report("dispatch_sync_f(): context: %p, pthread_self: %p\n",
        asan_ctxt, pthread_self());
    PRINT_CURRENT_STACK();
  }
  return REAL(dispatch_sync_f)(dq, (void*)asan_ctxt,
                               asan_dispatch_call_block_and_release);
}
513
// Intercepts dispatch_after_f(): the wrapped callback runs on a worker
// thread after |when|.
INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
                                    dispatch_queue_t dq, void *ctxt,
                                    dispatch_function_t func) {
  GET_STACK_TRACE_HERE(kStackTraceMax);
  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
  if (FLAG_v >= 2) {
    Report("dispatch_after_f: %p\n", asan_ctxt);
    PRINT_CURRENT_STACK();
  }
  return REAL(dispatch_after_f)(when, dq, (void*)asan_ctxt,
                                asan_dispatch_call_block_and_release);
}
526
// Intercepts dispatch_barrier_async_f(); also covers the
// dispatch_source_set_*_handler* functions, which call it internally
// (see the file-level comment above).
INTERCEPTOR(void, dispatch_barrier_async_f, dispatch_queue_t dq, void *ctxt,
                                            dispatch_function_t func) {
  GET_STACK_TRACE_HERE(kStackTraceMax);
  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
  if (FLAG_v >= 2) {
    Report("dispatch_barrier_async_f(): context: %p, pthread_self: %p\n",
           asan_ctxt, pthread_self());
    PRINT_CURRENT_STACK();
  }
  REAL(dispatch_barrier_async_f)(dq, (void*)asan_ctxt,
                                 asan_dispatch_call_block_and_release);
}
539
// Intercepts dispatch_group_async_f(): identical wrapping, with the
// additional dispatch group argument passed through untouched.
INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
                                          dispatch_queue_t dq, void *ctxt,
                                          dispatch_function_t func) {
  GET_STACK_TRACE_HERE(kStackTraceMax);
  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
  if (FLAG_v >= 2) {
    Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
           asan_ctxt, pthread_self());
    PRINT_CURRENT_STACK();
  }
  REAL(dispatch_group_async_f)(group, dq, (void*)asan_ctxt,
                               asan_dispatch_call_block_and_release);
}
553
554// The following stuff has been extremely helpful while looking for the
555// unhandled functions that spawned jobs on Chromium shutdown. If the verbosity
556// level is 2 or greater, we wrap pthread_workqueue_additem_np() in order to
// find the points of worker thread creation (each such thread may be used
// to run several tasks, which is why this alone is not enough to support
// the whole libdispatch API).
// Runs a single pthread workqueue item and frees its wrapper context.
// Installed only in verbose mode (see pthread_workqueue_additem_np below)
// to trace worker thread creation points.
extern "C"
void *wrap_workitem_func(void *arg) {
  if (FLAG_v >= 2) {
    Report("wrap_workitem_func: %p, pthread_self: %p\n", arg, pthread_self());
  }
  asan_block_context_t *ctxt = (asan_block_context_t*)arg;
  worker_t fn = (worker_t)(ctxt->func);
  void *result =  fn(ctxt->block);
  GET_STACK_TRACE_HERE(kStackTraceMax);
  asan_free(arg, &stack);
  return result;
}
572
// Debugging interceptor, installed only when FLAG_v >= 2 (see
// InitializeMacInterceptors()): wraps the work item so worker thread
// creation points can be logged.
INTERCEPTOR(int, pthread_workqueue_additem_np, pthread_workqueue_t workq,
    void *(*workitem_func)(void *), void * workitem_arg,
    pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp) {
  GET_STACK_TRACE_HERE(kStackTraceMax);
  asan_block_context_t *asan_ctxt =
      (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), &stack);
  asan_ctxt->block = workitem_arg;
  asan_ctxt->func = (dispatch_function_t)workitem_func;
  asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrMinusOne();
  if (FLAG_v >= 2) {
    Report("pthread_workqueue_additem_np: %p\n", asan_ctxt);
    PRINT_CURRENT_STACK();
  }
  return REAL(pthread_workqueue_additem_np)(workq, wrap_workitem_func,
                                            asan_ctxt, itemhandlep,
                                            gencountp);
}
590
591// CF_RC_BITS, the layout of CFRuntimeBase and __CFStrIsConstant are internal
592// and subject to change in further CoreFoundation versions. Apple does not
593// guarantee any binary compatibility from release to release.
594
595// See http://opensource.apple.com/source/CF/CF-635.15/CFInternal.h
596#if defined(__BIG_ENDIAN__)
597#define CF_RC_BITS 0
598#endif
599
600#if defined(__LITTLE_ENDIAN__)
601#define CF_RC_BITS 3
602#endif
603
// See http://opensource.apple.com/source/CF/CF-635.15/CFRuntime.h
// Mirrors the layout of CF's internal CFRuntimeBase so the retain-count
// field can be inspected; see the binary-compatibility warning above.
typedef struct __CFRuntimeBase {
  uptr _cfisa;
  u8 _cfinfo[4];
#if __LP64__
  u32 _rc;
#endif
} CFRuntimeBase;
612
// See http://opensource.apple.com/source/CF/CF-635.15/CFString.c
// Returns nonzero if |str| is a compile-time constant CFString: constant
// strings carry a zero retain count in the runtime base.
// NOTE(review): this helper has external linkage and a CF-internal name;
// consider making it static to avoid a potential symbol clash with
// CoreFoundation itself.
int __CFStrIsConstant(CFStringRef str) {
  CFRuntimeBase *base = (CFRuntimeBase*)str;
#if __LP64__
  return base->_rc == 0;
#else
  return (base->_cfinfo[CF_RC_BITS]) == 0;
#endif
}
622
623INTERCEPTOR(CFStringRef, CFStringCreateCopy, CFAllocatorRef alloc,
624                                             CFStringRef str) {
625  if (__CFStrIsConstant(str)) {
626    return str;
627  } else {
628    return REAL(CFStringCreateCopy)(alloc, str);
629  }
630}
631
632namespace __asan {
633
// Installs all Mac-specific interceptors.  Every INTERCEPT_FUNCTION()
// call is CHECKed: failing to patch any of these would leave GCD worker
// threads invisible to ASan's thread bookkeeping.
void InitializeMacInterceptors() {
  CHECK(INTERCEPT_FUNCTION(dispatch_async_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_sync_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_after_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_barrier_async_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_group_async_f));
  // We don't need to intercept pthread_workqueue_additem_np() to support the
  // libdispatch API, but it helps us to debug the unsupported functions. Let's
  // intercept it only during verbose runs.
  if (FLAG_v >= 2) {
    CHECK(INTERCEPT_FUNCTION(pthread_workqueue_additem_np));
  }
  // Normally CFStringCreateCopy should not copy constant CF strings.
  // Replacing the default CFAllocator causes constant strings to be copied
  // rather than just returned, which leads to bugs in big applications like
  // Chromium and WebKit, see
  // http://code.google.com/p/address-sanitizer/issues/detail?id=10
  // Until this problem is fixed we need to check that the string is
  // non-constant before calling CFStringCreateCopy.
  CHECK(INTERCEPT_FUNCTION(CFStringCreateCopy));
}
655
656}  // namespace __asan
657
658#endif  // __APPLE__
659