asan_mac.cc revision 5cf832dc0a6566ae4bb8d48b1f41da623d2c2c1a
1//===-- asan_mac.cc -------------------------------------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is a part of AddressSanitizer, an address sanity checker.
11//
12// Mac-specific details.
13//===----------------------------------------------------------------------===//
14
15#ifdef __APPLE__
16
17#include "asan_interceptors.h"
18#include "asan_internal.h"
19#include "asan_mapping.h"
20#include "asan_procmaps.h"
21#include "asan_stack.h"
22#include "asan_thread.h"
23#include "asan_thread_registry.h"
24
25#include <crt_externs.h>  // for _NSGetEnviron
26#include <mach-o/dyld.h>
27#include <mach-o/loader.h>
28#include <sys/mman.h>
29#include <sys/resource.h>
30#include <sys/sysctl.h>
31#include <sys/ucontext.h>
32#include <pthread.h>
33#include <fcntl.h>
34#include <unistd.h>
35#include <libkern/OSAtomic.h>
36#include <CoreFoundation/CFString.h>
37
38namespace __asan {
39
40void GetPcSpBp(void *context, uintptr_t *pc, uintptr_t *sp, uintptr_t *bp) {
41  ucontext_t *ucontext = (ucontext_t*)context;
42# if __WORDSIZE == 64
43  *pc = ucontext->uc_mcontext->__ss.__rip;
44  *bp = ucontext->uc_mcontext->__ss.__rbp;
45  *sp = ucontext->uc_mcontext->__ss.__rsp;
46# else
47  *pc = ucontext->uc_mcontext->__ss.__eip;
48  *bp = ucontext->uc_mcontext->__ss.__ebp;
49  *sp = ucontext->uc_mcontext->__ss.__esp;
50# endif  // __WORDSIZE
51}
52
// Darwin kernel major versions mapped to marketing names:
// Darwin 9.x == 10.5 (Leopard), 10.x == 10.6 (Snow Leopard),
// 11.x == 10.7 (Lion).  Values are spelled out to make the ordering
// explicit; they must match the original implicit enumeration.
enum {
  MACOS_VERSION_UNKNOWN = 0,
  MACOS_VERSION_LEOPARD = 1,
  MACOS_VERSION_SNOW_LEOPARD = 2,
  MACOS_VERSION_LION = 3,
};
59
// Queries the kern.osrelease sysctl and maps the Darwin kernel major
// version to one of the MACOS_VERSION_* constants above.
static int GetMacosVersion() {
  int mib[2] = { CTL_KERN, KERN_OSRELEASE };
  char version[100];
  size_t len = 0, maxlen = sizeof(version) / sizeof(version[0]);
  // Zero the buffer so the string fetched below is always NUL-terminated.
  // (Use size_t for the index: the original used int, causing a
  // signed/unsigned comparison against |maxlen|.)
  for (size_t i = 0; i < maxlen; i++) version[i] = '\0';
  // Get the version length.
  CHECK(sysctl(mib, 2, NULL, &len, NULL, 0) != -1);
  CHECK(len < maxlen);
  CHECK(sysctl(mib, 2, version, &len, NULL, 0) != -1);
  switch (version[0]) {
    case '9': return MACOS_VERSION_LEOPARD;
    case '1': {
      // "10.x" is Snow Leopard, "11.x" is Lion.
      switch (version[1]) {
        case '0': return MACOS_VERSION_SNOW_LEOPARD;
        case '1': return MACOS_VERSION_LION;
        default: return MACOS_VERSION_UNKNOWN;
      }
    }
    default: return MACOS_VERSION_UNKNOWN;
  }
}
81
82bool PlatformHasDifferentMemcpyAndMemmove() {
83  // On OS X 10.7 memcpy() and memmove() are both resolved
84  // into memmove$VARIANT$sse42.
85  // See also http://code.google.com/p/address-sanitizer/issues/detail?id=34.
86  // TODO(glider): need to check dynamically that memcpy() and memmove() are
87  // actually the same function.
88  return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
89}
90
// Mac does not support static linkage, so there is no marker to return;
// callers only use the result on platforms where static linkage matters.
void *AsanDoesNotSupportStaticLinkage() {
  return NULL;
}
95
96static inline bool IntervalsAreSeparate(uintptr_t start1, uintptr_t end1,
97                                        uintptr_t start2, uintptr_t end2) {
98  CHECK(start1 <= end1);
99  CHECK(start2 <= end2);
100  return (end1 < start2) || (end2 < start1);
101}
102
103// FIXME: this is thread-unsafe, but should not cause problems most of the time.
104// When the shadow is mapped only a single thread usually exists (plus maybe
105// several worker threads on Mac, which aren't expected to map big chunks of
106// memory).
107bool AsanShadowRangeIsAvailable() {
108  AsanProcMaps procmaps;
109  uintptr_t start, end;
110  bool available = true;
111  while (procmaps.Next(&start, &end,
112                       /*offset*/NULL, /*filename*/NULL, /*filename_size*/0)) {
113    if (!IntervalsAreSeparate(start, end,
114                              kLowShadowBeg - kMmapGranularity,
115                              kHighShadowEnd)) {
116      available = false;
117      break;
118    }
119  }
120  return available;
121}
122
123bool AsanInterceptsSignal(int signum) {
124  return (signum == SIGSEGV || signum == SIGBUS) && FLAG_handle_segv;
125}
126
// Thin local wrapper around mmap(2) taking a uint64_t offset so all
// callers in this file pass offsets uniformly; it is converted to the
// system off_t in the call below.
static void *asan_mmap(void *addr, size_t length, int prot, int flags,
                int fd, uint64_t offset) {
  return mmap(addr, length, prot, flags, fd, offset);
}
131
// Writes |count| bytes from |buf| to |fd|; returns write(2)'s result.
size_t AsanWrite(int fd, const void *buf, size_t count) {
  size_t res = write(fd, buf, count);
  return res;
}
135
136void *AsanMmapSomewhereOrDie(size_t size, const char *mem_type) {
137  size = RoundUpTo(size, kPageSize);
138  void *res = asan_mmap(0, size,
139                        PROT_READ | PROT_WRITE,
140                        MAP_PRIVATE | MAP_ANON, -1, 0);
141  if (res == (void*)-1) {
142    OutOfMemoryMessageAndDie(mem_type, size);
143  }
144  return res;
145}
146
147void *AsanMmapFixedNoReserve(uintptr_t fixed_addr, size_t size) {
148  return asan_mmap((void*)fixed_addr, size,
149                   PROT_READ | PROT_WRITE,
150                   MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
151                   0, 0);
152}
153
154void *AsanMprotect(uintptr_t fixed_addr, size_t size) {
155  return asan_mmap((void*)fixed_addr, size,
156                   PROT_NONE,
157                   MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
158                   0, 0);
159}
160
161void AsanUnmapOrDie(void *addr, size_t size) {
162  if (!addr || !size) return;
163  int res = munmap(addr, size);
164  if (res != 0) {
165    Report("Failed to unmap\n");
166    AsanDie();
167  }
168}
169
// Opens |filename| for reading; returns the fd, or -1 on failure.
int AsanOpenReadonly(const char* filename) {
  int fd = open(filename, O_RDONLY);
  return fd;
}
173
174const char *AsanGetEnv(const char *name) {
175  char ***env_ptr = _NSGetEnviron();
176  CHECK(env_ptr);
177  char **environ = *env_ptr;
178  CHECK(environ);
179  size_t name_len = internal_strlen(name);
180  while (*environ != NULL) {
181    size_t len = internal_strlen(*environ);
182    if (len > name_len) {
183      const char *p = *environ;
184      if (!internal_memcmp(p, name, name_len) &&
185          p[name_len] == '=') {  // Match.
186        return *environ + name_len + 1;  // String starting after =.
187      }
188    }
189    environ++;
190  }
191  return NULL;
192}
193
// Reads up to |count| bytes from |fd| into |buf|; returns read(2)'s result.
size_t AsanRead(int fd, void *buf, size_t count) {
  size_t res = read(fd, buf, count);
  return res;
}
197
// Closes |fd|; returns close(2)'s result.
int AsanClose(int fd) {
  int res = close(fd);
  return res;
}
201
// Positions the iterator at the last loaded image (see Reset()).
AsanProcMaps::AsanProcMaps() {
  Reset();
}
205
// Nothing to release: the iteration state is plain integers and pointers
// into dyld-owned data.
AsanProcMaps::~AsanProcMaps() {
}
208
209// More information about Mach-O headers can be found in mach-o/loader.h
210// Each Mach-O image has a header (mach_header or mach_header_64) starting with
211// a magic number, and a list of linker load commands directly following the
212// header.
213// A load command is at least two 32-bit words: the command type and the
214// command size in bytes. We're interested only in segment load commands
215// (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped
216// into the task's address space.
217// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
218// segment_command_64 correspond to the memory address, memory size and the
219// file offset of the current memory segment.
220// Because these fields are taken from the images as is, one needs to add
221// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
222
// Restarts the iteration over loaded images and their load commands.
void AsanProcMaps::Reset() {
  // Count down from the top.
  // TODO(glider): as per man 3 dyld, iterating over the headers with
  // _dyld_image_count is thread-unsafe. We need to register callbacks for
  // adding and removing images which will invalidate the AsanProcMaps state.
  current_image_ = _dyld_image_count();
  // -1 means "load commands for the current image are not set up yet";
  // Next() initializes them lazily per image.
  current_load_cmd_count_ = -1;
  current_load_cmd_addr_ = NULL;
  current_magic_ = 0;
}
233
234// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
235// Google Perftools, http://code.google.com/p/google-perftools.
236
237// NextSegmentLoad scans the current image for the next segment load command
238// and returns the start and end addresses and file offset of the corresponding
239// segment.
240// Note that the segment addresses are not necessarily sorted.
241template<uint32_t kLCSegment, typename SegmentCommand>
242bool AsanProcMaps::NextSegmentLoad(
243    uintptr_t *start, uintptr_t *end, uintptr_t *offset,
244    char filename[], size_t filename_size) {
245  const char* lc = current_load_cmd_addr_;
246  current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
247  if (((const load_command *)lc)->cmd == kLCSegment) {
248    const intptr_t dlloff = _dyld_get_image_vmaddr_slide(current_image_);
249    const SegmentCommand* sc = (const SegmentCommand *)lc;
250    if (start) *start = sc->vmaddr + dlloff;
251    if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
252    if (offset) *offset = sc->fileoff;
253    if (filename) {
254      REAL(strncpy)(filename, _dyld_get_image_name(current_image_),
255                    filename_size);
256    }
257    if (FLAG_v >= 4)
258      Report("LC_SEGMENT: %p--%p %s+%p\n", *start, *end, filename, *offset);
259    return true;
260  }
261  return false;
262}
263
// Returns the next mapped segment among all loaded Mach-O images, walking
// images from the last to the first and, within each image, walking its
// load commands.  Returns false when every image has been exhausted.
bool AsanProcMaps::Next(uintptr_t *start, uintptr_t *end,
                        uintptr_t *offset, char filename[],
                        size_t filename_size) {
  for (; current_image_ >= 0; current_image_--) {
    const mach_header* hdr = _dyld_get_image_header(current_image_);
    if (!hdr) continue;
    if (current_load_cmd_count_ < 0) {
      // Set up for this image;
      current_load_cmd_count_ = hdr->ncmds;
      current_magic_ = hdr->magic;
      // Load commands start directly after the (32- or 64-bit) header.
      switch (current_magic_) {
#ifdef MH_MAGIC_64
        case MH_MAGIC_64: {
          current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
          break;
        }
#endif
        case MH_MAGIC: {
          current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
          break;
        }
        default: {
          // Unrecognized magic: skip this image entirely.
          continue;
        }
      }
    }

    // Resume scanning this image's load commands where we left off.
    for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
      switch (current_magic_) {
        // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
#ifdef MH_MAGIC_64
        case MH_MAGIC_64: {
          if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
                  start, end, offset, filename, filename_size))
            return true;
          break;
        }
#endif
        case MH_MAGIC: {
          if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
                  start, end, offset, filename, filename_size))
            return true;
          break;
        }
      }
    }
    // If we get here, no more load_cmd's in this image talk about
    // segments.  Go on to the next image.
  }
  return false;
}
315
// Finds the module containing |addr| and reports its name and the offset
// of |addr| inside it.  Delegates to the shared helper.
bool AsanProcMaps::GetObjectNameAndOffset(uintptr_t addr, uintptr_t *offset,
                                          char filename[],
                                          size_t filename_size) {
  return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
}
321
322void AsanThread::SetThreadStackTopAndBottom() {
323  size_t stacksize = pthread_get_stacksize_np(pthread_self());
324  void *stackaddr = pthread_get_stackaddr_np(pthread_self());
325  stack_top_ = (uintptr_t)stackaddr;
326  stack_bottom_ = stack_top_ - stacksize;
327  int local;
328  CHECK(AddrIsInStack((uintptr_t)&local));
329}
330
// Linker-initialized lock: relies on zeroed static storage being a valid
// unlocked OSSpinLock, so the body does nothing.
AsanLock::AsanLock(LinkerInitialized) {
  // We assume that OS_SPINLOCK_INIT is zero
}
334
// Acquires the spin lock and records the owning thread.
void AsanLock::Lock() {
  CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
  // Validates the LinkerInitialized assumption above: zeroed storage must
  // be a valid unlocked OSSpinLock.
  CHECK(OS_SPINLOCK_INIT == 0);
  // The lock is non-reentrant: re-locking from the owner would deadlock.
  CHECK(owner_ != (uintptr_t)pthread_self());
  OSSpinLockLock((OSSpinLock*)&opaque_storage_);
  // owner_ is only written under the lock, so it must be clear here.
  CHECK(!owner_);
  owner_ = (uintptr_t)pthread_self();
}
343
// Releases the spin lock; must be called by the owning thread.
void AsanLock::Unlock() {
  CHECK(owner_ == (uintptr_t)pthread_self());
  // Clear the owner before unlocking so a thread acquiring the lock right
  // after never observes a stale owner value.
  owner_ = 0;
  OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
349
350void AsanStackTrace::GetStackTrace(size_t max_s, uintptr_t pc, uintptr_t bp) {
351  size = 0;
352  trace[0] = pc;
353  if ((max_s) > 1) {
354    max_size = max_s;
355    FastUnwindStack(pc, bp);
356  }
357}
358
359// The range of pages to be used for escape islands.
360// TODO(glider): instead of mapping a fixed range we must find a range of
361// unmapped pages in vmmap and take them.
362// These constants were chosen empirically and may not work if the shadow
// memory layout changes. Unfortunately they necessarily depend on
// kHighMemBeg and kHighMemEnd.
365static void *island_allocator_pos = NULL;
366
367#if __WORDSIZE == 32
368# define kIslandEnd (0xffdf0000 - kPageSize)
369# define kIslandBeg (kIslandEnd - 256 * kPageSize)
370#else
371# define kIslandEnd (0x7fffffdf0000 - kPageSize)
372# define kIslandBeg (kIslandEnd - 256 * kPageSize)
373#endif
374
375extern "C"
376mach_error_t __interception_allocate_island(void **ptr,
377                                            size_t unused_size,
378                                            void *unused_hint) {
379  if (!island_allocator_pos) {
380    island_allocator_pos =
381        asan_mmap((void*)kIslandBeg, kIslandEnd - kIslandBeg,
382                  PROT_READ | PROT_WRITE | PROT_EXEC,
383                  MAP_PRIVATE | MAP_ANON | MAP_FIXED,
384                 -1, 0);
385    if (island_allocator_pos != (void*)kIslandBeg) {
386      return KERN_NO_SPACE;
387    }
388  };
389  *ptr = island_allocator_pos;
390  island_allocator_pos = (char*)island_allocator_pos + kPageSize;
391  return err_none;
392}
393
// Counterpart of __interception_allocate_island(); island pages are never
// reclaimed at the moment.
extern "C"
mach_error_t __interception_deallocate_island(void *ptr) {
  // Do nothing.
  // TODO(glider): allow to free and reuse the island memory.
  return err_none;
}
400
401// Support for the following functions from libdispatch on Mac OS:
402//   dispatch_async_f()
403//   dispatch_async()
404//   dispatch_sync_f()
405//   dispatch_sync()
406//   dispatch_after_f()
407//   dispatch_after()
408//   dispatch_group_async_f()
409//   dispatch_group_async()
410// TODO(glider): libdispatch API contains other functions that we don't support
411// yet.
412//
413// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are
414// they can cause jobs to run on a thread different from the current one.
415// TODO(glider): if so, we need a test for this (otherwise we should remove
416// them).
417//
418// The following functions use dispatch_barrier_async_f() (which isn't a library
419// function but is exported) and are thus supported:
420//   dispatch_source_set_cancel_handler_f()
421//   dispatch_source_set_cancel_handler()
422//   dispatch_source_set_event_handler_f()
423//   dispatch_source_set_event_handler()
424//
425// The reference manual for Grand Central Dispatch is available at
426//   http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
427// The implementation details are at
428//   http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
429
// Opaque stand-ins for the pthread workqueue API handles (the real types
// are not exposed in public headers).
typedef void* pthread_workqueue_t;
typedef void* pthread_workitem_handle_t;

// Minimal stand-ins for the libdispatch types used below, so this file
// does not need the dispatch headers.
typedef void* dispatch_group_t;
typedef void* dispatch_queue_t;
typedef uint64_t dispatch_time_t;
typedef void (*dispatch_function_t)(void *block);
typedef void* (*worker_t)(void *block);

// A wrapper for the ObjC blocks used to support libdispatch.
typedef struct {
  void *block;               // The client's original context argument.
  dispatch_function_t func;  // The client's original work function.
  int parent_tid;            // Tid of the thread that scheduled the work.
} asan_block_context_t;
445
// Prototypes of the libdispatch/pthread-workqueue entry points intercepted
// below, declared here in terms of the stand-in typedefs above.
extern "C" {
void dispatch_async_f(dispatch_queue_t dq, void *ctxt,
                      dispatch_function_t func);
void dispatch_sync_f(dispatch_queue_t dq, void *ctxt,
                     dispatch_function_t func);
void dispatch_after_f(dispatch_time_t when, dispatch_queue_t dq, void *ctxt,
                      dispatch_function_t func);
void dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt,
                              dispatch_function_t func);
void dispatch_group_async_f(dispatch_group_t group, dispatch_queue_t dq,
                            void *ctxt, dispatch_function_t func);
int pthread_workqueue_additem_np(pthread_workqueue_t workq,
    void *(*workitem_func)(void *), void * workitem_arg,
    pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp);
}  // extern "C"
461
// Runs the client callback wrapped in |block| (an asan_block_context_t)
// and frees the wrapper.  If the current worker thread is not yet known to
// ASan, an AsanThread is created and registered for it first, so reports
// from the job get correct thread ids and stacks.
extern "C"
void asan_dispatch_call_block_and_release(void *block) {
  GET_STACK_TRACE_HERE(kStackTraceMax);
  asan_block_context_t *context = (asan_block_context_t*)block;
  if (FLAG_v >= 2) {
    Report("asan_dispatch_call_block_and_release(): "
           "context: %p, pthread_self: %p\n",
           block, pthread_self());
  }
  AsanThread *t = asanThreadRegistry().GetCurrent();
  if (!t) {
    // The worker thread was not created through ASan's interceptors:
    // register it on the fly, parented to the scheduling thread.
    t = AsanThread::Create(context->parent_tid, NULL, NULL, &stack);
    asanThreadRegistry().RegisterThread(t);
    t->Init();
    asanThreadRegistry().SetCurrent(t);
  }
  // Call the original dispatcher for the block.
  context->func(context->block);
  asan_free(context, &stack);
}
482
483}  // namespace __asan
484
485using namespace __asan;  // NOLINT
486
487// Wrap |ctxt| and |func| into an asan_block_context_t.
488// The caller retains control of the allocated context.
489extern "C"
490asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
491                                         AsanStackTrace *stack) {
492  asan_block_context_t *asan_ctxt =
493      (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack);
494  asan_ctxt->block = ctxt;
495  asan_ctxt->func = func;
496  asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrMinusOne();
497  return asan_ctxt;
498}
499
500// TODO(glider): can we reduce code duplication by introducing a macro?
501INTERCEPTOR(void, dispatch_async_f, dispatch_queue_t dq, void *ctxt,
502                                    dispatch_function_t func) {
503  GET_STACK_TRACE_HERE(kStackTraceMax);
504  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
505  if (FLAG_v >= 2) {
506    Report("dispatch_async_f(): context: %p, pthread_self: %p\n",
507        asan_ctxt, pthread_self());
508    PRINT_CURRENT_STACK();
509  }
510  return REAL(dispatch_async_f)(dq, (void*)asan_ctxt,
511                                asan_dispatch_call_block_and_release);
512}
513
514INTERCEPTOR(void, dispatch_sync_f, dispatch_queue_t dq, void *ctxt,
515                                   dispatch_function_t func) {
516  GET_STACK_TRACE_HERE(kStackTraceMax);
517  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
518  if (FLAG_v >= 2) {
519    Report("dispatch_sync_f(): context: %p, pthread_self: %p\n",
520        asan_ctxt, pthread_self());
521    PRINT_CURRENT_STACK();
522  }
523  return REAL(dispatch_sync_f)(dq, (void*)asan_ctxt,
524                               asan_dispatch_call_block_and_release);
525}
526
527INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
528                                    dispatch_queue_t dq, void *ctxt,
529                                    dispatch_function_t func) {
530  GET_STACK_TRACE_HERE(kStackTraceMax);
531  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
532  if (FLAG_v >= 2) {
533    Report("dispatch_after_f: %p\n", asan_ctxt);
534    PRINT_CURRENT_STACK();
535  }
536  return REAL(dispatch_after_f)(when, dq, (void*)asan_ctxt,
537                                asan_dispatch_call_block_and_release);
538}
539
540INTERCEPTOR(void, dispatch_barrier_async_f, dispatch_queue_t dq, void *ctxt,
541                                            dispatch_function_t func) {
542  GET_STACK_TRACE_HERE(kStackTraceMax);
543  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
544  if (FLAG_v >= 2) {
545    Report("dispatch_barrier_async_f(): context: %p, pthread_self: %p\n",
546           asan_ctxt, pthread_self());
547    PRINT_CURRENT_STACK();
548  }
549  REAL(dispatch_barrier_async_f)(dq, (void*)asan_ctxt,
550                                 asan_dispatch_call_block_and_release);
551}
552
553INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
554                                          dispatch_queue_t dq, void *ctxt,
555                                          dispatch_function_t func) {
556  GET_STACK_TRACE_HERE(kStackTraceMax);
557  asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
558  if (FLAG_v >= 2) {
559    Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
560           asan_ctxt, pthread_self());
561    PRINT_CURRENT_STACK();
562  }
563  REAL(dispatch_group_async_f)(group, dq, (void*)asan_ctxt,
564                               asan_dispatch_call_block_and_release);
565}
566
567// The following stuff has been extremely helpful while looking for the
568// unhandled functions that spawned jobs on Chromium shutdown. If the verbosity
569// level is 2 or greater, we wrap pthread_workqueue_additem_np() in order to
570// find the points of worker thread creation (each of such threads may be used
571// to run several tasks, that's why this is not enough to support the whole
572// libdispatch API.
573extern "C"
574void *wrap_workitem_func(void *arg) {
575  if (FLAG_v >= 2) {
576    Report("wrap_workitem_func: %p, pthread_self: %p\n", arg, pthread_self());
577  }
578  asan_block_context_t *ctxt = (asan_block_context_t*)arg;
579  worker_t fn = (worker_t)(ctxt->func);
580  void *result =  fn(ctxt->block);
581  GET_STACK_TRACE_HERE(kStackTraceMax);
582  asan_free(arg, &stack);
583  return result;
584}
585
586INTERCEPTOR(int, pthread_workqueue_additem_np, pthread_workqueue_t workq,
587    void *(*workitem_func)(void *), void * workitem_arg,
588    pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp) {
589  GET_STACK_TRACE_HERE(kStackTraceMax);
590  asan_block_context_t *asan_ctxt =
591      (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), &stack);
592  asan_ctxt->block = workitem_arg;
593  asan_ctxt->func = (dispatch_function_t)workitem_func;
594  asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrMinusOne();
595  if (FLAG_v >= 2) {
596    Report("pthread_workqueue_additem_np: %p\n", asan_ctxt);
597    PRINT_CURRENT_STACK();
598  }
599  return REAL(pthread_workqueue_additem_np)(workq, wrap_workitem_func,
600                                            asan_ctxt, itemhandlep,
601                                            gencountp);
602}
603
604// CF_RC_BITS, the layout of CFRuntimeBase and __CFStrIsConstant are internal
605// and subject to change in further CoreFoundation versions. Apple does not
606// guarantee any binary compatibility from release to release.
607
608// See http://opensource.apple.com/source/CF/CF-635.15/CFInternal.h
609#if defined(__BIG_ENDIAN__)
610#define CF_RC_BITS 0
611#endif
612
613#if defined(__LITTLE_ENDIAN__)
614#define CF_RC_BITS 3
615#endif
616
// See http://opensource.apple.com/source/CF/CF-635.15/CFRuntime.h
// Mirrors the layout of CoreFoundation's private CFRuntimeBase so we can
// inspect the reference-count bits of a CFString (see __CFStrIsConstant).
typedef struct __CFRuntimeBase {
  uintptr_t _cfisa;
  uint8_t _cfinfo[4];
#if __LP64__
  // On LP64 the count lives in a separate field rather than in _cfinfo.
  uint32_t _rc;
#endif
} CFRuntimeBase;
625
// See http://opensource.apple.com/source/CF/CF-635.15/CFString.c
// Returns nonzero iff |str| is a compile-time constant CFString: such
// strings carry a zero in the reference-count bits.  |str| must be non-NULL.
int __CFStrIsConstant(CFStringRef str) {
  CFRuntimeBase *base = (CFRuntimeBase*)str;
#if __LP64__
  return base->_rc == 0;
#else
  return (base->_cfinfo[CF_RC_BITS]) == 0;
#endif
}
635
636INTERCEPTOR(CFStringRef, CFStringCreateCopy, CFAllocatorRef alloc,
637                                             CFStringRef str) {
638  if (__CFStrIsConstant(str)) {
639    return str;
640  } else {
641    return REAL(CFStringCreateCopy)(alloc, str);
642  }
643}
644
645namespace __asan {
// Installs the libdispatch interceptors defined above so that jobs
// scheduled on GCD queues run with a registered AsanThread.
void InitializeMacGCDInterceptors() {
  CHECK(INTERCEPT_FUNCTION(dispatch_async_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_sync_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_after_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_barrier_async_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_group_async_f));
  // We don't need to intercept pthread_workqueue_additem_np() to support the
  // libdispatch API, but it helps us to debug the unsupported functions. Let's
  // intercept it only during verbose runs.
  if (FLAG_v >= 2) {
    CHECK(INTERCEPT_FUNCTION(pthread_workqueue_additem_np));
  }
}
// Installs the CFStringCreateCopy interceptor defined above.
void PatchCFStringCreateCopy() {
  // Normally CFStringCreateCopy should not copy constant CF strings.
  // Replacing the default CFAllocator causes constant strings to be copied
  // rather than just returned, which leads to bugs in big applications like
  // Chromium and WebKit, see
  // http://code.google.com/p/address-sanitizer/issues/detail?id=10
  // Until this problem is fixed we need to check that the string is
  // non-constant before calling CFStringCreateCopy.
  CHECK(INTERCEPT_FUNCTION(CFStringCreateCopy));
}
669}  // namespace __asan
670
671#endif  // __APPLE__
672