sanitizer_mac.cc revision 93af5948d3e0c5bdc396f432dd0ae782f499c449
//===-- sanitizer_mac.cc --------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements mac-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#ifdef __APPLE__
// Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so
// the clients will most certainly use 64-bit ones as well.
#ifndef _DARWIN_USE_64_BIT_INODE
#define _DARWIN_USE_64_BIT_INODE 1
#endif
#include <stdio.h>

#include "sanitizer_common.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_procmaps.h"

#include <crt_externs.h>  // for _NSGetEnviron
#include <fcntl.h>
#include <mach-o/dyld.h>
#include <mach-o/loader.h>
#include <pthread.h>
#include <sched.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <libkern/OSAtomic.h>

namespace __sanitizer {

// ---------------------- sanitizer_libc.h
// The internal_* functions below are thin pass-through wrappers around the
// corresponding POSIX calls; they implement the sanitizer_libc.h interface
// on Mac. Return values and errno behavior are those of the underlying call.

void *internal_mmap(void *addr, size_t length, int prot, int flags,
                    int fd, u64 offset) {
  return mmap(addr, length, prot, flags, fd, offset);
}

int internal_munmap(void *addr, uptr length) {
  return munmap(addr, length);
}

int internal_close(fd_t fd) {
  return close(fd);
}

fd_t internal_open(const char *filename, int flags) {
  return open(filename, flags);
}

fd_t internal_open(const char *filename, int flags, u32 mode) {
  return open(filename, flags, mode);
}

// Opens |filename| for writing (creating it with mode 0660 if needed) or for
// reading. The 0660 mode argument is only consulted by open(2) when O_CREAT
// is in effect, i.e. in the write case.
fd_t OpenFile(const char *filename, bool write) {
  return internal_open(filename,
                       write ? O_WRONLY | O_CREAT : O_RDONLY, 0660);
}

uptr internal_read(fd_t fd, void *buf, uptr count) {
  return read(fd, buf, count);
}

uptr internal_write(fd_t fd, const void *buf, uptr count) {
  return write(fd, buf, count);
}

// |buf| must point to a struct stat; it is typed void* to keep <sys/stat.h>
// out of the sanitizer_libc.h interface.
int internal_stat(const char *path, void *buf) {
  return stat(path, (struct stat *)buf);
}

int internal_lstat(const char *path, void *buf) {
  return lstat(path, (struct stat *)buf);
}

int internal_fstat(fd_t fd, void *buf) {
  return fstat(fd, (struct stat *)buf);
}

// Returns the size of the file behind |fd|, or (uptr)-1 if fstat fails.
uptr internal_filesize(fd_t fd) {
  struct stat st;
  if (internal_fstat(fd, &st))
    return -1;
  return (uptr)st.st_size;
}

int internal_dup2(int oldfd, int newfd) {
  return dup2(oldfd, newfd);
}

// Note: like readlink(2), does not null-terminate |buf|.
uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
  return readlink(path, buf, bufsize);
}

int internal_sched_yield() {
  return sched_yield();
}

// Terminates the process immediately, without running atexit handlers.
void internal__exit(int exitcode) {
  _exit(exitcode);
}

// ----------------- sanitizer_common.h

// Returns true iff |filename| exists and is a regular file.
bool FileExists(const char *filename) {
  struct stat st;
  if (stat(filename, &st))
    return false;
  // Sanity check: filename is a regular file.
  return S_ISREG(st.st_mode);
}

// Uses the pthread handle itself as a unique id for the current thread.
uptr GetTid() {
  return reinterpret_cast<uptr>(pthread_self());
}

// Reports the stack range of the current thread via the pthread_np API.
// pthread_get_stackaddr_np() is treated as the highest stack address (the
// stack grows down from it); |at_initialization| is unused on Mac.
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  uptr stacksize = pthread_get_stacksize_np(pthread_self());
  void *stackaddr = pthread_get_stackaddr_np(pthread_self());
  *stack_top = (uptr)stackaddr;
  *stack_bottom = *stack_top - stacksize;
}

// Returns the value of the environment variable |name|, or 0 if it is not
// set. The returned pointer aliases the environ block ("NAME=value" entries
// obtained via _NSGetEnviron); it is not a copy, so do not free or modify it.
const char *GetEnv(const char *name) {
  char ***env_ptr = _NSGetEnviron();
  CHECK(env_ptr);
  char **environ = *env_ptr;
  CHECK(environ);
  uptr name_len = internal_strlen(name);
  while (*environ != 0) {
    uptr len = internal_strlen(*environ);
    if (len > name_len) {
      const char *p = *environ;
      if (!internal_memcmp(p, name, name_len) &&
          p[name_len] == '=') {  // Match.
        return *environ + name_len + 1;  // String starting after =.
      }
    }
    environ++;
  }
  return 0;
}

// Re-executing the current binary is not supported on Mac yet.
void ReExec() {
  UNIMPLEMENTED();
}

void PrepareForSandboxing() {
  // Nothing here for now.
}

// ----------------- sanitizer_procmaps.h

MemoryMappingLayout::MemoryMappingLayout() {
  Reset();
}

MemoryMappingLayout::~MemoryMappingLayout() {
}

// More information about Mach-O headers can be found in mach-o/loader.h
// Each Mach-O image has a header (mach_header or mach_header_64) starting with
// a magic number, and a list of linker load commands directly following the
// header.
// A load command is at least two 32-bit words: the command type and the
// command size in bytes. We're interested only in segment load commands
// (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped
// into the task's address space.
// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
// segment_command_64 correspond to the memory address, memory size and the
// file offset of the current memory segment.
// Because these fields are taken from the images as is, one needs to add
// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.

// Rewinds the iterator to the start of the mapping list. Images are walked
// from the highest dyld index down to 0; current_image_ starts one past the
// last valid index, which Next() tolerates because _dyld_get_image_header()
// is expected to return null for an out-of-range index (NOTE(review): per
// man 3 dyld — confirm). current_load_cmd_count_ == -1 is the sentinel
// meaning "the per-image state has not been set up yet".
void MemoryMappingLayout::Reset() {
  // Count down from the top.
  // TODO(glider): as per man 3 dyld, iterating over the headers with
  // _dyld_image_count is thread-unsafe. We need to register callbacks for
  // adding and removing images which will invalidate the MemoryMappingLayout
  // state.
  current_image_ = _dyld_image_count();
  current_load_cmd_count_ = -1;
  current_load_cmd_addr_ = 0;
  current_magic_ = 0;
  current_filetype_ = 0;
}

// static
void MemoryMappingLayout::CacheMemoryMappings() {
  // No-op on Mac for now.
}

void MemoryMappingLayout::LoadFromCache() {
  // No-op on Mac for now.
}

// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
// Google Perftools, http://code.google.com/p/google-perftools.

// NextSegmentLoad scans the current image for the next segment load command
// and returns the start and end addresses and file offset of the corresponding
// segment.
// Note that the segment addresses are not necessarily sorted.
216template<u32 kLCSegment, typename SegmentCommand> 217bool MemoryMappingLayout::NextSegmentLoad( 218 uptr *start, uptr *end, uptr *offset, 219 char filename[], uptr filename_size, uptr *protection) { 220 if (protection) 221 UNIMPLEMENTED(); 222 const char* lc = current_load_cmd_addr_; 223 current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize; 224 if (((const load_command *)lc)->cmd == kLCSegment) { 225 const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_); 226 const SegmentCommand* sc = (const SegmentCommand *)lc; 227 if (start) *start = sc->vmaddr + dlloff; 228 if (end) *end = sc->vmaddr + sc->vmsize + dlloff; 229 if (offset) { 230 if (current_filetype_ == /*MH_EXECUTE*/ 0x2) { 231 *offset = sc->vmaddr; 232 } else { 233 *offset = sc->fileoff; 234 } 235 } 236 if (filename) { 237 internal_strncpy(filename, _dyld_get_image_name(current_image_), 238 filename_size); 239 } 240 return true; 241 } 242 return false; 243} 244 245bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset, 246 char filename[], uptr filename_size, 247 uptr *protection) { 248 for (; current_image_ >= 0; current_image_--) { 249 const mach_header* hdr = _dyld_get_image_header(current_image_); 250 if (!hdr) continue; 251 if (current_load_cmd_count_ < 0) { 252 // Set up for this image; 253 current_load_cmd_count_ = hdr->ncmds; 254 current_magic_ = hdr->magic; 255 current_filetype_ = hdr->filetype; 256 switch (current_magic_) { 257#ifdef MH_MAGIC_64 258 case MH_MAGIC_64: { 259 current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64); 260 break; 261 } 262#endif 263 case MH_MAGIC: { 264 current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header); 265 break; 266 } 267 default: { 268 continue; 269 } 270 } 271 } 272 273 for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) { 274 switch (current_magic_) { 275 // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64. 
276#ifdef MH_MAGIC_64 277 case MH_MAGIC_64: { 278 if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>( 279 start, end, offset, filename, filename_size, protection)) 280 return true; 281 break; 282 } 283#endif 284 case MH_MAGIC: { 285 if (NextSegmentLoad<LC_SEGMENT, struct segment_command>( 286 start, end, offset, filename, filename_size, protection)) 287 return true; 288 break; 289 } 290 } 291 } 292 // If we get here, no more load_cmd's in this image talk about 293 // segments. Go on to the next image. 294 } 295 return false; 296} 297 298bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset, 299 char filename[], 300 uptr filename_size, 301 uptr *protection) { 302 return IterateForObjectNameAndOffset(addr, offset, filename, filename_size, 303 protection); 304} 305 306BlockingMutex::BlockingMutex(LinkerInitialized) { 307 // We assume that OS_SPINLOCK_INIT is zero 308} 309 310BlockingMutex::BlockingMutex() { 311 internal_memset(this, 0, sizeof(*this)); 312} 313 314void BlockingMutex::Lock() { 315 CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_)); 316 CHECK_EQ(OS_SPINLOCK_INIT, 0); 317 CHECK_NE(owner_, (uptr)pthread_self()); 318 OSSpinLockLock((OSSpinLock*)&opaque_storage_); 319 CHECK(!owner_); 320 owner_ = (uptr)pthread_self(); 321} 322 323void BlockingMutex::Unlock() { 324 CHECK(owner_ == (uptr)pthread_self()); 325 owner_ = 0; 326 OSSpinLockUnlock((OSSpinLock*)&opaque_storage_); 327} 328 329void BlockingMutex::CheckLocked() { 330 CHECK_EQ((uptr)pthread_self(), owner_); 331} 332 333uptr GetTlsSize() { 334 return 0; 335} 336 337void InitTlsSize() { 338} 339 340} // namespace __sanitizer 341 342#endif // __APPLE__ 343