// sanitizer_mac.cc revision 7847d77b246635211c3bf465421d49d7af5226c1
1//===-- sanitizer_mac.cc --------------------------------------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file is shared between AddressSanitizer and ThreadSanitizer 11// run-time libraries and implements mac-specific functions from 12// sanitizer_libc.h. 13//===----------------------------------------------------------------------===// 14 15#include "sanitizer_platform.h" 16#if SANITIZER_MAC 17 18// Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so 19// the clients will most certainly use 64-bit ones as well. 20#ifndef _DARWIN_USE_64_BIT_INODE 21#define _DARWIN_USE_64_BIT_INODE 1 22#endif 23#include <stdio.h> 24 25#include "sanitizer_common.h" 26#include "sanitizer_internal_defs.h" 27#include "sanitizer_libc.h" 28#include "sanitizer_placement_new.h" 29#include "sanitizer_procmaps.h" 30 31#include <crt_externs.h> // for _NSGetEnviron 32#include <fcntl.h> 33#include <mach-o/dyld.h> 34#include <mach-o/loader.h> 35#include <pthread.h> 36#include <sched.h> 37#include <sys/mman.h> 38#include <sys/resource.h> 39#include <sys/stat.h> 40#include <sys/types.h> 41#include <unistd.h> 42#include <libkern/OSAtomic.h> 43#include <errno.h> 44 45namespace __sanitizer { 46 47#include "sanitizer_syscall_generic.inc" 48 49// ---------------------- sanitizer_libc.h 50uptr internal_mmap(void *addr, size_t length, int prot, int flags, 51 int fd, u64 offset) { 52 return (uptr)mmap(addr, length, prot, flags, fd, offset); 53} 54 55uptr internal_munmap(void *addr, uptr length) { 56 return munmap(addr, length); 57} 58 59uptr internal_close(fd_t fd) { 60 return close(fd); 61} 62 63uptr internal_open(const char *filename, int flags) { 64 return open(filename, flags); 65} 66 67uptr internal_open(const char *filename, int flags, u32 mode) 
{ 68 return open(filename, flags, mode); 69} 70 71uptr OpenFile(const char *filename, bool write) { 72 return internal_open(filename, 73 write ? O_WRONLY | O_CREAT : O_RDONLY, 0660); 74} 75 76uptr internal_read(fd_t fd, void *buf, uptr count) { 77 return read(fd, buf, count); 78} 79 80uptr internal_write(fd_t fd, const void *buf, uptr count) { 81 return write(fd, buf, count); 82} 83 84uptr internal_stat(const char *path, void *buf) { 85 return stat(path, (struct stat *)buf); 86} 87 88uptr internal_lstat(const char *path, void *buf) { 89 return lstat(path, (struct stat *)buf); 90} 91 92uptr internal_fstat(fd_t fd, void *buf) { 93 return fstat(fd, (struct stat *)buf); 94} 95 96uptr internal_filesize(fd_t fd) { 97 struct stat st; 98 if (internal_fstat(fd, &st)) 99 return -1; 100 return (uptr)st.st_size; 101} 102 103uptr internal_dup2(int oldfd, int newfd) { 104 return dup2(oldfd, newfd); 105} 106 107uptr internal_readlink(const char *path, char *buf, uptr bufsize) { 108 return readlink(path, buf, bufsize); 109} 110 111uptr internal_sched_yield() { 112 return sched_yield(); 113} 114 115void internal__exit(int exitcode) { 116 _exit(exitcode); 117} 118 119uptr internal_getpid() { 120 return getpid(); 121} 122 123// ----------------- sanitizer_common.h 124bool FileExists(const char *filename) { 125 struct stat st; 126 if (stat(filename, &st)) 127 return false; 128 // Sanity check: filename is a regular file. 
129 return S_ISREG(st.st_mode); 130} 131 132uptr GetTid() { 133 return reinterpret_cast<uptr>(pthread_self()); 134} 135 136void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, 137 uptr *stack_bottom) { 138 CHECK(stack_top); 139 CHECK(stack_bottom); 140 uptr stacksize = pthread_get_stacksize_np(pthread_self()); 141 void *stackaddr = pthread_get_stackaddr_np(pthread_self()); 142 *stack_top = (uptr)stackaddr; 143 *stack_bottom = *stack_top - stacksize; 144} 145 146const char *GetEnv(const char *name) { 147 char ***env_ptr = _NSGetEnviron(); 148 CHECK(env_ptr); 149 char **environ = *env_ptr; 150 CHECK(environ); 151 uptr name_len = internal_strlen(name); 152 while (*environ != 0) { 153 uptr len = internal_strlen(*environ); 154 if (len > name_len) { 155 const char *p = *environ; 156 if (!internal_memcmp(p, name, name_len) && 157 p[name_len] == '=') { // Match. 158 return *environ + name_len + 1; // String starting after =. 159 } 160 } 161 environ++; 162 } 163 return 0; 164} 165 166void ReExec() { 167 UNIMPLEMENTED(); 168} 169 170void PrepareForSandboxing() { 171 // Nothing here for now. 172} 173 174uptr GetPageSize() { 175 return sysconf(_SC_PAGESIZE); 176} 177 178// ----------------- sanitizer_procmaps.h 179 180MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) { 181 Reset(); 182} 183 184MemoryMappingLayout::~MemoryMappingLayout() { 185} 186 187// More information about Mach-O headers can be found in mach-o/loader.h 188// Each Mach-O image has a header (mach_header or mach_header_64) starting with 189// a magic number, and a list of linker load commands directly following the 190// header. 191// A load command is at least two 32-bit words: the command type and the 192// command size in bytes. We're interested only in segment load commands 193// (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped 194// into the task's address space. 
195// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or 196// segment_command_64 correspond to the memory address, memory size and the 197// file offset of the current memory segment. 198// Because these fields are taken from the images as is, one needs to add 199// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime. 200 201void MemoryMappingLayout::Reset() { 202 // Count down from the top. 203 // TODO(glider): as per man 3 dyld, iterating over the headers with 204 // _dyld_image_count is thread-unsafe. We need to register callbacks for 205 // adding and removing images which will invalidate the MemoryMappingLayout 206 // state. 207 current_image_ = _dyld_image_count(); 208 current_load_cmd_count_ = -1; 209 current_load_cmd_addr_ = 0; 210 current_magic_ = 0; 211 current_filetype_ = 0; 212} 213 214// static 215void MemoryMappingLayout::CacheMemoryMappings() { 216 // No-op on Mac for now. 217} 218 219void MemoryMappingLayout::LoadFromCache() { 220 // No-op on Mac for now. 221} 222 223// Next and NextSegmentLoad were inspired by base/sysinfo.cc in 224// Google Perftools, http://code.google.com/p/google-perftools. 225 226// NextSegmentLoad scans the current image for the next segment load command 227// and returns the start and end addresses and file offset of the corresponding 228// segment. 229// Note that the segment addresses are not necessarily sorted. 
// Examines the load command at |current_load_cmd_addr_| and advances the
// cursor past it. Returns true iff the command is a segment load command of
// kind |kLCSegment| (LC_SEGMENT or LC_SEGMENT_64), in which case the
// requested out-parameters (any of which may be null) are filled in.
// Segment protection flags are not supported on Mac yet.
template<u32 kLCSegment, typename SegmentCommand>
bool MemoryMappingLayout::NextSegmentLoad(
    uptr *start, uptr *end, uptr *offset,
    char filename[], uptr filename_size, uptr *protection) {
  if (protection)
    UNIMPLEMENTED();
  const char* lc = current_load_cmd_addr_;
  // Every load command starts with (cmd, cmdsize); step to the next command
  // unconditionally, before inspecting this one.
  current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
  if (((const load_command *)lc)->cmd == kLCSegment) {
    // Add the image's vmaddr slide to turn file-relative addresses into
    // runtime addresses.
    const sptr dlloff = _dyld_get_image_vmaddr_slide(current_image_);
    const SegmentCommand* sc = (const SegmentCommand *)lc;
    if (start) *start = sc->vmaddr + dlloff;
    if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
    if (offset) {
      // For the main executable report vmaddr instead of the file offset.
      if (current_filetype_ == /*MH_EXECUTE*/ 0x2) {
        *offset = sc->vmaddr;
      } else {
        *offset = sc->fileoff;
      }
    }
    if (filename) {
      internal_strncpy(filename, _dyld_get_image_name(current_image_),
                       filename_size);
    }
    return true;
  }
  return false;
}

// Iterates over the segment load commands of all loaded images, yielding one
// memory segment per call. Returns false once every image has been exhausted.
bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
                               char filename[], uptr filename_size,
                               uptr *protection) {
  for (; current_image_ >= 0; current_image_--) {
    const mach_header* hdr = _dyld_get_image_header(current_image_);
    if (!hdr) continue;
    if (current_load_cmd_count_ < 0) {
      // Set up for this image.
      current_load_cmd_count_ = hdr->ncmds;
      current_magic_ = hdr->magic;
      current_filetype_ = hdr->filetype;
      // The load commands follow directly after the (32- or 64-bit) header.
      switch (current_magic_) {
#ifdef MH_MAGIC_64
        case MH_MAGIC_64: {
          current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
          break;
        }
#endif
        case MH_MAGIC: {
          current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
          break;
        }
        default: {
          // Unrecognized magic number: skip this image.
          continue;
        }
      }
    }

    for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
      switch (current_magic_) {
        // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
#ifdef MH_MAGIC_64
        case MH_MAGIC_64: {
          if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
                  start, end, offset, filename, filename_size, protection))
            return true;
          break;
        }
#endif
        case MH_MAGIC: {
          if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
                  start, end, offset, filename, filename_size, protection))
            return true;
          break;
        }
      }
    }
    // If we get here, no more load_cmd's in this image talk about
    // segments. Go on to the next image.
  }
  return false;
}

bool MemoryMappingLayout::GetObjectNameAndOffset(uptr addr, uptr *offset,
                                                 char filename[],
                                                 uptr filename_size,
                                                 uptr *protection) {
  return IterateForObjectNameAndOffset(addr, offset, filename, filename_size,
                                       protection);
}

// BlockingMutex is built on OSSpinLock stored in opaque_storage_; |owner_|
// records the thread currently holding the lock.
BlockingMutex::BlockingMutex(LinkerInitialized) {
  // We assume that OS_SPINLOCK_INIT is zero
}

BlockingMutex::BlockingMutex() {
  internal_memset(this, 0, sizeof(*this));
}

void BlockingMutex::Lock() {
  CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
  CHECK_EQ(OS_SPINLOCK_INIT, 0);
  // Catch attempts to re-lock from the owning thread (would spin forever).
  CHECK_NE(owner_, (uptr)pthread_self());
  OSSpinLockLock((OSSpinLock*)&opaque_storage_);
  CHECK(!owner_);
  owner_ = (uptr)pthread_self();
}

void BlockingMutex::Unlock() {
  // Only the owning thread may unlock.
  CHECK(owner_ == (uptr)pthread_self());
  owner_ = 0;
  OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}

void BlockingMutex::CheckLocked() {
  CHECK_EQ((uptr)pthread_self(), owner_);
}

// Not implemented on Mac yet; always reports zero.
u64 NanoTime() {
  return 0;
}

// TLS size reporting is not implemented on Mac yet.
uptr GetTlsSize() {
  return 0;
}

void InitTlsSize() {
}

// Reports the current thread's stack range; TLS is always reported as empty
// (not implemented on Mac). In Go builds everything is reported as zero.
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
#ifndef SANITIZER_GO
  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;
  *tls_addr = 0;
  *tls_size = 0;
#else
  *stk_addr = 0;
  *stk_size = 0;
  *tls_addr = 0;
  *tls_size = 0;
#endif
}

// Fills |modules| with up to |max_modules| modules loaded into the process,
// optionally filtered by |filter|. Returns the number of modules stored.
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
                      string_predicate_t filter) {
  MemoryMappingLayout memory_mapping(false);
  memory_mapping.Reset();
  uptr cur_beg, cur_end, cur_offset;
  InternalScopedBuffer<char> module_name(kMaxPathLength);
  uptr n_modules = 0;
  for (uptr i = 0;
       n_modules < max_modules &&
           memory_mapping.Next(&cur_beg, &cur_end, &cur_offset,
                               module_name.data(), module_name.size(), 0);
       i++) {
    const char *cur_name = module_name.data();
    if (cur_name[0] == '\0')
      continue;
    if (filter && !filter(cur_name))
      continue;
    // Consecutive segments with the same module name are merged into one
    // LoadedModule carrying several address ranges.
    LoadedModule *cur_module = 0;
    if (n_modules > 0 &&
        0 == internal_strcmp(cur_name, modules[n_modules - 1].full_name())) {
      cur_module = &modules[n_modules - 1];
    } else {
      // Placement-new into the caller-provided array slot.
      void *mem = &modules[n_modules];
      cur_module = new(mem) LoadedModule(cur_name, cur_beg);
      n_modules++;
    }
    cur_module->addAddressRange(cur_beg, cur_end);
  }
  return n_modules;
}

}  // namespace __sanitizer

#endif  // SANITIZER_MAC