// sanitizer_posix.cc revision d698edc4f74a17048eef3342a9fa42b3ebba802a
1//===-- sanitizer_posix.cc ------------------------------------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file is shared between AddressSanitizer and ThreadSanitizer 11// run-time libraries and implements POSIX-specific functions from 12// sanitizer_libc.h. 13//===----------------------------------------------------------------------===// 14#if defined(__linux__) || defined(__APPLE__) 15 16#include "sanitizer_common.h" 17#include "sanitizer_libc.h" 18#include "sanitizer_procmaps.h" 19 20#include <errno.h> 21#include <pthread.h> 22#include <stdarg.h> 23#include <stdio.h> 24#include <stdlib.h> 25#include <string.h> 26#include <sys/mman.h> 27#include <sys/resource.h> 28#include <sys/time.h> 29#include <sys/types.h> 30#include <unistd.h> 31 32namespace __sanitizer { 33 34// ------------- sanitizer_common.h 35uptr GetPageSize() { 36 return sysconf(_SC_PAGESIZE); 37} 38 39uptr GetMmapGranularity() { 40 return GetPageSize(); 41} 42 43int GetPid() { 44 return getpid(); 45} 46 47uptr GetThreadSelf() { 48 return (uptr)pthread_self(); 49} 50 51void *MmapOrDie(uptr size, const char *mem_type) { 52 size = RoundUpTo(size, GetPageSizeCached()); 53 void *res = internal_mmap(0, size, 54 PROT_READ | PROT_WRITE, 55 MAP_PRIVATE | MAP_ANON, -1, 0); 56 if (res == (void*)-1) { 57 static int recursion_count; 58 if (recursion_count) { 59 // The Report() and CHECK calls below may call mmap recursively and fail. 60 // If we went into recursion, just die. 
61 RawWrite("AddressSanitizer is unable to mmap\n"); 62 Die(); 63 } 64 recursion_count++; 65 Report("ERROR: Failed to allocate 0x%zx (%zd) bytes of %s: %s\n", 66 size, size, mem_type, strerror(errno)); 67 DumpProcessMap(); 68 CHECK("unable to mmap" && 0); 69 } 70 return res; 71} 72 73void UnmapOrDie(void *addr, uptr size) { 74 if (!addr || !size) return; 75 int res = internal_munmap(addr, size); 76 if (res != 0) { 77 Report("ERROR: Failed to deallocate 0x%zx (%zd) bytes at address %p\n", 78 size, size, addr); 79 CHECK("unable to unmap" && 0); 80 } 81} 82 83void *MmapFixedNoReserve(uptr fixed_addr, uptr size) { 84 uptr PageSize = GetPageSizeCached(); 85 void *p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)), 86 RoundUpTo(size, PageSize), 87 PROT_READ | PROT_WRITE, 88 MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, 89 -1, 0); 90 if (p == (void*)-1) 91 Report("ERROR: Failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n", 92 size, size, fixed_addr, errno); 93 return p; 94} 95 96void *Mprotect(uptr fixed_addr, uptr size) { 97 return internal_mmap((void*)fixed_addr, size, 98 PROT_NONE, 99 MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE, 100 -1, 0); 101} 102 103void *MapFileToMemory(const char *file_name, uptr *buff_size) { 104 fd_t fd = internal_open(file_name, false); 105 CHECK_NE(fd, kInvalidFd); 106 uptr fsize = internal_filesize(fd); 107 CHECK_NE(fsize, (uptr)-1); 108 CHECK_GT(fsize, 0); 109 *buff_size = RoundUpTo(fsize, GetPageSizeCached()); 110 void *map = internal_mmap(0, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0); 111 return (map == MAP_FAILED) ? 0 : map; 112} 113 114 115static inline bool IntervalsAreSeparate(uptr start1, uptr end1, 116 uptr start2, uptr end2) { 117 CHECK(start1 <= end1); 118 CHECK(start2 <= end2); 119 return (end1 < start2) || (end2 < start1); 120} 121 122// FIXME: this is thread-unsafe, but should not cause problems most of the time. 
123// When the shadow is mapped only a single thread usually exists (plus maybe 124// several worker threads on Mac, which aren't expected to map big chunks of 125// memory). 126bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) { 127 MemoryMappingLayout procmaps; 128 uptr start, end; 129 while (procmaps.Next(&start, &end, 130 /*offset*/0, /*filename*/0, /*filename_size*/0)) { 131 if (!IntervalsAreSeparate(start, end, range_start, range_end)) 132 return false; 133 } 134 return true; 135} 136 137void DumpProcessMap() { 138 MemoryMappingLayout proc_maps; 139 uptr start, end; 140 const sptr kBufSize = 4095; 141 char *filename = (char*)MmapOrDie(kBufSize, __FUNCTION__); 142 Report("Process memory map follows:\n"); 143 while (proc_maps.Next(&start, &end, /* file_offset */0, 144 filename, kBufSize)) { 145 Printf("\t%p-%p\t%s\n", (void*)start, (void*)end, filename); 146 } 147 Report("End of process memory map.\n"); 148 UnmapOrDie(filename, kBufSize); 149} 150 151const char *GetPwd() { 152 return GetEnv("PWD"); 153} 154 155void DisableCoreDumper() { 156 struct rlimit nocore; 157 nocore.rlim_cur = 0; 158 nocore.rlim_max = 0; 159 setrlimit(RLIMIT_CORE, &nocore); 160} 161 162bool StackSizeIsUnlimited() { 163 struct rlimit rlim; 164 CHECK_EQ(0, getrlimit(RLIMIT_STACK, &rlim)); 165 return (rlim.rlim_cur == (uptr)-1); 166} 167 168void SetStackSizeLimitInBytes(uptr limit) { 169 struct rlimit rlim; 170 rlim.rlim_cur = limit; 171 rlim.rlim_max = limit; 172 if (setrlimit(RLIMIT_STACK, &rlim)) { 173 Report("setrlimit() failed %d\n", errno); 174 Die(); 175 } 176 CHECK(!StackSizeIsUnlimited()); 177} 178 179void SleepForSeconds(int seconds) { 180 sleep(seconds); 181} 182 183void SleepForMillis(int millis) { 184 usleep(millis * 1000); 185} 186 187void Exit(int exitcode) { 188 _exit(exitcode); 189} 190 191void Abort() { 192 abort(); 193} 194 195int Atexit(void (*function)(void)) { 196#ifndef SANITIZER_GO 197 return atexit(function); 198#else 199 return 0; 200#endif 201} 202 
// Returns nonzero iff 'fd' refers to a terminal (thin isatty() wrapper).
int internal_isatty(fd_t fd) {
  return isatty(fd);
}

}  // namespace __sanitizer

#endif  // __linux__ || __APPLE__