//===-- tsan_platform_linux.cc --------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Linux- and FreeBSD-specific code.
//===----------------------------------------------------------------------===//


#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_flags.h"

#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <unistd.h>
#include <errno.h>
#include <sched.h>
#include <dlfcn.h>
#if SANITIZER_LINUX
#define __need_res_state
#include <resolv.h>
#endif

#ifdef sa_handler
# undef sa_handler
#endif

#ifdef sa_sigaction
# undef sa_sigaction
#endif

#if SANITIZER_FREEBSD
extern "C" void *__libc_stack_end;
void *__libc_stack_end = 0;
#endif

namespace __tsan {

static uptr g_data_start;
static uptr g_data_end;

#ifdef TSAN_RUNTIME_VMA
// Runtime detected VMA size.
uptr vmaSize;
#endif

enum {
  MemTotal = 0,
  MemShadow = 1,
  MemMeta = 2,
  MemFile = 3,
  MemMmap = 4,
  MemTrace = 5,
  MemHeap = 6,
  MemOther = 7,
  MemCount = 8,
};

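// Attributes the resident size (RSS) of one memory range to the matching Mem*
// bucket above, based on which part of the TSan address layout the range falls
// into. Used as the callback for GetMemoryProfile() in WriteMemoryProfile().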
void FillProfileCallback(uptr p, uptr rss, bool file,
                         uptr *mem, uptr stats_size) {
  mem[MemTotal] += rss;
  if (p >= ShadowBeg() && p < ShadowEnd())
    mem[MemShadow] += rss;
  else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
    mem[MemMeta] += rss;
#ifndef SANITIZER_GO
  else if (p >= HeapMemBeg() && p < HeapMemEnd())
    mem[MemHeap] += rss;
  else if (p >= LoAppMemBeg() && p < LoAppMemEnd())
    mem[file ? MemFile : MemMmap] += rss;
  else if (p >= HiAppMemBeg() && p < HiAppMemEnd())
    mem[file ? MemFile : MemMmap] += rss;
#else
  else if (p >= AppMemBeg() && p < AppMemEnd())
    mem[file ? MemFile : MemMmap] += rss;
#endif
  else if (p >= TraceMemBeg() && p < TraceMemEnd())
    mem[MemTrace] += rss;
  else
    mem[MemOther] += rss;
}

void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
  uptr mem[MemCount];
  internal_memset(mem, 0, sizeof(mem[0]) * MemCount);
  __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
  StackDepotStats *stacks = StackDepotGetStats();
  internal_snprintf(buf, buf_size,
      "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
      " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
      mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
      mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
      mem[MemHeap] >> 20, mem[MemOther] >> 20,
      stacks->allocated >> 20, stacks->n_uniq_ids,
      nlive, nthread);
}

#if SANITIZER_LINUX
void FlushShadowMemoryCallback(
    const SuspendedThreadsList &suspended_threads_list,
    void *argument) {
  FlushUnneededShadowMemory(ShadowBeg(), ShadowEnd() - ShadowBeg());
}
#endif

void FlushShadowMemory() {
#if SANITIZER_LINUX
  StopTheWorld(FlushShadowMemoryCallback, 0);
#endif
}

#ifndef SANITIZER_GO
// Mark shadow for .rodata sections with the special kShadowRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static void MapRodata() {
  // First create temp file.
  const char *tmpdir = GetEnv("TMPDIR");
  if (tmpdir == 0)
    tmpdir = GetEnv("TEST_TMPDIR");
#ifdef P_tmpdir
  if (tmpdir == 0)
    tmpdir = P_tmpdir;
#endif
  if (tmpdir == 0)
    return;
  char name[256];
  internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
                    tmpdir, (int)internal_getpid());
  uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
  if (internal_iserror(openrv))
    return;
  internal_unlink(name);  // Unlink it now, so that we can reuse the buffer.
  fd_t fd = openrv;
  // Fill the file with kShadowRodata.
  const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
  InternalScopedBuffer<u64> marker(kMarkerSize);
  // volatile to prevent insertion of memset
  for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
    *p = kShadowRodata;
  internal_write(fd, marker.data(), marker.size());
  // Map the file into memory.
  uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
  if (internal_iserror(page)) {
    internal_close(fd);
    return;
  }
  // Map the file into shadow of .rodata sections.
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr start, end, offset, prot;
  // Reusing the buffer 'name'.
  while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name), &prot)) {
    if (name[0] != 0 && name[0] != '['
        && (prot & MemoryMappingLayout::kProtectionRead)
        && (prot & MemoryMappingLayout::kProtectionExecute)
        && !(prot & MemoryMappingLayout::kProtectionWrite)
        && IsAppMem(start)) {
      // Assume it's .rodata
      char *shadow_start = (char*)MemToShadow(start);
      char *shadow_end = (char*)MemToShadow(end);
      for (char *p = shadow_start; p < shadow_end; p += marker.size()) {
        internal_mmap(p, Min<uptr>(marker.size(), shadow_end - p),
                      PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
      }
    }
  }
  internal_close(fd);
}

void InitializeShadowMemoryPlatform() {
  MapRodata();
}

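// Guesses the address range of the main executable's data/BSS segments by
// scanning the process memory map; the resulting [g_data_start, g_data_end)
// range backs IsGlobalVar() below.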
static void InitDataSeg() {
  MemoryMappingLayout proc_maps(true);
  uptr start, end, offset;
  char name[128];
#if SANITIZER_FREEBSD
  // On FreeBSD BSS is usually the last block allocated within the
  // low range and heap is the last block allocated within the range
  // 0x800000000-0x8ffffffff.
  while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
                        /*protection*/ 0)) {
    DPrintf("%p-%p %p %s\n", start, end, offset, name);
    if ((start & 0xffff00000000ULL) == 0 && (end & 0xffff00000000ULL) == 0 &&
        name[0] == '\0') {
      g_data_start = start;
      g_data_end = end;
    }
  }
#else
  bool prev_is_data = false;
  while (proc_maps.Next(&start, &end, &offset, name, ARRAY_SIZE(name),
                        /*protection*/ 0)) {
    DPrintf("%p-%p %p %s\n", start, end, offset, name);
    bool is_data = offset != 0 && name[0] != 0;
    // BSS may get merged with [heap] in /proc/self/maps. This is not very
    // reliable.
    bool is_bss = offset == 0 &&
        (name[0] == 0 || internal_strcmp(name, "[heap]") == 0) && prev_is_data;
    if (g_data_start == 0 && is_data)
      g_data_start = start;
    if (is_bss)
      g_data_end = end;
    prev_is_data = is_data;
  }
#endif
  DPrintf("guessed data_start=%p data_end=%p\n", g_data_start, g_data_end);
  CHECK_LT(g_data_start, g_data_end);
  CHECK_GE((uptr)&g_data_start, g_data_start);
  CHECK_LT((uptr)&g_data_start, g_data_end);
}

#endif  // #ifndef SANITIZER_GO

void InitializePlatformEarly() {
#ifdef TSAN_RUNTIME_VMA
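  // The address of the current stack frame lies near the top of the userspace
  // address range, so the index of its most significant set bit (plus one)
  // gives the number of address bits in use, i.e. the active VMA size.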
  vmaSize =
    (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
#if defined(__aarch64__)
  if (vmaSize != 39 && vmaSize != 42) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 39 and 42\n", vmaSize);
    Die();
  }
#elif defined(__powerpc64__)
  if (vmaSize != 44 && vmaSize != 46) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 44 and 46\n", vmaSize);
    Die();
  }
#endif
#endif
}

void InitializePlatform() {
  DisableCoreDumperIfNecessary();

  // Go maps shadow memory lazily and works fine with limited address space.
  // Unlimited stack is not a problem as well, because the executable
  // is not compiled with -pie.
  if (kCppMode) {
    bool reexec = false;
    // TSan doesn't play well with unlimited stack size (as stack
    // overlaps with shadow memory). If we detect unlimited stack size,
    // we re-exec the program with limited stack size as a best effort.
    if (StackSizeIsUnlimited()) {
      const uptr kMaxStackSize = 32 * 1024 * 1024;
      VReport(1, "Program is run with unlimited stack size, which wouldn't "
                 "work with ThreadSanitizer.\n"
                 "Re-execing with stack size limited to %zd bytes.\n",
              kMaxStackSize);
      SetStackSizeLimitInBytes(kMaxStackSize);
      reexec = true;
    }

    if (!AddressSpaceIsUnlimited()) {
      Report("WARNING: Program is run with limited virtual address space,"
             " which wouldn't work with ThreadSanitizer.\n");
      Report("Re-execing with unlimited virtual address space.\n");
      SetAddressSpaceUnlimited();
      reexec = true;
    }
    if (reexec)
      ReExec();
  }

#ifndef SANITIZER_GO
  CheckAndProtect();
  InitTlsSize();
  InitDataSeg();
#endif
}

bool IsGlobalVar(uptr addr) {
  return g_data_start && addr >= g_data_start && addr < g_data_end;
}

#ifndef SANITIZER_GO
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
int ExtractResolvFDs(void *state, int *fds, int nfd) {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
  int cnt = 0;
  __res_state *statp = (__res_state*)state;
  for (int i = 0; i < MAXNS && cnt < nfd; i++) {
    if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
      fds[cnt++] = statp->_u._ext.nssocks[i];
  }
  return cnt;
#else
  return 0;
#endif
}

// Extract file descriptors passed via UNIX domain sockets.
// This is required to properly handle "open" of these fds.
// See 'man recvmsg' and 'man 3 cmsg'.
int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
  int res = 0;
  msghdr *msg = (msghdr*)msgp;
  struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
  for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
    if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
      continue;
    int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
    for (int i = 0; i < n; i++) {
      fds[res++] = ((int*)CMSG_DATA(cmsg))[i];
      if (res == nfd)
        return res;
    }
  }
  return res;
}

// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
    void *abstime), void *c, void *m, void *abstime,
    void(*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are a hardcore macro mess.
  // We can't intercept nor call them w/o including pthread.h.
  int res;
  pthread_cleanup_push(cleanup, arg);
  res = fn(c, m, abstime);
  pthread_cleanup_pop(0);
  return res;
}
#endif

#ifndef SANITIZER_GO
void ReplaceSystemMalloc() { }
#endif

}  // namespace __tsan

#endif  // SANITIZER_LINUX || SANITIZER_FREEBSD