sanitizer_linux_libcdep.cc revision 3e0b8ff07e86e0858e016d187d842e97aea2255d
1//===-- sanitizer_linux_libcdep.cc ----------------------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file is shared between AddressSanitizer and ThreadSanitizer
11// run-time libraries and implements linux-specific functions from
12// sanitizer_libc.h.
13//===----------------------------------------------------------------------===//
14
15#include "sanitizer_platform.h"
16#if SANITIZER_LINUX
17
18#include "sanitizer_common.h"
19#include "sanitizer_linux.h"
20#include "sanitizer_placement_new.h"
21#include "sanitizer_procmaps.h"
22#include "sanitizer_stacktrace.h"
23
24#include <dlfcn.h>
25#include <pthread.h>
26#include <sys/prctl.h>
27#include <sys/resource.h>
28#include <unwind.h>
29
30#if !SANITIZER_ANDROID
31#include <elf.h>
32#include <link.h>
33#endif
34
35namespace __sanitizer {
36
// Computes the [*stack_bottom, *stack_top) range of the current thread's
// stack. At initialization (the main thread) libpthread may not be usable
// yet, so the range is derived from /proc/self/maps plus RLIMIT_STACK;
// otherwise it is queried from pthread.
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  // Cap for absurdly large (e.g. "unlimited") stack limits.
  static const uptr kMaxThreadStackSize = 1 << 30;  // 1Gb
  CHECK(stack_top);
  CHECK(stack_bottom);
  if (at_initialization) {
    // This is the main thread. Libpthread may not be initialized yet.
    struct rlimit rl;
    CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);

    // Find the mapping that contains a stack variable. &rl lives on the main
    // stack, so the first mapping whose end address lies above it is the
    // stack mapping.
    MemoryMappingLayout proc_maps(/*cache_enabled*/true);
    uptr start, end, offset;
    uptr prev_end = 0;  // end of the mapping just below the stack mapping
    while (proc_maps.Next(&start, &end, &offset, 0, 0, /* protection */0)) {
      if ((uptr)&rl < end)
        break;
      prev_end = end;
    }
    // The loop must have stopped on the mapping that contains &rl.
    CHECK((uptr)&rl >= start && (uptr)&rl < end);

    // Get stacksize from rlimit, but clip it so that it does not overlap
    // with other mappings.
    uptr stacksize = rl.rlim_cur;
    if (stacksize > end - prev_end)
      stacksize = end - prev_end;
    // When running with unlimited stack size, we still want to set some limit.
    // The unlimited stack size is caused by 'ulimit -s unlimited'.
    // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
    if (stacksize > kMaxThreadStackSize)
      stacksize = kMaxThreadStackSize;
    *stack_top = end;
    *stack_bottom = end - stacksize;
    return;
  }
  // Not at initialization: pthread is available, ask it directly.
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  uptr stacksize = 0;
  void *stackaddr = 0;
  pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
  pthread_attr_destroy(&attr);

  CHECK_LE(stacksize, kMaxThreadStackSize);  // Sanity check.
  *stack_top = (uptr)stackaddr + stacksize;
  *stack_bottom = (uptr)stackaddr;
}
83
84// Does not compile for Go because dlsym() requires -ldl
85#ifndef SANITIZER_GO
86bool SetEnv(const char *name, const char *value) {
87  void *f = dlsym(RTLD_NEXT, "setenv");
88  if (f == 0)
89    return false;
90  typedef int(*setenv_ft)(const char *name, const char *value, int overwrite);
91  setenv_ft setenv_f;
92  CHECK_EQ(sizeof(setenv_f), sizeof(f));
93  internal_memcpy(&setenv_f, &f, sizeof(f));
94  return setenv_f(name, value, 1) == 0;
95}
96#endif
97
// Sets the current thread's name (comm) via prctl(PR_SET_NAME).
// Returns true on success, false when PR_SET_NAME is unavailable or the
// kernel rejects the request. Per prctl(2), names longer than 15 characters
// are silently truncated by the kernel.
bool SanitizerSetThreadName(const char *name) {
#ifdef PR_SET_NAME
  int res = prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0);  // NOLINT
  return res == 0;
#else
  return false;
#endif
}
105
// Reads the current thread's name (comm) via prctl(PR_GET_NAME) into |name|.
// Copies at most |max_len| characters and writes a terminator at
// name[max_len], so the caller must supply a buffer of at least
// max_len + 1 bytes. Returns true on success, false when PR_GET_NAME is
// unavailable or the prctl call fails.
bool SanitizerGetThreadName(char *name, int max_len) {
#ifdef PR_GET_NAME
  // The kernel limits thread names to 16 bytes including the terminator.
  char buff[17];
  if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0))  // NOLINT
    return false;
  internal_strncpy(name, buff, max_len);
  name[max_len] = 0;
  return true;
#else
  return false;
#endif
}
118
119#ifndef SANITIZER_GO
120//------------------------- SlowUnwindStack -----------------------------------
121#ifdef __arm__
122#define UNWIND_STOP _URC_END_OF_STACK
123#define UNWIND_CONTINUE _URC_NO_REASON
124#else
125#define UNWIND_STOP _URC_NORMAL_STOP
126#define UNWIND_CONTINUE _URC_NO_REASON
127#endif
128
// Returns the PC for the given unwind context. The ARM EHABI unwinder does
// not provide _Unwind_GetIP, so on ARM the PC is read directly from core
// register r15.
uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
#ifdef __arm__
  uptr val;
  _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
      15 /* r15 = PC */, _UVRSD_UINT32, &val);
  CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
  // Clear the Thumb bit.
  return val & ~(uptr)1;
#else
  return _Unwind_GetIP(ctx);
#endif
}
141
// State threaded through _Unwind_Backtrace to the Unwind_Trace callback.
struct UnwindTraceArg {
  StackTrace *stack;  // destination trace being filled in
  uptr max_depth;     // maximum number of frames to record
};
146
147_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
148  UnwindTraceArg *arg = (UnwindTraceArg*)param;
149  CHECK_LT(arg->stack->size, arg->max_depth);
150  uptr pc = Unwind_GetIP(ctx);
151  arg->stack->trace[arg->stack->size++] = pc;
152  if (arg->stack->size == arg->max_depth) return UNWIND_STOP;
153  return UNWIND_CONTINUE;
154}
155
156static bool MatchPc(uptr cur_pc, uptr trace_pc) {
157  return cur_pc - trace_pc <= 64 || trace_pc - cur_pc <= 64;
158}
159
160void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
161  size = 0;
162  UnwindTraceArg arg = {this, max_depth};
163  if (max_depth > 1) {
164    _Unwind_Backtrace(Unwind_Trace, &arg);
165    // We need to pop a few frames so that pc is on top.
166    // trace[0] belongs to the current function so we always pop it.
167    int to_pop = 1;
168    /**/ if (size > 1 && MatchPc(pc, trace[1])) to_pop = 1;
169    else if (size > 2 && MatchPc(pc, trace[2])) to_pop = 2;
170    else if (size > 3 && MatchPc(pc, trace[3])) to_pop = 3;
171    else if (size > 4 && MatchPc(pc, trace[4])) to_pop = 4;
172    else if (size > 5 && MatchPc(pc, trace[5])) to_pop = 5;
173    PopStackFrames(to_pop);
174  }
175  trace[0] = pc;
176}
177
178#endif  // !SANITIZER_GO
179
180static uptr g_tls_size;
181
182#ifdef __i386__
183# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
184#else
185# define DL_INTERNAL_FUNCTION
186#endif
187
// Caches the static TLS block size reported by glibc's private
// _dl_get_tls_static_info symbol into g_tls_size. Intended to be called
// early, before the value is needed by GetTlsSize(). No-op for Go and
// Android builds.
void InitTlsSize() {
#if !defined(SANITIZER_GO) && !SANITIZER_ANDROID
  // On i386 glibc exports this symbol with an internal calling convention
  // (regparm/stdcall), hence the DL_INTERNAL_FUNCTION attribute.
  typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
  get_tls_func get_tls;
  void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
  // Copy the object pointer into a function pointer without a direct cast.
  CHECK_EQ(sizeof(get_tls), sizeof(get_tls_static_info_ptr));
  internal_memcpy(&get_tls, &get_tls_static_info_ptr,
                  sizeof(get_tls_static_info_ptr));
  CHECK_NE(get_tls, 0);
  size_t tls_size = 0;
  size_t tls_align = 0;
  get_tls(&tls_size, &tls_align);
  g_tls_size = tls_size;
#endif
}
203
// Returns the static TLS size cached by InitTlsSize() (0 if it has not run).
uptr GetTlsSize() {
  return g_tls_size;
}
207
208#if defined(__x86_64__) || defined(__i386__)
209// sizeof(struct thread) from glibc.
210// There has been a report of this being different on glibc 2.11 and 2.13. We
211// don't know when this change happened, so 2.14 is a conservative estimate.
212#if __GLIBC_PREREQ(2, 14)
213const uptr kThreadDescriptorSize = FIRST_32_SECOND_64(1216, 2304);
214#else
215const uptr kThreadDescriptorSize = FIRST_32_SECOND_64(1168, 2304);
216#endif
217
// Returns sizeof(struct pthread) for the targeted glibc (see the comment on
// kThreadDescriptorSize regarding the glibc-version dependence).
uptr ThreadDescriptorSize() {
  return kThreadDescriptorSize;
}
221
222// The offset at which pointer to self is located in the thread descriptor.
223const uptr kThreadSelfOffset = FIRST_32_SECOND_64(8, 16);
224
// Returns the offset of the self-pointer within the glibc thread descriptor.
uptr ThreadSelfOffset() {
  return kThreadSelfOffset;
}
228
// Returns the address of the current thread's glibc descriptor
// (struct pthread) by loading the self-pointer stored at kThreadSelfOffset
// from the TLS segment register (%gs on i386, %fs on x86_64).
uptr ThreadSelf() {
  uptr descr_addr;
#ifdef __i386__
  asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
#else
  asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
#endif
  return descr_addr;
}
238#endif  // defined(__x86_64__) || defined(__i386__)
239
// Reports the stack range and the static TLS range of the current thread.
// |main| is true for the process' initial thread. TLS is only computed on
// x86/x86_64; elsewhere (and for Go builds) the TLS range is reported empty.
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
#ifndef SANITIZER_GO
#if defined(__x86_64__) || defined(__i386__)
  // ThreadSelf() points at the thread descriptor, which sits at the end of
  // the static TLS block; step back by the TLS size, then skip forward past
  // the descriptor itself to get the start of the TLS data.
  *tls_addr = ThreadSelf();
  *tls_size = GetTlsSize();
  *tls_addr -= *tls_size;
  *tls_addr += kThreadDescriptorSize;
#else
  *tls_addr = 0;
  *tls_size = 0;
#endif

  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;

  if (!main) {
    // If stack and tls intersect, make them non-intersecting.
    // (For non-main threads the TLS block apparently lives inside the top of
    // the stack mapping; carve it out of the reported stack range.)
    if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) {
      CHECK_GT(*tls_addr + *tls_size, *stk_addr);
      CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size);
      *stk_size -= *tls_size;
      *tls_addr = *stk_addr + *stk_size;
    }
  }
#else  // SANITIZER_GO
  *stk_addr = 0;
  *stk_size = 0;
  *tls_addr = 0;
  *tls_size = 0;
#endif  // SANITIZER_GO
}
274
275void AdjustStackSizeLinux(void *attr_, int verbosity) {
276  pthread_attr_t *attr = (pthread_attr_t *)attr_;
277  uptr stackaddr = 0;
278  size_t stacksize = 0;
279  pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
280  // GLibC will return (0 - stacksize) as the stack address in the case when
281  // stacksize is set, but stackaddr is not.
282  bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
283  // We place a lot of tool data into TLS, account for that.
284  const uptr minstacksize = GetTlsSize() + 128*1024;
285  if (stacksize < minstacksize) {
286    if (!stack_set) {
287      if (verbosity && stacksize != 0)
288        Printf("Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
289               minstacksize);
290      pthread_attr_setstacksize(attr, minstacksize);
291    } else {
292      Printf("Sanitizer: pre-allocated stack size is insufficient: "
293             "%zu < %zu\n", stacksize, minstacksize);
294      Printf("Sanitizer: pthread_create is likely to fail.\n");
295    }
296  }
297}
298
299#if SANITIZER_ANDROID
// Module enumeration is not implemented on Android: reports zero modules.
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
                      string_predicate_t filter) {
  return 0;
}
304#else  // SANITIZER_ANDROID
305typedef ElfW(Phdr) Elf_Phdr;
306
// State threaded through dl_iterate_phdr to dl_iterate_phdr_cb.
struct DlIteratePhdrData {
  LoadedModule *modules;      // output array with capacity max_n
  uptr current_n;             // number of modules recorded so far
  bool first;                 // true until the main binary has been visited
  uptr max_n;
  string_predicate_t filter;  // optional module-name filter; may be null
};
314
315static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
316  DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
317  if (data->current_n == data->max_n)
318    return 0;
319  InternalScopedBuffer<char> module_name(kMaxPathLength);
320  module_name.data()[0] = '\0';
321  if (data->first) {
322    data->first = false;
323    // First module is the binary itself.
324    ReadBinaryName(module_name.data(), module_name.size());
325  } else if (info->dlpi_name) {
326    internal_strncpy(module_name.data(), info->dlpi_name, module_name.size());
327  }
328  if (module_name.data()[0] == '\0')
329    return 0;
330  if (data->filter && !data->filter(module_name.data()))
331    return 0;
332  void *mem = &data->modules[data->current_n];
333  LoadedModule *cur_module = new(mem) LoadedModule(module_name.data(),
334                                                   info->dlpi_addr);
335  data->current_n++;
336  for (int i = 0; i < info->dlpi_phnum; i++) {
337    const Elf_Phdr *phdr = &info->dlpi_phdr[i];
338    if (phdr->p_type == PT_LOAD) {
339      uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
340      uptr cur_end = cur_beg + phdr->p_memsz;
341      cur_module->addAddressRange(cur_beg, cur_end);
342    }
343  }
344  return 0;
345}
346
347uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
348                      string_predicate_t filter) {
349  CHECK(modules);
350  DlIteratePhdrData data = {modules, 0, true, max_modules, filter};
351  dl_iterate_phdr(dl_iterate_phdr_cb, &data);
352  return data.current_n;
353}
354#endif  // SANITIZER_ANDROID
355
356}  // namespace __sanitizer
357
358#endif  // SANITIZER_LINUX
359