sanitizer_linux_libcdep.cc revision 4b9f050c2acab536356342ab96e6cc76c281ac24
//===-- sanitizer_linux_libcdep.cc ----------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements linux-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_LINUX

#include "sanitizer_common.h"
#include "sanitizer_flags.h"
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"

#include <dlfcn.h>
#include <pthread.h>
#include <sys/prctl.h>
#include <sys/resource.h>
#include <unwind.h>

#if !SANITIZER_ANDROID
#include <elf.h>
#include <link.h>
#endif

namespace __sanitizer {

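// Determines the stack bounds of the calling thread. At initialization the
// main thread's bounds are derived from RLIMIT_STACK and the /proc/self/maps
// entry that contains a local variable, because libpthread may not be usable
// yet; for all other threads the bounds come from pthread_getattr_np().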
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  static const uptr kMaxThreadStackSize = 1 << 30;  // 1Gb
  CHECK(stack_top);
  CHECK(stack_bottom);
  if (at_initialization) {
    // This is the main thread. Libpthread may not be initialized yet.
    struct rlimit rl;
    CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);

    // Find the mapping that contains a stack variable.
    MemoryMappingLayout proc_maps(/*cache_enabled*/true);
    uptr start, end, offset;
    uptr prev_end = 0;
    while (proc_maps.Next(&start, &end, &offset, 0, 0, /* protection */0)) {
      if ((uptr)&rl < end)
        break;
      prev_end = end;
    }
    CHECK((uptr)&rl >= start && (uptr)&rl < end);

    // Get stacksize from rlimit, but clip it so that it does not overlap
    // with other mappings.
    uptr stacksize = rl.rlim_cur;
    if (stacksize > end - prev_end)
      stacksize = end - prev_end;
    // When running with unlimited stack size, we still want to set some limit.
    // The unlimited stack size is caused by 'ulimit -s unlimited'.
    // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
    if (stacksize > kMaxThreadStackSize)
      stacksize = kMaxThreadStackSize;
    *stack_top = end;
    *stack_bottom = end - stacksize;
    return;
  }
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  uptr stacksize = 0;
  void *stackaddr = 0;
  pthread_attr_getstack(&attr, &stackaddr, (size_t*)&stacksize);
  pthread_attr_destroy(&attr);

  CHECK_LE(stacksize, kMaxThreadStackSize);  // Sanity check.
  *stack_top = (uptr)stackaddr + stacksize;
  *stack_bottom = (uptr)stackaddr;
}

// Does not compile for Go because dlsym() requires -ldl
#ifndef SANITIZER_GO
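// Sets an environment variable by calling the setenv found via
// dlsym(RTLD_NEXT, ...), presumably so that the call bypasses any interposed
// definition and sanitizer_common needs no hard link-time reference to the
// symbol. The memcpy below avoids casting a void* directly to a function
// pointer, which is not strictly portable C++.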
bool SetEnv(const char *name, const char *value) {
  void *f = dlsym(RTLD_NEXT, "setenv");
  if (f == 0)
    return false;
  typedef int(*setenv_ft)(const char *name, const char *value, int overwrite);
  setenv_ft setenv_f;
  CHECK_EQ(sizeof(setenv_f), sizeof(f));
  internal_memcpy(&setenv_f, &f, sizeof(f));
  return setenv_f(name, value, 1) == 0;
}
#endif

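// Thread names passed to prctl(PR_SET_NAME) are truncated by the kernel to
// 16 bytes including the terminating NUL, which is why PR_GET_NAME below
// reads into a 17-byte buffer. Note that SanitizerGetThreadName writes a
// terminator at name[max_len], so callers must provide max_len + 1 bytes.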
bool SanitizerSetThreadName(const char *name) {
#ifdef PR_SET_NAME
  return 0 == prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0);  // NOLINT
#else
  return false;
#endif
}

bool SanitizerGetThreadName(char *name, int max_len) {
#ifdef PR_GET_NAME
  char buff[17];
  if (prctl(PR_GET_NAME, (unsigned long)buff, 0, 0, 0))  // NOLINT
    return false;
  internal_strncpy(name, buff, max_len);
  name[max_len] = 0;
  return true;
#else
  return false;
#endif
}

#ifndef SANITIZER_GO
//------------------------- SlowUnwindStack -----------------------------------
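// The ARM EHABI <unwind.h> does not provide _URC_NORMAL_STOP, so on ARM the
// backtrace is stopped with _URC_END_OF_STACK instead.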
#ifdef __arm__
#define UNWIND_STOP _URC_END_OF_STACK
#define UNWIND_CONTINUE _URC_NO_REASON
#else
#define UNWIND_STOP _URC_NORMAL_STOP
#define UNWIND_CONTINUE _URC_NO_REASON
#endif

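// Extracts the current PC from an _Unwind_Context. The ARM EHABI personality
// interface does not expose _Unwind_GetIP in the same way, so there the PC
// is read from core register r15 via _Unwind_VRS_Get and the Thumb mode bit
// is cleared.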
uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
#ifdef __arm__
  uptr val;
  _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
      15 /* r15 = PC */, _UVRSD_UINT32, &val);
  CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
  // Clear the Thumb bit.
  return val & ~(uptr)1;
#else
  return _Unwind_GetIP(ctx);
#endif
}

struct UnwindTraceArg {
  StackTrace *stack;
  uptr max_depth;
};

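// Callback for _Unwind_Backtrace: records one PC per frame and tells the
// unwinder to stop once max_depth frames have been collected.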
_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
  UnwindTraceArg *arg = (UnwindTraceArg*)param;
  CHECK_LT(arg->stack->size, arg->max_depth);
  uptr pc = Unwind_GetIP(ctx);
  arg->stack->trace[arg->stack->size++] = pc;
  if (arg->stack->size == arg->max_depth) return UNWIND_STOP;
  return UNWIND_CONTINUE;
}

static bool MatchPc(uptr cur_pc, uptr trace_pc) {
  return cur_pc - trace_pc <= 64 || trace_pc - cur_pc <= 64;
}

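// Unwinds the current stack with _Unwind_Backtrace, then pops the innermost
// frames (up to 5) until the requested pc is on top. MatchPc above relies on
// unsigned wrap-around: it accepts any trace entry within 64 bytes of pc in
// either direction.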
void StackTrace::SlowUnwindStack(uptr pc, uptr max_depth) {
  size = 0;
  if (max_depth == 0)
    return;
  UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
  _Unwind_Backtrace(Unwind_Trace, &arg);
  // We need to pop a few frames so that pc is on top.
  // trace[0] belongs to the current function so we always pop it.
  int to_pop = 1;
  /**/ if (size > 1 && MatchPc(pc, trace[1])) to_pop = 1;
  else if (size > 2 && MatchPc(pc, trace[2])) to_pop = 2;
  else if (size > 3 && MatchPc(pc, trace[3])) to_pop = 3;
  else if (size > 4 && MatchPc(pc, trace[4])) to_pop = 4;
  else if (size > 5 && MatchPc(pc, trace[5])) to_pop = 5;
  PopStackFrames(to_pop);
  trace[0] = pc;
}

#endif  // !SANITIZER_GO

static uptr g_tls_size;

#ifdef __i386__
# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
#else
# define DL_INTERNAL_FUNCTION
#endif

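// Queries the size of the static TLS block from glibc's internal
// _dl_get_tls_static_info(). On i386, glibc builds that function with its
// internal_function calling convention, which the regparm(3)/stdcall
// attributes in DL_INTERNAL_FUNCTION above are meant to match.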
void InitTlsSize() {
#if !defined(SANITIZER_GO) && !SANITIZER_ANDROID
  typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
  get_tls_func get_tls;
  void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
  CHECK_EQ(sizeof(get_tls), sizeof(get_tls_static_info_ptr));
  internal_memcpy(&get_tls, &get_tls_static_info_ptr,
                  sizeof(get_tls_static_info_ptr));
  CHECK_NE(get_tls, 0);
  size_t tls_size = 0;
  size_t tls_align = 0;
  get_tls(&tls_size, &tls_align);
  g_tls_size = tls_size;
#endif
}

uptr GetTlsSize() {
  return g_tls_size;
}

#if defined(__x86_64__) || defined(__i386__)
// sizeof(struct pthread) from glibc.
// There has been a report of this being different on glibc 2.11 and 2.13. We
// don't know when this change happened, so 2.14 is a conservative estimate.
#if __GLIBC_PREREQ(2, 14)
const uptr kThreadDescriptorSize = FIRST_32_SECOND_64(1216, 2304);
#else
const uptr kThreadDescriptorSize = FIRST_32_SECOND_64(1168, 2304);
#endif

uptr ThreadDescriptorSize() {
  return kThreadDescriptorSize;
}

// The offset at which pointer to self is located in the thread descriptor.
const uptr kThreadSelfOffset = FIRST_32_SECOND_64(8, 16);

uptr ThreadSelfOffset() {
  return kThreadSelfOffset;
}

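// Returns the address of the current thread's glibc descriptor (struct
// pthread). On x86, the thread register (%gs on i386, %fs on x86_64) points
// at the TCB header, which stores a pointer to itself immediately after the
// tcb and dtv fields; that is where the kThreadSelfOffset of 8 (i386) or
// 16 (x86_64) bytes above comes from.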
uptr ThreadSelf() {
  uptr descr_addr;
#ifdef __i386__
  asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
#else
  asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
#endif
  return descr_addr;
}
#endif  // defined(__x86_64__) || defined(__i386__)

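// Reports the stack range and the static TLS range of the current thread.
// On x86 the static TLS block sits just below the thread descriptor, so its
// bounds are computed from ThreadSelf(), GetTlsSize() and
// kThreadDescriptorSize; on other architectures TLS is not reported.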
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
#ifndef SANITIZER_GO
#if defined(__x86_64__) || defined(__i386__)
  *tls_addr = ThreadSelf();
  *tls_size = GetTlsSize();
  *tls_addr -= *tls_size;
  *tls_addr += kThreadDescriptorSize;
#else
  *tls_addr = 0;
  *tls_size = 0;
#endif

  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;

  if (!main) {
    // If stack and tls intersect, make them non-intersecting.
    if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) {
      CHECK_GT(*tls_addr + *tls_size, *stk_addr);
      CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size);
      *stk_size -= *tls_size;
      *tls_addr = *stk_addr + *stk_size;
    }
  }
#else  // SANITIZER_GO
  *stk_addr = 0;
  *stk_size = 0;
  *tls_addr = 0;
  *tls_size = 0;
#endif  // SANITIZER_GO
}

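// Bumps the stack size recorded in a pthread_attr_t (typically right before
// an intercepted pthread_create) up to GetTlsSize() + 128KB if it is smaller
// than that. A caller-supplied stack cannot be resized here, so in that case
// only a warning is printed.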
void AdjustStackSizeLinux(void *attr_) {
  pthread_attr_t *attr = (pthread_attr_t *)attr_;
  uptr stackaddr = 0;
  size_t stacksize = 0;
  pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
  // GLibC will return (0 - stacksize) as the stack address in the case when
  // stacksize is set, but stackaddr is not.
  bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
  // We place a lot of tool data into TLS, account for that.
  const uptr minstacksize = GetTlsSize() + 128*1024;
  if (stacksize < minstacksize) {
    if (!stack_set) {
      if (common_flags()->verbosity && stacksize != 0)
        Printf("Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
               minstacksize);
      pthread_attr_setstacksize(attr, minstacksize);
    } else {
      Printf("Sanitizer: pre-allocated stack size is insufficient: "
             "%zu < %zu\n", stacksize, minstacksize);
      Printf("Sanitizer: pthread_create is likely to fail.\n");
    }
  }
}

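// Module enumeration is disabled on Android, presumably because
// dl_iterate_phdr is not reliably available there; callers simply see an
// empty module list.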
#if SANITIZER_ANDROID
uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
                      string_predicate_t filter) {
  return 0;
}
#else  // SANITIZER_ANDROID
typedef ElfW(Phdr) Elf_Phdr;

struct DlIteratePhdrData {
  LoadedModule *modules;
  uptr current_n;
  bool first;
  uptr max_n;
  string_predicate_t filter;
};

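// Invoked by dl_iterate_phdr() once per loaded object. The main executable
// comes first and reports an empty dlpi_name, hence the 'first' flag and the
// fall-back to ReadBinaryName(). For every accepted module the address
// ranges of its PT_LOAD segments are recorded.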
static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
  DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
  if (data->current_n == data->max_n)
    return 0;
  InternalScopedBuffer<char> module_name(kMaxPathLength);
  module_name.data()[0] = '\0';
  if (data->first) {
    data->first = false;
    // First module is the binary itself.
    ReadBinaryName(module_name.data(), module_name.size());
  } else if (info->dlpi_name) {
    internal_strncpy(module_name.data(), info->dlpi_name, module_name.size());
  }
  if (module_name.data()[0] == '\0')
    return 0;
  if (data->filter && !data->filter(module_name.data()))
    return 0;
  void *mem = &data->modules[data->current_n];
  LoadedModule *cur_module = new(mem) LoadedModule(module_name.data(),
                                                   info->dlpi_addr);
  data->current_n++;
  for (int i = 0; i < info->dlpi_phnum; i++) {
    const Elf_Phdr *phdr = &info->dlpi_phdr[i];
    if (phdr->p_type == PT_LOAD) {
      uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
      uptr cur_end = cur_beg + phdr->p_memsz;
      cur_module->addAddressRange(cur_beg, cur_end);
    }
  }
  return 0;
}

uptr GetListOfModules(LoadedModule *modules, uptr max_modules,
                      string_predicate_t filter) {
  CHECK(modules);
  DlIteratePhdrData data = {modules, 0, true, max_modules, filter};
  dl_iterate_phdr(dl_iterate_phdr_cb, &data);
  return data.current_n;
}
#endif  // SANITIZER_ANDROID

}  // namespace __sanitizer

#endif  // SANITIZER_LINUX