//=-- lsan_common_linux.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality. Linux-specific code.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#include "lsan_common.h"

#if CAN_SANITIZE_LEAKS && SANITIZER_LINUX
#include <link.h>

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __lsan {

static const char kLinkerName[] = "ld";

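// The linker module is kept in a raw, aligned byte buffer and initialized in
// place so that the runtime does not need a global constructor for it.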
static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64);
static LoadedModule *linker = nullptr;

static bool IsLinker(const char* full_name) {
  return LibraryNameIs(full_name, kLinkerName);
}

void InitializePlatformSpecificModules() {
  ListOfModules modules;
  modules.init();
  for (LoadedModule &module : modules) {
    if (!IsLinker(module.full_name())) continue;
    if (linker == nullptr) {
      linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
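      // Copy the module into the static buffer and reset the list entry so
      // its address ranges are not released when |modules| goes out of scope.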
      *linker = module;
      module = LoadedModule();
    } else {
      VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
              "TLS will not be handled correctly.\n", kLinkerName);
      linker->clear();
      linker = nullptr;
      return;
    }
  }
  if (linker == nullptr) {
    VReport(1, "LeakSanitizer: Dynamic linker not found. "
               "TLS will not be handled correctly.\n");
  }
}

static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
                                        void *data) {
  Frontier *frontier = reinterpret_cast<Frontier *>(data);
  for (uptr j = 0; j < info->dlpi_phnum; j++) {
    const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
    // We're looking for .data and .bss sections, which reside in writeable,
    // loadable segments.
    if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) ||
        (phdr->p_memsz == 0))
      continue;
    uptr begin = info->dlpi_addr + phdr->p_vaddr;
    uptr end = begin + phdr->p_memsz;
    uptr allocator_begin = 0, allocator_end = 0;
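    // The allocator's internal state is itself a global object. Carve its
    // range out of the scan: its bookkeeping pointers reference user chunks
    // and would otherwise make every heap allocation appear reachable.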
    GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
    if (begin <= allocator_begin && allocator_begin < end) {
      CHECK_LE(allocator_begin, allocator_end);
      CHECK_LE(allocator_end, end);
      if (begin < allocator_begin)
        ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                             kReachable);
      if (allocator_end < end)
        ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL",
                             kReachable);
    } else {
      ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
    }
  }
  return 0;
}

// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
  if (!flags()->use_globals) return;
  dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
}

static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
  CHECK(stack_id);
  StackTrace stack = map->Get(stack_id);
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

struct ProcessPlatformAllocParam {
  Frontier *frontier;
  StackDepotReverseMap *stack_depot_reverse_map;
  bool skip_linker_allocations;
};

// ForEachChunk callback. Identifies unreachable chunks which must be treated as
// reachable. Marks them as reachable and adds them to the frontier.
static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
  CHECK(arg);
  ProcessPlatformAllocParam *param =
      reinterpret_cast<ProcessPlatformAllocParam *>(arg);
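  // ForEachChunk reports the beginning of the allocator chunk; translate it
  // to the beginning of the user-visible block, which may differ.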
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
    u32 stack_id = m.stack_trace_id();
    uptr caller_pc = 0;
    if (stack_id > 0)
      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
    // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
    // it as reachable, as we can't properly report its allocation stack anyway.
    if (caller_pc == 0 || (param->skip_linker_allocations &&
                           linker->containsAddress(caller_pc))) {
      m.set_tag(kReachable);
      param->frontier->push_back(chunk);
    }
  }
}

// Handles dynamically allocated TLS blocks by treating all chunks allocated
// from ld-linux.so as reachable.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
void ProcessPlatformSpecificAllocations(Frontier *frontier) {
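  // Building the reverse map walks the entire stack depot, so construct it
  // once per leak check and share it across all chunk callbacks.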
  StackDepotReverseMap stack_depot_reverse_map;
  ProcessPlatformAllocParam arg;
  arg.frontier = frontier;
  arg.stack_depot_reverse_map = &stack_depot_reverse_map;
  arg.skip_linker_allocations =
      flags()->use_tls && flags()->use_ld_allocations && linker != nullptr;
  ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
}

struct DoStopTheWorldParam {
  StopTheWorldCallback callback;
  void *argument;
};

static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
                                  void *data) {
  DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
  StopTheWorld(param->callback, param->argument);
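  // A nonzero return value tells dl_iterate_phdr() to stop iterating, so the
  // callback (and StopTheWorld) runs exactly once.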
  return 1;
}

// LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if one
// of the threads is frozen while holding the libdl lock, the tracer will hang
// in dl_iterate_phdr() forever.
// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between the
// tracer task and the thread that spawned it. Thus, if we run the tracer task
// while holding the libdl lock in the parent thread, we can safely reenter it
// in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr()
// callback in the parent thread.
void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
  DoStopTheWorldParam param = {callback, argument};
  dl_iterate_phdr(DoStopTheWorldCallback, &param);
}

} // namespace __lsan

#endif // CAN_SANITIZE_LEAKS && SANITIZER_LINUX