sanitizer_stoptheworld_linux_libcdep.cc revision ec29e26885a224627c2523422760e415c02cf225
1//===-- sanitizer_stoptheworld_linux_libcdep.cc ---------------------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// See sanitizer_stoptheworld.h for details.
11// This implementation was inspired by Markus Gutschke's linuxthreads.cc.
12//
13//===----------------------------------------------------------------------===//
14
15
16#include "sanitizer_platform.h"
17#if SANITIZER_LINUX
18
19#include "sanitizer_stoptheworld.h"
20
21#include <errno.h>
22#include <sched.h> // for clone
23#include <stddef.h>
24#include <sys/prctl.h> // for PR_* definitions
25#include <sys/ptrace.h> // for PTRACE_* definitions
26#include <sys/types.h> // for pid_t
27#if SANITIZER_ANDROID && defined(__arm__)
28# include <linux/user.h>  // for pt_regs
29#else
30# include <sys/user.h>  // for user_regs_struct
31#endif
32#include <sys/wait.h> // for signal-related stuff
33
34#include "sanitizer_common.h"
35#include "sanitizer_libc.h"
36#include "sanitizer_linux.h"
37#include "sanitizer_mutex.h"
38#include "sanitizer_placement_new.h"
39
40// This module works by spawning a Linux task which then attaches to every
41// thread in the caller process with ptrace. This suspends the threads, and
42// PTRACE_GETREGS can then be used to obtain their register state. The callback
43// supplied to StopTheWorld() is run in the tracer task while the threads are
44// suspended.
45// The tracer task must be placed in a different thread group for ptrace to
46// work, so it cannot be spawned as a pthread. Instead, we use the low-level
47// clone() interface (we want to share the address space with the caller
48// process, so we prefer clone() over fork()).
49//
50// We avoid the use of libc for two reasons:
// 1. calling a library function while threads are suspended could cause a
// deadlock, if one of the threads happens to be holding a libc lock;
53// 2. it's generally not safe to call libc functions from the tracer task,
54// because clone() does not set up a thread-local storage for it. Any
55// thread-local variables used by libc will be shared between the tracer task
56// and the thread which spawned it.
57//
58// We deal with this by replacing libc calls with calls to our own
59// implementations defined in sanitizer_libc.h and sanitizer_linux.h. However,
60// there are still some libc functions which are used here:
61//
62// * All of the system calls ultimately go through the libc syscall() function.
63// We're operating under the assumption that syscall()'s implementation does
64// not acquire any locks or use any thread-local data (except for the errno
65// variable, which we handle separately).
66//
67// * We lack custom implementations of sigfillset() and sigaction(), so we use
68// the libc versions instead. The same assumptions as above apply.
69//
70// * It is safe to call libc functions before the cloned thread is spawned or
71// after it has exited. The following functions are used in this manner:
72// sigdelset()
73// sigprocmask()
74// clone()
75
// SuspendedThreadID must be layout-compatible with pid_t, since thread IDs
// are passed straight to ptrace()/waitpid() below.
COMPILER_CHECK(sizeof(SuspendedThreadID) == sizeof(pid_t));
77
78namespace __sanitizer {
// This class handles thread suspending/unsuspending in the tracer thread.
class ThreadSuspender {
 public:
  // |pid| is the process whose threads will be suspended; must be >= 0.
  explicit ThreadSuspender(pid_t pid)
    : pid_(pid) {
      CHECK_GE(pid, 0);
    }
  // Attaches to every thread of pid_, rescanning until no new threads appear.
  // On failure to read the thread list, detaches again and returns false.
  bool SuspendAllThreads();
  // Detaches from all suspended threads, letting them resume.
  void ResumeAllThreads();
  // Kills all suspended threads (used from the SIGABRT path of the tracer's
  // signal handler, when resuming them is pointless).
  void KillAllThreads();
  // Threads successfully attached so far; meaningful after SuspendAllThreads.
  SuspendedThreadsList &suspended_threads_list() {
    return suspended_threads_list_;
  }
 private:
  SuspendedThreadsList suspended_threads_list_;
  pid_t pid_;
  // Attaches to one thread; returns true iff the thread was newly suspended.
  bool SuspendThread(SuspendedThreadID thread_id);
};
97
98bool ThreadSuspender::SuspendThread(SuspendedThreadID thread_id) {
99  // Are we already attached to this thread?
100  // Currently this check takes linear time, however the number of threads is
101  // usually small.
102  if (suspended_threads_list_.Contains(thread_id))
103    return false;
104  int pterrno;
105  if (internal_iserror(internal_ptrace(PTRACE_ATTACH, thread_id, NULL, NULL),
106                       &pterrno)) {
107    // Either the thread is dead, or something prevented us from attaching.
108    // Log this event and move on.
109    Report("Could not attach to thread %d (errno %d).\n", thread_id, pterrno);
110    return false;
111  } else {
112    if (SanitizerVerbosity > 0)
113      Report("Attached to thread %d.\n", thread_id);
114    // The thread is not guaranteed to stop before ptrace returns, so we must
115    // wait on it.
116    uptr waitpid_status;
117    HANDLE_EINTR(waitpid_status, internal_waitpid(thread_id, NULL, __WALL));
118    int wperrno;
119    if (internal_iserror(waitpid_status, &wperrno)) {
120      // Got a ECHILD error. I don't think this situation is possible, but it
121      // doesn't hurt to report it.
122      Report("Waiting on thread %d failed, detaching (errno %d).\n", thread_id,
123             wperrno);
124      internal_ptrace(PTRACE_DETACH, thread_id, NULL, NULL);
125      return false;
126    }
127    suspended_threads_list_.Append(thread_id);
128    return true;
129  }
130}
131
132void ThreadSuspender::ResumeAllThreads() {
133  for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++) {
134    pid_t tid = suspended_threads_list_.GetThreadID(i);
135    int pterrno;
136    if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, NULL, NULL),
137                          &pterrno)) {
138      if (SanitizerVerbosity > 0)
139        Report("Detached from thread %d.\n", tid);
140    } else {
141      // Either the thread is dead, or we are already detached.
142      // The latter case is possible, for instance, if this function was called
143      // from a signal handler.
144      Report("Could not detach from thread %d (errno %d).\n", tid, pterrno);
145    }
146  }
147}
148
149void ThreadSuspender::KillAllThreads() {
150  for (uptr i = 0; i < suspended_threads_list_.thread_count(); i++)
151    internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i),
152                    NULL, NULL);
153}
154
155bool ThreadSuspender::SuspendAllThreads() {
156  ThreadLister thread_lister(pid_);
157  bool added_threads;
158  do {
159    // Run through the directory entries once.
160    added_threads = false;
161    pid_t tid = thread_lister.GetNextTID();
162    while (tid >= 0) {
163      if (SuspendThread(tid))
164        added_threads = true;
165      tid = thread_lister.GetNextTID();
166    }
167    if (thread_lister.error()) {
168      // Detach threads and fail.
169      ResumeAllThreads();
170      return false;
171    }
172    thread_lister.Reset();
173  } while (added_threads);
174  return true;
175}
176
// Pointer to the ThreadSuspender instance for use in signal handler.
// Set by TracerThread while suspension is in progress, NULL otherwise.
static ThreadSuspender *thread_suspender_instance = NULL;

// Signals that should not be blocked (this is used in the parent thread as well
// as the tracer thread). For these fatal signals the tracer installs its own
// handler and the parent installs SIG_DFL.
static const int kUnblockedSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV,
                                         SIGBUS, SIGXCPU, SIGXFSZ };
184
// Structure for passing arguments into the tracer thread.
struct TracerThreadArgument {
  // User callback to run while the world is stopped, and its opaque argument.
  StopTheWorldCallback callback;
  void *callback_argument;
  // The tracer thread waits on this mutex until the parent finishes its
  // preparations (in particular, setting ptrace permissions).
  BlockingMutex mutex;
};
193
194// Signal handler to wake up suspended threads when the tracer thread dies.
195void TracerThreadSignalHandler(int signum, siginfo_t *siginfo, void *) {
196  if (thread_suspender_instance != NULL) {
197    if (signum == SIGABRT)
198      thread_suspender_instance->KillAllThreads();
199    else
200      thread_suspender_instance->ResumeAllThreads();
201  }
202  internal__exit((signum == SIGABRT) ? 1 : 2);
203}
204
// Size of alternative stack for signal handlers in the tracer thread
// (installed via sigaltstack in TracerThread below).
static const int kHandlerStackSize = 4096;
207
// This function will be run as a cloned task. It suspends every thread of the
// parent process, runs the user callback on the resulting thread list, then
// resumes the threads. Returns the task's exit code: 0 on success, 3 if
// suspension failed.
static int TracerThread(void* argument) {
  TracerThreadArgument *tracer_thread_argument =
      (TracerThreadArgument *)argument;

  // Wait for the parent thread to finish preparations.
  tracer_thread_argument->mutex.Lock();
  tracer_thread_argument->mutex.Unlock();

  // The tracer was clone()d by the process being suspended, so the target
  // pid is our parent's pid.
  ThreadSuspender thread_suspender(internal_getppid());
  // Global pointer for the signal handler.
  thread_suspender_instance = &thread_suspender;

  // Alternate stack for signal handling.
  InternalScopedBuffer<char> handler_stack_memory(kHandlerStackSize);
  struct sigaltstack handler_stack;
  internal_memset(&handler_stack, 0, sizeof(handler_stack));
  handler_stack.ss_sp = handler_stack_memory.data();
  handler_stack.ss_size = kHandlerStackSize;
  internal_sigaltstack(&handler_stack, NULL);

  // Install our handler for fatal signals. Other signals should be blocked by
  // the mask we inherited from the caller thread.
  for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
       signal_index++) {
    struct sigaction new_sigaction;
    internal_memset(&new_sigaction, 0, sizeof(new_sigaction));
    new_sigaction.sa_sigaction = TracerThreadSignalHandler;
    // SA_ONSTACK: run the handler on the alternate stack installed above.
    new_sigaction.sa_flags = SA_ONSTACK | SA_SIGINFO;
    sigfillset(&new_sigaction.sa_mask);
    sigaction(kUnblockedSignals[signal_index], &new_sigaction, NULL);
  }

  int exit_code = 0;
  if (!thread_suspender.SuspendAllThreads()) {
    Report("Failed suspending threads.\n");
    exit_code = 3;
  } else {
    // The world is stopped; it is now safe to run the callback.
    tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
                                     tracer_thread_argument->callback_argument);
    thread_suspender.ResumeAllThreads();
    exit_code = 0;
  }
  // Clear the global so the signal handler no longer touches the suspender,
  // and tear down the alternate signal stack before returning.
  thread_suspender_instance = NULL;
  handler_stack.ss_flags = SS_DISABLE;
  internal_sigaltstack(&handler_stack, NULL);
  return exit_code;
}
256
// RAII wrapper that maps a stack for the tracer task plus a one-page guard
// region at the low end of the mapping, and unmaps everything on destruction.
class ScopedStackSpaceWithGuard {
 public:
  explicit ScopedStackSpaceWithGuard(uptr stack_size) {
    stack_size_ = stack_size;
    guard_size_ = GetPageSizeCached();
    // FIXME: Omitting MAP_STACK here works in current kernels but might break
    // in the future.
    guard_start_ = (uptr)MmapOrDie(stack_size_ + guard_size_,
                                   "ScopedStackWithGuard");
    // Protect the guard region at the start of the mapping; the CHECK verifies
    // the protected range begins exactly at guard_start_. (Exact Mprotect
    // semantics are defined in sanitizer_common -- presumably it makes the
    // range inaccessible.)
    CHECK_EQ(guard_start_, (uptr)Mprotect((uptr)guard_start_, guard_size_));
  }
  ~ScopedStackSpaceWithGuard() {
    UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);
  }
  // Highest address of the mapping. This is what clone() takes as the child
  // stack pointer: the stack grows downward, toward the guard region.
  void *Bottom() const {
    return (void *)(guard_start_ + stack_size_ + guard_size_);
  }

 private:
  uptr stack_size_;
  uptr guard_size_;
  uptr guard_start_;
};
280
281NOINLINE static void WipeStack() {
282  char arr[256];
283  internal_memset(arr, 0, sizeof(arr));
284}
285
// Signal state saved by StopTheWorld() and restored when it finishes.
// NOTE(review): these appear to be file-scope (rather than locals) to keep
// sigaction()'s oldact output off the stack -- see the WipeStack comment in
// StopTheWorld; confirm before relying on this.
static sigset_t blocked_sigset;
static sigset_t old_sigset;
static struct sigaction old_sigactions[ARRAY_SIZE(kUnblockedSignals)];
289
// Suspends all threads of the current process by spawning a tracer task that
// ptrace-attaches to them, runs |callback| on the suspended-thread list in
// that task, then resumes the threads and restores the caller's signal
// configuration. See the file-level comment for the libc-usage constraints.
void StopTheWorld(StopTheWorldCallback callback, void *argument) {
  // Glibc's sigaction() has a side-effect where it copies garbage stack values
  // into oldact, which can cause false negatives in LSan. As a quick workaround
  // we zero some stack space here.
  WipeStack();
  // Block all signals that can be blocked safely, and install default handlers
  // for the remaining signals.
  // We cannot allow user-defined handlers to run while the ThreadSuspender
  // thread is active, because they could conceivably call some libc functions
  // which modify errno (which is shared between the two threads).
  sigfillset(&blocked_sigset);
  for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
       signal_index++) {
    // Remove the signal from the set of blocked signals.
    sigdelset(&blocked_sigset, kUnblockedSignals[signal_index]);
    // Install the default handler, saving the old one for restoration below.
    struct sigaction new_sigaction;
    internal_memset(&new_sigaction, 0, sizeof(new_sigaction));
    new_sigaction.sa_handler = SIG_DFL;
    sigfillset(&new_sigaction.sa_mask);
    sigaction(kUnblockedSignals[signal_index], &new_sigaction,
                    &old_sigactions[signal_index]);
  }
  int sigprocmask_status = sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
  CHECK_EQ(sigprocmask_status, 0); // sigprocmask should never fail
  // Make this process dumpable. Processes that are not dumpable cannot be
  // attached to.
  int process_was_dumpable = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
  if (!process_was_dumpable)
    internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
  // Prepare the arguments for TracerThread.
  struct TracerThreadArgument tracer_thread_argument;
  tracer_thread_argument.callback = callback;
  tracer_thread_argument.callback_argument = argument;
  const uptr kTracerStackSize = 2 * 1024 * 1024;
  ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);
  // Block the execution of TracerThread until after we have set ptrace
  // permissions.
  tracer_thread_argument.mutex.Lock();
  // CLONE_UNTRACED keeps the tracer task out of any debugger tracing us;
  // sharing VM/FS/files lets it inspect our threads without fork overhead.
  pid_t tracer_pid = clone(TracerThread, tracer_stack.Bottom(),
                           CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
                           &tracer_thread_argument);
  if (tracer_pid < 0) {
    Report("Failed spawning a tracer thread (errno %d).\n", errno);
    tracer_thread_argument.mutex.Unlock();
  } else {
    // On some systems we have to explicitly declare that we want to be traced
    // by the tracer thread.
#ifdef PR_SET_PTRACER
    internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
#endif
    // Allow the tracer thread to start.
    tracer_thread_argument.mutex.Unlock();
    // Since errno is shared between this thread and the tracer thread, we
    // must avoid using errno while the tracer thread is running.
    // At this point, any signal will either be blocked or kill us, so waitpid
    // should never return (and set errno) while the tracer thread is alive.
    uptr waitpid_status = internal_waitpid(tracer_pid, NULL, __WALL);
    int wperrno;
    if (internal_iserror(waitpid_status, &wperrno))
      Report("Waiting on the tracer thread failed (errno %d).\n", wperrno);
  }
  // Restore the dumpable flag.
  if (!process_was_dumpable)
    internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
  // Restore the signal handlers.
  for (uptr signal_index = 0; signal_index < ARRAY_SIZE(kUnblockedSignals);
       signal_index++) {
    sigaction(kUnblockedSignals[signal_index],
              &old_sigactions[signal_index], NULL);
  }
  // Restore the original signal mask saved above.
  sigprocmask(SIG_SETMASK, &old_sigset, &old_sigset);
}
363
// Platform-specific methods from SuspendedThreadsList.
// Per-architecture selection of the structure filled in by PTRACE_GETREGS
// (regs_struct) and the name of its stack-pointer member (REG_SP).
#if SANITIZER_ANDROID && defined(__arm__)
typedef pt_regs regs_struct;
#define REG_SP ARM_sp

#elif SANITIZER_LINUX && defined(__arm__)
typedef user_regs regs_struct;
#define REG_SP uregs[13]

#elif defined(__i386__) || defined(__x86_64__)
typedef user_regs_struct regs_struct;
#if defined(__i386__)
#define REG_SP esp
#else
#define REG_SP rsp
#endif

#elif defined(__powerpc__) || defined(__powerpc64__)
typedef pt_regs regs_struct;
#define REG_SP gpr[PT_R1]

#elif defined(__mips__)
typedef struct user regs_struct;
#define REG_SP regs[EF_REG29]

#else
#error "Unsupported architecture"
#endif // SANITIZER_ANDROID && defined(__arm__)
392
393int SuspendedThreadsList::GetRegistersAndSP(uptr index,
394                                            uptr *buffer,
395                                            uptr *sp) const {
396  pid_t tid = GetThreadID(index);
397  regs_struct regs;
398  int pterrno;
399  if (internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, NULL, &regs),
400                       &pterrno)) {
401    Report("Could not get registers from thread %d (errno %d).\n",
402           tid, pterrno);
403    return -1;
404  }
405
406  *sp = regs.REG_SP;
407  internal_memcpy(buffer, &regs, sizeof(regs));
408  return 0;
409}
410
411uptr SuspendedThreadsList::RegisterCount() {
412  return sizeof(regs_struct) / sizeof(uptr);
413}
414}  // namespace __sanitizer
415
416#endif  // SANITIZER_LINUX
417