backtrace_test.cpp revision 23f750b068ddf5b20fb5d153a798b22562368992
1/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define _GNU_SOURCE 1
18#include <dirent.h>
19#include <errno.h>
20#include <inttypes.h>
21#include <pthread.h>
22#include <signal.h>
23#include <stdint.h>
24#include <stdio.h>
25#include <stdlib.h>
26#include <string.h>
27#include <sys/ptrace.h>
28#include <sys/types.h>
29#include <sys/wait.h>
30#include <time.h>
31#include <unistd.h>
32
33#include <algorithm>
34#include <memory>
35#include <string>
36#include <vector>
37
38#include <backtrace/Backtrace.h>
39#include <backtrace/BacktraceMap.h>
40
41#include <cutils/atomic.h>
42#include <cutils/threads.h>
43
44#include <gtest/gtest.h>
45
46// For the THREAD_SIGNAL definition.
47#include "BacktraceCurrent.h"
48#include "thread_utils.h"
49
50// Number of microseconds per milliseconds.
51#define US_PER_MSEC             1000
52
53// Number of nanoseconds in a second.
54#define NS_PER_SEC              1000000000ULL
55
56// Number of simultaneous dumping operations to perform.
57#define NUM_THREADS  40
58
59// Number of simultaneous threads running in our forked process.
60#define NUM_PTRACE_THREADS 5
61
// Per-thread bookkeeping shared between a test thread and the test body.
struct thread_t {
  pid_t tid;           // Kernel thread id; set by the thread itself via gettid().
  int32_t state;       // Handshake flag, accessed with android_atomic_* helpers.
  pthread_t threadId;  // pthread handle of the created thread.
  void* data;          // Test-specific payload (e.g. the read-test buffer).
};
68
// State shared between a dumper thread and the test controlling it.
struct dump_thread_t {
  thread_t thread;       // The dumper thread's own bookkeeping (tid of the target lives here).
  Backtrace* backtrace;  // Unwind result produced by the dumper; freed by the test body.
  int32_t* now;          // Shared "go" flag: dumping starts when it becomes non-zero.
  int32_t done;          // Set to 1 (release store) once the dump has completed.
};
75
extern "C" {
// Prototypes for functions in the test library.
// Calls through four nested frames (test_level_one..test_level_four) and then
// invokes the callback with the supplied argument; the tests expect a
// non-zero return value.
int test_level_one(int, int, int, int, void (*)(void*), void*);

// Recurses the given number of times before invoking the callback.
int test_recursive_call(int, void (*)(void*), void*);
}
82
83uint64_t NanoTime() {
84  struct timespec t = { 0, 0 };
85  clock_gettime(CLOCK_MONOTONIC, &t);
86  return static_cast<uint64_t>(t.tv_sec * NS_PER_SEC + t.tv_nsec);
87}
88
89std::string DumpFrames(Backtrace* backtrace) {
90  if (backtrace->NumFrames() == 0) {
91    return "   No frames to dump.\n";
92  }
93
94  std::string frame;
95  for (size_t i = 0; i < backtrace->NumFrames(); i++) {
96    frame += "   " + backtrace->FormatFrameData(i) + '\n';
97  }
98  return frame;
99}
100
// Spins until |pid| reaches a ptrace stop, i.e. until PTRACE_GETSIGINFO
// succeeds. EINTR/ESRCH are treated as "not stopped yet"; any other ptrace
// failure terminates the wait immediately. Gives up after about one second.
void WaitForStop(pid_t pid) {
  uint64_t start = NanoTime();

  siginfo_t si;
  while (ptrace(PTRACE_GETSIGINFO, pid, 0, &si) < 0 && (errno == EINTR || errno == ESRCH)) {
    if ((NanoTime() - start) > NS_PER_SEC) {
      printf("The process did not get to a stopping point in 1 second.\n");
      break;
    }
    usleep(US_PER_MSEC);
  }
}
113
114bool ReadyLevelBacktrace(Backtrace* backtrace) {
115  // See if test_level_four is in the backtrace.
116  bool found = false;
117  for (Backtrace::const_iterator it = backtrace->begin(); it != backtrace->end(); ++it) {
118    if (it->func_name == "test_level_four") {
119      found = true;
120      break;
121    }
122  }
123
124  return found;
125}
126
// Verifies a backtrace of a thread parked in test_level_four(): the four
// test_level_* frames must appear in innermost-to-outermost order. Uses
// gtest assertions and appends a full frame dump to any failure message.
void VerifyLevelDump(Backtrace* backtrace) {
  ASSERT_GT(backtrace->NumFrames(), static_cast<size_t>(0))
    << DumpFrames(backtrace);
  ASSERT_LT(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
    << DumpFrames(backtrace);

  // Look through the frames starting at the highest to find the
  // frame we want.
  size_t frame_num = 0;
  for (size_t i = backtrace->NumFrames()-1; i > 2; i--) {
    if (backtrace->GetFrame(i)->func_name == "test_level_one") {
      frame_num = i;
      break;
    }
  }
  // frame_num stays 0 when test_level_one was not found; it must also be at
  // least 3 so the three frames below it exist.
  ASSERT_LT(static_cast<size_t>(0), frame_num) << DumpFrames(backtrace);
  ASSERT_LE(static_cast<size_t>(3), frame_num) << DumpFrames(backtrace);

  ASSERT_EQ(backtrace->GetFrame(frame_num)->func_name, "test_level_one")
    << DumpFrames(backtrace);
  ASSERT_EQ(backtrace->GetFrame(frame_num-1)->func_name, "test_level_two")
    << DumpFrames(backtrace);
  ASSERT_EQ(backtrace->GetFrame(frame_num-2)->func_name, "test_level_three")
    << DumpFrames(backtrace);
  ASSERT_EQ(backtrace->GetFrame(frame_num-3)->func_name, "test_level_four")
    << DumpFrames(backtrace);
}
154
155void VerifyLevelBacktrace(void*) {
156  std::unique_ptr<Backtrace> backtrace(
157      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
158  ASSERT_TRUE(backtrace.get() != nullptr);
159  ASSERT_TRUE(backtrace->Unwind(0));
160
161  VerifyLevelDump(backtrace.get());
162}
163
164bool ReadyMaxBacktrace(Backtrace* backtrace) {
165  return (backtrace->NumFrames() == MAX_BACKTRACE_FRAMES);
166}
167
// Verifies a backtrace of the deeply recursive thread: it must be capped at
// exactly MAX_BACKTRACE_FRAMES and the last reported frame must still be
// inside test_recursive_call.
void VerifyMaxDump(Backtrace* backtrace) {
  ASSERT_EQ(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
    << DumpFrames(backtrace);
  // Verify that the last frame is our recursive call.
  ASSERT_EQ(backtrace->GetFrame(MAX_BACKTRACE_FRAMES-1)->func_name, "test_recursive_call")
    << DumpFrames(backtrace);
}
175
176void VerifyMaxBacktrace(void*) {
177  std::unique_ptr<Backtrace> backtrace(
178      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
179  ASSERT_TRUE(backtrace.get() != nullptr);
180  ASSERT_TRUE(backtrace->Unwind(0));
181
182  VerifyMaxDump(backtrace.get());
183}
184
// Callback run at the bottom of the test call chain: publishes state=1 so
// the controlling thread knows this thread is parked, then spins until the
// controller clears state back to 0.
void ThreadSetState(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);
  android_atomic_acquire_store(1, &thread->state);
  volatile int i = 0;
  // NOTE(review): this spin reads thread->state with a plain (non-atomic)
  // load, unlike WaitForNonZero which uses android_atomic_acquire_load —
  // confirm the compiler re-reads the field each iteration.
  while (thread->state) {
    i++;
  }
}
193
194void VerifyThreadTest(pid_t tid, void (*VerifyFunc)(Backtrace*)) {
195  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), tid));
196  ASSERT_TRUE(backtrace.get() != nullptr);
197  ASSERT_TRUE(backtrace->Unwind(0));
198
199  VerifyFunc(backtrace.get());
200}
201
202bool WaitForNonZero(int32_t* value, uint64_t seconds) {
203  uint64_t start = NanoTime();
204  do {
205    if (android_atomic_acquire_load(value)) {
206      return true;
207    }
208  } while ((NanoTime() - start) < seconds * NS_PER_SEC);
209  return false;
210}
211
212TEST(libbacktrace, local_no_unwind_frames) {
213  // Verify that a local unwind does not include any frames within
214  // libunwind or libbacktrace.
215  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), getpid()));
216  ASSERT_TRUE(backtrace.get() != nullptr);
217  ASSERT_TRUE(backtrace->Unwind(0));
218
219  ASSERT_TRUE(backtrace->NumFrames() != 0);
220  for (const auto& frame : *backtrace ) {
221    if (BacktraceMap::IsValid(frame.map)) {
222      const std::string name = basename(frame.map.name.c_str());
223      ASSERT_TRUE(name != "libunwind.so" && name != "libbacktrace.so")
224        << DumpFrames(backtrace.get());
225    }
226    break;
227  }
228}
229
// Runs the four-level call chain on the current thread and verifies the
// resulting local unwind from within the innermost frame.
TEST(libbacktrace, local_trace) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelBacktrace, nullptr), 0);
}
233
// Checks that unwinding with num_ignore_frames = 1 and = 2 drops exactly one
// and two frames relative to the full unwind, and that the surviving frames
// line up (same pc/sp/stack_size at shifted indices). When |cur_proc| is
// non-null, frame comparison only begins after that function name has been
// seen in the ign2 trace, since frames above it may differ between unwinds.
void VerifyIgnoreFrames(
    Backtrace* bt_all, Backtrace* bt_ign1,
    Backtrace* bt_ign2, const char* cur_proc) {
  EXPECT_EQ(bt_all->NumFrames(), bt_ign1->NumFrames() + 1)
    << "All backtrace:\n" << DumpFrames(bt_all) << "Ignore 1 backtrace:\n" << DumpFrames(bt_ign1);
  EXPECT_EQ(bt_all->NumFrames(), bt_ign2->NumFrames() + 2)
    << "All backtrace:\n" << DumpFrames(bt_all) << "Ignore 2 backtrace:\n" << DumpFrames(bt_ign2);

  // Check all of the frames are the same > the current frame.
  bool check = (cur_proc == nullptr);
  for (size_t i = 0; i < bt_ign2->NumFrames(); i++) {
    if (check) {
      EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_ign1->GetFrame(i+1)->pc);
      EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_ign1->GetFrame(i+1)->sp);
      EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_ign1->GetFrame(i+1)->stack_size);

      EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_all->GetFrame(i+2)->pc);
      EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_all->GetFrame(i+2)->sp);
      EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_all->GetFrame(i+2)->stack_size);
    }
    if (!check && bt_ign2->GetFrame(i)->func_name == cur_proc) {
      check = true;
    }
  }
}
259
260void VerifyLevelIgnoreFrames(void*) {
261  std::unique_ptr<Backtrace> all(
262      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
263  ASSERT_TRUE(all.get() != nullptr);
264  ASSERT_TRUE(all->Unwind(0));
265
266  std::unique_ptr<Backtrace> ign1(
267      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
268  ASSERT_TRUE(ign1.get() != nullptr);
269  ASSERT_TRUE(ign1->Unwind(1));
270
271  std::unique_ptr<Backtrace> ign2(
272      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
273  ASSERT_TRUE(ign2.get() != nullptr);
274  ASSERT_TRUE(ign2->Unwind(2));
275
276  VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), "VerifyLevelIgnoreFrames");
277}
278
// Same as local_trace, but verifies Unwind()'s ignore-frames behavior.
TEST(libbacktrace, local_trace_ignore_frames) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelIgnoreFrames, nullptr), 0);
}
282
// Recurses past the frame limit and verifies the local unwind is capped.
TEST(libbacktrace, local_max_trace) {
  ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxBacktrace, nullptr), 0);
}
286
// Repeatedly ptrace-attaches to tid (or pid when tid < 0), unwinds pid/tid
// (optionally through a freshly created shared BacktraceMap), and runs
// VerifyFunc once ReadyFunc reports that the target has reached the
// interesting state. Detaches between attempts so the target keeps running.
// Fails with the last non-ready dump if nothing verified within 5 seconds.
void VerifyProcTest(pid_t pid, pid_t tid, bool share_map,
                    bool (*ReadyFunc)(Backtrace*),
                    void (*VerifyFunc)(Backtrace*)) {
  pid_t ptrace_tid;
  if (tid < 0) {
    ptrace_tid = pid;
  } else {
    ptrace_tid = tid;
  }
  uint64_t start = NanoTime();
  bool verified = false;
  std::string last_dump;
  do {
    usleep(US_PER_MSEC);
    if (ptrace(PTRACE_ATTACH, ptrace_tid, 0, 0) == 0) {
      // Wait for the process to get to a stopping point.
      WaitForStop(ptrace_tid);

      std::unique_ptr<BacktraceMap> map;
      if (share_map) {
        map.reset(BacktraceMap::Create(pid));
      }
      std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, tid, map.get()));
      ASSERT_TRUE(backtrace.get() != nullptr);
      ASSERT_TRUE(backtrace->Unwind(0));
      if (ReadyFunc(backtrace.get())) {
        VerifyFunc(backtrace.get());
        verified = true;
      } else {
        last_dump = DumpFrames(backtrace.get());
      }

      ASSERT_TRUE(ptrace(PTRACE_DETACH, ptrace_tid, 0, 0) == 0);
    }
    // If 5 seconds have passed, then we are done.
  } while (!verified && (NanoTime() - start) <= 5 * NS_PER_SEC);
  ASSERT_TRUE(verified) << "Last backtrace:\n" << last_dump;
}
325
// Forks a child parked in the four-level call chain and verifies a remote
// unwind of its main thread, then kills and reaps the child.
TEST(libbacktrace, ptrace_trace) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyLevelBacktrace, VerifyLevelDump);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
338
// Same as ptrace_trace, but share_map=true so the unwind goes through an
// externally created BacktraceMap.
TEST(libbacktrace, ptrace_trace_shared_map) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }

  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, true, ReadyLevelBacktrace, VerifyLevelDump);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
352
// Forks a child that recurses past the frame limit and verifies the remote
// unwind is capped at MAX_BACKTRACE_FRAMES.
TEST(libbacktrace, ptrace_max_trace) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyMaxBacktrace, VerifyMaxDump);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
365
366void VerifyProcessIgnoreFrames(Backtrace* bt_all) {
367  std::unique_ptr<Backtrace> ign1(Backtrace::Create(bt_all->Pid(), BACKTRACE_CURRENT_THREAD));
368  ASSERT_TRUE(ign1.get() != nullptr);
369  ASSERT_TRUE(ign1->Unwind(1));
370
371  std::unique_ptr<Backtrace> ign2(Backtrace::Create(bt_all->Pid(), BACKTRACE_CURRENT_THREAD));
372  ASSERT_TRUE(ign2.get() != nullptr);
373  ASSERT_TRUE(ign2->Unwind(2));
374
375  VerifyIgnoreFrames(bt_all, ign1.get(), ign2.get(), nullptr);
376}
377
// Forks a parked child and checks the ignore-frames behavior of remote
// unwinds against the full unwind.
TEST(libbacktrace, ptrace_ignore_frames) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyLevelBacktrace, VerifyProcessIgnoreFrames);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
390
391// Create a process with multiple threads and dump all of the threads.
392void* PtraceThreadLevelRun(void*) {
393  EXPECT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
394  return nullptr;
395}
396
397void GetThreads(pid_t pid, std::vector<pid_t>* threads) {
398  // Get the list of tasks.
399  char task_path[128];
400  snprintf(task_path, sizeof(task_path), "/proc/%d/task", pid);
401
402  DIR* tasks_dir = opendir(task_path);
403  ASSERT_TRUE(tasks_dir != nullptr);
404  struct dirent* entry;
405  while ((entry = readdir(tasks_dir)) != nullptr) {
406    char* end;
407    pid_t tid = strtoul(entry->d_name, &end, 10);
408    if (*end == '\0') {
409      threads->push_back(tid);
410    }
411  }
412  closedir(tasks_dir);
413}
414
415TEST(libbacktrace, ptrace_threads) {
416  pid_t pid;
417  if ((pid = fork()) == 0) {
418    for (size_t i = 0; i < NUM_PTRACE_THREADS; i++) {
419      pthread_attr_t attr;
420      pthread_attr_init(&attr);
421      pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
422
423      pthread_t thread;
424      ASSERT_TRUE(pthread_create(&thread, &attr, PtraceThreadLevelRun, nullptr) == 0);
425    }
426    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
427    _exit(1);
428  }
429
430  // Check to see that all of the threads are running before unwinding.
431  std::vector<pid_t> threads;
432  uint64_t start = NanoTime();
433  do {
434    usleep(US_PER_MSEC);
435    threads.clear();
436    GetThreads(pid, &threads);
437  } while ((threads.size() != NUM_PTRACE_THREADS + 1) &&
438      ((NanoTime() - start) <= 5 * NS_PER_SEC));
439  ASSERT_EQ(threads.size(), static_cast<size_t>(NUM_PTRACE_THREADS + 1));
440
441  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);
442  WaitForStop(pid);
443  for (std::vector<int>::const_iterator it = threads.begin(); it != threads.end(); ++it) {
444    // Skip the current forked process, we only care about the threads.
445    if (pid == *it) {
446      continue;
447    }
448    VerifyProcTest(pid, *it, false, ReadyLevelBacktrace, VerifyLevelDump);
449  }
450  ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
451
452  kill(pid, SIGKILL);
453  int status;
454  ASSERT_EQ(waitpid(pid, &status, 0), pid);
455}
456
457void VerifyLevelThread(void*) {
458  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), gettid()));
459  ASSERT_TRUE(backtrace.get() != nullptr);
460  ASSERT_TRUE(backtrace->Unwind(0));
461
462  VerifyLevelDump(backtrace.get());
463}
464
// Like local_trace, but the unwind addresses the thread by explicit tid.
TEST(libbacktrace, thread_current_level) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelThread, nullptr), 0);
}
468
469void VerifyMaxThread(void*) {
470  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), gettid()));
471  ASSERT_TRUE(backtrace.get() != nullptr);
472  ASSERT_TRUE(backtrace->Unwind(0));
473
474  VerifyMaxDump(backtrace.get());
475}
476
// Like local_max_trace, but the unwind addresses the thread by explicit tid.
TEST(libbacktrace, thread_current_max) {
  ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxThread, nullptr), 0);
}
480
481void* ThreadLevelRun(void* data) {
482  thread_t* thread = reinterpret_cast<thread_t*>(data);
483
484  thread->tid = gettid();
485  EXPECT_NE(test_level_one(1, 2, 3, 4, ThreadSetState, data), 0);
486  return nullptr;
487}
488
// Creates a thread parked in the test call chain, unwinds it from this
// thread (which uses the THREAD_SIGNAL mechanism for same-process unwinds),
// and verifies the signal action installed for the dump is restored.
TEST(libbacktrace, thread_level_trace) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  // Make sure that the thread signal used is not visible when compiled for
  // the target.
#if !defined(__GLIBC__)
  ASSERT_LT(THREAD_SIGNAL, SIGRTMIN);
#endif

  // Save the current signal action and make sure it is restored afterwards.
  struct sigaction cur_action;
  ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &cur_action) == 0);

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyLevelDump(backtrace.get());

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  // Verify that the old action was restored.
  struct sigaction new_action;
  ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &new_action) == 0);
  EXPECT_EQ(cur_action.sa_sigaction, new_action.sa_sigaction);
  // The SA_RESTORER flag gets set behind our back, so a direct comparison
  // doesn't work unless we mask the value off. Mips doesn't have this
  // flag, so skip this on that platform.
#if defined(SA_RESTORER)
  cur_action.sa_flags &= ~SA_RESTORER;
  new_action.sa_flags &= ~SA_RESTORER;
#elif defined(__GLIBC__)
  // Our host compiler doesn't appear to define this flag for some reason.
  cur_action.sa_flags &= ~0x04000000;
  new_action.sa_flags &= ~0x04000000;
#endif
  EXPECT_EQ(cur_action.sa_flags, new_action.sa_flags);
}
537
// Like local_trace_ignore_frames, but the three unwinds target another
// (parked) thread of this process; cur_proc is null because the parked
// thread's stack is stable, so every frame must match across unwinds.
TEST(libbacktrace, thread_ignore_frames) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  std::unique_ptr<Backtrace> all(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(all.get() != nullptr);
  ASSERT_TRUE(all->Unwind(0));

  std::unique_ptr<Backtrace> ign1(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(ign1.get() != nullptr);
  ASSERT_TRUE(ign1->Unwind(1));

  std::unique_ptr<Backtrace> ign2(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(ign2.get() != nullptr);
  ASSERT_TRUE(ign2->Unwind(2));

  VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), nullptr);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}
567
568void* ThreadMaxRun(void* data) {
569  thread_t* thread = reinterpret_cast<thread_t*>(data);
570
571  thread->tid = gettid();
572  EXPECT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, ThreadSetState, data), 0);
573  return nullptr;
574}
575
// Unwinds another thread of this process that has recursed past the frame
// limit; the dump must be capped at MAX_BACKTRACE_FRAMES.
TEST(libbacktrace, thread_max_trace) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadMaxRun, &thread_data) == 0);

  // Wait for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyMaxDump(backtrace.get());

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}
597
// Dumper thread body: busy-waits on the shared *now flag so all dumpers can
// be released simultaneously, then unwinds the target thread and publishes
// completion through done.
void* ThreadDump(void* data) {
  dump_thread_t* dump = reinterpret_cast<dump_thread_t*>(data);
  // Spin until the controlling test releases all dumpers at once.
  while (true) {
    if (android_atomic_acquire_load(dump->now)) {
      break;
    }
  }

  // The status of the actual unwind will be checked elsewhere.
  dump->backtrace = Backtrace::Create(getpid(), dump->thread.tid);
  dump->backtrace->Unwind(0);

  android_atomic_acquire_store(1, &dump->done);

  return nullptr;
}
614
// Stress test: NUM_THREADS runner threads are each unwound by a dedicated
// dumper thread, with all dumps released at the same instant.
TEST(libbacktrace, thread_multiple_dump) {
  // Dump NUM_THREADS simultaneously.
  std::vector<thread_t> runners(NUM_THREADS);
  std::vector<dump_thread_t> dumpers(NUM_THREADS);

  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  for (size_t i = 0; i < NUM_THREADS; i++) {
    // Launch the runners, they will spin in hard loops doing nothing.
    runners[i].tid = 0;
    runners[i].state = 0;
    ASSERT_TRUE(pthread_create(&runners[i].threadId, &attr, ThreadMaxRun, &runners[i]) == 0);
  }

  // Wait for tids to be set.
  for (std::vector<thread_t>::iterator it = runners.begin(); it != runners.end(); ++it) {
    ASSERT_TRUE(WaitForNonZero(&it->state, 30));
  }

  // Start all of the dumpers at once, they will spin until they are signalled
  // to begin their dump run.
  int32_t dump_now = 0;
  for (size_t i = 0; i < NUM_THREADS; i++) {
    dumpers[i].thread.tid = runners[i].tid;
    dumpers[i].thread.state = 0;
    dumpers[i].done = 0;
    dumpers[i].now = &dump_now;

    ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0);
  }

  // Start all of the dumpers going at once.
  android_atomic_acquire_store(1, &dump_now);

  for (size_t i = 0; i < NUM_THREADS; i++) {
    ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 30));

    // Tell the runner thread to exit its infinite loop.
    android_atomic_acquire_store(0, &runners[i].state);

    ASSERT_TRUE(dumpers[i].backtrace != nullptr);
    VerifyMaxDump(dumpers[i].backtrace);

    delete dumpers[i].backtrace;
    dumpers[i].backtrace = nullptr;
  }
}
663
// Stress test: NUM_THREADS dumper threads all unwind the SAME runner thread
// at the same instant.
TEST(libbacktrace, thread_multiple_dump_same_thread) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  thread_t runner;
  runner.tid = 0;
  runner.state = 0;
  ASSERT_TRUE(pthread_create(&runner.threadId, &attr, ThreadMaxRun, &runner) == 0);

  // Wait for tids to be set.
  ASSERT_TRUE(WaitForNonZero(&runner.state, 30));

  // Start all of the dumpers at once, they will spin until they are signalled
  // to begin their dump run.
  int32_t dump_now = 0;
  // Dump the same thread NUM_THREADS simultaneously.
  std::vector<dump_thread_t> dumpers(NUM_THREADS);
  for (size_t i = 0; i < NUM_THREADS; i++) {
    dumpers[i].thread.tid = runner.tid;
    dumpers[i].thread.state = 0;
    dumpers[i].done = 0;
    dumpers[i].now = &dump_now;

    ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0);
  }

  // Start all of the dumpers going at once.
  android_atomic_acquire_store(1, &dump_now);

  for (size_t i = 0; i < NUM_THREADS; i++) {
    ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 30));

    ASSERT_TRUE(dumpers[i].backtrace != nullptr);
    VerifyMaxDump(dumpers[i].backtrace);

    delete dumpers[i].backtrace;
    dumpers[i].backtrace = nullptr;
  }

  // Tell the runner thread to exit its infinite loop.
  android_atomic_acquire_store(0, &runner.state);
}
706
707// This test is for UnwindMaps that should share the same map cursor when
708// multiple maps are created for the current process at the same time.
709TEST(libbacktrace, simultaneous_maps) {
710  BacktraceMap* map1 = BacktraceMap::Create(getpid());
711  BacktraceMap* map2 = BacktraceMap::Create(getpid());
712  BacktraceMap* map3 = BacktraceMap::Create(getpid());
713
714  Backtrace* back1 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map1);
715  ASSERT_TRUE(back1 != nullptr);
716  EXPECT_TRUE(back1->Unwind(0));
717  delete back1;
718  delete map1;
719
720  Backtrace* back2 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map2);
721  ASSERT_TRUE(back2 != nullptr);
722  EXPECT_TRUE(back2->Unwind(0));
723  delete back2;
724  delete map2;
725
726  Backtrace* back3 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map3);
727  ASSERT_TRUE(back3 != nullptr);
728  EXPECT_TRUE(back3->Unwind(0));
729  delete back3;
730  delete map3;
731}
732
733TEST(libbacktrace, fillin_erases) {
734  BacktraceMap* back_map = BacktraceMap::Create(getpid());
735
736  backtrace_map_t map;
737
738  map.start = 1;
739  map.end = 3;
740  map.flags = 1;
741  map.name = "Initialized";
742  back_map->FillIn(0, &map);
743  delete back_map;
744
745  ASSERT_FALSE(BacktraceMap::IsValid(map));
746  ASSERT_EQ(static_cast<uintptr_t>(0), map.start);
747  ASSERT_EQ(static_cast<uintptr_t>(0), map.end);
748  ASSERT_EQ(0, map.flags);
749  ASSERT_EQ("", map.name);
750}
751
// Exercises FormatFrameData() through the four presentation cases: no map,
// map with empty name, map name with relative pc, and function name with and
// without a non-zero offset. The pc width differs between 32/64-bit builds.
TEST(libbacktrace, format_test) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);

  backtrace_frame_data_t frame;
  frame.num = 1;
  frame.pc = 2;
  frame.sp = 0;
  frame.stack_size = 0;
  frame.func_offset = 0;

  // Check no map set.
  frame.num = 1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000002  <unknown>",
#else
  EXPECT_EQ("#01 pc 00000002  <unknown>",
#endif
            backtrace->FormatFrameData(&frame));

  // Check map name empty, but exists.
  frame.map.start = 1;
  frame.map.end = 1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000001  <unknown>",
#else
  EXPECT_EQ("#01 pc 00000001  <unknown>",
#endif
            backtrace->FormatFrameData(&frame));


  // Check relative pc is set and map name is set.
  frame.pc = 0x12345679;
  frame.map.name = "MapFake";
  frame.map.start =  1;
  frame.map.end =  1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, but no func offset.
  frame.func_name = "ProcFake";
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake (ProcFake)",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake (ProcFake)",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, and func offset is non-zero.
  frame.func_offset = 645;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake (ProcFake+645)",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake (ProcFake+645)",
#endif
            backtrace->FormatFrameData(&frame));
}
813
// Minimal (start, end) record parsed from a /proc/<pid>/maps line.
struct map_test_t {
  uintptr_t start;
  uintptr_t end;
};

// Strict-weak ordering by start address, used to sort parsed map entries.
// Takes const references instead of copies; std::sort calls it identically.
bool map_sort(const map_test_t& i, const map_test_t& j) {
  return i.start < j.start;
}
822
// Parses /proc/<pid>/maps directly and checks that BacktraceMap::Create(pid)
// yields exactly the same (start, end) ranges in the same sorted order.
void VerifyMap(pid_t pid) {
  char buffer[4096];
  snprintf(buffer, sizeof(buffer), "/proc/%d/maps", pid);

  FILE* map_file = fopen(buffer, "r");
  ASSERT_TRUE(map_file != nullptr);
  std::vector<map_test_t> test_maps;
  while (fgets(buffer, sizeof(buffer), map_file)) {
    map_test_t map;
    ASSERT_EQ(2, sscanf(buffer, "%" SCNxPTR "-%" SCNxPTR " ", &map.start, &map.end));
    test_maps.push_back(map);
  }
  fclose(map_file);
  std::sort(test_maps.begin(), test_maps.end(), map_sort);

  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(pid));

  // Basic test that verifies that the map is in the expected order.
  std::vector<map_test_t>::const_iterator test_it = test_maps.begin();
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    ASSERT_TRUE(test_it != test_maps.end());
    ASSERT_EQ(test_it->start, it->start);
    ASSERT_EQ(test_it->end, it->end);
    ++test_it;
  }
  // Both iterations must be exhausted together: no extra entries either way.
  ASSERT_TRUE(test_it == test_maps.end());
}
850
// Forks a child that just spins, pauses it with ptrace, and checks that the
// remote BacktraceMap matches its /proc/<pid>/maps exactly.
TEST(libbacktrace, verify_map_remote) {
  pid_t pid;

  if ((pid = fork()) == 0) {
    while (true) {
    }
    _exit(0);  // Never reached; the child is killed below.
  }
  ASSERT_LT(0, pid);

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

  // Wait for the process to get to a stopping point.
  WaitForStop(pid);

  // The maps should match exactly since the forked process has been paused.
  VerifyMap(pid);

  ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);
}
874
// Thread body for the thread_read test: allocates two page-aligned pages,
// makes the second unreadable, fills the first with a 0..255 repeating
// pattern, publishes the buffer address, and waits (twice) on the state flag
// so the test can read the memory and then let this thread clean up.
// Returns (void*)-1 on any setup failure, nullptr on success.
void* ThreadReadTest(void* data) {
  thread_t* thread_data = reinterpret_cast<thread_t*>(data);

  thread_data->tid = gettid();

  // Create two map pages.
  // Mark the second page as not-readable.
  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  uint8_t* memory;
  if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
    return reinterpret_cast<void*>(-1);
  }

  if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
    return reinterpret_cast<void*>(-1);
  }

  // Set up a simple pattern in memory.
  for (size_t i = 0; i < pagesize; i++) {
    memory[i] = i;
  }

  thread_data->data = memory;

  // Tell the caller it's okay to start reading memory.
  android_atomic_acquire_store(1, &thread_data->state);

  // Loop waiting for the caller to finish reading the memory.
  // NOTE(review): plain (non-atomic) read of state in this spin, unlike the
  // acquire loads used elsewhere — confirm the compiler re-reads it.
  while (thread_data->state) {
  }

  // Re-enable read-write on the page so that we don't crash if we try
  // and access data on this page when freeing the memory.
  if (mprotect(&memory[pagesize], pagesize, PROT_READ | PROT_WRITE) != 0) {
    return reinterpret_cast<void*>(-1);
  }
  free(memory);

  android_atomic_acquire_store(1, &thread_data->state);

  return nullptr;
}
917
918void RunReadTest(Backtrace* backtrace, uintptr_t read_addr) {
919  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
920
921  // Create a page of data to use to do quick compares.
922  uint8_t* expected = new uint8_t[pagesize];
923  for (size_t i = 0; i < pagesize; i++) {
924    expected[i] = i;
925  }
926  uint8_t* data = new uint8_t[2*pagesize];
927  // Verify that we can only read one page worth of data.
928  size_t bytes_read = backtrace->Read(read_addr, data, 2 * pagesize);
929  ASSERT_EQ(pagesize, bytes_read);
930  ASSERT_TRUE(memcmp(data, expected, pagesize) == 0);
931
932  // Verify unaligned reads.
933  for (size_t i = 1; i < sizeof(word_t); i++) {
934    bytes_read = backtrace->Read(read_addr + i, data, 2 * sizeof(word_t));
935    ASSERT_EQ(2 * sizeof(word_t), bytes_read);
936    ASSERT_TRUE(memcmp(data, &expected[i], 2 * sizeof(word_t)) == 0)
937        << "Offset at " << i << " failed";
938  }
939  delete data;
940  delete expected;
941}
942
// Reads another thread's memory through Backtrace::Read(): the target thread
// builds the two-page pattern buffer, this test reads it, then the two sides
// hand-shake through thread_data.state so the buffer can be freed safely.
TEST(libbacktrace, thread_read) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  pthread_t thread;
  thread_t thread_data = { 0, 0, 0, nullptr };
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadReadTest, &thread_data) == 0);

  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);

  RunReadTest(backtrace.get(), reinterpret_cast<uintptr_t>(thread_data.data));

  // Let the thread tear down its buffer, then wait for it to confirm.
  android_atomic_acquire_store(0, &thread_data.state);

  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));
}
962
// Handshake globals shared with the forked child in the process_read test:
// the child stores the address of its test region in g_addr, then sets
// g_ready non-zero so the parent knows it may ptrace-read the region.
// volatile keeps the child's stores from being optimized away.
volatile uintptr_t g_ready = 0;
volatile uintptr_t g_addr = 0;
965
966void ForkedReadTest() {
967  // Create two map pages.
968  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
969  uint8_t* memory;
970  if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
971    perror("Failed to allocate memory\n");
972    exit(1);
973  }
974
975  // Mark the second page as not-readable.
976  if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
977    perror("Failed to mprotect memory\n");
978    exit(1);
979  }
980
981  // Set up a simple pattern in memory.
982  for (size_t i = 0; i < pagesize; i++) {
983    memory[i] = i;
984  }
985
986  g_addr = reinterpret_cast<uintptr_t>(memory);
987  g_ready = 1;
988
989  while (1) {
990    usleep(US_PER_MSEC);
991  }
992}
993
// Verifies that Backtrace::Read works against a different process: forks a
// child running ForkedReadTest, attaches with ptrace, reads the child's
// g_ready/g_addr globals remotely, and runs the read test on its memory.
// Retries for up to 5 seconds because the child needs time to set up.
TEST(libbacktrace, process_read) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ForkedReadTest();
    exit(0);
  }
  ASSERT_NE(-1, pid);

  bool test_executed = false;
  uint64_t start = NanoTime();
  while (1) {
    if (ptrace(PTRACE_ATTACH, pid, 0, 0) == 0) {
      // Child must be fully stopped before its memory can be read.
      WaitForStop(pid);

      std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));
      ASSERT_TRUE(backtrace.get() != nullptr);

      // Read the child's g_ready flag remotely; the parent and child have
      // the same address space layout for globals, so &g_ready in the
      // parent is the child's address of its own g_ready.
      uintptr_t read_addr;
      size_t bytes_read = backtrace->Read(reinterpret_cast<uintptr_t>(&g_ready),
                                          reinterpret_cast<uint8_t*>(&read_addr),
                                          sizeof(uintptr_t));
      ASSERT_EQ(sizeof(uintptr_t), bytes_read);
      if (read_addr) {
        // The forked process is ready to be read: fetch the address of its
        // test region from its g_addr.
        bytes_read = backtrace->Read(reinterpret_cast<uintptr_t>(&g_addr),
                                     reinterpret_cast<uint8_t*>(&read_addr),
                                     sizeof(uintptr_t));
        ASSERT_EQ(sizeof(uintptr_t), bytes_read);

        RunReadTest(backtrace.get(), read_addr);

        test_executed = true;
        // Break while still attached; the SIGKILL below ends the child.
        break;
      }
      // Not ready yet: detach so the child can keep running, then retry.
      ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
    }
    // Give up after 5 seconds; test_executed stays false and fails below.
    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }
  // Always reap the child, even on the timeout path.
  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  ASSERT_TRUE(test_executed);
}
1040
1041#if defined(ENABLE_PSS_TESTS)
1042#include "GetPss.h"
1043
1044#define MAX_LEAK_BYTES 32*1024UL
1045
1046void CheckForLeak(pid_t pid, pid_t tid) {
1047  // Do a few runs to get the PSS stable.
1048  for (size_t i = 0; i < 100; i++) {
1049    Backtrace* backtrace = Backtrace::Create(pid, tid);
1050    ASSERT_TRUE(backtrace != nullptr);
1051    ASSERT_TRUE(backtrace->Unwind(0));
1052    delete backtrace;
1053  }
1054  size_t stable_pss = GetPssBytes();
1055  ASSERT_TRUE(stable_pss != 0);
1056
1057  // Loop enough that even a small leak should be detectable.
1058  for (size_t i = 0; i < 4096; i++) {
1059    Backtrace* backtrace = Backtrace::Create(pid, tid);
1060    ASSERT_TRUE(backtrace != nullptr);
1061    ASSERT_TRUE(backtrace->Unwind(0));
1062    delete backtrace;
1063  }
1064  size_t new_pss = GetPssBytes();
1065  ASSERT_TRUE(new_pss != 0);
1066  size_t abs_diff = (new_pss > stable_pss) ? new_pss - stable_pss : stable_pss - new_pss;
1067  // As long as the new pss is within a certain amount, consider everything okay.
1068  ASSERT_LE(abs_diff, MAX_LEAK_BYTES);
1069}
1070
// Leak check for the simplest case: unwinding the current thread of the
// current process.
TEST(libbacktrace, check_for_leak_local) {
  CheckForLeak(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD);
}
1074
1075TEST(libbacktrace, check_for_leak_local_thread) {
1076  thread_t thread_data = { 0, 0, 0, nullptr };
1077  pthread_t thread;
1078  ASSERT_TRUE(pthread_create(&thread, nullptr, ThreadLevelRun, &thread_data) == 0);
1079
1080  // Wait up to 2 seconds for the tid to be set.
1081  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));
1082
1083  CheckForLeak(BACKTRACE_CURRENT_PROCESS, thread_data.tid);
1084
1085  // Tell the thread to exit its infinite loop.
1086  android_atomic_acquire_store(0, &thread_data.state);
1087
1088  ASSERT_TRUE(pthread_join(thread, nullptr) == 0);
1089}
1090
// Leak check when unwinding a different process: fork a spinning child,
// attach with ptrace, run the leak loop, then detach and kill the child.
TEST(libbacktrace, check_for_leak_remote) {
  pid_t pid;

  if ((pid = fork()) == 0) {
    // Child: spin until killed by the parent below.
    while (true) {
    }
    _exit(0);
  }
  ASSERT_LT(0, pid);

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

  // Wait for the process to get to a stopping point.
  WaitForStop(pid);

  CheckForLeak(pid, BACKTRACE_CURRENT_THREAD);

  ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

  // Always reap the child so it doesn't linger as a zombie.
  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);
}
1113#endif
1114