backtrace_test.cpp revision 2c43cff01d1271be451671567955158629b23670
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

#include <backtrace/Backtrace.h>
#include <backtrace/BacktraceMap.h>

// For the THREAD_SIGNAL definition.
#include "BacktraceCurrent.h"

#include <cutils/atomic.h>
#include <gtest/gtest.h>

#include <algorithm>
#include <memory>
#include <string>
#include <vector>

#include "thread_utils.h"

// Number of microseconds per millisecond.
#define US_PER_MSEC             1000

// Number of nanoseconds in a second.
#define NS_PER_SEC              1000000000ULL

// Number of simultaneous dumping operations to perform.
#define NUM_THREADS  40

// Number of simultaneous threads running in our forked process.
#define NUM_PTRACE_THREADS 5

struct thread_t {
  pid_t tid;
  int32_t state;
  pthread_t threadId;
  void* data;
};

struct dump_thread_t {
  thread_t thread;
  Backtrace* backtrace;
  int32_t* now;
  int32_t done;
};

extern "C" {
// Prototypes for functions in the test library.
int test_level_one(int, int, int, int, void (*)(void*), void*);

int test_recursive_call(int, void (*)(void*), void*);
}

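// Returns the current value of the monotonic clock in nanoseconds.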
uint64_t NanoTime() {
  struct timespec t = { 0, 0 };
  clock_gettime(CLOCK_MONOTONIC, &t);
  return static_cast<uint64_t>(t.tv_sec * NS_PER_SEC + t.tv_nsec);
}

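// Formats every frame in the backtrace into a single indented,
// newline-separated string.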
std::string DumpFrames(Backtrace* backtrace) {
  if (backtrace->NumFrames() == 0) {
    return "   No frames to dump\n";
  }

  std::string frame;
  for (size_t i = 0; i < backtrace->NumFrames(); i++) {
    frame += "   " + backtrace->FormatFrameData(i) + '\n';
  }
  return frame;
}

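// Busy-waits until the traced process reaches a ptrace stopping point,
// giving up after one second.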
void WaitForStop(pid_t pid) {
  uint64_t start = NanoTime();

  siginfo_t si;
  while (ptrace(PTRACE_GETSIGINFO, pid, 0, &si) < 0 && (errno == EINTR || errno == ESRCH)) {
    if ((NanoTime() - start) > NS_PER_SEC) {
      printf("The process did not get to a stopping point in 1 second.\n");
      break;
    }
    usleep(US_PER_MSEC);
  }
}

bool ReadyLevelBacktrace(Backtrace* backtrace) {
  // See if test_level_four is in the backtrace.
  bool found = false;
  for (Backtrace::const_iterator it = backtrace->begin(); it != backtrace->end(); ++it) {
    if (it->func_name == "test_level_four") {
      found = true;
      break;
    }
  }

  return found;
}

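// Verifies that the frames for test_level_one through test_level_four
// appear in the backtrace in the expected order.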
void VerifyLevelDump(Backtrace* backtrace) {
  ASSERT_GT(backtrace->NumFrames(), static_cast<size_t>(0));
  ASSERT_LT(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES));

  // Look through the frames starting at the highest to find the
  // frame we want.
  size_t frame_num = 0;
  for (size_t i = backtrace->NumFrames()-1; i > 2; i--) {
    if (backtrace->GetFrame(i)->func_name == "test_level_one") {
      frame_num = i;
      break;
    }
  }
  ASSERT_LT(static_cast<size_t>(0), frame_num) << DumpFrames(backtrace);
  ASSERT_LE(static_cast<size_t>(3), frame_num) << DumpFrames(backtrace);

  ASSERT_EQ(backtrace->GetFrame(frame_num)->func_name, "test_level_one");
  ASSERT_EQ(backtrace->GetFrame(frame_num-1)->func_name, "test_level_two");
  ASSERT_EQ(backtrace->GetFrame(frame_num-2)->func_name, "test_level_three");
  ASSERT_EQ(backtrace->GetFrame(frame_num-3)->func_name, "test_level_four");
}

void VerifyLevelBacktrace(void*) {
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyLevelDump(backtrace.get());
}

bool ReadyMaxBacktrace(Backtrace* backtrace) {
  return (backtrace->NumFrames() == MAX_BACKTRACE_FRAMES);
}

void VerifyMaxDump(Backtrace* backtrace) {
  ASSERT_EQ(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES));
  // Verify that the last frame is our recursive call.
  ASSERT_EQ(backtrace->GetFrame(MAX_BACKTRACE_FRAMES-1)->func_name,
            "test_recursive_call");
}

void VerifyMaxBacktrace(void*) {
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyMaxDump(backtrace.get());
}

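// Callback invoked on the target thread: publishes that the thread is
// ready, then spins until the test clears the state flag.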
void ThreadSetState(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);
  android_atomic_acquire_store(1, &thread->state);
  volatile int i = 0;
  while (thread->state) {
    i++;
  }
}

void VerifyThreadTest(pid_t tid, void (*VerifyFunc)(Backtrace*)) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyFunc(backtrace.get());
}

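// Polls the given atomic value, returning true as soon as it becomes
// non-zero, or false if it is still zero after the timeout expires.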
bool WaitForNonZero(int32_t* value, uint64_t seconds) {
  uint64_t start = NanoTime();
  do {
    if (android_atomic_acquire_load(value)) {
      return true;
    }
  } while ((NanoTime() - start) < seconds * NS_PER_SEC);
  return false;
}

TEST(libbacktrace, local_trace) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelBacktrace, nullptr), 0);
}

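// Verifies that unwinding with one and two ignored frames produces
// backtraces that line up, frame for frame, with the full backtrace.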
void VerifyIgnoreFrames(
    Backtrace* bt_all, Backtrace* bt_ign1,
    Backtrace* bt_ign2, const char* cur_proc) {
  EXPECT_EQ(bt_all->NumFrames(), bt_ign1->NumFrames() + 1);
  EXPECT_EQ(bt_all->NumFrames(), bt_ign2->NumFrames() + 2);

  // Check that all of the frames are the same, starting from the frame
  // above the current function.
  bool check = (cur_proc == nullptr);
  for (size_t i = 0; i < bt_ign2->NumFrames(); i++) {
    if (check) {
      EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_ign1->GetFrame(i+1)->pc);
      EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_ign1->GetFrame(i+1)->sp);
      EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_ign1->GetFrame(i+1)->stack_size);

      EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_all->GetFrame(i+2)->pc);
      EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_all->GetFrame(i+2)->sp);
      EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_all->GetFrame(i+2)->stack_size);
    }
    if (!check && bt_ign2->GetFrame(i)->func_name == cur_proc) {
      check = true;
    }
  }
}

void VerifyLevelIgnoreFrames(void*) {
  std::unique_ptr<Backtrace> all(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(all.get() != nullptr);
  ASSERT_TRUE(all->Unwind(0));

  std::unique_ptr<Backtrace> ign1(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(ign1.get() != nullptr);
  ASSERT_TRUE(ign1->Unwind(1));

  std::unique_ptr<Backtrace> ign2(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(ign2.get() != nullptr);
  ASSERT_TRUE(ign2->Unwind(2));

  VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), "VerifyLevelIgnoreFrames");
}

TEST(libbacktrace, local_trace_ignore_frames) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelIgnoreFrames, nullptr), 0);
}

TEST(libbacktrace, local_max_trace) {
  ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxBacktrace, nullptr), 0);
}

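// Repeatedly attaches to the target with ptrace and unwinds it until
// ReadyFunc reports that the expected stack is present, then runs
// VerifyFunc on that backtrace. Fails if nothing verifies within 5 seconds.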
void VerifyProcTest(pid_t pid, pid_t tid, bool share_map,
                    bool (*ReadyFunc)(Backtrace*),
                    void (*VerifyFunc)(Backtrace*)) {
  pid_t ptrace_tid;
  if (tid < 0) {
    ptrace_tid = pid;
  } else {
    ptrace_tid = tid;
  }
  uint64_t start = NanoTime();
  bool verified = false;
  do {
    usleep(US_PER_MSEC);
    if (ptrace(PTRACE_ATTACH, ptrace_tid, 0, 0) == 0) {
      // Wait for the process to get to a stopping point.
      WaitForStop(ptrace_tid);

      std::unique_ptr<BacktraceMap> map;
      if (share_map) {
        map.reset(BacktraceMap::Create(pid));
      }
      std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, tid, map.get()));
      ASSERT_TRUE(backtrace.get() != nullptr);
      ASSERT_TRUE(backtrace->Unwind(0));
      if (ReadyFunc(backtrace.get())) {
        VerifyFunc(backtrace.get());
        verified = true;
      }

      ASSERT_TRUE(ptrace(PTRACE_DETACH, ptrace_tid, 0, 0) == 0);
    }
    // If 5 seconds have passed, then we are done.
  } while (!verified && (NanoTime() - start) <= 5 * NS_PER_SEC);
  ASSERT_TRUE(verified);
}

TEST(libbacktrace, ptrace_trace) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyLevelBacktrace, VerifyLevelDump);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}

TEST(libbacktrace, ptrace_trace_shared_map) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }

  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, true, ReadyLevelBacktrace, VerifyLevelDump);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}

TEST(libbacktrace, ptrace_max_trace) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyMaxBacktrace, VerifyMaxDump);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}

void VerifyProcessIgnoreFrames(Backtrace* bt_all) {
  std::unique_ptr<Backtrace> ign1(Backtrace::Create(bt_all->Pid(), BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(ign1.get() != nullptr);
  ASSERT_TRUE(ign1->Unwind(1));

  std::unique_ptr<Backtrace> ign2(Backtrace::Create(bt_all->Pid(), BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(ign2.get() != nullptr);
  ASSERT_TRUE(ign2->Unwind(2));

  VerifyIgnoreFrames(bt_all, ign1.get(), ign2.get(), nullptr);
}

TEST(libbacktrace, ptrace_ignore_frames) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyLevelBacktrace, VerifyProcessIgnoreFrames);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}

// Create a process with multiple threads and dump all of the threads.
void* PtraceThreadLevelRun(void*) {
  EXPECT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
  return nullptr;
}

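// Fills in threads with every tid listed under /proc/<pid>/task.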
void GetThreads(pid_t pid, std::vector<pid_t>* threads) {
  // Get the list of tasks.
  char task_path[128];
  snprintf(task_path, sizeof(task_path), "/proc/%d/task", pid);

  DIR* tasks_dir = opendir(task_path);
  ASSERT_TRUE(tasks_dir != nullptr);
  struct dirent* entry;
  while ((entry = readdir(tasks_dir)) != nullptr) {
    char* end;
    pid_t tid = strtoul(entry->d_name, &end, 10);
    if (*end == '\0') {
      threads->push_back(tid);
    }
  }
  closedir(tasks_dir);
}

TEST(libbacktrace, ptrace_threads) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    for (size_t i = 0; i < NUM_PTRACE_THREADS; i++) {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

      pthread_t thread;
      ASSERT_TRUE(pthread_create(&thread, &attr, PtraceThreadLevelRun, nullptr) == 0);
    }
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }

  // Check to see that all of the threads are running before unwinding.
  std::vector<pid_t> threads;
  uint64_t start = NanoTime();
  do {
    usleep(US_PER_MSEC);
    threads.clear();
    GetThreads(pid, &threads);
  } while ((threads.size() != NUM_PTRACE_THREADS + 1) &&
      ((NanoTime() - start) <= 5 * NS_PER_SEC));
  ASSERT_EQ(threads.size(), static_cast<size_t>(NUM_PTRACE_THREADS + 1));

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);
  WaitForStop(pid);
  for (std::vector<pid_t>::const_iterator it = threads.begin(); it != threads.end(); ++it) {
    // Skip the current forked process, we only care about the threads.
    if (pid == *it) {
      continue;
    }
    VerifyProcTest(pid, *it, false, ReadyLevelBacktrace, VerifyLevelDump);
  }
  ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}


void VerifyLevelThread(void*) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), gettid()));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyLevelDump(backtrace.get());
}

TEST(libbacktrace, thread_current_level) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelThread, nullptr), 0);
}

void VerifyMaxThread(void*) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), gettid()));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyMaxDump(backtrace.get());
}

TEST(libbacktrace, thread_current_max) {
  ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxThread, nullptr), 0);
}

void* ThreadLevelRun(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);

  thread->tid = gettid();
  EXPECT_NE(test_level_one(1, 2, 3, 4, ThreadSetState, data), 0);
  return nullptr;
}

TEST(libbacktrace, thread_level_trace) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  // Make sure that the thread signal used is not visible when compiled for
  // the target.
#if !defined(__GLIBC__)
  ASSERT_LT(THREAD_SIGNAL, SIGRTMIN);
#endif

  // Save the current signal action and make sure it is restored afterwards.
  struct sigaction cur_action;
  ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &cur_action) == 0);

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyLevelDump(backtrace.get());

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  // Verify that the old action was restored.
  struct sigaction new_action;
  ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &new_action) == 0);
  EXPECT_EQ(cur_action.sa_sigaction, new_action.sa_sigaction);
  // The SA_RESTORER flag gets set behind our back, so a direct comparison
  // doesn't work unless we mask the value off. Mips doesn't have this
  // flag, so skip this on that platform.
#if defined(SA_RESTORER)
  cur_action.sa_flags &= ~SA_RESTORER;
  new_action.sa_flags &= ~SA_RESTORER;
#elif defined(__GLIBC__)
  // Our host compiler doesn't appear to define this flag for some reason.
  cur_action.sa_flags &= ~0x04000000;
  new_action.sa_flags &= ~0x04000000;
#endif
  EXPECT_EQ(cur_action.sa_flags, new_action.sa_flags);
}

TEST(libbacktrace, thread_ignore_frames) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  std::unique_ptr<Backtrace> all(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(all.get() != nullptr);
  ASSERT_TRUE(all->Unwind(0));

  std::unique_ptr<Backtrace> ign1(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(ign1.get() != nullptr);
  ASSERT_TRUE(ign1->Unwind(1));

  std::unique_ptr<Backtrace> ign2(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(ign2.get() != nullptr);
  ASSERT_TRUE(ign2->Unwind(2));

  VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), nullptr);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}

void* ThreadMaxRun(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);

  thread->tid = gettid();
  EXPECT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, ThreadSetState, data), 0);
  return nullptr;
}

TEST(libbacktrace, thread_max_trace) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadMaxRun, &thread_data) == 0);

  // Wait for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyMaxDump(backtrace.get());

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}

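// Spins until the shared dump_now flag is set, then unwinds the target
// thread and signals completion through dump->done.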
void* ThreadDump(void* data) {
  dump_thread_t* dump = reinterpret_cast<dump_thread_t*>(data);
  while (true) {
    if (android_atomic_acquire_load(dump->now)) {
      break;
    }
  }

  // The status of the actual unwind will be checked elsewhere.
  dump->backtrace = Backtrace::Create(getpid(), dump->thread.tid);
  dump->backtrace->Unwind(0);

  android_atomic_acquire_store(1, &dump->done);

  return nullptr;
}

TEST(libbacktrace, thread_multiple_dump) {
  // Dump NUM_THREADS simultaneously.
  std::vector<thread_t> runners(NUM_THREADS);
  std::vector<dump_thread_t> dumpers(NUM_THREADS);

  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  for (size_t i = 0; i < NUM_THREADS; i++) {
    // Launch the runners, they will spin in hard loops doing nothing.
    runners[i].tid = 0;
    runners[i].state = 0;
    ASSERT_TRUE(pthread_create(&runners[i].threadId, &attr, ThreadMaxRun, &runners[i]) == 0);
  }

  // Wait for tids to be set.
  for (std::vector<thread_t>::iterator it = runners.begin(); it != runners.end(); ++it) {
    ASSERT_TRUE(WaitForNonZero(&it->state, 30));
  }

  // Start all of the dumpers at once, they will spin until they are signalled
  // to begin their dump run.
  int32_t dump_now = 0;
  for (size_t i = 0; i < NUM_THREADS; i++) {
    dumpers[i].thread.tid = runners[i].tid;
    dumpers[i].thread.state = 0;
    dumpers[i].done = 0;
    dumpers[i].now = &dump_now;

    ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0);
  }

  // Start all of the dumpers going at once.
  android_atomic_acquire_store(1, &dump_now);

  for (size_t i = 0; i < NUM_THREADS; i++) {
    ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 30));

    // Tell the runner thread to exit its infinite loop.
    android_atomic_acquire_store(0, &runners[i].state);

    ASSERT_TRUE(dumpers[i].backtrace != nullptr);
    VerifyMaxDump(dumpers[i].backtrace);

    delete dumpers[i].backtrace;
    dumpers[i].backtrace = nullptr;
  }
}

TEST(libbacktrace, thread_multiple_dump_same_thread) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  thread_t runner;
  runner.tid = 0;
  runner.state = 0;
  ASSERT_TRUE(pthread_create(&runner.threadId, &attr, ThreadMaxRun, &runner) == 0);

  // Wait for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&runner.state, 30));

  // Start all of the dumpers at once, they will spin until they are signalled
  // to begin their dump run.
  int32_t dump_now = 0;
  // Dump the same thread NUM_THREADS times simultaneously.
  std::vector<dump_thread_t> dumpers(NUM_THREADS);
  for (size_t i = 0; i < NUM_THREADS; i++) {
    dumpers[i].thread.tid = runner.tid;
    dumpers[i].thread.state = 0;
    dumpers[i].done = 0;
    dumpers[i].now = &dump_now;

    ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0);
  }

  // Start all of the dumpers going at once.
  android_atomic_acquire_store(1, &dump_now);

  for (size_t i = 0; i < NUM_THREADS; i++) {
    ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 30));

    ASSERT_TRUE(dumpers[i].backtrace != nullptr);
    VerifyMaxDump(dumpers[i].backtrace);

    delete dumpers[i].backtrace;
    dumpers[i].backtrace = nullptr;
  }

  // Tell the runner thread to exit its infinite loop.
  android_atomic_acquire_store(0, &runner.state);
}

// This test is for UnwindMaps that should share the same map cursor when
// multiple maps are created for the current process at the same time.
TEST(libbacktrace, simultaneous_maps) {
  BacktraceMap* map1 = BacktraceMap::Create(getpid());
  BacktraceMap* map2 = BacktraceMap::Create(getpid());
  BacktraceMap* map3 = BacktraceMap::Create(getpid());

  Backtrace* back1 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map1);
  EXPECT_TRUE(back1->Unwind(0));
  delete back1;
  delete map1;

  Backtrace* back2 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map2);
  EXPECT_TRUE(back2->Unwind(0));
  delete back2;
  delete map2;

  Backtrace* back3 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map3);
  EXPECT_TRUE(back3->Unwind(0));
  delete back3;
  delete map3;
}

TEST(libbacktrace, fillin_erases) {
  BacktraceMap* back_map = BacktraceMap::Create(getpid());

  backtrace_map_t map;

  map.start = 1;
  map.end = 3;
  map.flags = 1;
  map.name = "Initialized";
  back_map->FillIn(0, &map);
  delete back_map;

  ASSERT_FALSE(BacktraceMap::IsValid(map));
  ASSERT_EQ(static_cast<uintptr_t>(0), map.start);
  ASSERT_EQ(static_cast<uintptr_t>(0), map.end);
  ASSERT_EQ(0, map.flags);
  ASSERT_EQ("", map.name);
}

TEST(libbacktrace, format_test) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);

  backtrace_frame_data_t frame;
  frame.num = 1;
  frame.pc = 2;
  frame.sp = 0;
  frame.stack_size = 0;
  frame.func_offset = 0;

  // Check no map set.
  frame.num = 1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000002  <unknown>",
#else
  EXPECT_EQ("#01 pc 00000002  <unknown>",
#endif
            backtrace->FormatFrameData(&frame));

  // Check map name empty, but exists.
  frame.map.start = 1;
  frame.map.end = 1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000001  <unknown>",
#else
  EXPECT_EQ("#01 pc 00000001  <unknown>",
#endif
            backtrace->FormatFrameData(&frame));

  // Check relative pc is set and map name is set.
  frame.pc = 0x12345679;
  frame.map.name = "MapFake";
  frame.map.start = 1;
  frame.map.end = 1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, but no func offset.
  frame.func_name = "ProcFake";
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake (ProcFake)",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake (ProcFake)",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, and func offset is non-zero.
  frame.func_offset = 645;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake (ProcFake+645)",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake (ProcFake+645)",
#endif
            backtrace->FormatFrameData(&frame));
}

struct map_test_t {
  uintptr_t start;
  uintptr_t end;
};

bool map_sort(map_test_t i, map_test_t j) {
  return i.start < j.start;
}

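// Parses /proc/<pid>/maps directly and verifies that BacktraceMap returns
// the same ranges in the same order.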
void VerifyMap(pid_t pid) {
  char buffer[4096];
  snprintf(buffer, sizeof(buffer), "/proc/%d/maps", pid);

  FILE* map_file = fopen(buffer, "r");
  ASSERT_TRUE(map_file != nullptr);
  std::vector<map_test_t> test_maps;
  while (fgets(buffer, sizeof(buffer), map_file)) {
    map_test_t map;
    ASSERT_EQ(2, sscanf(buffer, "%" SCNxPTR "-%" SCNxPTR " ", &map.start, &map.end));
    test_maps.push_back(map);
  }
  fclose(map_file);
  std::sort(test_maps.begin(), test_maps.end(), map_sort);

  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(pid));

  // Basic test that verifies that the map is in the expected order.
  std::vector<map_test_t>::const_iterator test_it = test_maps.begin();
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    ASSERT_TRUE(test_it != test_maps.end());
    ASSERT_EQ(test_it->start, it->start);
    ASSERT_EQ(test_it->end, it->end);
    ++test_it;
  }
  ASSERT_TRUE(test_it == test_maps.end());
}

TEST(libbacktrace, verify_map_remote) {
  pid_t pid;

  if ((pid = fork()) == 0) {
    while (true) {
    }
    _exit(0);
  }
  ASSERT_LT(0, pid);

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

  // Wait for the process to get to a stopping point.
  WaitForStop(pid);

  // The maps should match exactly since the forked process has been paused.
  VerifyMap(pid);

  ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);
}

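// Thread body for thread_read: maps two pages, fills the first with a
// known pattern, protects the second, and waits while the test reads back
// the readable page.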
void* ThreadReadTest(void* data) {
  thread_t* thread_data = reinterpret_cast<thread_t*>(data);

  thread_data->tid = gettid();

  // Create two map pages.
  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  uint8_t* memory;
  if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
    return reinterpret_cast<void*>(-1);
  }

  // Mark the second page as not-readable.
  if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
    return reinterpret_cast<void*>(-1);
  }

  // Set up a simple pattern in memory.
  for (size_t i = 0; i < pagesize; i++) {
    memory[i] = i;
  }

  thread_data->data = memory;

  // Tell the caller it's okay to start reading memory.
  android_atomic_acquire_store(1, &thread_data->state);

  // Loop waiting for the caller to finish reading the memory.
  while (thread_data->state) {
  }

  // Re-enable read-write on the page so that we don't crash if we try
  // and access data on this page when freeing the memory.
  if (mprotect(&memory[pagesize], pagesize, PROT_READ | PROT_WRITE) != 0) {
    return reinterpret_cast<void*>(-1);
  }
  free(memory);

  android_atomic_acquire_store(1, &thread_data->state);

  return nullptr;
}

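// Verifies that Read() returns only the readable page's worth of data and
// that unaligned reads within that page succeed.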
void RunReadTest(Backtrace* backtrace, uintptr_t read_addr) {
  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));

  // Create a page of data to use to do quick compares.
  uint8_t* expected = new uint8_t[pagesize];
  for (size_t i = 0; i < pagesize; i++) {
    expected[i] = i;
  }
  uint8_t* data = new uint8_t[2*pagesize];
  // Verify that we can only read one page worth of data.
  size_t bytes_read = backtrace->Read(read_addr, data, 2 * pagesize);
  ASSERT_EQ(pagesize, bytes_read);
  ASSERT_TRUE(memcmp(data, expected, pagesize) == 0);

  // Verify unaligned reads.
  for (size_t i = 1; i < sizeof(word_t); i++) {
    bytes_read = backtrace->Read(read_addr + i, data, 2 * sizeof(word_t));
    ASSERT_EQ(2 * sizeof(word_t), bytes_read);
    ASSERT_TRUE(memcmp(data, &expected[i], 2 * sizeof(word_t)) == 0)
        << "Offset at " << i << " failed";
  }
  delete[] data;
  delete[] expected;
}

TEST(libbacktrace, thread_read) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  pthread_t thread;
  thread_t thread_data = { 0, 0, 0, nullptr };
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadReadTest, &thread_data) == 0);

  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);

  RunReadTest(backtrace.get(), reinterpret_cast<uintptr_t>(thread_data.data));

  android_atomic_acquire_store(0, &thread_data.state);

  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));
}

volatile uintptr_t g_ready = 0;
volatile uintptr_t g_addr = 0;

void ForkedReadTest() {
  // Create two map pages.
  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  uint8_t* memory;
  if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
    perror("Failed to allocate memory\n");
    exit(1);
  }

  // Mark the second page as not-readable.
  if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
    perror("Failed to mprotect memory\n");
    exit(1);
  }

  // Set up a simple pattern in memory.
  for (size_t i = 0; i < pagesize; i++) {
    memory[i] = i;
  }

  g_addr = reinterpret_cast<uintptr_t>(memory);
  g_ready = 1;

  while (1) {
    usleep(US_PER_MSEC);
  }
}

TEST(libbacktrace, process_read) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ForkedReadTest();
    exit(0);
  }
  ASSERT_NE(-1, pid);

  bool test_executed = false;
  uint64_t start = NanoTime();
  while (1) {
    if (ptrace(PTRACE_ATTACH, pid, 0, 0) == 0) {
      WaitForStop(pid);

      std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));

      uintptr_t read_addr;
      size_t bytes_read = backtrace->Read(reinterpret_cast<uintptr_t>(&g_ready),
                                          reinterpret_cast<uint8_t*>(&read_addr),
                                          sizeof(uintptr_t));
      ASSERT_EQ(sizeof(uintptr_t), bytes_read);
      if (read_addr) {
        // The forked process is ready to be read.
        bytes_read = backtrace->Read(reinterpret_cast<uintptr_t>(&g_addr),
                                     reinterpret_cast<uint8_t*>(&read_addr),
                                     sizeof(uintptr_t));
        ASSERT_EQ(sizeof(uintptr_t), bytes_read);

        RunReadTest(backtrace.get(), read_addr);

        test_executed = true;
        break;
      }
      ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
    }
    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }
  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  ASSERT_TRUE(test_executed);
}

#if defined(ENABLE_PSS_TESTS)
#include "GetPss.h"

#define MAX_LEAK_BYTES 32*1024UL

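// Unwinds the given pid/tid repeatedly and checks that PSS does not grow
// by more than MAX_LEAK_BYTES beyond its stable baseline.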
void CheckForLeak(pid_t pid, pid_t tid) {
  // Do a few runs to get the PSS stable.
  for (size_t i = 0; i < 100; i++) {
    Backtrace* backtrace = Backtrace::Create(pid, tid);
    ASSERT_TRUE(backtrace != nullptr);
    ASSERT_TRUE(backtrace->Unwind(0));
    delete backtrace;
  }
  size_t stable_pss = GetPssBytes();
  ASSERT_TRUE(stable_pss != 0);

  // Loop enough that even a small leak should be detectable.
  for (size_t i = 0; i < 4096; i++) {
    Backtrace* backtrace = Backtrace::Create(pid, tid);
    ASSERT_TRUE(backtrace != nullptr);
    ASSERT_TRUE(backtrace->Unwind(0));
    delete backtrace;
  }
  size_t new_pss = GetPssBytes();
  ASSERT_TRUE(new_pss != 0);
  size_t abs_diff = (new_pss > stable_pss) ? new_pss - stable_pss : stable_pss - new_pss;
  // As long as the new pss is within a certain amount, consider everything okay.
  ASSERT_LE(abs_diff, MAX_LEAK_BYTES);
}

TEST(libbacktrace, check_for_leak_local) {
  CheckForLeak(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD);
}

TEST(libbacktrace, check_for_leak_local_thread) {
  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, nullptr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  CheckForLeak(BACKTRACE_CURRENT_PROCESS, thread_data.tid);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  ASSERT_TRUE(pthread_join(thread, nullptr) == 0);
}

TEST(libbacktrace, check_for_leak_remote) {
  pid_t pid;

  if ((pid = fork()) == 0) {
    while (true) {
    }
    _exit(0);
  }
  ASSERT_LT(0, pid);

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

  // Wait for the process to get to a stopping point.
  WaitForStop(pid);

  CheckForLeak(pid, BACKTRACE_CURRENT_THREAD);

  ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);
}
#endif
