// backtrace_test.cpp revision 2b4a63fc6a4bfc6db69901258539276b888c7ec4
1/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
31
32#include <backtrace/Backtrace.h>
33#include <backtrace/BacktraceMap.h>
34
35// For the THREAD_SIGNAL definition.
36#include "BacktraceThread.h"
37
38#include <cutils/atomic.h>
39#include <gtest/gtest.h>
40
41#include <algorithm>
42#include <memory>
43#include <vector>
44
45#include "thread_utils.h"
46
47// Number of microseconds per milliseconds.
48#define US_PER_MSEC             1000
49
50// Number of nanoseconds in a second.
51#define NS_PER_SEC              1000000000ULL
52
53// Number of simultaneous dumping operations to perform.
54#define NUM_THREADS  40
55
56// Number of simultaneous threads running in our forked process.
57#define NUM_PTRACE_THREADS 5
58
// Per-thread bookkeeping shared between a test and the thread it spawns.
struct thread_t {
  pid_t tid;           // Set by the spawned thread itself (gettid()).
  int32_t state;       // Handshake flag, accessed with android atomics.
  pthread_t threadId;  // pthread handle used when creating the thread.
  void* data;          // Optional payload (e.g. address of test memory).
};
65
// State for one dumper thread in the multiple-dump tests.
struct dump_thread_t {
  thread_t thread;      // The dumper thread and the tid it should unwind.
  Backtrace* backtrace; // Result of the unwind; owned by the test, checked later.
  int32_t* now;         // Shared start flag: dump begins when *now != 0.
  int32_t done;         // Set to 1 (release store) when the dump has finished.
};
72
73extern "C" {
74// Prototypes for functions in the test library.
75int test_level_one(int, int, int, int, void (*)(void*), void*);
76
77int test_recursive_call(int, void (*)(void*), void*);
78}
79
// Return the current CLOCK_MONOTONIC time expressed in nanoseconds.
uint64_t NanoTime() {
  // Nanoseconds per second (same value as the file-level NS_PER_SEC).
  constexpr uint64_t kNsPerSec = 1000000000ULL;
  struct timespec ts = { 0, 0 };
  clock_gettime(CLOCK_MONOTONIC, &ts);
  uint64_t nsecs = static_cast<uint64_t>(ts.tv_sec) * kNsPerSec;
  return nsecs + static_cast<uint64_t>(ts.tv_nsec);
}
85
86void DumpFrames(Backtrace* backtrace) {
87  if (backtrace->NumFrames() == 0) {
88    printf("    No frames to dump\n");
89    return;
90  }
91
92  for (size_t i = 0; i < backtrace->NumFrames(); i++) {
93    printf("    %s\n", backtrace->FormatFrameData(i).c_str());
94  }
95}
96
97void WaitForStop(pid_t pid) {
98  uint64_t start = NanoTime();
99
100  siginfo_t si;
101  while (ptrace(PTRACE_GETSIGINFO, pid, 0, &si) < 0 && (errno == EINTR || errno == ESRCH)) {
102    if ((NanoTime() - start) > NS_PER_SEC) {
103      printf("The process did not get to a stopping point in 1 second.\n");
104      break;
105    }
106    usleep(US_PER_MSEC);
107  }
108}
109
110bool ReadyLevelBacktrace(Backtrace* backtrace) {
111  // See if test_level_four is in the backtrace.
112  bool found = false;
113  for (Backtrace::const_iterator it = backtrace->begin(); it != backtrace->end(); ++it) {
114    if (it->func_name == "test_level_four") {
115      found = true;
116      break;
117    }
118  }
119
120  return found;
121}
122
// Assert that the backtrace contains the test_level_one..four call chain,
// with one consecutively below the next.
void VerifyLevelDump(Backtrace* backtrace) {
  ASSERT_GT(backtrace->NumFrames(), static_cast<size_t>(0));
  ASSERT_LT(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES));

  // Look through the frames starting at the highest to find the
  // frame we want.
  size_t frame_num = 0;
  for (size_t i = backtrace->NumFrames()-1; i > 2; i--) {
    if (backtrace->GetFrame(i)->func_name == "test_level_one") {
      frame_num = i;
      break;
    }
  }
  // frame_num stays 0 when test_level_one was not found. It must also be at
  // least 3 so the three callee frames checked below exist.
  ASSERT_LT(static_cast<size_t>(0), frame_num);
  ASSERT_LE(static_cast<size_t>(3), frame_num);

  ASSERT_EQ(backtrace->GetFrame(frame_num)->func_name, "test_level_one");
  ASSERT_EQ(backtrace->GetFrame(frame_num-1)->func_name, "test_level_two");
  ASSERT_EQ(backtrace->GetFrame(frame_num-2)->func_name, "test_level_three");
  ASSERT_EQ(backtrace->GetFrame(frame_num-3)->func_name, "test_level_four");
}
144
145void VerifyLevelBacktrace(void*) {
146  std::unique_ptr<Backtrace> backtrace(
147      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
148  ASSERT_TRUE(backtrace.get() != nullptr);
149  ASSERT_TRUE(backtrace->Unwind(0));
150
151  VerifyLevelDump(backtrace.get());
152}
153
154bool ReadyMaxBacktrace(Backtrace* backtrace) {
155  return (backtrace->NumFrames() == MAX_BACKTRACE_FRAMES);
156}
157
// Assert that the backtrace is truncated at MAX_BACKTRACE_FRAMES and that
// the deepest captured frame is still inside the recursion.
void VerifyMaxDump(Backtrace* backtrace) {
  ASSERT_EQ(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES));
  // Verify that the last frame is our recursive call.
  ASSERT_EQ(backtrace->GetFrame(MAX_BACKTRACE_FRAMES-1)->func_name,
            "test_recursive_call");
}
164
165void VerifyMaxBacktrace(void*) {
166  std::unique_ptr<Backtrace> backtrace(
167      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
168  ASSERT_TRUE(backtrace.get() != nullptr);
169  ASSERT_TRUE(backtrace->Unwind(0));
170
171  VerifyMaxDump(backtrace.get());
172}
173
// Callback used by runner threads: publish "I am running" via an acquire
// store, then spin so the thread keeps a stable, predictable stack while it
// is being unwound from outside. The loop exits when the controlling test
// clears thread->state.
void ThreadSetState(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);
  android_atomic_acquire_store(1, &thread->state);
  // volatile so the busy-wait increment is not optimized away.
  volatile int i = 0;
  while (thread->state) {
    i++;
  }
}
182
// Unwind thread |tid| of the current process and run |VerifyFunc| on the
// resulting backtrace.
void VerifyThreadTest(pid_t tid, void (*VerifyFunc)(Backtrace*)) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyFunc(backtrace.get());
}
190
191bool WaitForNonZero(int32_t* value, uint64_t seconds) {
192  uint64_t start = NanoTime();
193  do {
194    if (android_atomic_acquire_load(value)) {
195      return true;
196    }
197  } while ((NanoTime() - start) < seconds * NS_PER_SEC);
198  return false;
199}
200
// In-process unwind of the current thread from inside the level call chain.
TEST(libbacktrace, local_trace) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelBacktrace, nullptr), 0);
}
204
// Check three backtraces of the same stack taken with 0, 1 and 2 ignored
// frames: the ignored variants must be exactly the full trace minus its top
// frame(s). When |cur_proc| is non-null, frames are only compared once that
// function name has been seen (frames above it differ between unwinds).
void VerifyIgnoreFrames(
    Backtrace* bt_all, Backtrace* bt_ign1,
    Backtrace* bt_ign2, const char* cur_proc) {
  EXPECT_EQ(bt_all->NumFrames(), bt_ign1->NumFrames() + 1);
  EXPECT_EQ(bt_all->NumFrames(), bt_ign2->NumFrames() + 2);

  // Check that all of the frames are the same below the current frame.
  bool check = (cur_proc == nullptr);
  for (size_t i = 0; i < bt_ign2->NumFrames(); i++) {
    if (check) {
      // ign2's frame i corresponds to ign1's frame i+1 and all's frame i+2.
      EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_ign1->GetFrame(i+1)->pc);
      EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_ign1->GetFrame(i+1)->sp);
      EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_ign1->GetFrame(i+1)->stack_size);

      EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_all->GetFrame(i+2)->pc);
      EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_all->GetFrame(i+2)->sp);
      EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_all->GetFrame(i+2)->stack_size);
    }
    // Start comparing only after the caller's own frame has been passed.
    if (!check && bt_ign2->GetFrame(i)->func_name == cur_proc) {
      check = true;
    }
  }
}
228
229void VerifyLevelIgnoreFrames(void*) {
230  std::unique_ptr<Backtrace> all(
231      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
232  ASSERT_TRUE(all.get() != nullptr);
233  ASSERT_TRUE(all->Unwind(0));
234
235  std::unique_ptr<Backtrace> ign1(
236      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
237  ASSERT_TRUE(ign1.get() != nullptr);
238  ASSERT_TRUE(ign1->Unwind(1));
239
240  std::unique_ptr<Backtrace> ign2(
241      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
242  ASSERT_TRUE(ign2.get() != nullptr);
243  ASSERT_TRUE(ign2->Unwind(2));
244
245  VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), "VerifyLevelIgnoreFrames");
246}
247
// In-process unwinds with varying numbers of ignored frames.
TEST(libbacktrace, local_trace_ignore_frames) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelIgnoreFrames, nullptr), 0);
}
251
// In-process unwind of a stack deeper than MAX_BACKTRACE_FRAMES.
TEST(libbacktrace, local_max_trace) {
  ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxBacktrace, nullptr), 0);
}
255
256void VerifyProcTest(pid_t pid, pid_t tid, bool share_map,
257                    bool (*ReadyFunc)(Backtrace*),
258                    void (*VerifyFunc)(Backtrace*)) {
259  pid_t ptrace_tid;
260  if (tid < 0) {
261    ptrace_tid = pid;
262  } else {
263    ptrace_tid = tid;
264  }
265  uint64_t start = NanoTime();
266  bool verified = false;
267  do {
268    usleep(US_PER_MSEC);
269    if (ptrace(PTRACE_ATTACH, ptrace_tid, 0, 0) == 0) {
270      // Wait for the process to get to a stopping point.
271      WaitForStop(ptrace_tid);
272
273      std::unique_ptr<BacktraceMap> map;
274      if (share_map) {
275        map.reset(BacktraceMap::Create(pid));
276      }
277      std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, tid, map.get()));
278      ASSERT_TRUE(backtrace->Unwind(0));
279      ASSERT_TRUE(backtrace.get() != nullptr);
280      if (ReadyFunc(backtrace.get())) {
281        VerifyFunc(backtrace.get());
282        verified = true;
283      }
284
285      ASSERT_TRUE(ptrace(PTRACE_DETACH, ptrace_tid, 0, 0) == 0);
286    }
287    // If 5 seconds have passed, then we are done.
288  } while (!verified && (NanoTime() - start) <= 5 * NS_PER_SEC);
289  ASSERT_TRUE(verified);
290}
291
// Remote unwind of a forked child executing the level call chain.
TEST(libbacktrace, ptrace_trace) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyLevelBacktrace, VerifyLevelDump);

  // Clean up the child.
  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
304
// Same as ptrace_trace, but supplies a shared, pre-created BacktraceMap.
TEST(libbacktrace, ptrace_trace_shared_map) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }

  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, true, ReadyLevelBacktrace, VerifyLevelDump);

  // Clean up the child.
  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
318
// Remote unwind of a forked child whose stack exceeds MAX_BACKTRACE_FRAMES.
TEST(libbacktrace, ptrace_max_trace) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyMaxBacktrace, VerifyMaxDump);

  // Clean up the child.
  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
331
// Given a full remote backtrace, take two more unwinds of the same target
// ignoring 1 and 2 frames, then verify their relationship. cur_proc is null
// because remote frames line up exactly once attached.
void VerifyProcessIgnoreFrames(Backtrace* bt_all) {
  std::unique_ptr<Backtrace> ign1(Backtrace::Create(bt_all->Pid(), BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(ign1.get() != nullptr);
  ASSERT_TRUE(ign1->Unwind(1));

  std::unique_ptr<Backtrace> ign2(Backtrace::Create(bt_all->Pid(), BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(ign2.get() != nullptr);
  ASSERT_TRUE(ign2->Unwind(2));

  VerifyIgnoreFrames(bt_all, ign1.get(), ign2.get(), nullptr);
}
343
// Remote unwinds with varying numbers of ignored frames.
TEST(libbacktrace, ptrace_ignore_frames) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyLevelBacktrace, VerifyProcessIgnoreFrames);

  // Clean up the child.
  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
356
357// Create a process with multiple threads and dump all of the threads.
// Thread entry point: run the level call chain forever-ish so the thread has
// a known stack while being traced (no callback, so it returns when done).
void* PtraceThreadLevelRun(void*) {
  EXPECT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
  return nullptr;
}
362
363void GetThreads(pid_t pid, std::vector<pid_t>* threads) {
364  // Get the list of tasks.
365  char task_path[128];
366  snprintf(task_path, sizeof(task_path), "/proc/%d/task", pid);
367
368  DIR* tasks_dir = opendir(task_path);
369  ASSERT_TRUE(tasks_dir != nullptr);
370  struct dirent* entry;
371  while ((entry = readdir(tasks_dir)) != nullptr) {
372    char* end;
373    pid_t tid = strtoul(entry->d_name, &end, 10);
374    if (*end == '\0') {
375      threads->push_back(tid);
376    }
377  }
378  closedir(tasks_dir);
379}
380
381TEST(libbacktrace, ptrace_threads) {
382  pid_t pid;
383  if ((pid = fork()) == 0) {
384    for (size_t i = 0; i < NUM_PTRACE_THREADS; i++) {
385      pthread_attr_t attr;
386      pthread_attr_init(&attr);
387      pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
388
389      pthread_t thread;
390      ASSERT_TRUE(pthread_create(&thread, &attr, PtraceThreadLevelRun, nullptr) == 0);
391    }
392    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
393    _exit(1);
394  }
395
396  // Check to see that all of the threads are running before unwinding.
397  std::vector<pid_t> threads;
398  uint64_t start = NanoTime();
399  do {
400    usleep(US_PER_MSEC);
401    threads.clear();
402    GetThreads(pid, &threads);
403  } while ((threads.size() != NUM_PTRACE_THREADS + 1) &&
404      ((NanoTime() - start) <= 5 * NS_PER_SEC));
405  ASSERT_EQ(threads.size(), static_cast<size_t>(NUM_PTRACE_THREADS + 1));
406
407  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);
408  WaitForStop(pid);
409  for (std::vector<int>::const_iterator it = threads.begin(); it != threads.end(); ++it) {
410    // Skip the current forked process, we only care about the threads.
411    if (pid == *it) {
412      continue;
413    }
414    VerifyProcTest(pid, *it, false, ReadyLevelBacktrace, VerifyLevelDump);
415  }
416  ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
417
418  kill(pid, SIGKILL);
419  int status;
420  ASSERT_EQ(waitpid(pid, &status, 0), pid);
421}
422
423void VerifyLevelThread(void*) {
424  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), gettid()));
425  ASSERT_TRUE(backtrace.get() != nullptr);
426  ASSERT_TRUE(backtrace->Unwind(0));
427
428  VerifyLevelDump(backtrace.get());
429}
430
// Unwind the current thread addressed as an explicit pid/tid pair.
TEST(libbacktrace, thread_current_level) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelThread, nullptr), 0);
}
434
435void VerifyMaxThread(void*) {
436  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), gettid()));
437  ASSERT_TRUE(backtrace.get() != nullptr);
438  ASSERT_TRUE(backtrace->Unwind(0));
439
440  VerifyMaxDump(backtrace.get());
441}
442
// Deep recursion unwound through the explicit pid/tid path.
TEST(libbacktrace, thread_current_max) {
  ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxThread, nullptr), 0);
}
446
// Thread entry point: record our tid, descend the level call chain, then
// spin inside ThreadSetState until the test releases us.
void* ThreadLevelRun(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);

  thread->tid = gettid();
  EXPECT_NE(test_level_one(1, 2, 3, 4, ThreadSetState, data), 0);
  return nullptr;
}
454
// Unwind another thread of this process (signal-based under the covers) and
// verify the THREAD_SIGNAL handler is restored afterwards.
TEST(libbacktrace, thread_level_trace) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  // Make sure that the thread signal used is not visible when compiled for
  // the target.
#if !defined(__GLIBC__)
  ASSERT_LT(THREAD_SIGNAL, SIGRTMIN);
#endif

  // Save the current signal action and make sure it is restored afterwards.
  struct sigaction cur_action;
  ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &cur_action) == 0);

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyLevelDump(backtrace.get());

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  // Verify that the old action was restored.
  struct sigaction new_action;
  ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &new_action) == 0);
  EXPECT_EQ(cur_action.sa_sigaction, new_action.sa_sigaction);
  // The SA_RESTORER flag gets set behind our back, so a direct comparison
  // doesn't work unless we mask the value off. Mips doesn't have this
  // flag, so skip this on that platform.
#ifdef SA_RESTORER
  cur_action.sa_flags &= ~SA_RESTORER;
  new_action.sa_flags &= ~SA_RESTORER;
#endif
  EXPECT_EQ(cur_action.sa_flags, new_action.sa_flags);
}
499
// Unwind another thread of this process with 0, 1 and 2 ignored frames and
// verify the relationships between the traces.
TEST(libbacktrace, thread_ignore_frames) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  std::unique_ptr<Backtrace> all(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(all.get() != nullptr);
  ASSERT_TRUE(all->Unwind(0));

  std::unique_ptr<Backtrace> ign1(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(ign1.get() != nullptr);
  ASSERT_TRUE(ign1->Unwind(1));

  std::unique_ptr<Backtrace> ign2(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(ign2.get() != nullptr);
  ASSERT_TRUE(ign2->Unwind(2));

  // cur_proc is null: the target thread is parked, so frames line up exactly.
  VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), nullptr);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}
529
// Thread entry point: record our tid, recurse past the frame limit, then
// spin inside ThreadSetState until the test releases us.
void* ThreadMaxRun(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);

  thread->tid = gettid();
  EXPECT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, ThreadSetState, data), 0);
  return nullptr;
}
537
// Unwind another thread of this process whose stack exceeds the frame limit.
TEST(libbacktrace, thread_max_trace) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadMaxRun, &thread_data) == 0);

  // Wait for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyMaxDump(backtrace.get());

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}
559
// Dumper thread entry point: spin until the shared start flag (*dump->now)
// is raised so all dumpers begin simultaneously, unwind the target thread,
// and publish completion via dump->done.
void* ThreadDump(void* data) {
  dump_thread_t* dump = reinterpret_cast<dump_thread_t*>(data);
  while (true) {
    if (android_atomic_acquire_load(dump->now)) {
      break;
    }
  }

  // The status of the actual unwind will be checked elsewhere.
  dump->backtrace = Backtrace::Create(getpid(), dump->thread.tid);
  dump->backtrace->Unwind(0);

  android_atomic_acquire_store(1, &dump->done);

  return nullptr;
}
576
// Launch NUM_THREADS runner threads, then unwind all of them simultaneously
// from NUM_THREADS dumper threads, verifying each resulting backtrace.
TEST(libbacktrace, thread_multiple_dump) {
  // Dump NUM_THREADS simultaneously.
  std::vector<thread_t> runners(NUM_THREADS);
  std::vector<dump_thread_t> dumpers(NUM_THREADS);

  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  for (size_t i = 0; i < NUM_THREADS; i++) {
    // Launch the runners, they will spin in hard loops doing nothing.
    runners[i].tid = 0;
    runners[i].state = 0;
    ASSERT_TRUE(pthread_create(&runners[i].threadId, &attr, ThreadMaxRun, &runners[i]) == 0);
  }

  // Wait for tids to be set.
  for (std::vector<thread_t>::iterator it = runners.begin(); it != runners.end(); ++it) {
    ASSERT_TRUE(WaitForNonZero(&it->state, 30));
  }

  // Start all of the dumpers at once, they will spin until they are signalled
  // to begin their dump run.
  int32_t dump_now = 0;
  for (size_t i = 0; i < NUM_THREADS; i++) {
    dumpers[i].thread.tid = runners[i].tid;
    dumpers[i].thread.state = 0;
    dumpers[i].done = 0;
    dumpers[i].now = &dump_now;

    ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0);
  }

  // Start all of the dumpers going at once.
  android_atomic_acquire_store(1, &dump_now);

  for (size_t i = 0; i < NUM_THREADS; i++) {
    ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 30));

    // Tell the runner thread to exit its infinite loop.
    android_atomic_acquire_store(0, &runners[i].state);

    ASSERT_TRUE(dumpers[i].backtrace != nullptr);
    VerifyMaxDump(dumpers[i].backtrace);

    // The backtrace was allocated by ThreadDump; free it here.
    delete dumpers[i].backtrace;
    dumpers[i].backtrace = nullptr;
  }
}
625
// Unwind a single runner thread from NUM_THREADS dumper threads at once.
TEST(libbacktrace, thread_multiple_dump_same_thread) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  thread_t runner;
  runner.tid = 0;
  runner.state = 0;
  ASSERT_TRUE(pthread_create(&runner.threadId, &attr, ThreadMaxRun, &runner) == 0);

  // Wait for tids to be set.
  ASSERT_TRUE(WaitForNonZero(&runner.state, 30));

  // Start all of the dumpers at once, they will spin until they are signalled
  // to begin their dump run.
  int32_t dump_now = 0;
  // Dump the same thread NUM_THREADS simultaneously.
  std::vector<dump_thread_t> dumpers(NUM_THREADS);
  for (size_t i = 0; i < NUM_THREADS; i++) {
    dumpers[i].thread.tid = runner.tid;
    dumpers[i].thread.state = 0;
    dumpers[i].done = 0;
    dumpers[i].now = &dump_now;

    ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0);
  }

  // Start all of the dumpers going at once.
  android_atomic_acquire_store(1, &dump_now);

  for (size_t i = 0; i < NUM_THREADS; i++) {
    ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 30));

    ASSERT_TRUE(dumpers[i].backtrace != nullptr);
    VerifyMaxDump(dumpers[i].backtrace);

    // The backtrace was allocated by ThreadDump; free it here.
    delete dumpers[i].backtrace;
    dumpers[i].backtrace = nullptr;
  }

  // Tell the runner thread to exit its infinite loop.
  android_atomic_acquire_store(0, &runner.state);
}
668
669// This test is for UnwindMaps that should share the same map cursor when
670// multiple maps are created for the current process at the same time.
TEST(libbacktrace, simultaneous_maps) {
  // Create all three maps up front so they coexist, then use and destroy
  // them one at a time; each unwind must still succeed.
  BacktraceMap* map1 = BacktraceMap::Create(getpid());
  BacktraceMap* map2 = BacktraceMap::Create(getpid());
  BacktraceMap* map3 = BacktraceMap::Create(getpid());

  Backtrace* back1 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map1);
  EXPECT_TRUE(back1->Unwind(0));
  delete back1;
  delete map1;

  Backtrace* back2 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map2);
  EXPECT_TRUE(back2->Unwind(0));
  delete back2;
  delete map2;

  Backtrace* back3 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map3);
  EXPECT_TRUE(back3->Unwind(0));
  delete back3;
  delete map3;
}
691
// FillIn must reset every field of the output map entry, even when the
// lookup fails (address 0 is not in any map).
TEST(libbacktrace, fillin_erases) {
  BacktraceMap* back_map = BacktraceMap::Create(getpid());

  backtrace_map_t map;

  // Pre-seed the struct with non-default values so we can see the erase.
  map.start = 1;
  map.end = 3;
  map.flags = 1;
  map.name = "Initialized";
  back_map->FillIn(0, &map);
  delete back_map;

  ASSERT_FALSE(BacktraceMap::IsValid(map));
  ASSERT_EQ(static_cast<uintptr_t>(0), map.start);
  ASSERT_EQ(static_cast<uintptr_t>(0), map.end);
  ASSERT_EQ(0, map.flags);
  ASSERT_EQ("", map.name);
}
710
// Check FormatFrameData output for the combinations of: no map, empty map
// name, map name, function name, and function offset. 32- and 64-bit builds
// pad the pc field differently.
TEST(libbacktrace, format_test) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);

  backtrace_frame_data_t frame;
  frame.num = 1;
  frame.pc = 2;
  frame.sp = 0;
  frame.stack_size = 0;
  frame.func_offset = 0;

  // Check no map set.
  frame.num = 1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000002  <unknown>",
#else
  EXPECT_EQ("#01 pc 00000002  <unknown>",
#endif
            backtrace->FormatFrameData(&frame));

  // Check map name empty, but exists.
  frame.map.start = 1;
  frame.map.end = 1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000001  <unknown>",
#else
  EXPECT_EQ("#01 pc 00000001  <unknown>",
#endif
            backtrace->FormatFrameData(&frame));


  // Check relative pc is set and map name is set.
  // pc 0x12345679 with map.start 1 prints as the relative pc 0x12345678.
  frame.pc = 0x12345679;
  frame.map.name = "MapFake";
  frame.map.start =  1;
  frame.map.end =  1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, but no func offset.
  frame.func_name = "ProcFake";
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake (ProcFake)",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake (ProcFake)",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, and func offset is non-zero.
  frame.func_offset = 645;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake (ProcFake+645)",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake (ProcFake+645)",
#endif
            backtrace->FormatFrameData(&frame));
}
772
// Address range parsed from one /proc/<pid>/maps line.
struct map_test_t {
  uintptr_t start;
  uintptr_t end;
};

// Strict-weak ordering by start address, for std::sort.
bool map_sort(map_test_t lhs, map_test_t rhs) {
  return lhs.start < rhs.start;
}
781
// Parse /proc/<pid>/maps directly and check that BacktraceMap reports the
// same ranges in the same (sorted) order.
void VerifyMap(pid_t pid) {
  char buffer[4096];
  snprintf(buffer, sizeof(buffer), "/proc/%d/maps", pid);

  FILE* map_file = fopen(buffer, "r");
  ASSERT_TRUE(map_file != nullptr);
  std::vector<map_test_t> test_maps;
  while (fgets(buffer, sizeof(buffer), map_file)) {
    map_test_t map;
    // Each maps line starts with "start-end "; only those fields are needed.
    ASSERT_EQ(2, sscanf(buffer, "%" SCNxPTR "-%" SCNxPTR " ", &map.start, &map.end));
    test_maps.push_back(map);
  }
  fclose(map_file);
  std::sort(test_maps.begin(), test_maps.end(), map_sort);

  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(pid));

  // Basic test that verifies that the map is in the expected order.
  std::vector<map_test_t>::const_iterator test_it = test_maps.begin();
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    ASSERT_TRUE(test_it != test_maps.end());
    ASSERT_EQ(test_it->start, it->start);
    ASSERT_EQ(test_it->end, it->end);
    ++test_it;
  }
  // Both iterations must exhaust at the same point.
  ASSERT_TRUE(test_it == test_maps.end());
}
809
// Verify the map of a remote, ptrace-stopped process. Stopping the child
// guarantees its maps cannot change while being compared.
TEST(libbacktrace, verify_map_remote) {
  pid_t pid;

  if ((pid = fork()) == 0) {
    // Spin forever; the parent kills us.
    while (true) {
    }
    _exit(0);
  }
  ASSERT_LT(0, pid);

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

  // Wait for the process to get to a stopping point.
  WaitForStop(pid);

  // The maps should match exactly since the forked process has been paused.
  VerifyMap(pid);

  ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);
}
833
// Thread entry point for the read tests: set up one readable page of
// patterned bytes followed by one PROT_NONE page, publish the buffer address
// via thread_data->data, then spin until the test clears state.
// Returns (void*)-1 on setup failure, nullptr on success.
void* ThreadReadTest(void* data) {
  thread_t* thread_data = reinterpret_cast<thread_t*>(data);

  thread_data->tid = gettid();

  // Create two map pages.
  // Mark the second page as not-readable.
  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  uint8_t* memory;
  if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
    return reinterpret_cast<void*>(-1);
  }

  if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
    return reinterpret_cast<void*>(-1);
  }

  // Set up a simple pattern in memory (byte i holds i mod 256).
  for (size_t i = 0; i < pagesize; i++) {
    memory[i] = i;
  }

  thread_data->data = memory;

  // Tell the caller it's okay to start reading memory.
  android_atomic_acquire_store(1, &thread_data->state);

  // Loop waiting for everything
  while (thread_data->state) {
  }

  free(memory);

  // Signal that the memory has been freed and the thread is done.
  android_atomic_acquire_store(1, &thread_data->state);

  return nullptr;
}
871
872void RunReadTest(Backtrace* backtrace, uintptr_t read_addr) {
873  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
874
875  // Create a page of data to use to do quick compares.
876  uint8_t* expected = new uint8_t[pagesize];
877  for (size_t i = 0; i < pagesize; i++) {
878    expected[i] = i;
879  }
880  uint8_t* data = new uint8_t[2*pagesize];
881  // Verify that we can only read one page worth of data.
882  size_t bytes_read = backtrace->Read(read_addr, data, 2 * pagesize);
883  ASSERT_EQ(pagesize, bytes_read);
884  ASSERT_TRUE(memcmp(data, expected, pagesize) == 0);
885
886  // Verify unaligned reads.
887  for (size_t i = 1; i < sizeof(word_t); i++) {
888    bytes_read = backtrace->Read(read_addr + i, data, 2 * sizeof(word_t));
889    ASSERT_EQ(2 * sizeof(word_t), bytes_read);
890    ASSERT_TRUE(memcmp(data, &expected[i], 2 * sizeof(word_t)) == 0)
891        << "Offset at " << i << " failed";
892  }
893  delete data;
894  delete expected;
895}
896
// Exercise Backtrace::Read() against memory owned by another thread of this
// process.
TEST(libbacktrace, thread_read) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  pthread_t thread;
  thread_t thread_data = { 0, 0, 0, nullptr };
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadReadTest, &thread_data) == 0);

  // Wait until the thread has published its buffer address.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);

  RunReadTest(backtrace.get(), reinterpret_cast<uintptr_t>(thread_data.data));

  // Release the thread, then wait for it to free its memory and re-signal.
  android_atomic_acquire_store(0, &thread_data.state);

  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));
}
916
// Handshake globals for the process_read test: the forked child stores the
// address of its test buffer in g_addr and then raises g_ready; the parent
// reads both remotely via Backtrace::Read().
volatile uintptr_t g_ready = 0;
volatile uintptr_t g_addr = 0;
919
// Body of the forked child in the process_read test: build the same
// readable-page/PROT_NONE-page buffer as ThreadReadTest, publish its address
// through the shared globals, then sleep forever (parent kills us).
void ForkedReadTest() {
  // Create two map pages.
  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  uint8_t* memory;
  if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
    perror("Failed to allocate memory\n");
    exit(1);
  }

  // Mark the second page as not-readable.
  if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
    perror("Failed to mprotect memory\n");
    exit(1);
  }

  // Set up a simple pattern in memory (byte i holds i mod 256).
  for (size_t i = 0; i < pagesize; i++) {
    memory[i] = i;
  }

  g_addr = reinterpret_cast<uintptr_t>(memory);
  g_ready = 1;

  while (1) {
    usleep(US_PER_MSEC);
  }
}
947
// Exercise Backtrace::Read() against a remote process. The parent reads the
// child's g_ready/g_addr globals remotely (same executable image, so the
// addresses match) to find the test buffer.
TEST(libbacktrace, process_read) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ForkedReadTest();
    exit(0);
  }
  ASSERT_NE(-1, pid);

  bool test_executed = false;
  uint64_t start = NanoTime();
  while (1) {
    if (ptrace(PTRACE_ATTACH, pid, 0, 0) == 0) {
      WaitForStop(pid);

      std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));

      uintptr_t read_addr;
      size_t bytes_read = backtrace->Read(reinterpret_cast<uintptr_t>(&g_ready),
                                          reinterpret_cast<uint8_t*>(&read_addr),
                                          sizeof(uintptr_t));
      ASSERT_EQ(sizeof(uintptr_t), bytes_read);
      if (read_addr) {
        // The forked process is ready to be read.
        bytes_read = backtrace->Read(reinterpret_cast<uintptr_t>(&g_addr),
                                     reinterpret_cast<uint8_t*>(&read_addr),
                                     sizeof(uintptr_t));
        ASSERT_EQ(sizeof(uintptr_t), bytes_read);

        RunReadTest(backtrace.get(), read_addr);

        test_executed = true;
        break;
      }
      // Not ready yet: detach and retry.
      ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
    }
    // Give up after 5 seconds.
    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }
  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  ASSERT_TRUE(test_executed);
}
993
994#if defined(ENABLE_PSS_TESTS)
995#include "GetPss.h"
996
997#define MAX_LEAK_BYTES 32*1024UL
998
// Create/unwind/destroy backtraces for pid/tid many times and assert that
// the process PSS does not grow by more than MAX_LEAK_BYTES.
void CheckForLeak(pid_t pid, pid_t tid) {
  // Do a few runs to get the PSS stable.
  for (size_t i = 0; i < 100; i++) {
    Backtrace* backtrace = Backtrace::Create(pid, tid);
    ASSERT_TRUE(backtrace != nullptr);
    ASSERT_TRUE(backtrace->Unwind(0));
    delete backtrace;
  }
  size_t stable_pss = GetPssBytes();

  // Loop enough that even a small leak should be detectable.
  for (size_t i = 0; i < 4096; i++) {
    Backtrace* backtrace = Backtrace::Create(pid, tid);
    ASSERT_TRUE(backtrace != nullptr);
    ASSERT_TRUE(backtrace->Unwind(0));
    delete backtrace;
  }
  size_t new_pss = GetPssBytes();
  size_t abs_diff = (new_pss > stable_pss) ? new_pss - stable_pss : stable_pss - new_pss;
  // As long as the new pss is within a certain amount, consider everything okay.
  ASSERT_LE(abs_diff, MAX_LEAK_BYTES);
}
1021
// Leak check for in-process, current-thread unwinds.
TEST(libbacktrace, check_for_leak_local) {
  CheckForLeak(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD);
}
1025
// Leak check for in-process unwinds of a sibling thread.
TEST(libbacktrace, check_for_leak_local_thread) {
  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, nullptr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  CheckForLeak(BACKTRACE_CURRENT_PROCESS, thread_data.tid);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  // This thread was created joinable (default attrs), so join it.
  ASSERT_TRUE(pthread_join(thread, nullptr) == 0);
}
1041
// Leak check for remote (ptrace-based) unwinds of a stopped child.
TEST(libbacktrace, check_for_leak_remote) {
  pid_t pid;

  if ((pid = fork()) == 0) {
    // Spin forever; the parent kills us.
    while (true) {
    }
    _exit(0);
  }
  ASSERT_LT(0, pid);

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

  // Wait for the process to get to a stopping point.
  WaitForStop(pid);

  CheckForLeak(pid, BACKTRACE_CURRENT_THREAD);

  ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);
}
1064#endif
1065