1/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define _GNU_SOURCE 1
18#include <dirent.h>
19#include <dlfcn.h>
20#include <errno.h>
21#include <fcntl.h>
22#include <inttypes.h>
23#include <pthread.h>
24#include <signal.h>
25#include <stdint.h>
26#include <stdio.h>
27#include <stdlib.h>
28#include <string.h>
29#include <sys/ptrace.h>
30#include <sys/stat.h>
31#include <sys/types.h>
32#include <sys/wait.h>
33#include <time.h>
34#include <unistd.h>
35
36#include <algorithm>
37#include <list>
38#include <memory>
39#include <ostream>
40#include <string>
41#include <vector>
42
43#include <backtrace/Backtrace.h>
44#include <backtrace/BacktraceMap.h>
45
46#include <android-base/macros.h>
47#include <android-base/stringprintf.h>
48#include <android-base/unique_fd.h>
49#include <cutils/atomic.h>
50#include <cutils/threads.h>
51
52#include <gtest/gtest.h>
53
54// For the THREAD_SIGNAL definition.
55#include "BacktraceCurrent.h"
56#include "backtrace_testlib.h"
57#include "thread_utils.h"
58
// Number of microseconds per millisecond.
60#define US_PER_MSEC             1000
61
62// Number of nanoseconds in a second.
63#define NS_PER_SEC              1000000000ULL
64
65// Number of simultaneous dumping operations to perform.
66#define NUM_THREADS  40
67
68// Number of simultaneous threads running in our forked process.
69#define NUM_PTRACE_THREADS 5
70
// Per-thread bookkeeping shared between a test and the thread it spawns.
struct thread_t {
  pid_t tid;            // Set by the spawned thread itself via gettid().
  int32_t state;        // Handshake flag, accessed with android_atomic_* helpers.
  pthread_t threadId;   // Handle returned by pthread_create().
  void* data;           // Optional per-test payload (e.g. mapped pages in ThreadReadTest).
};
77
// State for one dumper thread in the simultaneous-dump tests.
struct dump_thread_t {
  thread_t thread;       // tid identifies the target thread to unwind.
  Backtrace* backtrace;  // Result of the unwind; owned by the test, freed after verify.
  int32_t* now;          // Shared start flag; dumpers spin until it becomes non-zero.
  int32_t done;          // Set to 1 by the dumper when its unwind has finished.
};
84
85static uint64_t NanoTime() {
86  struct timespec t = { 0, 0 };
87  clock_gettime(CLOCK_MONOTONIC, &t);
88  return static_cast<uint64_t>(t.tv_sec * NS_PER_SEC + t.tv_nsec);
89}
90
91static std::string DumpFrames(Backtrace* backtrace) {
92  if (backtrace->NumFrames() == 0) {
93    return "   No frames to dump.\n";
94  }
95
96  std::string frame;
97  for (size_t i = 0; i < backtrace->NumFrames(); i++) {
98    frame += "   " + backtrace->FormatFrameData(i) + '\n';
99  }
100  return frame;
101}
102
103static void WaitForStop(pid_t pid) {
104  uint64_t start = NanoTime();
105
106  siginfo_t si;
107  while (ptrace(PTRACE_GETSIGINFO, pid, 0, &si) < 0 && (errno == EINTR || errno == ESRCH)) {
108    if ((NanoTime() - start) > NS_PER_SEC) {
109      printf("The process did not get to a stopping point in 1 second.\n");
110      break;
111    }
112    usleep(US_PER_MSEC);
113  }
114}
115
116static void CreateRemoteProcess(pid_t* pid) {
117  if ((*pid = fork()) == 0) {
118    while (true)
119      ;
120    _exit(0);
121  }
122  ASSERT_NE(-1, *pid);
123
124  ASSERT_TRUE(ptrace(PTRACE_ATTACH, *pid, 0, 0) == 0);
125
126  // Wait for the process to get to a stopping point.
127  WaitForStop(*pid);
128}
129
130static void FinishRemoteProcess(pid_t pid) {
131  ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
132
133  kill(pid, SIGKILL);
134  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);
135}
136
137static bool ReadyLevelBacktrace(Backtrace* backtrace) {
138  // See if test_level_four is in the backtrace.
139  bool found = false;
140  for (Backtrace::const_iterator it = backtrace->begin(); it != backtrace->end(); ++it) {
141    if (it->func_name == "test_level_four") {
142      found = true;
143      break;
144    }
145  }
146
147  return found;
148}
149
// Verifies that the unwind contains the fixed call chain
// test_level_one -> test_level_two -> test_level_three -> test_level_four
// as four adjacent frames, in that order.
static void VerifyLevelDump(Backtrace* backtrace) {
  ASSERT_GT(backtrace->NumFrames(), static_cast<size_t>(0))
    << DumpFrames(backtrace);
  ASSERT_LT(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
    << DumpFrames(backtrace);

  // Look through the frames starting at the highest to find the
  // frame we want.
  size_t frame_num = 0;
  for (size_t i = backtrace->NumFrames()-1; i > 2; i--) {
    if (backtrace->GetFrame(i)->func_name == "test_level_one") {
      frame_num = i;
      break;
    }
  }
  // frame_num stays 0 if test_level_one was never found above index 2.
  ASSERT_LT(static_cast<size_t>(0), frame_num) << DumpFrames(backtrace);
  // Need at least three deeper frames below it for the checks that follow.
  ASSERT_LE(static_cast<size_t>(3), frame_num) << DumpFrames(backtrace);

  ASSERT_EQ(backtrace->GetFrame(frame_num)->func_name, "test_level_one")
    << DumpFrames(backtrace);
  ASSERT_EQ(backtrace->GetFrame(frame_num-1)->func_name, "test_level_two")
    << DumpFrames(backtrace);
  ASSERT_EQ(backtrace->GetFrame(frame_num-2)->func_name, "test_level_three")
    << DumpFrames(backtrace);
  ASSERT_EQ(backtrace->GetFrame(frame_num-3)->func_name, "test_level_four")
    << DumpFrames(backtrace);
}
177
178static void VerifyLevelBacktrace(void*) {
179  std::unique_ptr<Backtrace> backtrace(
180      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
181  ASSERT_TRUE(backtrace.get() != nullptr);
182  ASSERT_TRUE(backtrace->Unwind(0));
183  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
184
185  VerifyLevelDump(backtrace.get());
186}
187
188static bool ReadyMaxBacktrace(Backtrace* backtrace) {
189  return (backtrace->NumFrames() == MAX_BACKTRACE_FRAMES);
190}
191
192static void VerifyMaxDump(Backtrace* backtrace) {
193  ASSERT_EQ(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
194    << DumpFrames(backtrace);
195  // Verify that the last frame is our recursive call.
196  ASSERT_EQ(backtrace->GetFrame(MAX_BACKTRACE_FRAMES-1)->func_name, "test_recursive_call")
197    << DumpFrames(backtrace);
198}
199
200static void VerifyMaxBacktrace(void*) {
201  std::unique_ptr<Backtrace> backtrace(
202      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
203  ASSERT_TRUE(backtrace.get() != nullptr);
204  ASSERT_TRUE(backtrace->Unwind(0));
205  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
206
207  VerifyMaxDump(backtrace.get());
208}
209
210static void ThreadSetState(void* data) {
211  thread_t* thread = reinterpret_cast<thread_t*>(data);
212  android_atomic_acquire_store(1, &thread->state);
213  volatile int i = 0;
214  while (thread->state) {
215    i++;
216  }
217}
218
219static bool WaitForNonZero(int32_t* value, uint64_t seconds) {
220  uint64_t start = NanoTime();
221  do {
222    if (android_atomic_acquire_load(value)) {
223      return true;
224    }
225  } while ((NanoTime() - start) < seconds * NS_PER_SEC);
226  return false;
227}
228
229TEST(libbacktrace, local_no_unwind_frames) {
230  // Verify that a local unwind does not include any frames within
231  // libunwind or libbacktrace.
232  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), getpid()));
233  ASSERT_TRUE(backtrace.get() != nullptr);
234  ASSERT_TRUE(backtrace->Unwind(0));
235  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
236
237  ASSERT_TRUE(backtrace->NumFrames() != 0);
238  for (const auto& frame : *backtrace ) {
239    if (BacktraceMap::IsValid(frame.map)) {
240      const std::string name = basename(frame.map.name.c_str());
241      ASSERT_TRUE(name != "libunwind.so" && name != "libbacktrace.so")
242        << DumpFrames(backtrace.get());
243    }
244    break;
245  }
246}
247
// Runs the fixed call chain in-process; VerifyLevelBacktrace unwinds from
// the deepest level.
TEST(libbacktrace, local_trace) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelBacktrace, nullptr), 0);
}
251
252static void VerifyIgnoreFrames(Backtrace* bt_all, Backtrace* bt_ign1, Backtrace* bt_ign2,
253                               const char* cur_proc) {
254  EXPECT_EQ(bt_all->NumFrames(), bt_ign1->NumFrames() + 1)
255    << "All backtrace:\n" << DumpFrames(bt_all) << "Ignore 1 backtrace:\n" << DumpFrames(bt_ign1);
256  EXPECT_EQ(bt_all->NumFrames(), bt_ign2->NumFrames() + 2)
257    << "All backtrace:\n" << DumpFrames(bt_all) << "Ignore 2 backtrace:\n" << DumpFrames(bt_ign2);
258
259  // Check all of the frames are the same > the current frame.
260  bool check = (cur_proc == nullptr);
261  for (size_t i = 0; i < bt_ign2->NumFrames(); i++) {
262    if (check) {
263      EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_ign1->GetFrame(i+1)->pc);
264      EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_ign1->GetFrame(i+1)->sp);
265      EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_ign1->GetFrame(i+1)->stack_size);
266
267      EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_all->GetFrame(i+2)->pc);
268      EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_all->GetFrame(i+2)->sp);
269      EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_all->GetFrame(i+2)->stack_size);
270    }
271    if (!check && bt_ign2->GetFrame(i)->func_name == cur_proc) {
272      check = true;
273    }
274  }
275}
276
277static void VerifyLevelIgnoreFrames(void*) {
278  std::unique_ptr<Backtrace> all(
279      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
280  ASSERT_TRUE(all.get() != nullptr);
281  ASSERT_TRUE(all->Unwind(0));
282  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, all->GetError());
283
284  std::unique_ptr<Backtrace> ign1(
285      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
286  ASSERT_TRUE(ign1.get() != nullptr);
287  ASSERT_TRUE(ign1->Unwind(1));
288  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign1->GetError());
289
290  std::unique_ptr<Backtrace> ign2(
291      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
292  ASSERT_TRUE(ign2.get() != nullptr);
293  ASSERT_TRUE(ign2->Unwind(2));
294  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign2->GetError());
295
296  VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), "VerifyLevelIgnoreFrames");
297}
298
// Exercises Unwind's num_ignore_frames parameter for a local unwind.
TEST(libbacktrace, local_trace_ignore_frames) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelIgnoreFrames, nullptr), 0);
}
302
// Recurses past the frame limit to check the unwind is clamped at
// MAX_BACKTRACE_FRAMES.
TEST(libbacktrace, local_max_trace) {
  ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxBacktrace, nullptr), 0);
}
306
// Repeatedly ptrace-attaches to pid/tid, unwinds it, and runs VerifyFunc
// once ReadyFunc says the target has reached the interesting call chain.
// Retries for up to 5 seconds because the target may not have descended
// into the test functions yet when we attach.
static void VerifyProcTest(pid_t pid, pid_t tid, bool share_map, bool (*ReadyFunc)(Backtrace*),
                           void (*VerifyFunc)(Backtrace*)) {
  // A negative tid means "whole process": attach to the pid itself.
  pid_t ptrace_tid;
  if (tid < 0) {
    ptrace_tid = pid;
  } else {
    ptrace_tid = tid;
  }
  uint64_t start = NanoTime();
  bool verified = false;
  std::string last_dump;
  do {
    usleep(US_PER_MSEC);
    if (ptrace(PTRACE_ATTACH, ptrace_tid, 0, 0) == 0) {
      // Wait for the process to get to a stopping point.
      WaitForStop(ptrace_tid);

      // Optionally pre-build the map so Backtrace::Create shares it.
      std::unique_ptr<BacktraceMap> map;
      if (share_map) {
        map.reset(BacktraceMap::Create(pid));
      }
      std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, tid, map.get()));
      ASSERT_TRUE(backtrace.get() != nullptr);
      ASSERT_TRUE(backtrace->Unwind(0));
      ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
      if (ReadyFunc(backtrace.get())) {
        VerifyFunc(backtrace.get());
        verified = true;
      } else {
        // Keep the most recent failed dump for the timeout message below.
        last_dump = DumpFrames(backtrace.get());
      }

      // Detach so the target can keep running before the next attempt.
      ASSERT_TRUE(ptrace(PTRACE_DETACH, ptrace_tid, 0, 0) == 0);
    }
    // If 5 seconds have passed, then we are done.
  } while (!verified && (NanoTime() - start) <= 5 * NS_PER_SEC);
  ASSERT_TRUE(verified) << "Last backtrace:\n" << last_dump;
}
345
// Forks a child running the fixed call chain and unwinds it remotely
// via ptrace.
TEST(libbacktrace, ptrace_trace) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    // Child: loops in the test chain until killed (nullptr callback).
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyLevelBacktrace, VerifyLevelDump);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
358
// Same as ptrace_trace, but passes a pre-created BacktraceMap to
// Backtrace::Create (share_map == true).
TEST(libbacktrace, ptrace_trace_shared_map) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }

  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, true, ReadyLevelBacktrace, VerifyLevelDump);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
372
// Remote-unwind version of the deep-recursion frame-limit test.
TEST(libbacktrace, ptrace_max_trace) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyMaxBacktrace, VerifyMaxDump);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
385
386static void VerifyProcessIgnoreFrames(Backtrace* bt_all) {
387  std::unique_ptr<Backtrace> ign1(Backtrace::Create(bt_all->Pid(), BACKTRACE_CURRENT_THREAD));
388  ASSERT_TRUE(ign1.get() != nullptr);
389  ASSERT_TRUE(ign1->Unwind(1));
390  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign1->GetError());
391
392  std::unique_ptr<Backtrace> ign2(Backtrace::Create(bt_all->Pid(), BACKTRACE_CURRENT_THREAD));
393  ASSERT_TRUE(ign2.get() != nullptr);
394  ASSERT_TRUE(ign2->Unwind(2));
395  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign2->GetError());
396
397  VerifyIgnoreFrames(bt_all, ign1.get(), ign2.get(), nullptr);
398}
399
// Remote-unwind version of the ignore-frames test.
TEST(libbacktrace, ptrace_ignore_frames) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyLevelBacktrace, VerifyProcessIgnoreFrames);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}
412
413// Create a process with multiple threads and dump all of the threads.
414static void* PtraceThreadLevelRun(void*) {
415  EXPECT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
416  return nullptr;
417}
418
419static void GetThreads(pid_t pid, std::vector<pid_t>* threads) {
420  // Get the list of tasks.
421  char task_path[128];
422  snprintf(task_path, sizeof(task_path), "/proc/%d/task", pid);
423
424  std::unique_ptr<DIR, decltype(&closedir)> tasks_dir(opendir(task_path), closedir);
425  ASSERT_TRUE(tasks_dir != nullptr);
426  struct dirent* entry;
427  while ((entry = readdir(tasks_dir.get())) != nullptr) {
428    char* end;
429    pid_t tid = strtoul(entry->d_name, &end, 10);
430    if (*end == '\0') {
431      threads->push_back(tid);
432    }
433  }
434}
435
// Forks a child that spawns NUM_PTRACE_THREADS extra threads, then unwinds
// each thread of the child remotely.
TEST(libbacktrace, ptrace_threads) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    for (size_t i = 0; i < NUM_PTRACE_THREADS; i++) {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

      pthread_t thread;
      ASSERT_TRUE(pthread_create(&thread, &attr, PtraceThreadLevelRun, nullptr) == 0);
    }
    // The child's main thread runs the same chain as its workers.
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }

  // Check to see that all of the threads are running before unwinding.
  std::vector<pid_t> threads;
  uint64_t start = NanoTime();
  do {
    usleep(US_PER_MSEC);
    threads.clear();
    GetThreads(pid, &threads);
  } while ((threads.size() != NUM_PTRACE_THREADS + 1) &&
      ((NanoTime() - start) <= 5 * NS_PER_SEC));
  ASSERT_EQ(threads.size(), static_cast<size_t>(NUM_PTRACE_THREADS + 1));

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);
  WaitForStop(pid);
  // NOTE(review): iterator spelled as vector<int>; relies on pid_t being int.
  for (std::vector<int>::const_iterator it = threads.begin(); it != threads.end(); ++it) {
    // Skip the current forked process, we only care about the threads.
    if (pid == *it) {
      continue;
    }
    VerifyProcTest(pid, *it, false, ReadyLevelBacktrace, VerifyLevelDump);
  }

  FinishRemoteProcess(pid);
}
474
475void VerifyLevelThread(void*) {
476  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), gettid()));
477  ASSERT_TRUE(backtrace.get() != nullptr);
478  ASSERT_TRUE(backtrace->Unwind(0));
479  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
480
481  VerifyLevelDump(backtrace.get());
482}
483
// Unwind the current thread through the thread-targeted code path.
TEST(libbacktrace, thread_current_level) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelThread, nullptr), 0);
}
487
488static void VerifyMaxThread(void*) {
489  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), gettid()));
490  ASSERT_TRUE(backtrace.get() != nullptr);
491  ASSERT_TRUE(backtrace->Unwind(0));
492  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
493
494  VerifyMaxDump(backtrace.get());
495}
496
// Frame-limit test through the thread-targeted code path.
TEST(libbacktrace, thread_current_max) {
  ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxThread, nullptr), 0);
}
500
501static void* ThreadLevelRun(void* data) {
502  thread_t* thread = reinterpret_cast<thread_t*>(data);
503
504  thread->tid = gettid();
505  EXPECT_NE(test_level_one(1, 2, 3, 4, ThreadSetState, data), 0);
506  return nullptr;
507}
508
// Unwinds another thread of this process and verifies that the signal
// handler installed to do so is removed again afterwards.
TEST(libbacktrace, thread_level_trace) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  // Make sure that the thread signal used is not visible when compiled for
  // the target.
#if !defined(__GLIBC__)
  ASSERT_LT(THREAD_SIGNAL, SIGRTMIN);
#endif

  // Save the current signal action and make sure it is restored afterwards.
  struct sigaction cur_action;
  ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &cur_action) == 0);

  // Unwinding another thread uses THREAD_SIGNAL internally.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());

  VerifyLevelDump(backtrace.get());

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  // Verify that the old action was restored.
  struct sigaction new_action;
  ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &new_action) == 0);
  EXPECT_EQ(cur_action.sa_sigaction, new_action.sa_sigaction);
  // The SA_RESTORER flag gets set behind our back, so a direct comparison
  // doesn't work unless we mask the value off. Mips doesn't have this
  // flag, so skip this on that platform.
#if defined(SA_RESTORER)
  cur_action.sa_flags &= ~SA_RESTORER;
  new_action.sa_flags &= ~SA_RESTORER;
#elif defined(__GLIBC__)
  // Our host compiler doesn't appear to define this flag for some reason.
  cur_action.sa_flags &= ~0x04000000;
  new_action.sa_flags &= ~0x04000000;
#endif
  EXPECT_EQ(cur_action.sa_flags, new_action.sa_flags);
}
558
// Ignore-frames test against a different (spinning) thread of this process.
TEST(libbacktrace, thread_ignore_frames) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  std::unique_ptr<Backtrace> all(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(all.get() != nullptr);
  ASSERT_TRUE(all->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, all->GetError());

  std::unique_ptr<Backtrace> ign1(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(ign1.get() != nullptr);
  ASSERT_TRUE(ign1->Unwind(1));
  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign1->GetError());

  std::unique_ptr<Backtrace> ign2(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(ign2.get() != nullptr);
  ASSERT_TRUE(ign2->Unwind(2));
  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, ign2->GetError());

  // The target thread keeps spinning, so all three unwinds see it in the
  // same place; no cur_proc anchor is needed.
  VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), nullptr);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}
591
592static void* ThreadMaxRun(void* data) {
593  thread_t* thread = reinterpret_cast<thread_t*>(data);
594
595  thread->tid = gettid();
596  EXPECT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, ThreadSetState, data), 0);
597  return nullptr;
598}
599
// Frame-limit test against a different thread of this process.
TEST(libbacktrace, thread_max_trace) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadMaxRun, &thread_data) == 0);

  // Wait for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());

  VerifyMaxDump(backtrace.get());

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}
622
623static void* ThreadDump(void* data) {
624  dump_thread_t* dump = reinterpret_cast<dump_thread_t*>(data);
625  while (true) {
626    if (android_atomic_acquire_load(dump->now)) {
627      break;
628    }
629  }
630
631  // The status of the actual unwind will be checked elsewhere.
632  dump->backtrace = Backtrace::Create(getpid(), dump->thread.tid);
633  dump->backtrace->Unwind(0);
634
635  android_atomic_acquire_store(1, &dump->done);
636
637  return nullptr;
638}
639
// Launches NUM_THREADS spinning runner threads and NUM_THREADS dumper
// threads, releases all dumpers simultaneously, and verifies every unwind.
TEST(libbacktrace, thread_multiple_dump) {
  // Dump NUM_THREADS simultaneously.
  std::vector<thread_t> runners(NUM_THREADS);
  std::vector<dump_thread_t> dumpers(NUM_THREADS);

  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  for (size_t i = 0; i < NUM_THREADS; i++) {
    // Launch the runners, they will spin in hard loops doing nothing.
    runners[i].tid = 0;
    runners[i].state = 0;
    ASSERT_TRUE(pthread_create(&runners[i].threadId, &attr, ThreadMaxRun, &runners[i]) == 0);
  }

  // Wait for tids to be set.
  for (std::vector<thread_t>::iterator it = runners.begin(); it != runners.end(); ++it) {
    ASSERT_TRUE(WaitForNonZero(&it->state, 30));
  }

  // Start all of the dumpers at once, they will spin until they are signalled
  // to begin their dump run.
  int32_t dump_now = 0;
  for (size_t i = 0; i < NUM_THREADS; i++) {
    dumpers[i].thread.tid = runners[i].tid;
    dumpers[i].thread.state = 0;
    dumpers[i].done = 0;
    dumpers[i].now = &dump_now;

    ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0);
  }

  // Start all of the dumpers going at once.
  android_atomic_acquire_store(1, &dump_now);

  for (size_t i = 0; i < NUM_THREADS; i++) {
    ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 30));

    // Tell the runner thread to exit its infinite loop.
    android_atomic_acquire_store(0, &runners[i].state);

    ASSERT_TRUE(dumpers[i].backtrace != nullptr);
    VerifyMaxDump(dumpers[i].backtrace);

    // The Backtrace was allocated by ThreadDump; release it here.
    delete dumpers[i].backtrace;
    dumpers[i].backtrace = nullptr;
  }
}
688
// Same as thread_multiple_dump, but every dumper targets the SAME runner
// thread, exercising concurrent unwinds of one thread.
TEST(libbacktrace, thread_multiple_dump_same_thread) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  thread_t runner;
  runner.tid = 0;
  runner.state = 0;
  ASSERT_TRUE(pthread_create(&runner.threadId, &attr, ThreadMaxRun, &runner) == 0);

  // Wait for tids to be set.
  ASSERT_TRUE(WaitForNonZero(&runner.state, 30));

  // Start all of the dumpers at once, they will spin until they are signalled
  // to begin their dump run.
  int32_t dump_now = 0;
  // Dump the same thread NUM_THREADS simultaneously.
  std::vector<dump_thread_t> dumpers(NUM_THREADS);
  for (size_t i = 0; i < NUM_THREADS; i++) {
    dumpers[i].thread.tid = runner.tid;
    dumpers[i].thread.state = 0;
    dumpers[i].done = 0;
    dumpers[i].now = &dump_now;

    ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0);
  }

  // Start all of the dumpers going at once.
  android_atomic_acquire_store(1, &dump_now);

  for (size_t i = 0; i < NUM_THREADS; i++) {
    ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 30));

    ASSERT_TRUE(dumpers[i].backtrace != nullptr);
    VerifyMaxDump(dumpers[i].backtrace);

    // The Backtrace was allocated by ThreadDump; release it here.
    delete dumpers[i].backtrace;
    dumpers[i].backtrace = nullptr;
  }

  // Tell the runner thread to exit its infinite loop.
  android_atomic_acquire_store(0, &runner.state);
}
731
732// This test is for UnwindMaps that should share the same map cursor when
733// multiple maps are created for the current process at the same time.
734TEST(libbacktrace, simultaneous_maps) {
735  BacktraceMap* map1 = BacktraceMap::Create(getpid());
736  BacktraceMap* map2 = BacktraceMap::Create(getpid());
737  BacktraceMap* map3 = BacktraceMap::Create(getpid());
738
739  Backtrace* back1 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map1);
740  ASSERT_TRUE(back1 != nullptr);
741  EXPECT_TRUE(back1->Unwind(0));
742  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, back1->GetError());
743  delete back1;
744  delete map1;
745
746  Backtrace* back2 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map2);
747  ASSERT_TRUE(back2 != nullptr);
748  EXPECT_TRUE(back2->Unwind(0));
749  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, back2->GetError());
750  delete back2;
751  delete map2;
752
753  Backtrace* back3 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map3);
754  ASSERT_TRUE(back3 != nullptr);
755  EXPECT_TRUE(back3->Unwind(0));
756  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, back3->GetError());
757  delete back3;
758  delete map3;
759}
760
761TEST(libbacktrace, fillin_erases) {
762  BacktraceMap* back_map = BacktraceMap::Create(getpid());
763
764  backtrace_map_t map;
765
766  map.start = 1;
767  map.end = 3;
768  map.flags = 1;
769  map.name = "Initialized";
770  back_map->FillIn(0, &map);
771  delete back_map;
772
773  ASSERT_FALSE(BacktraceMap::IsValid(map));
774  ASSERT_EQ(static_cast<uintptr_t>(0), map.start);
775  ASSERT_EQ(static_cast<uintptr_t>(0), map.end);
776  ASSERT_EQ(0, map.flags);
777  ASSERT_EQ("", map.name);
778}
779
// Checks FormatFrameData output for a synthetic frame across the cases:
// no map, anonymous map, bracketed map name, named map, function name,
// function offset, load bias, and map offset. Expected strings differ by
// pointer width, hence the __LP64__ branches.
TEST(libbacktrace, format_test) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);

  backtrace_frame_data_t frame;
  frame.num = 1;
  frame.pc = 2;
  frame.rel_pc = 2;
  frame.sp = 0;
  frame.stack_size = 0;
  frame.func_offset = 0;

  // Check no map set.
  frame.num = 1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000002  <unknown>",
#else
  EXPECT_EQ("#01 pc 00000002  <unknown>",
#endif
            backtrace->FormatFrameData(&frame));

  // Check map name empty, but exists.
  frame.pc = 0xb0020;
  frame.rel_pc = 0x20;
  frame.map.start = 0xb0000;
  frame.map.end = 0xbffff;
  frame.map.load_bias = 0;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000020  <anonymous:00000000000b0000>",
#else
  EXPECT_EQ("#01 pc 00000020  <anonymous:000b0000>",
#endif
            backtrace->FormatFrameData(&frame));

  // Check map name begins with a [.
  frame.pc = 0xc0020;
  frame.map.start = 0xc0000;
  frame.map.end = 0xcffff;
  frame.map.load_bias = 0;
  frame.map.name = "[anon:thread signal stack]";
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000020  [anon:thread signal stack:00000000000c0000]",
#else
  EXPECT_EQ("#01 pc 00000020  [anon:thread signal stack:000c0000]",
#endif
            backtrace->FormatFrameData(&frame));

  // Check relative pc is set and map name is set.
  frame.pc = 0x12345679;
  frame.rel_pc = 0x12345678;
  frame.map.name = "MapFake";
  frame.map.start =  1;
  frame.map.end =  1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, but no func offset.
  frame.func_name = "ProcFake";
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake (ProcFake)",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake (ProcFake)",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, and func offset is non-zero.
  frame.func_offset = 645;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake (ProcFake+645)",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake (ProcFake+645)",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, func offset is non-zero, and load_bias is non-zero.
  frame.rel_pc = 0x123456dc;
  frame.func_offset = 645;
  frame.map.load_bias = 100;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 00000000123456dc  MapFake (ProcFake+645)",
#else
  EXPECT_EQ("#01 pc 123456dc  MapFake (ProcFake+645)",
#endif
            backtrace->FormatFrameData(&frame));

  // Check a non-zero map offset.
  frame.map.offset = 0x1000;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 00000000123456dc  MapFake (offset 0x1000) (ProcFake+645)",
#else
  EXPECT_EQ("#01 pc 123456dc  MapFake (offset 0x1000) (ProcFake+645)",
#endif
            backtrace->FormatFrameData(&frame));
}
878
// Minimal (start, end) record parsed from a /proc/<pid>/maps line.
struct map_test_t {
  uintptr_t start;
  uintptr_t end;
};

// Strict weak ordering by ascending start address, for std::sort.
static bool map_sort(map_test_t lhs, map_test_t rhs) {
  return lhs.start < rhs.start;
}
885
886static void VerifyMap(pid_t pid) {
887  char buffer[4096];
888  snprintf(buffer, sizeof(buffer), "/proc/%d/maps", pid);
889
890  FILE* map_file = fopen(buffer, "r");
891  ASSERT_TRUE(map_file != nullptr);
892  std::vector<map_test_t> test_maps;
893  while (fgets(buffer, sizeof(buffer), map_file)) {
894    map_test_t map;
895    ASSERT_EQ(2, sscanf(buffer, "%" SCNxPTR "-%" SCNxPTR " ", &map.start, &map.end));
896    test_maps.push_back(map);
897  }
898  fclose(map_file);
899  std::sort(test_maps.begin(), test_maps.end(), map_sort);
900
901  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(pid));
902
903  // Basic test that verifies that the map is in the expected order.
904  ScopedBacktraceMapIteratorLock lock(map.get());
905  std::vector<map_test_t>::const_iterator test_it = test_maps.begin();
906  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
907    ASSERT_TRUE(test_it != test_maps.end());
908    ASSERT_EQ(test_it->start, it->start);
909    ASSERT_EQ(test_it->end, it->end);
910    ++test_it;
911  }
912  ASSERT_TRUE(test_it == test_maps.end());
913}
914
// Compares BacktraceMap output against /proc for a ptrace-stopped child.
TEST(libbacktrace, verify_map_remote) {
  pid_t pid;
  CreateRemoteProcess(&pid);

  // The maps should match exactly since the forked process has been paused.
  VerifyMap(pid);

  FinishRemoteProcess(pid);
}
924
// Fills |memory| with the low byte of each index, substituting 23 for any
// zero byte so a NUL can later serve as an overread sentinel.
static void InitMemory(uint8_t* memory, size_t bytes) {
  for (size_t i = 0; i < bytes; i++) {
    uint8_t value = static_cast<uint8_t>(i);
    // Don't use '\0' in our data so we can verify that an overread doesn't
    // occur by using a '\0' as the character after the read data.
    memory[i] = (value == '\0') ? 23 : value;
  }
}
935
// Thread body for the thread_read test: allocates two adjacent pages,
// makes the second unreadable, fills the first with a known pattern, then
// publishes the buffer via |thread_data| and spins until the main thread
// has finished reading it. Returns (void*)-1 on any setup failure.
static void* ThreadReadTest(void* data) {
  thread_t* thread_data = reinterpret_cast<thread_t*>(data);

  thread_data->tid = gettid();

  // Create two map pages.
  // Mark the second page as not-readable.
  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  uint8_t* memory;
  if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
    return reinterpret_cast<void*>(-1);
  }

  if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
    return reinterpret_cast<void*>(-1);
  }

  // Set up a simple pattern in memory.
  InitMemory(memory, pagesize);

  thread_data->data = memory;

  // Tell the caller it's okay to start reading memory.
  android_atomic_acquire_store(1, &thread_data->state);

  // Loop waiting for the caller to finish reading the memory.
  while (thread_data->state) {
  }

  // Re-enable read-write on the page so that we don't crash if we try
  // and access data on this page when freeing the memory.
  if (mprotect(&memory[pagesize], pagesize, PROT_READ | PROT_WRITE) != 0) {
    return reinterpret_cast<void*>(-1);
  }
  free(memory);

  // Signal once more so the caller knows the memory has been freed.
  android_atomic_acquire_store(1, &thread_data->state);

  return nullptr;
}
976
977static void RunReadTest(Backtrace* backtrace, uintptr_t read_addr) {
978  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
979
980  // Create a page of data to use to do quick compares.
981  uint8_t* expected = new uint8_t[pagesize];
982  InitMemory(expected, pagesize);
983
984  uint8_t* data = new uint8_t[2*pagesize];
985  // Verify that we can only read one page worth of data.
986  size_t bytes_read = backtrace->Read(read_addr, data, 2 * pagesize);
987  ASSERT_EQ(pagesize, bytes_read);
988  ASSERT_TRUE(memcmp(data, expected, pagesize) == 0);
989
990  // Verify unaligned reads.
991  for (size_t i = 1; i < sizeof(word_t); i++) {
992    bytes_read = backtrace->Read(read_addr + i, data, 2 * sizeof(word_t));
993    ASSERT_EQ(2 * sizeof(word_t), bytes_read);
994    ASSERT_TRUE(memcmp(data, &expected[i], 2 * sizeof(word_t)) == 0)
995        << "Offset at " << i << " failed";
996  }
997
998  // Verify small unaligned reads.
999  for (size_t i = 1; i < sizeof(word_t); i++) {
1000    for (size_t j = 1; j < sizeof(word_t); j++) {
1001      // Set one byte past what we expect to read, to guarantee we don't overread.
1002      data[j] = '\0';
1003      bytes_read = backtrace->Read(read_addr + i, data, j);
1004      ASSERT_EQ(j, bytes_read);
1005      ASSERT_TRUE(memcmp(data, &expected[i], j) == 0)
1006          << "Offset at " << i << " length " << j << " miscompared";
1007      ASSERT_EQ('\0', data[j])
1008          << "Offset at " << i << " length " << j << " wrote too much data";
1009    }
1010  }
1011  delete[] data;
1012  delete[] expected;
1013}
1014
// Start a detached sibling thread that owns a page of patterned memory,
// then read that memory through Backtrace::Read and validate page-boundary
// and unaligned access behavior (see RunReadTest).
TEST(libbacktrace, thread_read) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  pthread_t thread;
  thread_t thread_data = { 0, 0, 0, nullptr };
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadReadTest, &thread_data) == 0);

  // Wait for the thread to publish its tid and buffer address.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);

  RunReadTest(backtrace.get(), reinterpret_cast<uintptr_t>(thread_data.data));

  // Tell the thread it may free the memory and exit.
  android_atomic_acquire_store(0, &thread_data.state);

  // Wait for the thread to confirm the memory has been freed.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));
}
1034
// Shared between forked children and the parent tests: a child stores its
// test buffer address in g_addr and then sets g_ready; the parent reads
// both out of the child's memory via Backtrace::Read (the globals have the
// same addresses in both processes because the child is a fork).
volatile uintptr_t g_ready = 0;
volatile uintptr_t g_addr = 0;
1037
// Child-process body for the process_read test: like ThreadReadTest, but
// publishes the buffer address through g_addr/g_ready and then spins until
// the parent kills the process.
static void ForkedReadTest() {
  // Create two map pages.
  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  uint8_t* memory;
  if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
    perror("Failed to allocate memory\n");
    exit(1);
  }

  // Mark the second page as not-readable.
  if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
    perror("Failed to mprotect memory\n");
    exit(1);
  }

  // Set up a simple pattern in memory.
  InitMemory(memory, pagesize);

  g_addr = reinterpret_cast<uintptr_t>(memory);
  g_ready = 1;

  // Spin forever; the parent SIGKILLs this process when done.
  while (1) {
    usleep(US_PER_MSEC);
  }
}
1063
// Fork a child that publishes a patterned buffer via g_ready/g_addr, then
// ptrace-attach and validate remote reads of that buffer through
// Backtrace::Read (see RunReadTest). Polls with detach/re-attach until the
// child is ready, giving up after 5 seconds.
TEST(libbacktrace, process_read) {
  g_ready = 0;
  pid_t pid;
  if ((pid = fork()) == 0) {
    ForkedReadTest();
    exit(0);
  }
  ASSERT_NE(-1, pid);

  bool test_executed = false;
  uint64_t start = NanoTime();
  while (1) {
    if (ptrace(PTRACE_ATTACH, pid, 0, 0) == 0) {
      WaitForStop(pid);

      std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));
      ASSERT_TRUE(backtrace.get() != nullptr);

      // Read the child's copy of g_ready; &g_ready is valid in the child
      // too since it is a fork of this process.
      uintptr_t read_addr;
      size_t bytes_read = backtrace->Read(reinterpret_cast<uintptr_t>(&g_ready),
                                          reinterpret_cast<uint8_t*>(&read_addr),
                                          sizeof(uintptr_t));
      ASSERT_EQ(sizeof(uintptr_t), bytes_read);
      if (read_addr) {
        // The forked process is ready to be read.
        bytes_read = backtrace->Read(reinterpret_cast<uintptr_t>(&g_addr),
                                     reinterpret_cast<uint8_t*>(&read_addr),
                                     sizeof(uintptr_t));
        ASSERT_EQ(sizeof(uintptr_t), bytes_read);

        RunReadTest(backtrace.get(), read_addr);

        test_executed = true;
        break;
      }
      ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
    }
    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }
  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  ASSERT_TRUE(test_executed);
}
1111
1112static void VerifyFunctionsFound(const std::vector<std::string>& found_functions) {
1113  // We expect to find these functions in libbacktrace_test. If we don't
1114  // find them, that's a bug in the memory read handling code in libunwind.
1115  std::list<std::string> expected_functions;
1116  expected_functions.push_back("test_recursive_call");
1117  expected_functions.push_back("test_level_one");
1118  expected_functions.push_back("test_level_two");
1119  expected_functions.push_back("test_level_three");
1120  expected_functions.push_back("test_level_four");
1121  for (const auto& found_function : found_functions) {
1122    for (const auto& expected_function : expected_functions) {
1123      if (found_function == expected_function) {
1124        expected_functions.remove(found_function);
1125        break;
1126      }
1127    }
1128  }
1129  ASSERT_TRUE(expected_functions.empty()) << "Not all functions found in shared library.";
1130}
1131
1132static const char* CopySharedLibrary() {
1133#if defined(__LP64__)
1134  const char* lib_name = "lib64";
1135#else
1136  const char* lib_name = "lib";
1137#endif
1138
1139#if defined(__BIONIC__)
1140  const char* tmp_so_name = "/data/local/tmp/libbacktrace_test.so";
1141  std::string cp_cmd = android::base::StringPrintf("cp /system/%s/libbacktrace_test.so %s",
1142                                                   lib_name, tmp_so_name);
1143#else
1144  const char* tmp_so_name = "/tmp/libbacktrace_test.so";
1145  if (getenv("ANDROID_HOST_OUT") == NULL) {
1146    fprintf(stderr, "ANDROID_HOST_OUT not set, make sure you run lunch.");
1147    return nullptr;
1148  }
1149  std::string cp_cmd = android::base::StringPrintf("cp %s/%s/libbacktrace_test.so %s",
1150                                                   getenv("ANDROID_HOST_OUT"), lib_name,
1151                                                   tmp_so_name);
1152#endif
1153
1154  // Copy the shared so to a tempory directory.
1155  system(cp_cmd.c_str());
1156
1157  return tmp_so_name;
1158}
1159
// Map the copied test library into memory and unlink the backing file so
// the ELF can no longer be read from disk. GetFunctionName must still
// locate the test functions from the in-memory mapping.
TEST(libbacktrace, check_unreadable_elf_local) {
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);

  struct stat buf;
  ASSERT_TRUE(stat(tmp_so_name, &buf) != -1);
  uintptr_t map_size = buf.st_size;

  int fd = open(tmp_so_name, O_RDONLY);
  ASSERT_TRUE(fd != -1);

  void* map = mmap(NULL, map_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
  ASSERT_TRUE(map != MAP_FAILED);
  close(fd);
  // After this unlink the mapped ELF has no readable file behind it.
  ASSERT_TRUE(unlink(tmp_so_name) != -1);

  std::vector<std::string> found_functions;
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS,
                                                         BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);

  // Needed before GetFunctionName will work.
  backtrace->Unwind(0);

  // Loop through the entire map, and get every function we can find.
  map_size += reinterpret_cast<uintptr_t>(map);
  std::string last_func;
  for (uintptr_t read_addr = reinterpret_cast<uintptr_t>(map);
       read_addr < map_size; read_addr += 4) {
    uintptr_t offset;
    std::string func_name = backtrace->GetFunctionName(read_addr, &offset);
    if (!func_name.empty() && last_func != func_name) {
      found_functions.push_back(func_name);
    }
    last_func = func_name;
  }

  // map_size was advanced to the end address above, so subtract the base
  // back out to get the length for munmap.
  ASSERT_TRUE(munmap(map, map_size - reinterpret_cast<uintptr_t>(map)) == 0);

  VerifyFunctionsFound(found_functions);
}
1201
// Remote variant of check_unreadable_elf_local: a forked child maps the
// copied library and unlinks the file, then the parent attaches with
// ptrace and scans the child's mapping for the expected function names.
// Gives up after 5 seconds.
TEST(libbacktrace, check_unreadable_elf_remote) {
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);

  g_ready = 0;

  struct stat buf;
  ASSERT_TRUE(stat(tmp_so_name, &buf) != -1);
  uintptr_t map_size = buf.st_size;

  pid_t pid;
  if ((pid = fork()) == 0) {
    int fd = open(tmp_so_name, O_RDONLY);
    if (fd == -1) {
      fprintf(stderr, "Failed to open file %s: %s\n", tmp_so_name, strerror(errno));
      unlink(tmp_so_name);
      exit(0);
    }

    void* map = mmap(NULL, map_size, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
    if (map == MAP_FAILED) {
      fprintf(stderr, "Failed to map in memory: %s\n", strerror(errno));
      unlink(tmp_so_name);
      exit(0);
    }
    close(fd);
    if (unlink(tmp_so_name) == -1) {
      fprintf(stderr, "Failed to unlink: %s\n", strerror(errno));
      exit(0);
    }

    // Publish the mapping address through the shared globals (valid in
    // both processes since this is a fork), then spin until killed.
    g_addr = reinterpret_cast<uintptr_t>(map);
    g_ready = 1;
    while (true) {
      usleep(US_PER_MSEC);
    }
    exit(0);
  }
  ASSERT_TRUE(pid > 0);

  std::vector<std::string> found_functions;
  uint64_t start = NanoTime();
  while (true) {
    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    // Wait for the process to get to a stopping point.
    WaitForStop(pid);

    std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, BACKTRACE_CURRENT_THREAD));
    ASSERT_TRUE(backtrace.get() != nullptr);

    uintptr_t read_addr;
    ASSERT_EQ(sizeof(uintptr_t), backtrace->Read(reinterpret_cast<uintptr_t>(&g_ready), reinterpret_cast<uint8_t*>(&read_addr), sizeof(uintptr_t)));
    if (read_addr) {
      ASSERT_EQ(sizeof(uintptr_t), backtrace->Read(reinterpret_cast<uintptr_t>(&g_addr), reinterpret_cast<uint8_t*>(&read_addr), sizeof(uintptr_t)));

      // Needed before GetFunctionName will work.
      backtrace->Unwind(0);

      // Loop through the entire map, and get every function we can find.
      map_size += read_addr;
      std::string last_func;
      for (; read_addr < map_size; read_addr += 4) {
        uintptr_t offset;
        std::string func_name = backtrace->GetFunctionName(read_addr, &offset);
        if (!func_name.empty() && last_func != func_name) {
          found_functions.push_back(func_name);
        }
        last_func = func_name;
      }
      break;
    }
    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  VerifyFunctionsFound(found_functions);
}
1287
1288static bool FindFuncFrameInBacktrace(Backtrace* backtrace, uintptr_t test_func, size_t* frame_num) {
1289  backtrace_map_t map;
1290  backtrace->FillInMap(test_func, &map);
1291  if (!BacktraceMap::IsValid(map)) {
1292    return false;
1293  }
1294
1295  // Loop through the frames, and find the one that is in the map.
1296  *frame_num = 0;
1297  for (Backtrace::const_iterator it = backtrace->begin(); it != backtrace->end(); ++it) {
1298    if (BacktraceMap::IsValid(it->map) && map.start == it->map.start &&
1299        it->pc >= test_func) {
1300      *frame_num = it->num;
1301      return true;
1302    }
1303  }
1304  return false;
1305}
1306
// Sanity-check the frame found by FindFuncFrameInBacktrace: the unwind did
// not run away to MAX_BACKTRACE_FRAMES, there is a caller frame above the
// test function, and its pc is within 200 bytes of the function start.
static void VerifyUnreadableElfFrame(Backtrace* backtrace, uintptr_t test_func, size_t frame_num) {
  ASSERT_LT(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
    << DumpFrames(backtrace);

  ASSERT_TRUE(frame_num != 0) << DumpFrames(backtrace);
  // Make sure that there is at least one more frame above the test func call.
  ASSERT_LT(frame_num, backtrace->NumFrames()) << DumpFrames(backtrace);

  // The pc should be close to the start of the function (within 200 bytes).
  uintptr_t diff = backtrace->GetFrame(frame_num)->pc - test_func;
  ASSERT_LT(diff, 200U) << DumpFrames(backtrace);
}
1318
// Unwind the current thread and verify that a frame for |test_func|
// appears and looks sane. Used as the callback invoked from inside the
// (unlinked, unreadable-from-disk) test library.
static void VerifyUnreadableElfBacktrace(uintptr_t test_func) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS,
                                                         BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());

  size_t frame_num;
  ASSERT_TRUE(FindFuncFrameInBacktrace(backtrace.get(), test_func, &frame_num));

  VerifyUnreadableElfFrame(backtrace.get(), test_func, frame_num);
}
1331
// Signature of the test_level_one entry point exported by
// libbacktrace_test (modern `using` alias instead of typedef).
using test_func_t = int (*)(int, int, int, int, void (*)(uintptr_t), uintptr_t);
1333
// dlopen the copied library, unlink it (so its ELF is unreadable from
// disk), then call into it and verify that a local unwind still produces a
// frame for test_level_one.
TEST(libbacktrace, unwind_through_unreadable_elf_local) {
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);
  void* lib_handle = dlopen(tmp_so_name, RTLD_NOW);
  ASSERT_TRUE(lib_handle != nullptr);
  ASSERT_TRUE(unlink(tmp_so_name) != -1);

  test_func_t test_func;
  test_func = reinterpret_cast<test_func_t>(dlsym(lib_handle, "test_level_one"));
  ASSERT_TRUE(test_func != nullptr);

  // The library calls VerifyUnreadableElfBacktrace from its innermost level.
  ASSERT_NE(test_func(1, 2, 3, 4, VerifyUnreadableElfBacktrace,
                      reinterpret_cast<uintptr_t>(test_func)), 0);

  ASSERT_TRUE(dlclose(lib_handle) == 0);
}
1350
// Remote variant: the forked child runs inside the unlinked library while
// the parent (which has dlclosed its own copy) repeatedly ptrace-attaches
// and unwinds until a frame for test_level_one is found, or 5 seconds pass.
TEST(libbacktrace, unwind_through_unreadable_elf_remote) {
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);
  void* lib_handle = dlopen(tmp_so_name, RTLD_NOW);
  ASSERT_TRUE(lib_handle != nullptr);
  ASSERT_TRUE(unlink(tmp_so_name) != -1);

  test_func_t test_func;
  test_func = reinterpret_cast<test_func_t>(dlsym(lib_handle, "test_level_one"));
  ASSERT_TRUE(test_func != nullptr);

  pid_t pid;
  if ((pid = fork()) == 0) {
    test_func(1, 2, 3, 4, 0, 0);
    exit(0);
  }
  ASSERT_TRUE(pid > 0);
  // Close the parent's handle; the child keeps its own mapping alive.
  ASSERT_TRUE(dlclose(lib_handle) == 0);

  uint64_t start = NanoTime();
  bool done = false;
  while (!done) {
    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    // Wait for the process to get to a stopping point.
    WaitForStop(pid);

    std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, BACKTRACE_CURRENT_THREAD));
    ASSERT_TRUE(backtrace.get() != nullptr);
    ASSERT_TRUE(backtrace->Unwind(0));
    ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());

    size_t frame_num;
    if (FindFuncFrameInBacktrace(backtrace.get(),
                                 reinterpret_cast<uintptr_t>(test_func), &frame_num)) {

      VerifyUnreadableElfFrame(backtrace.get(), reinterpret_cast<uintptr_t>(test_func), frame_num);
      done = true;
    }

    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  ASSERT_TRUE(done) << "Test function never found in unwind.";
}
1404
// Unwinding a tid that does not exist must fail cleanly with
// BACKTRACE_UNWIND_ERROR_THREAD_DOESNT_EXIST.
TEST(libbacktrace, unwind_thread_doesnt_exist) {
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, 99999999));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_FALSE(backtrace->Unwind(0));
  ASSERT_EQ(BACKTRACE_UNWIND_ERROR_THREAD_DOESNT_EXIST, backtrace->GetError());
}
1412
// GetFunctionName must work on a freshly created local Backtrace object
// even before Unwind() has been called.
TEST(libbacktrace, local_get_function_name_before_unwind) {
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);

  // Verify that trying to get a function name before doing an unwind works.
  // (+1 so the address is inside the function, not exactly at its start.)
  uintptr_t cur_func_offset = reinterpret_cast<uintptr_t>(&test_level_one) + 1;
  size_t offset;
  ASSERT_NE(std::string(""), backtrace->GetFunctionName(cur_func_offset, &offset));
}
1423
// Same as local_get_function_name_before_unwind, but against a remote
// ptrace-stopped process.
TEST(libbacktrace, remote_get_function_name_before_unwind) {
  pid_t pid;
  CreateRemoteProcess(&pid);

  // Now create an unwind object.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));

  // Verify that trying to get a function name before doing an unwind works.
  // (+1 so the address is inside the function, not exactly at its start.)
  uintptr_t cur_func_offset = reinterpret_cast<uintptr_t>(&test_level_one) + 1;
  size_t offset;
  ASSERT_NE(std::string(""), backtrace->GetFunctionName(cur_func_offset, &offset));

  FinishRemoteProcess(pid);
}
1438
// Store |sp| into the stack-pointer slot of |ucontext| for the current
// architecture; fails the test on unsupported architectures.
static void SetUcontextSp(uintptr_t sp, ucontext_t* ucontext) {
#if defined(__arm__)
  ucontext->uc_mcontext.arm_sp = sp;
#elif defined(__aarch64__)
  ucontext->uc_mcontext.sp = sp;
#elif defined(__i386__)
  ucontext->uc_mcontext.gregs[REG_ESP] = sp;
#elif defined(__x86_64__)
  ucontext->uc_mcontext.gregs[REG_RSP] = sp;
#else
  UNUSED(sp);
  UNUSED(ucontext);
  ASSERT_TRUE(false) << "Unsupported architecture";
#endif
}
1454
// Store |pc| into the program-counter slot of |ucontext| for the current
// architecture; fails the test on unsupported architectures.
static void SetUcontextPc(uintptr_t pc, ucontext_t* ucontext) {
#if defined(__arm__)
  ucontext->uc_mcontext.arm_pc = pc;
#elif defined(__aarch64__)
  ucontext->uc_mcontext.pc = pc;
#elif defined(__i386__)
  ucontext->uc_mcontext.gregs[REG_EIP] = pc;
#elif defined(__x86_64__)
  ucontext->uc_mcontext.gregs[REG_RIP] = pc;
#else
  UNUSED(pc);
  UNUSED(ucontext);
  ASSERT_TRUE(false) << "Unsupported architecture";
#endif
}
1470
// Store |lr| into the link-register slot of |ucontext| on architectures
// that have one; on x86/x86-64 the return address lives on the stack, so
// this only sanity-checks the arguments.
static void SetUcontextLr(uintptr_t lr, ucontext_t* ucontext) {
#if defined(__arm__)
  ucontext->uc_mcontext.arm_lr = lr;
#elif defined(__aarch64__)
  ucontext->uc_mcontext.regs[30] = lr;
#elif defined(__i386__)
  // The lr is on the stack.
  ASSERT_TRUE(lr != 0);
  ASSERT_TRUE(ucontext != nullptr);
#elif defined(__x86_64__)
  // The lr is on the stack.
  ASSERT_TRUE(lr != 0);
  ASSERT_TRUE(ucontext != nullptr);
#else
  UNUSED(lr);
  UNUSED(ucontext);
  ASSERT_TRUE(false) << "Unsupported architecture";
#endif
}
1490
1491static constexpr size_t DEVICE_MAP_SIZE = 1024;
1492
1493static void SetupDeviceMap(void** device_map) {
1494  // Make sure that anything in a device map will result in fails
1495  // to read.
1496  android::base::unique_fd device_fd(open("/dev/zero", O_RDONLY | O_CLOEXEC));
1497
1498  *device_map = mmap(nullptr, 1024, PROT_READ, MAP_PRIVATE, device_fd, 0);
1499  ASSERT_TRUE(*device_map != MAP_FAILED);
1500
1501  // Make sure the map is readable.
1502  ASSERT_EQ(0, reinterpret_cast<int*>(*device_map)[0]);
1503}
1504
// Verify that unwinding refuses to use memory in a device map: function
// name lookup returns "" for device-map addresses, and an unwind whose pc
// or sp lies in the device map stops after a single frame.
static void UnwindFromDevice(Backtrace* backtrace, void* device_map) {
  uintptr_t device_map_uint = reinterpret_cast<uintptr_t>(device_map);

  backtrace_map_t map;
  backtrace->FillInMap(device_map_uint, &map);
  // Verify the flag is set.
  ASSERT_EQ(PROT_DEVICE_MAP, map.flags & PROT_DEVICE_MAP);

  // Quick sanity checks.
  size_t offset;
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(device_map_uint, &offset));
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(device_map_uint, &offset, &map));
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(0, &offset));

  uintptr_t cur_func_offset = reinterpret_cast<uintptr_t>(&test_level_one) + 1;
  // Now verify the device map flag actually causes the function name to be empty.
  backtrace->FillInMap(cur_func_offset, &map);
  ASSERT_TRUE((map.flags & PROT_DEVICE_MAP) == 0);
  ASSERT_NE(std::string(""), backtrace->GetFunctionName(cur_func_offset, &offset, &map));
  // Forcing the flag on an otherwise-valid map must suppress the name.
  map.flags |= PROT_DEVICE_MAP;
  ASSERT_EQ(std::string(""), backtrace->GetFunctionName(cur_func_offset, &offset, &map));

  ucontext_t ucontext;

  // Create a context that has the pc in the device map, but the sp
  // in a non-device map.
  memset(&ucontext, 0, sizeof(ucontext));
  SetUcontextSp(reinterpret_cast<uintptr_t>(&ucontext), &ucontext);
  SetUcontextPc(device_map_uint, &ucontext);
  SetUcontextLr(cur_func_offset, &ucontext);

  ASSERT_TRUE(backtrace->Unwind(0, &ucontext));

  // The buffer should only be a single element.
  ASSERT_EQ(1U, backtrace->NumFrames());
  const backtrace_frame_data_t* frame = backtrace->GetFrame(0);
  ASSERT_EQ(device_map_uint, frame->pc);
  ASSERT_EQ(reinterpret_cast<uintptr_t>(&ucontext), frame->sp);

  // Check what happens when skipping the first frame.
  ASSERT_TRUE(backtrace->Unwind(1, &ucontext));
  ASSERT_EQ(0U, backtrace->NumFrames());

  // Create a context that has the sp in the device map, but the pc
  // in a non-device map.
  memset(&ucontext, 0, sizeof(ucontext));
  SetUcontextSp(device_map_uint, &ucontext);
  SetUcontextPc(cur_func_offset, &ucontext);
  SetUcontextLr(cur_func_offset, &ucontext);

  ASSERT_TRUE(backtrace->Unwind(0, &ucontext));

  // The buffer should only be a single element.
  ASSERT_EQ(1U, backtrace->NumFrames());
  frame = backtrace->GetFrame(0);
  ASSERT_EQ(cur_func_offset, frame->pc);
  ASSERT_EQ(device_map_uint, frame->sp);

  // Check what happens when skipping the first frame.
  ASSERT_TRUE(backtrace->Unwind(1, &ucontext));
  ASSERT_EQ(0U, backtrace->NumFrames());
}
1567
// Verify that a local unwind will not use memory that lives in a device
// map (see UnwindFromDevice).
TEST(libbacktrace, unwind_disallow_device_map_local) {
  void* device_map;
  SetupDeviceMap(&device_map);

  // Now create an unwind object.
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace);

  UnwindFromDevice(backtrace.get(), device_map);

  munmap(device_map, DEVICE_MAP_SIZE);
}
1581
// Remote variant of the device-map test. The actual device-map unwind is
// currently disabled (see TODO below); this still exercises setup and
// teardown of the remote process and device map.
TEST(libbacktrace, unwind_disallow_device_map_remote) {
  void* device_map;
  SetupDeviceMap(&device_map);

  // Fork a process to do a remote backtrace.
  pid_t pid;
  CreateRemoteProcess(&pid);

  // Now create an unwind object.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));

  // TODO: Currently unwind from context doesn't work on remote
  // unwind. Keep this test because the new unwinder should support
  // this eventually, or we can delete this test.
  // properly with unwind from context.
  // UnwindFromDevice(backtrace.get(), device_map);

  FinishRemoteProcess(pid);

  munmap(device_map, DEVICE_MAP_SIZE);
}
1603
// RAII helper: installs a plain handler (or an SA_SIGINFO action) for
// |signal_number| on construction and restores the previous disposition on
// destruction.
class ScopedSignalHandler {
 public:
  ScopedSignalHandler(int signal_number, void (*handler)(int)) : signal_number_(signal_number) {
    memset(&action_, 0, sizeof(action_));
    action_.sa_handler = handler;
    sigaction(signal_number_, &action_, &old_action_);
  }

  ScopedSignalHandler(int signal_number, void (*action)(int, siginfo_t*, void*))
      : signal_number_(signal_number) {
    memset(&action_, 0, sizeof(action_));
    action_.sa_flags = SA_SIGINFO;
    action_.sa_sigaction = action;
    sigaction(signal_number_, &action_, &old_action_);
  }

  ~ScopedSignalHandler() { sigaction(signal_number_, &old_action_, nullptr); }

  // Copying would restore old_action_ twice (and at the wrong time), so
  // explicitly disallow it.
  ScopedSignalHandler(const ScopedSignalHandler&) = delete;
  ScopedSignalHandler& operator=(const ScopedSignalHandler&) = delete;

 private:
  struct sigaction action_;      // The handler installed by this object.
  struct sigaction old_action_;  // The disposition to restore on destruction.
  const int signal_number_;
};
1627
// Callback passed into test_level_one: signals the parent by writing 1
// through |data|, then spins forever so the process remains unwindable.
static void SetValueAndLoop(void* data) {
  volatile int* value = reinterpret_cast<volatile int*>(data);

  *value = 1;
  // The volatile induction variable keeps the infinite loop observable so
  // it cannot be optimized away; the process spins here until killed.
  for (volatile int i = 0;; i++)
    ;
}
1635
// Fork a child that descends through test_level_one..four into
// SetValueAndLoop, send it SIGUSR1 (handled by test_signal_handler or
// test_signal_action depending on |use_action|), and verify that a remote
// unwind from inside the signal handler walks back through the signal
// frame into the test_level_* call chain.
static void UnwindThroughSignal(bool use_action) {
  volatile int value = 0;
  pid_t pid;
  if ((pid = fork()) == 0) {
    if (use_action) {
      ScopedSignalHandler ssh(SIGUSR1, test_signal_action);

      test_level_one(1, 2, 3, 4, SetValueAndLoop, const_cast<int*>(&value));
    } else {
      ScopedSignalHandler ssh(SIGUSR1, test_signal_handler);

      test_level_one(1, 2, 3, 4, SetValueAndLoop, const_cast<int*>(&value));
    }
  }
  ASSERT_NE(-1, pid);

  // Phase 1: poll the child's copy of |value| until SetValueAndLoop has
  // set it, i.e. the child reached the innermost function.
  int read_value = 0;
  uint64_t start = NanoTime();
  while (read_value == 0) {
    usleep(1000);

    // Loop until the remote function gets into the final function.
    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    WaitForStop(pid);

    std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));

    size_t bytes_read = backtrace->Read(reinterpret_cast<uintptr_t>(const_cast<int*>(&value)),
                                        reinterpret_cast<uint8_t*>(&read_value), sizeof(read_value));
    ASSERT_EQ(sizeof(read_value), bytes_read);

    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    ASSERT_TRUE(NanoTime() - start < 5 * NS_PER_SEC)
        << "Remote process did not execute far enough in 5 seconds.";
  }

  // Now need to send a signal to the remote process.
  kill(pid, SIGUSR1);

  // Phase 2: wait for the process to get to the signal handler loop.
  Backtrace::const_iterator frame_iter;
  start = NanoTime();
  std::unique_ptr<Backtrace> backtrace;
  while (true) {
    usleep(1000);

    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    WaitForStop(pid);

    backtrace.reset(Backtrace::Create(pid, pid));
    ASSERT_TRUE(backtrace->Unwind(0));
    bool found = false;
    for (frame_iter = backtrace->begin(); frame_iter != backtrace->end(); ++frame_iter) {
      if (frame_iter->func_name == "test_loop_forever") {
        // Leave frame_iter on the first frame below test_loop_forever.
        ++frame_iter;
        found = true;
        break;
      }
    }
    if (found) {
      break;
    }

    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    ASSERT_TRUE(NanoTime() - start < 5 * NS_PER_SEC)
        << "Remote process did not get in signal handler in 5 seconds." << std::endl
        << DumpFrames(backtrace.get());
  }

  // Phase 3: collect the remaining frame names and check their order.
  std::vector<std::string> names;
  // Loop through the frames, and save the function names.
  size_t frame = 0;
  for (; frame_iter != backtrace->end(); ++frame_iter) {
    if (frame_iter->func_name == "test_level_four") {
      frame = names.size() + 1;
    }
    names.push_back(frame_iter->func_name);
  }
  ASSERT_NE(0U, frame) << "Unable to find test_level_four in backtrace" << std::endl
                       << DumpFrames(backtrace.get());

  // The expected order of the frames:
  //   test_loop_forever
  //   test_signal_handler|test_signal_action
  //   <OPTIONAL_FRAME> May or may not exist.
  //   SetValueAndLoop (but the function name might be empty)
  //   test_level_four
  //   test_level_three
  //   test_level_two
  //   test_level_one
  ASSERT_LE(frame + 2, names.size()) << DumpFrames(backtrace.get());
  ASSERT_LE(2U, frame) << DumpFrames(backtrace.get());
  if (use_action) {
    ASSERT_EQ("test_signal_action", names[0]) << DumpFrames(backtrace.get());
  } else {
    ASSERT_EQ("test_signal_handler", names[0]) << DumpFrames(backtrace.get());
  }
  ASSERT_EQ("test_level_three", names[frame]) << DumpFrames(backtrace.get());
  ASSERT_EQ("test_level_two", names[frame + 1]) << DumpFrames(backtrace.get());
  ASSERT_EQ("test_level_one", names[frame + 2]) << DumpFrames(backtrace.get());

  FinishRemoteProcess(pid);
}
1743
// Unwind through a plain (non-SA_SIGINFO) signal handler.
TEST(libbacktrace, unwind_remote_through_signal_using_handler) {
  UnwindThroughSignal(false);
}
1747
// Unwind through an SA_SIGINFO signal action.
TEST(libbacktrace, unwind_remote_through_signal_using_action) {
  UnwindThroughSignal(true);
}
1751
1752#if defined(ENABLE_PSS_TESTS)
1753#include "GetPss.h"
1754
1755#define MAX_LEAK_BYTES (32*1024UL)
1756
1757static void CheckForLeak(pid_t pid, pid_t tid) {
1758  // Do a few runs to get the PSS stable.
1759  for (size_t i = 0; i < 100; i++) {
1760    Backtrace* backtrace = Backtrace::Create(pid, tid);
1761    ASSERT_TRUE(backtrace != nullptr);
1762    ASSERT_TRUE(backtrace->Unwind(0));
1763    ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
1764    delete backtrace;
1765  }
1766  size_t stable_pss = GetPssBytes();
1767  ASSERT_TRUE(stable_pss != 0);
1768
1769  // Loop enough that even a small leak should be detectable.
1770  for (size_t i = 0; i < 4096; i++) {
1771    Backtrace* backtrace = Backtrace::Create(pid, tid);
1772    ASSERT_TRUE(backtrace != nullptr);
1773    ASSERT_TRUE(backtrace->Unwind(0));
1774    ASSERT_EQ(BACKTRACE_UNWIND_NO_ERROR, backtrace->GetError());
1775    delete backtrace;
1776  }
1777  size_t new_pss = GetPssBytes();
1778  ASSERT_TRUE(new_pss != 0);
1779  if (new_pss > stable_pss) {
1780    ASSERT_LE(new_pss - stable_pss, MAX_LEAK_BYTES);
1781  }
1782}
1783
// Check that repeated local unwinds do not leak (PSS growth stays bounded).
TEST(libbacktrace, check_for_leak_local) {
  CheckForLeak(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD);
}
1787
// Check that repeated unwinds of another thread in this process do not
// leak (PSS growth stays bounded).
TEST(libbacktrace, check_for_leak_local_thread) {
  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, nullptr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  CheckForLeak(BACKTRACE_CURRENT_PROCESS, thread_data.tid);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  ASSERT_TRUE(pthread_join(thread, nullptr) == 0);
}
1803
// Check that repeated remote unwinds of a ptrace-stopped process do not
// leak (PSS growth stays bounded).
TEST(libbacktrace, check_for_leak_remote) {
  pid_t pid;
  CreateRemoteProcess(&pid);

  CheckForLeak(pid, BACKTRACE_CURRENT_THREAD);

  FinishRemoteProcess(pid);
}
1812#endif
1813