backtrace_test.cpp revision 837eff22c86bcdddd50c68315ef17792a0d1949a
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define _GNU_SOURCE 1
#include <dirent.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ptrace.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>

#include <algorithm>
#include <list>
#include <memory>
#include <string>
#include <vector>

#include <backtrace/Backtrace.h>
#include <backtrace/BacktraceMap.h>

#include <base/stringprintf.h>
#include <cutils/atomic.h>
#include <cutils/threads.h>

#include <gtest/gtest.h>

// For the THREAD_SIGNAL definition.
#include "BacktraceCurrent.h"
#include "thread_utils.h"

// Number of microseconds per millisecond.
#define US_PER_MSEC             1000

// Number of nanoseconds in a second.
#define NS_PER_SEC              1000000000ULL

// Number of simultaneous dumping operations to perform.
#define NUM_THREADS  40

// Number of simultaneous threads running in our forked process.
#define NUM_PTRACE_THREADS 5

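// State shared between a test and the thread it spawns. The tid is
// published by the thread, and state acts as a simple handshake flag.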
struct thread_t {
  pid_t tid;
  int32_t state;
  pthread_t threadId;
  void* data;
};

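// Data for a dumper thread: the thread to unwind, the resulting backtrace,
// a shared flag (now) that starts the dump, and a flag (done) that is set
// once the dump completes.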
struct dump_thread_t {
  thread_t thread;
  Backtrace* backtrace;
  int32_t* now;
  int32_t done;
};

extern "C" {
// Prototypes for functions in the test library.
int test_level_one(int, int, int, int, void (*)(void*), void*);

int test_recursive_call(int, void (*)(void*), void*);
}

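// Return the current monotonic clock value in nanoseconds.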
uint64_t NanoTime() {
  struct timespec t = { 0, 0 };
  clock_gettime(CLOCK_MONOTONIC, &t);
  return static_cast<uint64_t>(t.tv_sec * NS_PER_SEC + t.tv_nsec);
}

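// Format every frame of the backtrace into a single string, for use in
// assertion failure messages.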
std::string DumpFrames(Backtrace* backtrace) {
  if (backtrace->NumFrames() == 0) {
    return "   No frames to dump.\n";
  }

  std::string frame;
  for (size_t i = 0; i < backtrace->NumFrames(); i++) {
    frame += "   " + backtrace->FormatFrameData(i) + '\n';
  }
  return frame;
}

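// Poll with PTRACE_GETSIGINFO until the traced process reaches a ptrace
// stop, giving up after one second.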
void WaitForStop(pid_t pid) {
  uint64_t start = NanoTime();

  siginfo_t si;
  while (ptrace(PTRACE_GETSIGINFO, pid, 0, &si) < 0 && (errno == EINTR || errno == ESRCH)) {
    if ((NanoTime() - start) > NS_PER_SEC) {
      printf("The process did not get to a stopping point in 1 second.\n");
      break;
    }
    usleep(US_PER_MSEC);
  }
}

bool ReadyLevelBacktrace(Backtrace* backtrace) {
  // See if test_level_four is in the backtrace.
  bool found = false;
  for (Backtrace::const_iterator it = backtrace->begin(); it != backtrace->end(); ++it) {
    if (it->func_name == "test_level_four") {
      found = true;
      break;
    }
  }

  return found;
}

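// Verify that the backtrace contains the test_level_one through
// test_level_four call chain in the expected order.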
void VerifyLevelDump(Backtrace* backtrace) {
  ASSERT_GT(backtrace->NumFrames(), static_cast<size_t>(0))
    << DumpFrames(backtrace);
  ASSERT_LT(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
    << DumpFrames(backtrace);

  // Look through the frames starting at the highest to find the
  // frame we want.
  size_t frame_num = 0;
  for (size_t i = backtrace->NumFrames()-1; i > 2; i--) {
    if (backtrace->GetFrame(i)->func_name == "test_level_one") {
      frame_num = i;
      break;
    }
  }
  ASSERT_LT(static_cast<size_t>(0), frame_num) << DumpFrames(backtrace);
  ASSERT_LE(static_cast<size_t>(3), frame_num) << DumpFrames(backtrace);

  ASSERT_EQ(backtrace->GetFrame(frame_num)->func_name, "test_level_one")
    << DumpFrames(backtrace);
  ASSERT_EQ(backtrace->GetFrame(frame_num-1)->func_name, "test_level_two")
    << DumpFrames(backtrace);
  ASSERT_EQ(backtrace->GetFrame(frame_num-2)->func_name, "test_level_three")
    << DumpFrames(backtrace);
  ASSERT_EQ(backtrace->GetFrame(frame_num-3)->func_name, "test_level_four")
    << DumpFrames(backtrace);
}

void VerifyLevelBacktrace(void*) {
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyLevelDump(backtrace.get());
}

bool ReadyMaxBacktrace(Backtrace* backtrace) {
  return (backtrace->NumFrames() == MAX_BACKTRACE_FRAMES);
}

void VerifyMaxDump(Backtrace* backtrace) {
  ASSERT_EQ(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
    << DumpFrames(backtrace);
  // Verify that the last frame is our recursive call.
  ASSERT_EQ(backtrace->GetFrame(MAX_BACKTRACE_FRAMES-1)->func_name, "test_recursive_call")
    << DumpFrames(backtrace);
}

void VerifyMaxBacktrace(void*) {
  std::unique_ptr<Backtrace> backtrace(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyMaxDump(backtrace.get());
}

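// Publish that the thread is ready, then spin until the caller clears the
// state flag. Used as the callback passed into the test library functions.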
void ThreadSetState(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);
  android_atomic_acquire_store(1, &thread->state);
  volatile int i = 0;
  while (thread->state) {
    i++;
  }
}

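// Unwind the given thread of the current process and run the supplied
// verification function on the result.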
void VerifyThreadTest(pid_t tid, void (*VerifyFunc)(Backtrace*)) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyFunc(backtrace.get());
}

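// Busy-wait until the value becomes non-zero, or until the given number of
// seconds has elapsed. Returns true if the value was set in time.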
bool WaitForNonZero(int32_t* value, uint64_t seconds) {
  uint64_t start = NanoTime();
  do {
    if (android_atomic_acquire_load(value)) {
      return true;
    }
  } while ((NanoTime() - start) < seconds * NS_PER_SEC);
  return false;
}

TEST(libbacktrace, local_no_unwind_frames) {
  // Verify that a local unwind does not include any frames within
  // libunwind or libbacktrace.
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), getpid()));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  ASSERT_TRUE(backtrace->NumFrames() != 0);
  for (const auto& frame : *backtrace) {
    if (BacktraceMap::IsValid(frame.map)) {
      const std::string name = basename(frame.map.name.c_str());
      ASSERT_TRUE(name != "libunwind.so" && name != "libbacktrace.so")
        << DumpFrames(backtrace.get());
    }
  }
}

TEST(libbacktrace, local_trace) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelBacktrace, nullptr), 0);
}

void VerifyIgnoreFrames(
    Backtrace* bt_all, Backtrace* bt_ign1,
    Backtrace* bt_ign2, const char* cur_proc) {
  EXPECT_EQ(bt_all->NumFrames(), bt_ign1->NumFrames() + 1)
    << "All backtrace:\n" << DumpFrames(bt_all) << "Ignore 1 backtrace:\n" << DumpFrames(bt_ign1);
  EXPECT_EQ(bt_all->NumFrames(), bt_ign2->NumFrames() + 2)
    << "All backtrace:\n" << DumpFrames(bt_all) << "Ignore 2 backtrace:\n" << DumpFrames(bt_ign2);

  // Check that all of the frames above the current function are the same
  // across the backtraces.
  bool check = (cur_proc == nullptr);
  for (size_t i = 0; i < bt_ign2->NumFrames(); i++) {
    if (check) {
      EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_ign1->GetFrame(i+1)->pc);
      EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_ign1->GetFrame(i+1)->sp);
      EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_ign1->GetFrame(i+1)->stack_size);

      EXPECT_EQ(bt_ign2->GetFrame(i)->pc, bt_all->GetFrame(i+2)->pc);
      EXPECT_EQ(bt_ign2->GetFrame(i)->sp, bt_all->GetFrame(i+2)->sp);
      EXPECT_EQ(bt_ign2->GetFrame(i)->stack_size, bt_all->GetFrame(i+2)->stack_size);
    }
    if (!check && bt_ign2->GetFrame(i)->func_name == cur_proc) {
      check = true;
    }
  }
}

void VerifyLevelIgnoreFrames(void*) {
  std::unique_ptr<Backtrace> all(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(all.get() != nullptr);
  ASSERT_TRUE(all->Unwind(0));

  std::unique_ptr<Backtrace> ign1(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(ign1.get() != nullptr);
  ASSERT_TRUE(ign1->Unwind(1));

  std::unique_ptr<Backtrace> ign2(
      Backtrace::Create(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(ign2.get() != nullptr);
  ASSERT_TRUE(ign2->Unwind(2));

  VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), "VerifyLevelIgnoreFrames");
}

TEST(libbacktrace, local_trace_ignore_frames) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelIgnoreFrames, nullptr), 0);
}

TEST(libbacktrace, local_max_trace) {
  ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxBacktrace, nullptr), 0);
}

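// Attach to the given process (or thread) with ptrace and unwind it,
// retrying for up to five seconds until ReadyFunc reports that the unwind
// captured the expected state, then run VerifyFunc on the result.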
void VerifyProcTest(pid_t pid, pid_t tid, bool share_map,
                    bool (*ReadyFunc)(Backtrace*),
                    void (*VerifyFunc)(Backtrace*)) {
  pid_t ptrace_tid;
  if (tid < 0) {
    ptrace_tid = pid;
  } else {
    ptrace_tid = tid;
  }
  uint64_t start = NanoTime();
  bool verified = false;
  std::string last_dump;
  do {
    usleep(US_PER_MSEC);
    if (ptrace(PTRACE_ATTACH, ptrace_tid, 0, 0) == 0) {
      // Wait for the process to get to a stopping point.
      WaitForStop(ptrace_tid);

      std::unique_ptr<BacktraceMap> map;
      if (share_map) {
        map.reset(BacktraceMap::Create(pid));
      }
      std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, tid, map.get()));
      ASSERT_TRUE(backtrace.get() != nullptr);
      ASSERT_TRUE(backtrace->Unwind(0));
      if (ReadyFunc(backtrace.get())) {
        VerifyFunc(backtrace.get());
        verified = true;
      } else {
        last_dump = DumpFrames(backtrace.get());
      }

      ASSERT_TRUE(ptrace(PTRACE_DETACH, ptrace_tid, 0, 0) == 0);
    }
    // If 5 seconds have passed, then we are done.
  } while (!verified && (NanoTime() - start) <= 5 * NS_PER_SEC);
  ASSERT_TRUE(verified) << "Last backtrace:\n" << last_dump;
}

TEST(libbacktrace, ptrace_trace) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyLevelBacktrace, VerifyLevelDump);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}

TEST(libbacktrace, ptrace_trace_shared_map) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }

  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, true, ReadyLevelBacktrace, VerifyLevelDump);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}

TEST(libbacktrace, ptrace_max_trace) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyMaxBacktrace, VerifyMaxDump);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}

void VerifyProcessIgnoreFrames(Backtrace* bt_all) {
  std::unique_ptr<Backtrace> ign1(Backtrace::Create(bt_all->Pid(), BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(ign1.get() != nullptr);
  ASSERT_TRUE(ign1->Unwind(1));

  std::unique_ptr<Backtrace> ign2(Backtrace::Create(bt_all->Pid(), BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(ign2.get() != nullptr);
  ASSERT_TRUE(ign2->Unwind(2));

  VerifyIgnoreFrames(bt_all, ign1.get(), ign2.get(), nullptr);
}

TEST(libbacktrace, ptrace_ignore_frames) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }
  VerifyProcTest(pid, BACKTRACE_CURRENT_THREAD, false, ReadyLevelBacktrace, VerifyProcessIgnoreFrames);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}

// Create a process with multiple threads and dump all of the threads.
void* PtraceThreadLevelRun(void*) {
  EXPECT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
  return nullptr;
}

void GetThreads(pid_t pid, std::vector<pid_t>* threads) {
  // Get the list of tasks.
  char task_path[128];
  snprintf(task_path, sizeof(task_path), "/proc/%d/task", pid);

  DIR* tasks_dir = opendir(task_path);
  ASSERT_TRUE(tasks_dir != nullptr);
  struct dirent* entry;
  while ((entry = readdir(tasks_dir)) != nullptr) {
    char* end;
    pid_t tid = strtoul(entry->d_name, &end, 10);
    if (*end == '\0') {
      threads->push_back(tid);
    }
  }
  closedir(tasks_dir);
}

TEST(libbacktrace, ptrace_threads) {
  pid_t pid;
  if ((pid = fork()) == 0) {
    for (size_t i = 0; i < NUM_PTRACE_THREADS; i++) {
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

      pthread_t thread;
      ASSERT_TRUE(pthread_create(&thread, &attr, PtraceThreadLevelRun, nullptr) == 0);
    }
    ASSERT_NE(test_level_one(1, 2, 3, 4, nullptr, nullptr), 0);
    _exit(1);
  }

  // Check to see that all of the threads are running before unwinding.
  std::vector<pid_t> threads;
  uint64_t start = NanoTime();
  do {
    usleep(US_PER_MSEC);
    threads.clear();
    GetThreads(pid, &threads);
  } while ((threads.size() != NUM_PTRACE_THREADS + 1) &&
      ((NanoTime() - start) <= 5 * NS_PER_SEC));
  ASSERT_EQ(threads.size(), static_cast<size_t>(NUM_PTRACE_THREADS + 1));

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);
  WaitForStop(pid);
  for (std::vector<pid_t>::const_iterator it = threads.begin(); it != threads.end(); ++it) {
    // Skip the current forked process, we only care about the threads.
    if (pid == *it) {
      continue;
    }
    VerifyProcTest(pid, *it, false, ReadyLevelBacktrace, VerifyLevelDump);
  }
  ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

  kill(pid, SIGKILL);
  int status;
  ASSERT_EQ(waitpid(pid, &status, 0), pid);
}

void VerifyLevelThread(void*) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), gettid()));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyLevelDump(backtrace.get());
}

TEST(libbacktrace, thread_current_level) {
  ASSERT_NE(test_level_one(1, 2, 3, 4, VerifyLevelThread, nullptr), 0);
}

void VerifyMaxThread(void*) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), gettid()));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyMaxDump(backtrace.get());
}

TEST(libbacktrace, thread_current_max) {
  ASSERT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, VerifyMaxThread, nullptr), 0);
}

void* ThreadLevelRun(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);

  thread->tid = gettid();
  EXPECT_NE(test_level_one(1, 2, 3, 4, ThreadSetState, data), 0);
  return nullptr;
}

TEST(libbacktrace, thread_level_trace) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  // Make sure that the thread signal used is not visible when compiled for
  // the target.
#if !defined(__GLIBC__)
  ASSERT_LT(THREAD_SIGNAL, SIGRTMIN);
#endif

  // Save the current signal action and make sure it is restored afterwards.
  struct sigaction cur_action;
  ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &cur_action) == 0);

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyLevelDump(backtrace.get());

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  // Verify that the old action was restored.
  struct sigaction new_action;
  ASSERT_TRUE(sigaction(THREAD_SIGNAL, nullptr, &new_action) == 0);
  EXPECT_EQ(cur_action.sa_sigaction, new_action.sa_sigaction);
  // The SA_RESTORER flag gets set behind our back, so a direct comparison
  // doesn't work unless we mask the value off. Mips doesn't have this
  // flag, so skip this on that platform.
#if defined(SA_RESTORER)
  cur_action.sa_flags &= ~SA_RESTORER;
  new_action.sa_flags &= ~SA_RESTORER;
#elif defined(__GLIBC__)
  // Our host compiler doesn't appear to define this flag for some reason.
  cur_action.sa_flags &= ~0x04000000;
  new_action.sa_flags &= ~0x04000000;
#endif
  EXPECT_EQ(cur_action.sa_flags, new_action.sa_flags);
}

TEST(libbacktrace, thread_ignore_frames) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  std::unique_ptr<Backtrace> all(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(all.get() != nullptr);
  ASSERT_TRUE(all->Unwind(0));

  std::unique_ptr<Backtrace> ign1(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(ign1.get() != nullptr);
  ASSERT_TRUE(ign1->Unwind(1));

  std::unique_ptr<Backtrace> ign2(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(ign2.get() != nullptr);
  ASSERT_TRUE(ign2->Unwind(2));

  VerifyIgnoreFrames(all.get(), ign1.get(), ign2.get(), nullptr);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}

void* ThreadMaxRun(void* data) {
  thread_t* thread = reinterpret_cast<thread_t*>(data);

  thread->tid = gettid();
  EXPECT_NE(test_recursive_call(MAX_BACKTRACE_FRAMES+10, ThreadSetState, data), 0);
  return nullptr;
}

TEST(libbacktrace, thread_max_trace) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);

  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadMaxRun, &thread_data) == 0);

  // Wait for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  VerifyMaxDump(backtrace.get());

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);
}

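// Dumper thread body: spin until the shared start flag is set, unwind the
// target thread, then signal completion.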
void* ThreadDump(void* data) {
  dump_thread_t* dump = reinterpret_cast<dump_thread_t*>(data);
  while (true) {
    if (android_atomic_acquire_load(dump->now)) {
      break;
    }
  }

  // The status of the actual unwind will be checked elsewhere.
  dump->backtrace = Backtrace::Create(getpid(), dump->thread.tid);
  dump->backtrace->Unwind(0);

  android_atomic_acquire_store(1, &dump->done);

  return nullptr;
}

TEST(libbacktrace, thread_multiple_dump) {
  // Dump NUM_THREADS simultaneously.
  std::vector<thread_t> runners(NUM_THREADS);
  std::vector<dump_thread_t> dumpers(NUM_THREADS);

  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  for (size_t i = 0; i < NUM_THREADS; i++) {
    // Launch the runners, they will spin in hard loops doing nothing.
    runners[i].tid = 0;
    runners[i].state = 0;
    ASSERT_TRUE(pthread_create(&runners[i].threadId, &attr, ThreadMaxRun, &runners[i]) == 0);
  }

  // Wait for tids to be set.
  for (std::vector<thread_t>::iterator it = runners.begin(); it != runners.end(); ++it) {
    ASSERT_TRUE(WaitForNonZero(&it->state, 30));
  }

  // Start all of the dumpers at once, they will spin until they are signalled
  // to begin their dump run.
  int32_t dump_now = 0;
  for (size_t i = 0; i < NUM_THREADS; i++) {
    dumpers[i].thread.tid = runners[i].tid;
    dumpers[i].thread.state = 0;
    dumpers[i].done = 0;
    dumpers[i].now = &dump_now;

    ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0);
  }

  // Start all of the dumpers going at once.
  android_atomic_acquire_store(1, &dump_now);

  for (size_t i = 0; i < NUM_THREADS; i++) {
    ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 30));

    // Tell the runner thread to exit its infinite loop.
    android_atomic_acquire_store(0, &runners[i].state);

    ASSERT_TRUE(dumpers[i].backtrace != nullptr);
    VerifyMaxDump(dumpers[i].backtrace);

    delete dumpers[i].backtrace;
    dumpers[i].backtrace = nullptr;
  }
}

TEST(libbacktrace, thread_multiple_dump_same_thread) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  thread_t runner;
  runner.tid = 0;
  runner.state = 0;
  ASSERT_TRUE(pthread_create(&runner.threadId, &attr, ThreadMaxRun, &runner) == 0);

  // Wait for tids to be set.
  ASSERT_TRUE(WaitForNonZero(&runner.state, 30));

  // Start all of the dumpers at once, they will spin until they are signalled
  // to begin their dump run.
  int32_t dump_now = 0;
  // Dump the same thread NUM_THREADS times simultaneously.
  std::vector<dump_thread_t> dumpers(NUM_THREADS);
  for (size_t i = 0; i < NUM_THREADS; i++) {
    dumpers[i].thread.tid = runner.tid;
    dumpers[i].thread.state = 0;
    dumpers[i].done = 0;
    dumpers[i].now = &dump_now;

    ASSERT_TRUE(pthread_create(&dumpers[i].thread.threadId, &attr, ThreadDump, &dumpers[i]) == 0);
  }

  // Start all of the dumpers going at once.
  android_atomic_acquire_store(1, &dump_now);

  for (size_t i = 0; i < NUM_THREADS; i++) {
    ASSERT_TRUE(WaitForNonZero(&dumpers[i].done, 30));

    ASSERT_TRUE(dumpers[i].backtrace != nullptr);
    VerifyMaxDump(dumpers[i].backtrace);

    delete dumpers[i].backtrace;
    dumpers[i].backtrace = nullptr;
  }

  // Tell the runner thread to exit its infinite loop.
  android_atomic_acquire_store(0, &runner.state);
}

// This test is for UnwindMaps that should share the same map cursor when
// multiple maps are created for the current process at the same time.
TEST(libbacktrace, simultaneous_maps) {
  BacktraceMap* map1 = BacktraceMap::Create(getpid());
  BacktraceMap* map2 = BacktraceMap::Create(getpid());
  BacktraceMap* map3 = BacktraceMap::Create(getpid());

  Backtrace* back1 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map1);
  ASSERT_TRUE(back1 != nullptr);
  EXPECT_TRUE(back1->Unwind(0));
  delete back1;
  delete map1;

  Backtrace* back2 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map2);
  ASSERT_TRUE(back2 != nullptr);
  EXPECT_TRUE(back2->Unwind(0));
  delete back2;
  delete map2;

  Backtrace* back3 = Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD, map3);
  ASSERT_TRUE(back3 != nullptr);
  EXPECT_TRUE(back3->Unwind(0));
  delete back3;
  delete map3;
}

TEST(libbacktrace, fillin_erases) {
  BacktraceMap* back_map = BacktraceMap::Create(getpid());

  backtrace_map_t map;

  map.start = 1;
  map.end = 3;
  map.flags = 1;
  map.name = "Initialized";
  back_map->FillIn(0, &map);
  delete back_map;

  ASSERT_FALSE(BacktraceMap::IsValid(map));
  ASSERT_EQ(static_cast<uintptr_t>(0), map.start);
  ASSERT_EQ(static_cast<uintptr_t>(0), map.end);
  ASSERT_EQ(0, map.flags);
  ASSERT_EQ("", map.name);
}

TEST(libbacktrace, format_test) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);

  backtrace_frame_data_t frame;
  frame.num = 1;
  frame.pc = 2;
  frame.sp = 0;
  frame.stack_size = 0;
  frame.func_offset = 0;

  // Check no map set.
  frame.num = 1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000002  <unknown>",
#else
  EXPECT_EQ("#01 pc 00000002  <unknown>",
#endif
            backtrace->FormatFrameData(&frame));

  // Check map name empty, but exists.
  frame.map.start = 1;
  frame.map.end = 1;
  frame.map.load_base = 0;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000000000001  <unknown>",
#else
  EXPECT_EQ("#01 pc 00000001  <unknown>",
#endif
            backtrace->FormatFrameData(&frame));

  // Check relative pc is set and map name is set.
  frame.pc = 0x12345679;
  frame.map.name = "MapFake";
  frame.map.start = 1;
  frame.map.end = 1;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, but no func offset.
  frame.func_name = "ProcFake";
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake (ProcFake)",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake (ProcFake)",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, and func offset is non-zero.
  frame.func_offset = 645;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 0000000012345678  MapFake (ProcFake+645)",
#else
  EXPECT_EQ("#01 pc 12345678  MapFake (ProcFake+645)",
#endif
            backtrace->FormatFrameData(&frame));

  // Check func_name is set, func offset is non-zero, and load_base is non-zero.
  frame.func_offset = 645;
  frame.map.load_base = 100;
#if defined(__LP64__)
  EXPECT_EQ("#01 pc 00000000123456dc  MapFake (ProcFake+645)",
#else
  EXPECT_EQ("#01 pc 123456dc  MapFake (ProcFake+645)",
#endif
            backtrace->FormatFrameData(&frame));
}

struct map_test_t {
  uintptr_t start;
  uintptr_t end;
};

bool map_sort(map_test_t i, map_test_t j) {
  return i.start < j.start;
}

void VerifyMap(pid_t pid) {
  char buffer[4096];
  snprintf(buffer, sizeof(buffer), "/proc/%d/maps", pid);

  FILE* map_file = fopen(buffer, "r");
  ASSERT_TRUE(map_file != nullptr);
  std::vector<map_test_t> test_maps;
  while (fgets(buffer, sizeof(buffer), map_file)) {
    map_test_t map;
    ASSERT_EQ(2, sscanf(buffer, "%" SCNxPTR "-%" SCNxPTR " ", &map.start, &map.end));
    test_maps.push_back(map);
  }
  fclose(map_file);
  std::sort(test_maps.begin(), test_maps.end(), map_sort);

  std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(pid));

  // Basic test that verifies that the map is in the expected order.
  std::vector<map_test_t>::const_iterator test_it = test_maps.begin();
  for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
    ASSERT_TRUE(test_it != test_maps.end());
    ASSERT_EQ(test_it->start, it->start);
    ASSERT_EQ(test_it->end, it->end);
    ++test_it;
  }
  ASSERT_TRUE(test_it == test_maps.end());
}

TEST(libbacktrace, verify_map_remote) {
  pid_t pid;

  if ((pid = fork()) == 0) {
    while (true) {
    }
    _exit(0);
  }
  ASSERT_LT(0, pid);

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

  // Wait for the process to get to a stopping point.
  WaitForStop(pid);

  // The maps should match exactly since the forked process has been paused.
  VerifyMap(pid);

  ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);
}

void InitMemory(uint8_t* memory, size_t bytes) {
  for (size_t i = 0; i < bytes; i++) {
    memory[i] = i;
    if (memory[i] == '\0') {
      // Don't use '\0' in our data so we can verify that an overread doesn't
      // occur by using a '\0' as the character after the read data.
      memory[i] = 23;
    }
  }
}

void* ThreadReadTest(void* data) {
  thread_t* thread_data = reinterpret_cast<thread_t*>(data);

  thread_data->tid = gettid();

  // Create two map pages.
  // Mark the second page as not-readable.
  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  uint8_t* memory;
  if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
    return reinterpret_cast<void*>(-1);
  }

  if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
    return reinterpret_cast<void*>(-1);
  }

  // Set up a simple pattern in memory.
  InitMemory(memory, pagesize);

  thread_data->data = memory;

  // Tell the caller it's okay to start reading memory.
  android_atomic_acquire_store(1, &thread_data->state);

  // Loop waiting for the caller to finish reading the memory.
  while (thread_data->state) {
  }

  // Re-enable read-write on the page so that we don't crash if we try
  // and access data on this page when freeing the memory.
  if (mprotect(&memory[pagesize], pagesize, PROT_READ | PROT_WRITE) != 0) {
    return reinterpret_cast<void*>(-1);
  }
  free(memory);

  android_atomic_acquire_store(1, &thread_data->state);

  return nullptr;
}

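// Exercise Backtrace::Read against the two-page buffer set up above: reads
// must stop at the unreadable second page, and unaligned and short reads
// must return exactly the bytes requested without overreading.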
void RunReadTest(Backtrace* backtrace, uintptr_t read_addr) {
  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));

  // Create a page of data to use to do quick compares.
  uint8_t* expected = new uint8_t[pagesize];
  InitMemory(expected, pagesize);

  uint8_t* data = new uint8_t[2*pagesize];
  // Verify that we can only read one page worth of data.
  size_t bytes_read = backtrace->Read(read_addr, data, 2 * pagesize);
  ASSERT_EQ(pagesize, bytes_read);
  ASSERT_TRUE(memcmp(data, expected, pagesize) == 0);

  // Verify unaligned reads.
  for (size_t i = 1; i < sizeof(word_t); i++) {
    bytes_read = backtrace->Read(read_addr + i, data, 2 * sizeof(word_t));
    ASSERT_EQ(2 * sizeof(word_t), bytes_read);
    ASSERT_TRUE(memcmp(data, &expected[i], 2 * sizeof(word_t)) == 0)
        << "Offset at " << i << " failed";
  }

  // Verify small unaligned reads.
  for (size_t i = 1; i < sizeof(word_t); i++) {
    for (size_t j = 1; j < sizeof(word_t); j++) {
      // Set one byte past what we expect to read, to guarantee we don't overread.
      data[j] = '\0';
      bytes_read = backtrace->Read(read_addr + i, data, j);
      ASSERT_EQ(j, bytes_read);
      ASSERT_TRUE(memcmp(data, &expected[i], j) == 0)
          << "Offset at " << i << " length " << j << " miscompared";
      ASSERT_EQ('\0', data[j])
          << "Offset at " << i << " length " << j << " wrote too much data";
    }
  }
  delete[] data;
  delete[] expected;
}

TEST(libbacktrace, thread_read) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  pthread_t thread;
  thread_t thread_data = { 0, 0, 0, nullptr };
  ASSERT_TRUE(pthread_create(&thread, &attr, ThreadReadTest, &thread_data) == 0);

  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));

  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(getpid(), thread_data.tid));
  ASSERT_TRUE(backtrace.get() != nullptr);

  RunReadTest(backtrace.get(), reinterpret_cast<uintptr_t>(thread_data.data));

  android_atomic_acquire_store(0, &thread_data.state);

  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 10));
}

volatile uintptr_t g_ready = 0;
volatile uintptr_t g_addr = 0;

void ForkedReadTest() {
  // Create two map pages.
  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGE_SIZE));
  uint8_t* memory;
  if (posix_memalign(reinterpret_cast<void**>(&memory), pagesize, 2 * pagesize) != 0) {
    perror("Failed to allocate memory\n");
    exit(1);
  }

  // Mark the second page as not-readable.
  if (mprotect(&memory[pagesize], pagesize, PROT_NONE) != 0) {
    perror("Failed to mprotect memory\n");
    exit(1);
  }

  // Set up a simple pattern in memory.
  InitMemory(memory, pagesize);

  g_addr = reinterpret_cast<uintptr_t>(memory);
  g_ready = 1;

  while (1) {
    usleep(US_PER_MSEC);
  }
}

TEST(libbacktrace, process_read) {
  g_ready = 0;
  pid_t pid;
  if ((pid = fork()) == 0) {
    ForkedReadTest();
    exit(0);
  }
  ASSERT_NE(-1, pid);

  bool test_executed = false;
  uint64_t start = NanoTime();
  while (1) {
    if (ptrace(PTRACE_ATTACH, pid, 0, 0) == 0) {
      WaitForStop(pid);

      std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, pid));
      ASSERT_TRUE(backtrace.get() != nullptr);

      uintptr_t read_addr;
      size_t bytes_read = backtrace->Read(reinterpret_cast<uintptr_t>(&g_ready),
                                          reinterpret_cast<uint8_t*>(&read_addr),
                                          sizeof(uintptr_t));
      ASSERT_EQ(sizeof(uintptr_t), bytes_read);
      if (read_addr) {
        // The forked process is ready to be read.
        bytes_read = backtrace->Read(reinterpret_cast<uintptr_t>(&g_addr),
                                     reinterpret_cast<uint8_t*>(&read_addr),
                                     sizeof(uintptr_t));
        ASSERT_EQ(sizeof(uintptr_t), bytes_read);

        RunReadTest(backtrace.get(), read_addr);

        test_executed = true;
        break;
      }
      ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);
    }
    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }
  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  ASSERT_TRUE(test_executed);
}

void VerifyFunctionsFound(const std::vector<std::string>& found_functions) {
  // We expect to find these functions in libbacktrace_test. If we don't
  // find them, that's a bug in the memory read handling code in libunwind.
  std::list<std::string> expected_functions;
  expected_functions.push_back("test_recursive_call");
  expected_functions.push_back("test_level_one");
  expected_functions.push_back("test_level_two");
  expected_functions.push_back("test_level_three");
  expected_functions.push_back("test_level_four");
  for (const auto& found_function : found_functions) {
    for (const auto& expected_function : expected_functions) {
      if (found_function == expected_function) {
        expected_functions.remove(found_function);
        break;
      }
    }
  }
  ASSERT_TRUE(expected_functions.empty()) << "Not all functions found in shared library.";
}

const char* CopySharedLibrary() {
#if defined(__LP64__)
  const char* lib_name = "lib64";
#else
  const char* lib_name = "lib";
#endif

#if defined(__BIONIC__)
  const char* tmp_so_name = "/data/local/tmp/libbacktrace_test.so";
  std::string cp_cmd = android::base::StringPrintf("cp /system/%s/libbacktrace_test.so %s",
                                                   lib_name, tmp_so_name);
#else
  const char* tmp_so_name = "/tmp/libbacktrace_test.so";
  if (getenv("ANDROID_HOST_OUT") == NULL) {
    fprintf(stderr, "ANDROID_HOST_OUT not set, make sure you run lunch.\n");
    return nullptr;
  }
  std::string cp_cmd = android::base::StringPrintf("cp %s/%s/libbacktrace_test.so %s",
                                                   getenv("ANDROID_HOST_OUT"), lib_name,
                                                   tmp_so_name);
#endif

  // Copy the shared library to a temporary directory.
  system(cp_cmd.c_str());

  return tmp_so_name;
}

TEST(libbacktrace, check_unreadable_elf_local) {
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);

  struct stat buf;
  ASSERT_TRUE(stat(tmp_so_name, &buf) != -1);
  uintptr_t map_size = buf.st_size;

  int fd = open(tmp_so_name, O_RDONLY);
  ASSERT_TRUE(fd != -1);

  void* map = mmap(NULL, map_size, PROT_READ, MAP_PRIVATE, fd, 0);
  ASSERT_TRUE(map != MAP_FAILED);
  close(fd);
  ASSERT_TRUE(unlink(tmp_so_name) != -1);

  std::vector<std::string> found_functions;
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS,
                                                         BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);

  // Needed before GetFunctionName will work.
  backtrace->Unwind(0);

  // Loop through the entire map, and get every function we can find.
  map_size += reinterpret_cast<uintptr_t>(map);
  std::string last_func;
  for (uintptr_t read_addr = reinterpret_cast<uintptr_t>(map);
       read_addr < map_size; read_addr += 4) {
    uintptr_t offset;
    std::string func_name = backtrace->GetFunctionName(read_addr, &offset);
    if (!func_name.empty() && last_func != func_name) {
      found_functions.push_back(func_name);
    }
    last_func = func_name;
  }

  ASSERT_TRUE(munmap(map, map_size - reinterpret_cast<uintptr_t>(map)) == 0);

  VerifyFunctionsFound(found_functions);
}

TEST(libbacktrace, check_unreadable_elf_remote) {
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);

  g_ready = 0;

  struct stat buf;
  ASSERT_TRUE(stat(tmp_so_name, &buf) != -1);
  uintptr_t map_size = buf.st_size;

  pid_t pid;
  if ((pid = fork()) == 0) {
    int fd = open(tmp_so_name, O_RDONLY);
    if (fd == -1) {
      fprintf(stderr, "Failed to open file %s: %s\n", tmp_so_name, strerror(errno));
      unlink(tmp_so_name);
      exit(0);
    }

    void* map = mmap(NULL, map_size, PROT_READ, MAP_PRIVATE, fd, 0);
    if (map == MAP_FAILED) {
      fprintf(stderr, "Failed to map in memory: %s\n", strerror(errno));
      unlink(tmp_so_name);
      exit(0);
    }
    close(fd);
    if (unlink(tmp_so_name) == -1) {
      fprintf(stderr, "Failed to unlink: %s\n", strerror(errno));
      exit(0);
    }

    g_addr = reinterpret_cast<uintptr_t>(map);
    g_ready = 1;
    while (true) {
      usleep(US_PER_MSEC);
    }
    exit(0);
  }
  ASSERT_TRUE(pid > 0);

  std::vector<std::string> found_functions;
  uint64_t start = NanoTime();
  while (true) {
    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    // Wait for the process to get to a stopping point.
    WaitForStop(pid);

    std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, BACKTRACE_CURRENT_THREAD));
    ASSERT_TRUE(backtrace.get() != nullptr);

    uintptr_t read_addr;
    ASSERT_EQ(sizeof(uintptr_t),
              backtrace->Read(reinterpret_cast<uintptr_t>(&g_ready),
                              reinterpret_cast<uint8_t*>(&read_addr), sizeof(uintptr_t)));
    if (read_addr) {
      ASSERT_EQ(sizeof(uintptr_t),
                backtrace->Read(reinterpret_cast<uintptr_t>(&g_addr),
                                reinterpret_cast<uint8_t*>(&read_addr), sizeof(uintptr_t)));

      // Needed before GetFunctionName will work.
      backtrace->Unwind(0);

      // Loop through the entire map, and get every function we can find.
      map_size += read_addr;
      std::string last_func;
      for (; read_addr < map_size; read_addr += 4) {
        uintptr_t offset;
        std::string func_name = backtrace->GetFunctionName(read_addr, &offset);
        if (!func_name.empty() && last_func != func_name) {
          found_functions.push_back(func_name);
        }
        last_func = func_name;
      }
      break;
    }
    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  VerifyFunctionsFound(found_functions);
}

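// Find the frame whose pc lies in the same map as test_func and at or above
// test_func itself, returning its frame number through frame_num.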
bool FindFuncFrameInBacktrace(Backtrace* backtrace, uintptr_t test_func, size_t* frame_num) {
  backtrace_map_t map;
  backtrace->FillInMap(test_func, &map);
  if (!BacktraceMap::IsValid(map)) {
    return false;
  }

  // Loop through the frames, and find the one that is in the map.
  *frame_num = 0;
  for (Backtrace::const_iterator it = backtrace->begin(); it != backtrace->end(); ++it) {
    if (BacktraceMap::IsValid(it->map) && map.start == it->map.start &&
        it->pc >= test_func) {
      *frame_num = it->num;
      return true;
    }
  }
  return false;
}

void VerifyUnreadableElfFrame(Backtrace* backtrace, uintptr_t test_func, size_t frame_num) {
  ASSERT_LT(backtrace->NumFrames(), static_cast<size_t>(MAX_BACKTRACE_FRAMES))
    << DumpFrames(backtrace);

  ASSERT_TRUE(frame_num != 0) << DumpFrames(backtrace);
  // Make sure that there is at least one more frame above the test func call.
  ASSERT_LT(frame_num, backtrace->NumFrames()) << DumpFrames(backtrace);

  uintptr_t diff = backtrace->GetFrame(frame_num)->pc - test_func;
  ASSERT_LT(diff, 200U) << DumpFrames(backtrace);
}

void VerifyUnreadableElfBacktrace(uintptr_t test_func) {
  std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS,
                                                         BACKTRACE_CURRENT_THREAD));
  ASSERT_TRUE(backtrace.get() != nullptr);
  ASSERT_TRUE(backtrace->Unwind(0));

  size_t frame_num;
  ASSERT_TRUE(FindFuncFrameInBacktrace(backtrace.get(), test_func, &frame_num));

  VerifyUnreadableElfFrame(backtrace.get(), test_func, frame_num);
}

typedef int (*test_func_t)(int, int, int, int, void (*)(uintptr_t), uintptr_t);

TEST(libbacktrace, unwind_through_unreadable_elf_local) {
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);
  void* lib_handle = dlopen(tmp_so_name, RTLD_NOW);
  ASSERT_TRUE(lib_handle != nullptr);
  ASSERT_TRUE(unlink(tmp_so_name) != -1);

  test_func_t test_func;
  test_func = reinterpret_cast<test_func_t>(dlsym(lib_handle, "test_level_one"));
  ASSERT_TRUE(test_func != nullptr);

  ASSERT_NE(test_func(1, 2, 3, 4, VerifyUnreadableElfBacktrace,
                      reinterpret_cast<uintptr_t>(test_func)), 0);

  ASSERT_TRUE(dlclose(lib_handle) == 0);
}

TEST(libbacktrace, unwind_through_unreadable_elf_remote) {
  const char* tmp_so_name = CopySharedLibrary();
  ASSERT_TRUE(tmp_so_name != nullptr);
  void* lib_handle = dlopen(tmp_so_name, RTLD_NOW);
  ASSERT_TRUE(lib_handle != nullptr);
  ASSERT_TRUE(unlink(tmp_so_name) != -1);

  test_func_t test_func;
  test_func = reinterpret_cast<test_func_t>(dlsym(lib_handle, "test_level_one"));
  ASSERT_TRUE(test_func != nullptr);

  pid_t pid;
  if ((pid = fork()) == 0) {
    test_func(1, 2, 3, 4, 0, 0);
    exit(0);
  }
  ASSERT_TRUE(pid > 0);
  ASSERT_TRUE(dlclose(lib_handle) == 0);

  uint64_t start = NanoTime();
  bool done = false;
  while (!done) {
    ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

    // Wait for the process to get to a stopping point.
    WaitForStop(pid);

    std::unique_ptr<Backtrace> backtrace(Backtrace::Create(pid, BACKTRACE_CURRENT_THREAD));
    ASSERT_TRUE(backtrace.get() != nullptr);
    ASSERT_TRUE(backtrace->Unwind(0));

    size_t frame_num;
    if (FindFuncFrameInBacktrace(backtrace.get(),
                                 reinterpret_cast<uintptr_t>(test_func), &frame_num)) {
      VerifyUnreadableElfFrame(backtrace.get(), reinterpret_cast<uintptr_t>(test_func), frame_num);
      done = true;
    }

    ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

    if ((NanoTime() - start) > 5 * NS_PER_SEC) {
      break;
    }
    usleep(US_PER_MSEC);
  }

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);

  ASSERT_TRUE(done) << "Test function never found in unwind.";
}

#if defined(ENABLE_PSS_TESTS)
#include "GetPss.h"

#define MAX_LEAK_BYTES (32*1024UL)

void CheckForLeak(pid_t pid, pid_t tid) {
  // Do a few runs to get the PSS stable.
  for (size_t i = 0; i < 100; i++) {
    Backtrace* backtrace = Backtrace::Create(pid, tid);
    ASSERT_TRUE(backtrace != nullptr);
    ASSERT_TRUE(backtrace->Unwind(0));
    delete backtrace;
  }
  size_t stable_pss = GetPssBytes();
  ASSERT_TRUE(stable_pss != 0);

  // Loop enough that even a small leak should be detectable.
  for (size_t i = 0; i < 4096; i++) {
    Backtrace* backtrace = Backtrace::Create(pid, tid);
    ASSERT_TRUE(backtrace != nullptr);
    ASSERT_TRUE(backtrace->Unwind(0));
    delete backtrace;
  }
  size_t new_pss = GetPssBytes();
  ASSERT_TRUE(new_pss != 0);
  size_t abs_diff = (new_pss > stable_pss) ? new_pss - stable_pss : stable_pss - new_pss;
  // As long as the new pss is within a certain amount, consider everything okay.
  ASSERT_LE(abs_diff, MAX_LEAK_BYTES);
}

TEST(libbacktrace, check_for_leak_local) {
  CheckForLeak(BACKTRACE_CURRENT_PROCESS, BACKTRACE_CURRENT_THREAD);
}

TEST(libbacktrace, check_for_leak_local_thread) {
  thread_t thread_data = { 0, 0, 0, nullptr };
  pthread_t thread;
  ASSERT_TRUE(pthread_create(&thread, nullptr, ThreadLevelRun, &thread_data) == 0);

  // Wait up to 2 seconds for the tid to be set.
  ASSERT_TRUE(WaitForNonZero(&thread_data.state, 2));

  CheckForLeak(BACKTRACE_CURRENT_PROCESS, thread_data.tid);

  // Tell the thread to exit its infinite loop.
  android_atomic_acquire_store(0, &thread_data.state);

  ASSERT_TRUE(pthread_join(thread, nullptr) == 0);
}

TEST(libbacktrace, check_for_leak_remote) {
  pid_t pid;

  if ((pid = fork()) == 0) {
    while (true) {
    }
    _exit(0);
  }
  ASSERT_LT(0, pid);

  ASSERT_TRUE(ptrace(PTRACE_ATTACH, pid, 0, 0) == 0);

  // Wait for the process to get to a stopping point.
  WaitForStop(pid);

  CheckForLeak(pid, BACKTRACE_CURRENT_THREAD);

  ASSERT_TRUE(ptrace(PTRACE_DETACH, pid, 0, 0) == 0);

  kill(pid, SIGKILL);
  ASSERT_EQ(waitpid(pid, nullptr, 0), pid);
}
#endif