gtest_main.cpp revision 64a9c4f697a2588bbcfb20534b8b15b823595d1f
1/*
2 * Copyright (C) 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <gtest/gtest.h>
18
19#include <ctype.h>
20#include <errno.h>
21#include <fcntl.h>
22#include <inttypes.h>
23#include <limits.h>
24#include <signal.h>
25#include <stdarg.h>
26#include <stdio.h>
27#include <string.h>
28#include <sys/wait.h>
29#include <time.h>
30#include <unistd.h>
31
32#include <string>
33#include <tuple>
34#include <utility>
35#include <vector>
36
37#include "BionicDeathTest.h" // For selftest.
38
39namespace testing {
40namespace internal {
41
42// Forward-declare testing::internal::ColoredPrintf so we can reuse it from gtest.
43enum GTestColor {
44  COLOR_DEFAULT,
45  COLOR_RED,
46  COLOR_GREEN,
47  COLOR_YELLOW
48};
49
50void ColoredPrintf(GTestColor color, const char* fmt, ...);
51
52}  // namespace internal
53}  // namespace testing
54
55using testing::internal::GTestColor;
56using testing::internal::COLOR_DEFAULT;
57using testing::internal::COLOR_RED;
58using testing::internal::COLOR_GREEN;
59using testing::internal::COLOR_YELLOW;
60using testing::internal::ColoredPrintf;
61
62constexpr int DEFAULT_GLOBAL_TEST_RUN_DEADLINE_MS = 60000;
63constexpr int DEFAULT_GLOBAL_TEST_RUN_WARNLINE_MS = 2000;
64
65// The longest time a test may run before it is killed for timing out.
66// It takes effect only in isolation mode.
67static int global_test_run_deadline_ms = DEFAULT_GLOBAL_TEST_RUN_DEADLINE_MS;
68
69// The time a test may run before it is flagged as slow and a warning is printed.
70// It takes effect only in isolation mode.
71static int global_test_run_warnline_ms = DEFAULT_GLOBAL_TEST_RUN_WARNLINE_MS;
72
73// Return deadline duration for a test, in ms.
74static int GetDeadlineInfo(const std::string& /*test_name*/) {
75  return global_test_run_deadline_ms;
76}
77
78// Return warnline duration for a test, in ms.
79static int GetWarnlineInfo(const std::string& /*test_name*/) {
80  return global_test_run_warnline_ms;
81}
82
83static void PrintHelpInfo() {
84  printf("Bionic Unit Test Options:\n"
85         "  -j [JOB_COUNT] or -j[JOB_COUNT]\n"
86         "      Run up to JOB_COUNT tests in parallel.\n"
87         "      Use isolation mode, Run each test in a separate process.\n"
88         "      If JOB_COUNT is not given, it is set to the count of available processors.\n"
89         "  --no-isolate\n"
90         "      Don't use isolation mode, run all tests in a single process.\n"
91         "  --deadline=[TIME_IN_MS]\n"
92         "      Run each test in no longer than [TIME_IN_MS] time.\n"
93         "      It takes effect only in isolation mode. Deafult deadline is 60000 ms.\n"
94         "  --warnline=[TIME_IN_MS]\n"
95         "      Test running longer than [TIME_IN_MS] will be warned.\n"
96         "      It takes effect only in isolation mode. Default warnline is 2000 ms.\n"
97         "  --gtest-filter=POSITIVE_PATTERNS[-NEGATIVE_PATTERNS]\n"
98         "      Used as a synonym for --gtest_filter option in gtest.\n"
99         "Default bionic unit test option is -j.\n"
100         "In isolation mode, you can send SIGQUIT to the parent process to show current\n"
101         "running tests, or send SIGINT to the parent process to stop testing and\n"
102         "clean up current running tests.\n"
103         "\n");
104}
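// Illustrative invocations (the binary name depends on the build target, and "foo" is a
// hypothetical test case name):
//   ./bionic-unit-tests -j4 --deadline=90000             # isolation mode, 4 tests in parallel
//   ./bionic-unit-tests --no-isolate --gtest_filter=foo.* # plain single-process gtest run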
105
106enum TestResult {
107  TEST_SUCCESS = 0,
108  TEST_FAILED,
109  TEST_TIMEOUT
110};
111
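// Bookkeeping for a single test as seen by the parent process: its name, result,
// running time, and the output captured from the child process.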
112class Test {
113 public:
114  Test() {} // For std::vector<Test>.
115  explicit Test(const char* name) : name_(name) {}
116
117  const std::string& GetName() const { return name_; }
118
119  void SetResult(TestResult result) { result_ = result; }
120
121  TestResult GetResult() const { return result_; }
122
123  void SetTestTime(int64_t elapsed_time_ns) { elapsed_time_ns_ = elapsed_time_ns; }
124
125  int64_t GetTestTime() const { return elapsed_time_ns_; }
126
127  void AppendTestOutput(const std::string& s) { output_ += s; }
128
129  const std::string& GetTestOutput() const { return output_; }
130
131 private:
132  const std::string name_;
133  TestResult result_;
134  int64_t elapsed_time_ns_;
135  std::string output_;
136};
137
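// A TestCase groups all tests belonging to one gtest test case and stores their results.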
138class TestCase {
139 public:
140  TestCase() {} // For std::vector<TestCase>.
141  explicit TestCase(const char* name) : name_(name) {}
142
143  const std::string& GetName() const { return name_; }
144
145  void AppendTest(const char* test_name) {
146    test_list_.push_back(Test(test_name));
147  }
148
149  size_t TestCount() const { return test_list_.size(); }
150
151  std::string GetTestName(size_t test_id) const {
152    VerifyTestId(test_id);
153    return name_ + "." + test_list_[test_id].GetName();
154  }
155
156  Test& GetTest(size_t test_id) {
157    VerifyTestId(test_id);
158    return test_list_[test_id];
159  }
160
161  const Test& GetTest(size_t test_id) const {
162    VerifyTestId(test_id);
163    return test_list_[test_id];
164  }
165
166  void SetTestResult(size_t test_id, TestResult result) {
167    VerifyTestId(test_id);
168    test_list_[test_id].SetResult(result);
169  }
170
171  TestResult GetTestResult(size_t test_id) const {
172    VerifyTestId(test_id);
173    return test_list_[test_id].GetResult();
174  }
175
176  void SetTestTime(size_t test_id, int64_t elapsed_time_ns) {
177    VerifyTestId(test_id);
178    test_list_[test_id].SetTestTime(elapsed_time_ns);
179  }
180
181  int64_t GetTestTime(size_t test_id) const {
182    VerifyTestId(test_id);
183    return test_list_[test_id].GetTestTime();
184  }
185
186 private:
187  void VerifyTestId(size_t test_id) const {
188    if (test_id >= test_list_.size()) {
189      fprintf(stderr, "test_id %zu out of range [0, %zu)\n", test_id, test_list_.size());
190      exit(1);
191    }
192  }
193
194 private:
195  const std::string name_;
196  std::vector<Test> test_list_;
197};
198
199class TestResultPrinter : public testing::EmptyTestEventListener {
200 public:
201  TestResultPrinter() : pinfo_(NULL) {}
202  virtual void OnTestStart(const testing::TestInfo& test_info) {
203    pinfo_ = &test_info; // Record test_info for use in OnTestPartResult.
204  }
205  virtual void OnTestPartResult(const testing::TestPartResult& result);
206
207 private:
208  const testing::TestInfo* pinfo_;
209};
210
211// Called after an assertion failure.
212void TestResultPrinter::OnTestPartResult(const testing::TestPartResult& result) {
213  // If the test part succeeded, we don't need to do anything.
214  if (result.type() == testing::TestPartResult::kSuccess)
215    return;
216
217  // Print failure message from the assertion (e.g. expected this and got that).
218  printf("%s:(%d) Failure in test %s.%s\n%s\n", result.file_name(), result.line_number(),
219         pinfo_->test_case_name(), pinfo_->name(), result.message());
220  fflush(stdout);
221}
222
223static int64_t NanoTime() {
224  struct timespec t;
225  t.tv_sec = t.tv_nsec = 0;
226  clock_gettime(CLOCK_MONOTONIC, &t);
227  return static_cast<int64_t>(t.tv_sec) * 1000000000LL + t.tv_nsec;
228}
229
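// Enumerate the tests by running the test binary with --gtest_list_tests and parsing its
// output into testcase_list. The expected output looks roughly like this (illustrative):
//   stdio.
//     fread
//     fwrite
//   unistd.
//     getpid
// A line ending with '.' starts a new test case; the indented lines below it are its tests.
// Returns false if the listing process fails or the output can't be parsed.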
230static bool EnumerateTests(int argc, char** argv, std::vector<TestCase>& testcase_list) {
231  std::string command;
232  for (int i = 0; i < argc; ++i) {
233    command += argv[i];
234    command += " ";
235  }
236  command += "--gtest_list_tests";
237  FILE* fp = popen(command.c_str(), "r");
238  if (fp == NULL) {
239    perror("popen");
240    return false;
241  }
242
243  char buf[200];
244  while (fgets(buf, sizeof(buf), fp) != NULL) {
245    char* p = buf;
246
247    while (*p != '\0' && isspace(*p)) {
248      ++p;
249    }
250    if (*p == '\0') continue;
251    char* start = p;
252    while (*p != '\0' && !isspace(*p)) {
253      ++p;
254    }
255    char* end = p;
256    while (*p != '\0' && isspace(*p)) {
257      ++p;
258    }
259    if (*p != '\0') {
260      // This is not what we expect; gtest must have hit an error while parsing the arguments.
261      fprintf(stderr, "argument error, check with --help\n");
262      return false;
263    }
264    *end = '\0';
265    if (*(end - 1) == '.') {
266      *(end - 1) = '\0';
267      testcase_list.push_back(TestCase(start));
268    } else {
269      testcase_list.back().AppendTest(start);
270    }
271  }
272  int result = pclose(fp);
273  return (result != -1 && WEXITSTATUS(result) == 0);
274}
275
276// Parts of the following *Print functions are copied from external/gtest/src/gtest.cc:
277// PrettyUnitTestResultPrinter. They are copied because PrettyUnitTestResultPrinter is
278// defined and used only inside gtest.cc, which makes it hard to reuse.
279static void OnTestIterationStartPrint(const std::vector<TestCase>& testcase_list, size_t iteration,
280                                      size_t iteration_count) {
281  if (iteration_count > 1) {
282    printf("\nRepeating all tests (iteration %zu) . . .\n\n", iteration);
283  }
284  ColoredPrintf(COLOR_GREEN,  "[==========] ");
285
286  size_t testcase_count = testcase_list.size();
287  size_t test_count = 0;
288  for (const auto& testcase : testcase_list) {
289    test_count += testcase.TestCount();
290  }
291
292  printf("Running %zu %s from %zu %s.\n",
293         test_count, (test_count == 1) ? "test" : "tests",
294         testcase_count, (testcase_count == 1) ? "test case" : "test cases");
295  fflush(stdout);
296}
297
298// The bionic CTS test needs the standard gtest output format.
299#if defined(USING_GTEST_OUTPUT_FORMAT)
300
301static void OnTestEndPrint(const TestCase& testcase, size_t test_id) {
302  ColoredPrintf(COLOR_GREEN, "[ RUN      ] ");
303  printf("%s\n", testcase.GetTestName(test_id).c_str());
304
305  const std::string& test_output = testcase.GetTest(test_id).GetTestOutput();
306  printf("%s", test_output.c_str());
307
308  TestResult result = testcase.GetTestResult(test_id);
309  if (result == TEST_SUCCESS) {
310    ColoredPrintf(COLOR_GREEN, "[       OK ] ");
311  } else {
312    ColoredPrintf(COLOR_RED, "[  FAILED  ] ");
313  }
314  printf("%s", testcase.GetTestName(test_id).c_str());
315  if (testing::GTEST_FLAG(print_time)) {
316    printf(" (%" PRId64 " ms)", testcase.GetTestTime(test_id) / 1000000);
317  }
318  printf("\n");
319  fflush(stdout);
320}
321
322#else  // !defined(USING_GTEST_OUTPUT_FORMAT)
323
324static void OnTestEndPrint(const TestCase& testcase, size_t test_id) {
325  TestResult result = testcase.GetTestResult(test_id);
326  if (result == TEST_SUCCESS) {
327    ColoredPrintf(COLOR_GREEN, "[    OK    ] ");
328  } else if (result == TEST_FAILED) {
329    ColoredPrintf(COLOR_RED, "[  FAILED  ] ");
330  } else if (result == TEST_TIMEOUT) {
331    ColoredPrintf(COLOR_RED, "[ TIMEOUT  ] ");
332  }
333
334  printf("%s", testcase.GetTestName(test_id).c_str());
335  if (testing::GTEST_FLAG(print_time)) {
336    printf(" (%" PRId64 " ms)", testcase.GetTestTime(test_id) / 1000000);
337  }
338  printf("\n");
339
340  const std::string& test_output = testcase.GetTest(test_id).GetTestOutput();
341  printf("%s", test_output.c_str());
342  fflush(stdout);
343}
344
345#endif  // !defined(USING_GTEST_OUTPUT_FORMAT)
346
347static void OnTestIterationEndPrint(const std::vector<TestCase>& testcase_list, size_t /*iteration*/,
348                                    int64_t elapsed_time_ns) {
349
350  std::vector<std::string> fail_test_name_list;
351  std::vector<std::pair<std::string, int64_t>> timeout_test_list;
352
353  // Tests that exceeded the warnline but did not time out.
354  std::vector<std::tuple<std::string, int64_t, int>> slow_test_list;
355  size_t testcase_count = testcase_list.size();
356  size_t test_count = 0;
357  size_t success_test_count = 0;
358
359  for (const auto& testcase : testcase_list) {
360    test_count += testcase.TestCount();
361    for (size_t i = 0; i < testcase.TestCount(); ++i) {
362      TestResult result = testcase.GetTestResult(i);
363      if (result == TEST_SUCCESS) {
364        ++success_test_count;
365      } else if (result == TEST_FAILED) {
366        fail_test_name_list.push_back(testcase.GetTestName(i));
367      } else if (result == TEST_TIMEOUT) {
368        timeout_test_list.push_back(std::make_pair(testcase.GetTestName(i),
369                                                   testcase.GetTestTime(i)));
370      }
371      if (result != TEST_TIMEOUT &&
372          testcase.GetTestTime(i) / 1000000 >= GetWarnlineInfo(testcase.GetTestName(i))) {
373        slow_test_list.push_back(std::make_tuple(testcase.GetTestName(i),
374                                                 testcase.GetTestTime(i),
375                                                 GetWarnlineInfo(testcase.GetTestName(i))));
376      }
377    }
378  }
379
380  ColoredPrintf(COLOR_GREEN,  "[==========] ");
381  printf("%zu %s from %zu %s ran.", test_count, (test_count == 1) ? "test" : "tests",
382                                    testcase_count, (testcase_count == 1) ? "test case" : "test cases");
383  if (testing::GTEST_FLAG(print_time)) {
384    printf(" (%" PRId64 " ms total)", elapsed_time_ns / 1000000);
385  }
386  printf("\n");
387  ColoredPrintf(COLOR_GREEN,  "[   PASS   ] ");
388  printf("%zu %s.\n", success_test_count, (success_test_count == 1) ? "test" : "tests");
389
390  // Print failed tests.
391  size_t fail_test_count = fail_test_name_list.size();
392  if (fail_test_count > 0) {
393    ColoredPrintf(COLOR_RED,  "[   FAIL   ] ");
394    printf("%zu %s, listed below:\n", fail_test_count, (fail_test_count == 1) ? "test" : "tests");
395    for (const auto& name : fail_test_name_list) {
396      ColoredPrintf(COLOR_RED, "[   FAIL   ] ");
397      printf("%s\n", name.c_str());
398    }
399  }
400
401  // Print tests that timed out.
402  size_t timeout_test_count = timeout_test_list.size();
403  if (timeout_test_count > 0) {
404    ColoredPrintf(COLOR_RED, "[ TIMEOUT  ] ");
405    printf("%zu %s, listed below:\n", timeout_test_count, (timeout_test_count == 1) ? "test" : "tests");
406    for (const auto& timeout_pair : timeout_test_list) {
407      ColoredPrintf(COLOR_RED, "[ TIMEOUT  ] ");
408      printf("%s (stopped at %" PRId64 " ms)\n", timeout_pair.first.c_str(),
409                                                 timeout_pair.second / 1000000);
410    }
411  }
412
413  // Print tests that exceeded the warnline.
414  size_t slow_test_count = slow_test_list.size();
415  if (slow_test_count > 0) {
416    ColoredPrintf(COLOR_YELLOW, "[   SLOW   ] ");
417    printf("%zu %s, listed below:\n", slow_test_count, (slow_test_count == 1) ? "test" : "tests");
418    for (const auto& slow_tuple : slow_test_list) {
419      ColoredPrintf(COLOR_YELLOW, "[   SLOW   ] ");
420      printf("%s (%" PRId64 " ms, exceed warnline %d ms)\n", std::get<0>(slow_tuple).c_str(),
421             std::get<1>(slow_tuple) / 1000000, std::get<2>(slow_tuple));
422    }
423  }
424
425  if (fail_test_count > 0) {
426    printf("\n%2zu FAILED %s\n", fail_test_count, (fail_test_count == 1) ? "TEST" : "TESTS");
427  }
428  if (timeout_test_count > 0) {
429    printf("%2zu TIMEOUT %s\n", timeout_test_count, (timeout_test_count == 1) ? "TEST" : "TESTS");
430  }
431  if (slow_test_count > 0) {
432    printf("%2zu SLOW %s\n", slow_test_count, (slow_test_count == 1) ? "TEST" : "TESTS");
433  }
434  fflush(stdout);
435}
436
437// Output an xml file when --gtest_output is used. We write this function ourselves because
438// we can't reuse gtest.cc:XmlUnitTestResultPrinter: it is defined entirely inside gtest.cc
439// and is not exposed to the outside. Moreover, since gtest doesn't run in the parent
440// process, we don't have the gtest objects that XmlUnitTestResultPrinter needs.
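// The generated file has roughly this shape (illustrative values):
//   <?xml version="1.0" encoding="UTF-8"?>
//   <testsuites tests="2" failures="1" disabled="0" errors="0" timestamp="..." time="1.234" name="AllTests">
//     <testsuite name="stdio" tests="2" failures="1" disabled="0" errors="0" time="1.234">
//       <testcase name="fread" status="run" time="0.010" classname="stdio" />
//     </testsuite>
//   </testsuites>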
441void OnTestIterationEndXmlPrint(const std::string& xml_output_filename,
442                                const std::vector<TestCase>& testcase_list,
443                                time_t epoch_iteration_start_time,
444                                int64_t elapsed_time_ns) {
445  FILE* fp = fopen(xml_output_filename.c_str(), "w");
446  if (fp == NULL) {
447    fprintf(stderr, "failed to open '%s': %s\n", xml_output_filename.c_str(), strerror(errno));
448    exit(1);
449  }
450
451  size_t total_test_count = 0;
452  size_t total_failed_count = 0;
453  std::vector<size_t> failed_count_list(testcase_list.size(), 0);
454  std::vector<int64_t> elapsed_time_list(testcase_list.size(), 0);
455  for (size_t i = 0; i < testcase_list.size(); ++i) {
456    auto& testcase = testcase_list[i];
457    total_test_count += testcase.TestCount();
458    for (size_t j = 0; j < testcase.TestCount(); ++j) {
459      if (testcase.GetTestResult(j) != TEST_SUCCESS) {
460        ++failed_count_list[i];
461      }
462      elapsed_time_list[i] += testcase.GetTestTime(j);
463    }
464    total_failed_count += failed_count_list[i];
465  }
466
467  const tm* time_struct = localtime(&epoch_iteration_start_time);
468  char timestamp[40];
469  snprintf(timestamp, sizeof(timestamp), "%4d-%02d-%02dT%02d:%02d:%02d",
470           time_struct->tm_year + 1900, time_struct->tm_mon + 1, time_struct->tm_mday,
471           time_struct->tm_hour, time_struct->tm_min, time_struct->tm_sec);
472
473  fputs("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n", fp);
474  fprintf(fp, "<testsuites tests=\"%zu\" failures=\"%zu\" disabled=\"0\" errors=\"0\"",
475          total_test_count, total_failed_count);
476  fprintf(fp, " timestamp=\"%s\" time=\"%.3lf\" name=\"AllTests\">\n", timestamp, elapsed_time_ns / 1e9);
477  for (size_t i = 0; i < testcase_list.size(); ++i) {
478    auto& testcase = testcase_list[i];
479    fprintf(fp, "  <testsuite name=\"%s\" tests=\"%zu\" failures=\"%zu\" disabled=\"0\" errors=\"0\"",
480            testcase.GetName().c_str(), testcase.TestCount(), failed_count_list[i]);
481    fprintf(fp, " time=\"%.3lf\">\n", elapsed_time_list[i] / 1e9);
482
483    for (size_t j = 0; j < testcase.TestCount(); ++j) {
484      fprintf(fp, "    <testcase name=\"%s\" status=\"run\" time=\"%.3lf\" classname=\"%s\"",
485              testcase.GetTest(j).GetName().c_str(), testcase.GetTestTime(j) / 1e9,
486              testcase.GetName().c_str());
487      if (testcase.GetTestResult(j) == TEST_SUCCESS) {
488        fputs(" />\n", fp);
489      } else {
490        fputs(">\n", fp);
491        const std::string& test_output = testcase.GetTest(j).GetTestOutput();
492        fprintf(fp, "      <failure message=\"%s\" type=\"\">\n", test_output.c_str());
493        fputs("      </failure>\n", fp);
494        fputs("    </testcase>\n", fp);
495      }
496    }
497
498    fputs("  </testsuite>\n", fp);
499  }
500  fputs("</testsuites>\n", fp);
501  fclose(fp);
502}
503
504struct ChildProcInfo {
505  pid_t pid;
506  int64_t start_time_ns;
507  int64_t end_time_ns;
508  int64_t deadline_end_time_ns; // The time after which the test is considered to have timed out.
509  size_t testcase_id, test_id;
510  bool finished;
511  bool timed_out;
512  int exit_status;
513  int child_read_fd; // File descriptor to read child test failure info.
514};
515
516// Runs in the forked child process: appends --gtest_filter=<test_name> to argv and runs that single test.
517static void ChildProcessFn(int argc, char** argv, const std::string& test_name) {
518  char** new_argv = new char*[argc + 2];
519  memcpy(new_argv, argv, sizeof(char*) * argc);
520
521  char* filter_arg = new char [test_name.size() + 20];
522  strcpy(filter_arg, "--gtest_filter=");
523  strcat(filter_arg, test_name.c_str());
524  new_argv[argc] = filter_arg;
525  new_argv[argc + 1] = NULL;
526
527  int new_argc = argc + 1;
528  testing::InitGoogleTest(&new_argc, new_argv);
529  int result = RUN_ALL_TESTS();
530  exit(result);
531}
532
533static ChildProcInfo RunChildProcess(const std::string& test_name, int testcase_id, int test_id,
534                                     sigset_t sigmask, int argc, char** argv) {
535  int pipefd[2];
536  int ret = pipe2(pipefd, O_NONBLOCK);
537  if (ret == -1) {
538    perror("pipe2 in RunTestInSeparateProc");
539    exit(1);
540  }
541  pid_t pid = fork();
542  if (pid == -1) {
543    perror("fork in RunTestInSeparateProc");
544    exit(1);
545  } else if (pid == 0) {
546    // In child process, run a single test.
547    close(pipefd[0]);
548    close(STDOUT_FILENO);
549    close(STDERR_FILENO);
550    dup2(pipefd[1], STDOUT_FILENO);
551    dup2(pipefd[1], STDERR_FILENO);
552
553    if (sigprocmask(SIG_SETMASK, &sigmask, NULL) == -1) {
554      perror("sigprocmask SIG_SETMASK");
555      exit(1);
556    }
557    ChildProcessFn(argc, argv, test_name);
558    // Unreachable.
559  }
560  // In parent process, initialize child process info.
561  close(pipefd[1]);
562  ChildProcInfo child_proc;
563  child_proc.child_read_fd = pipefd[0];
564  child_proc.pid = pid;
565  child_proc.start_time_ns = NanoTime();
566  child_proc.deadline_end_time_ns = child_proc.start_time_ns + GetDeadlineInfo(test_name) * 1000000LL;
567  child_proc.testcase_id = testcase_id;
568  child_proc.test_id = test_id;
569  child_proc.finished = false;
570  return child_proc;
571}
572
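// Poll for pending SIGQUIT/SIGINT without blocking. SIGQUIT prints the list of currently
// running tests; SIGINT kills all running child processes and exits the parent.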
573static void HandleSignals(std::vector<TestCase>& testcase_list,
574                            std::vector<ChildProcInfo>& child_proc_list) {
575  sigset_t waiting_mask;
576  sigemptyset(&waiting_mask);
577  sigaddset(&waiting_mask, SIGINT);
578  sigaddset(&waiting_mask, SIGQUIT);
579  timespec timeout;
580  timeout.tv_sec = timeout.tv_nsec = 0;
581  while (true) {
582    int signo = TEMP_FAILURE_RETRY(sigtimedwait(&waiting_mask, NULL, &timeout));
583    if (signo == -1) {
584      if (errno == EAGAIN) {
585        return; // Timeout, no pending signals.
586      }
587      perror("sigtimedwait");
588      exit(1);
589    } else if (signo == SIGQUIT) {
590      // Print current running tests.
591      printf("List of current running tests:\n");
592      for (auto& child_proc : child_proc_list) {
593        if (child_proc.pid != 0) {
594          std::string test_name = testcase_list[child_proc.testcase_id].GetTestName(child_proc.test_id);
595          int64_t current_time_ns = NanoTime();
596          int64_t run_time_ms = (current_time_ns - child_proc.start_time_ns) / 1000000;
597          printf("  %s (%" PRId64 " ms)\n", test_name.c_str(), run_time_ms);
598        }
599      }
600    } else if (signo == SIGINT) {
601      // Kill current running tests.
602      for (auto& child_proc : child_proc_list) {
603        if (child_proc.pid != 0) {
604          // Send SIGKILL to ensure the child process can be killed unconditionally.
605          kill(child_proc.pid, SIGKILL);
606        }
607      }
608      // SIGINT kills the parent process as well.
609      exit(1);
610    }
611  }
612}
613
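// Mark the child process that has just exited as finished and record its exit status.
// Returns true if exit_pid matched one of the tracked child processes.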
614static bool CheckChildProcExit(pid_t exit_pid, int exit_status,
615                               std::vector<ChildProcInfo>& child_proc_list) {
616  for (size_t i = 0; i < child_proc_list.size(); ++i) {
617    if (child_proc_list[i].pid == exit_pid) {
618      child_proc_list[i].finished = true;
619      child_proc_list[i].timed_out = false;
620      child_proc_list[i].exit_status = exit_status;
621      child_proc_list[i].end_time_ns = NanoTime();
622      return true;
623    }
624  }
625  return false;
626}
627
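// Mark every child process that has passed its deadline as finished and timed out.
// Returns the number of child processes newly marked as timed out.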
628static size_t CheckChildProcTimeout(std::vector<ChildProcInfo>& child_proc_list) {
629  int64_t current_time_ns = NanoTime();
630  size_t timeout_child_count = 0;
631  for (size_t i = 0; i < child_proc_list.size(); ++i) {
632    if (child_proc_list[i].deadline_end_time_ns <= current_time_ns) {
633      child_proc_list[i].finished = true;
634      child_proc_list[i].timed_out = true;
635      child_proc_list[i].end_time_ns = current_time_ns;
636      ++timeout_child_count;
637    }
638  }
639  return timeout_child_count;
640}
641
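// Wait until at least one child process has finished or timed out. While waiting, pending
// SIGQUIT/SIGINT signals are handled, and the loop sleeps 1 ms per iteration to avoid
// busy waiting.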
642static void WaitChildProcs(std::vector<TestCase>& testcase_list,
643                           std::vector<ChildProcInfo>& child_proc_list) {
644  size_t finished_child_count = 0;
645  while (true) {
646    int status;
647    pid_t result;
648    while ((result = TEMP_FAILURE_RETRY(waitpid(-1, &status, WNOHANG))) > 0) {
649      if (CheckChildProcExit(result, status, child_proc_list)) {
650        ++finished_child_count;
651      }
652    }
653
654    if (result == -1) {
655      if (errno == ECHILD) {
656        // This happens when we have no running child processes.
657        return;
658      } else {
659        perror("waitpid");
660        exit(1);
661      }
662    } else if (result == 0) {
663      finished_child_count += CheckChildProcTimeout(child_proc_list);
664    }
665
666    if (finished_child_count > 0) {
667      return;
668    }
669
670    HandleSignals(testcase_list, child_proc_list);
671
672    // sleep 1 ms to avoid busy looping.
673    timespec sleep_time;
674    sleep_time.tv_sec = 0;
675    sleep_time.tv_nsec = 1000000;
676    nanosleep(&sleep_time, NULL);
677  }
678}
679
680static TestResult WaitForOneChild(pid_t pid) {
681  int exit_status;
682  pid_t result = TEMP_FAILURE_RETRY(waitpid(pid, &exit_status, 0));
683
684  TestResult test_result = TEST_SUCCESS;
685  if (result != pid || WEXITSTATUS(exit_status) != 0) {
686    test_result = TEST_FAILED;
687  }
688  return test_result;
689}
690
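// Collect the result of one finished child process: record the running time, kill the
// child if it timed out, drain the captured output from the pipe, and set the test result
// based on how the child exited.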
691static void CollectChildTestResult(const ChildProcInfo& child_proc, TestCase& testcase) {
692  int test_id = child_proc.test_id;
693  testcase.SetTestTime(test_id, child_proc.end_time_ns - child_proc.start_time_ns);
694  if (child_proc.timed_out) {
695    // The child process marked as timed_out has not exited, and we should kill it manually.
696    kill(child_proc.pid, SIGKILL);
697    WaitForOneChild(child_proc.pid);
698  }
699
700  while (true) {
701    char buf[1024];
702    ssize_t bytes_read = TEMP_FAILURE_RETRY(read(child_proc.child_read_fd, buf, sizeof(buf) - 1));
703    if (bytes_read > 0) {
704      buf[bytes_read] = '\0';
705      testcase.GetTest(test_id).AppendTestOutput(buf);
706    } else if (bytes_read == 0) {
707      break; // Read end.
708    } else {
709      if (errno == EAGAIN) {
710        // No data is available. This rarely happens; it occurs only when the child process spawned
711        // other processes that have not exited yet. But the child process itself has already exited
712        // or been killed, so the test has finished, and we shouldn't wait any longer.
713        break;
714      }
715      perror("read child_read_fd in RunTestInSeparateProc");
716      exit(1);
717    }
718  }
719  close(child_proc.child_read_fd);
720
721  if (child_proc.timed_out) {
722    testcase.SetTestResult(test_id, TEST_TIMEOUT);
723    char buf[1024];
724    snprintf(buf, sizeof(buf), "%s killed because of timeout at %" PRId64 " ms.\n",
725             testcase.GetTestName(test_id).c_str(), testcase.GetTestTime(test_id) / 1000000);
726    testcase.GetTest(test_id).AppendTestOutput(buf);
727
728  } else if (WIFSIGNALED(child_proc.exit_status)) {
729    // Record signal terminated test as failed.
730    testcase.SetTestResult(test_id, TEST_FAILED);
731    char buf[1024];
732    snprintf(buf, sizeof(buf), "%s terminated by signal: %s.\n",
733             testcase.GetTestName(test_id).c_str(), strsignal(WTERMSIG(child_proc.exit_status)));
734    testcase.GetTest(test_id).AppendTestOutput(buf);
735
736  } else {
737    testcase.SetTestResult(test_id, WEXITSTATUS(child_proc.exit_status) == 0 ?
738                           TEST_SUCCESS : TEST_FAILED);
739  }
740}
741
742// We fork one child per test and wait on the children here instead of using multiple threads,
743// because calling fork() from a multi-threaded process easily leads to deadlock.
744// Returns true if all tests run successfully, false otherwise.
745static bool RunTestInSeparateProc(int argc, char** argv, std::vector<TestCase>& testcase_list,
746                                  size_t iteration_count, size_t job_count,
747                                  const std::string& xml_output_filename) {
748  // Remove the default result printer to avoid printing environment setup/teardown information for each test.
749  testing::UnitTest::GetInstance()->listeners().Release(
750                        testing::UnitTest::GetInstance()->listeners().default_result_printer());
751  testing::UnitTest::GetInstance()->listeners().Append(new TestResultPrinter);
752
753  // Signals are blocked here as we want to handle them in HandleSignals() later.
754  sigset_t block_mask, orig_mask;
755  sigemptyset(&block_mask);
756  sigaddset(&block_mask, SIGINT);
757  sigaddset(&block_mask, SIGQUIT);
758  if (sigprocmask(SIG_BLOCK, &block_mask, &orig_mask) == -1) {
759    perror("sigprocmask SIG_BLOCK");
760    exit(1);
761  }
762
763  bool all_tests_passed = true;
764
765  for (size_t iteration = 1; iteration <= iteration_count; ++iteration) {
766    OnTestIterationStartPrint(testcase_list, iteration, iteration_count);
767    int64_t iteration_start_time_ns = NanoTime();
768    time_t epoch_iteration_start_time = time(NULL);
769
770    // Run up to job_count tests in parallel, each test in a child process.
771    std::vector<ChildProcInfo> child_proc_list;
772
773    // Next test to run is [next_testcase_id:next_test_id].
774    size_t next_testcase_id = 0;
775    size_t next_test_id = 0;
776
777    // Record how many tests are finished.
778    std::vector<size_t> finished_test_count_list(testcase_list.size(), 0);
779    size_t finished_testcase_count = 0;
780
781    while (finished_testcase_count < testcase_list.size()) {
782      // Run up to job_count child processes.
783      while (child_proc_list.size() < job_count && next_testcase_id < testcase_list.size()) {
784        std::string test_name = testcase_list[next_testcase_id].GetTestName(next_test_id);
785        ChildProcInfo child_proc = RunChildProcess(test_name, next_testcase_id, next_test_id,
786                                                   orig_mask, argc, argv);
787        child_proc_list.push_back(child_proc);
788        if (++next_test_id == testcase_list[next_testcase_id].TestCount()) {
789          next_test_id = 0;
790          ++next_testcase_id;
791        }
792      }
793
794      // Wait for any child proc finish or timeout.
795      WaitChildProcs(testcase_list, child_proc_list);
796
797      // Collect result.
798      auto it = child_proc_list.begin();
799      while (it != child_proc_list.end()) {
800        auto& child_proc = *it;
801        if (child_proc.finished == true) {
802          size_t testcase_id = child_proc.testcase_id;
803          size_t test_id = child_proc.test_id;
804          TestCase& testcase = testcase_list[testcase_id];
805
806          CollectChildTestResult(child_proc, testcase);
807          OnTestEndPrint(testcase, test_id);
808
809          if (++finished_test_count_list[testcase_id] == testcase.TestCount()) {
810            ++finished_testcase_count;
811          }
812          if (testcase.GetTestResult(test_id) != TEST_SUCCESS) {
813            all_tests_passed = false;
814          }
815
816          it = child_proc_list.erase(it);
817        } else {
818          ++it;
819        }
820      }
821    }
822
823    int64_t elapsed_time_ns = NanoTime() - iteration_start_time_ns;
824    OnTestIterationEndPrint(testcase_list, iteration, elapsed_time_ns);
825    if (!xml_output_filename.empty()) {
826      OnTestIterationEndXmlPrint(xml_output_filename, testcase_list, epoch_iteration_start_time,
827                                 elapsed_time_ns);
828    }
829  }
830
831  // Restore signal mask.
832  if (sigprocmask(SIG_SETMASK, &orig_mask, NULL) == -1) {
833    perror("sigprocmask SIG_SETMASK");
834    exit(1);
835  }
836
837  return all_tests_passed;
838}
839
840static size_t GetProcessorCount() {
841  return static_cast<size_t>(sysconf(_SC_NPROCESSORS_ONLN));
842}
843
844static void AddPathSeparatorInTestProgramPath(std::vector<char*>& args) {
845  // To run a DeathTest in threadsafe mode, gtest requires that the test program be invoked
846  // via a path that contains at least one path separator.
847  // The reason is that gtest uses clone() + execve() to run the DeathTest in threadsafe mode,
848  // and execve() doesn't consult the PATH environment variable, so it will not succeed
849  // unless we pass the absolute or relative path of the test program directly.
850  if (strchr(args[0], '/') == NULL) {
851    char path[PATH_MAX];
852    ssize_t path_len = readlink("/proc/self/exe", path, sizeof(path));
853    if (path_len <= 0 || path_len >= static_cast<ssize_t>(sizeof(path))) {
854      perror("readlink");
855      exit(1);
856    }
857    path[path_len] = '\0';
858    args[0] = strdup(path);
859  }
860}
861
862static void AddGtestFilterSynonym(std::vector<char*>& args) {
863  // Support --gtest-filter as a synonym for --gtest_filter.
864  for (size_t i = 1; i < args.size(); ++i) {
865    if (strncmp(args[i], "--gtest-filter", strlen("--gtest-filter")) == 0) {
866      args[i][7] = '_';
867    }
868  }
869}
870
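// Options consumed by the parent process in isolation mode. The gtest flags recorded here
// are applied in the parent (color, print_time) or consumed directly (repeat becomes the
// iteration count, output the xml file path); the remaining arguments stay in args and are
// forwarded to the child processes.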
871struct IsolationTestOptions {
872  bool isolate;
873  size_t job_count;
874  int test_deadline_ms;
875  int test_warnline_ms;
876  std::string gtest_color;
877  bool gtest_print_time;
878  size_t gtest_repeat;
879  std::string gtest_output;
880};
881
882// Pick out the options that are not for gtest: args has two parts, one used by the isolation
883// test mode described in PrintHelpInfo(), the other handled by testing::InitGoogleTest() in
884// gtest. PickOptions() moves the first part into the IsolationTestOptions structure, leaving
885// the second part in args.
886// Arguments:
887//   args passes in all command arguments, and passes out only the options meant for gtest.
888//   options passes out the test options used in isolation mode.
889// Returns false if the arguments are invalid.
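// For example (illustrative): "-j2" sets job_count to 2 and "--deadline=90000" raises the
// per-test deadline, while "--gtest_repeat=N" and "--gtest_output=xml:..." are recorded in
// options and removed from args, so each child process runs a single iteration and writes
// no xml file.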
890static bool PickOptions(std::vector<char*>& args, IsolationTestOptions& options) {
891  for (size_t i = 1; i < args.size(); ++i) {
892    if (strcmp(args[i], "--help") == 0 || strcmp(args[i], "-h") == 0) {
893      PrintHelpInfo();
894      options.isolate = false;
895      return true;
896    }
897  }
898
899  AddPathSeparatorInTestProgramPath(args);
900  AddGtestFilterSynonym(args);
901
902  // If the --bionic-selftest argument is used, enable only the self tests; otherwise filter the self tests out.
903  bool enable_selftest = false;
904  for (size_t i = 1; i < args.size(); ++i) {
905    if (strcmp(args[i], "--bionic-selftest") == 0) {
906      // This argument enables the "bionic_selftest*" tests for self testing; it is not shown in the help info.
907      // Don't remove this option from arguments.
908      enable_selftest = true;
909    }
910  }
911  std::string gtest_filter_str;
912  for (size_t i = args.size() - 1; i >= 1; --i) {
913    if (strncmp(args[i], "--gtest_filter=", strlen("--gtest_filter=")) == 0) {
914      gtest_filter_str = std::string(args[i]);
915      args.erase(args.begin() + i);
916      break;
917    }
918  }
919  if (enable_selftest == true) {
920    args.push_back(strdup("--gtest_filter=bionic_selftest*"));
921  } else {
922    if (gtest_filter_str == "") {
923      gtest_filter_str = "--gtest_filter=-bionic_selftest*";
924    } else {
925      // Find if '-' for NEGATIVE_PATTERNS exists.
926      if (gtest_filter_str.find(":-") != std::string::npos) {
927        gtest_filter_str += ":bionic_selftest*";
928      } else {
929        gtest_filter_str += ":-bionic_selftest*";
930      }
931    }
932    args.push_back(strdup(gtest_filter_str.c_str()));
933  }
934
935  options.isolate = true;
936  // Parse arguments that prevent us from running in isolation mode.
937  for (size_t i = 1; i < args.size(); ++i) {
938    if (strcmp(args[i], "--no-isolate") == 0) {
939      options.isolate = false;
940    } else if (strcmp(args[i], "--gtest_list_tests") == 0) {
941      options.isolate = false;
942    }
943  }
944
945  // Stop parsing if we will not run in isolation mode.
946  if (options.isolate == false) {
947    return true;
948  }
949
950  // Init default isolation test options.
951  options.job_count = GetProcessorCount();
952  options.test_deadline_ms = DEFAULT_GLOBAL_TEST_RUN_DEADLINE_MS;
953  options.test_warnline_ms = DEFAULT_GLOBAL_TEST_RUN_WARNLINE_MS;
954  options.gtest_color = testing::GTEST_FLAG(color);
955  options.gtest_print_time = testing::GTEST_FLAG(print_time);
956  options.gtest_repeat = testing::GTEST_FLAG(repeat);
957  options.gtest_output = testing::GTEST_FLAG(output);
958
959  // Parse arguments speficied for isolation mode.
960  for (size_t i = 1; i < args.size(); ++i) {
961    if (strncmp(args[i], "-j", strlen("-j")) == 0) {
962      char* p = args[i] + strlen("-j");
963      int count = 0;
964      if (*p != '\0') {
965        // Argument like -j5.
966        count = atoi(p);
967      } else if (args.size() > i + 1) {
968        // Arguments like -j 5.
969        count = atoi(args[i + 1]);
970        ++i;
971      }
972      if (count <= 0) {
973        fprintf(stderr, "invalid job count: %d\n", count);
974        return false;
975      }
976      options.job_count = static_cast<size_t>(count);
977    } else if (strncmp(args[i], "--deadline=", strlen("--deadline=")) == 0) {
978      int time_ms = atoi(args[i] + strlen("--deadline="));
979      if (time_ms <= 0) {
980        fprintf(stderr, "invalid deadline: %d\n", time_ms);
981        return false;
982      }
983      options.test_deadline_ms = time_ms;
984    } else if (strncmp(args[i], "--warnline=", strlen("--warnline=")) == 0) {
985      int time_ms = atoi(args[i] + strlen("--warnline="));
986      if (time_ms <= 0) {
987        fprintf(stderr, "invalid warnline: %d\n", time_ms);
988        return false;
989      }
990      options.test_warnline_ms = time_ms;
991    } else if (strncmp(args[i], "--gtest_color=", strlen("--gtest_color=")) == 0) {
992      options.gtest_color = args[i] + strlen("--gtest_color=");
993    } else if (strcmp(args[i], "--gtest_print_time=0") == 0) {
994      options.gtest_print_time = false;
995    } else if (strncmp(args[i], "--gtest_repeat=", strlen("--gtest_repeat=")) == 0) {
996      int repeat = atoi(args[i] + strlen("--gtest_repeat="));
997      if (repeat < 0) {
998        fprintf(stderr, "invalid gtest_repeat count: %d\n", repeat);
999        return false;
1000      }
1001      options.gtest_repeat = repeat;
1002      // Remove --gtest_repeat=xx from arguments, so each child process runs only one iteration of a single test.
1003      args.erase(args.begin() + i);
1004      --i;
1005    } else if (strncmp(args[i], "--gtest_output=", strlen("--gtest_output=")) == 0) {
1006      std::string output = args[i] + strlen("--gtest_output=");
1007      // Generate the output xml file path following the same strategy as gtest.
1008      bool success = true;
1009      if (strncmp(output.c_str(), "xml:", strlen("xml:")) == 0) {
1010        output = output.substr(strlen("xml:"));
1011        if (output.size() == 0) {
1012          success = false;
1013        }
1014        // Make absolute path.
1015        if (success && output[0] != '/') {
1016          char* cwd = getcwd(NULL, 0);
1017          if (cwd != NULL) {
1018            output = std::string(cwd) + "/" + output;
1019            free(cwd);
1020          } else {
1021            success = false;
1022          }
1023        }
1024        // Add file name if output is a directory.
1025        if (success && output.back() == '/') {
1026          output += "test_details.xml";
1027        }
1028      }
1029      if (success) {
1030        options.gtest_output = output;
1031      } else {
1032        fprintf(stderr, "invalid gtest_output file: %s\n", args[i]);
1033        return false;
1034      }
1035
1036      // Remove --gtest_output=xxx from arguments, so child processes will not write xml files.
1037      args.erase(args.begin() + i);
1038      --i;
1039    }
1040  }
1041
1042  // Add --no-isolate to args to prevent child processes from running in isolation mode again.
1043  // Since DeathTest will call execve() to re-run the program, this argument should always be added.
1044  args.insert(args.begin() + 1, strdup("--no-isolate"));
1045  return true;
1046}
1047
1048int main(int argc, char** argv) {
1049  std::vector<char*> arg_list;
1050  for (int i = 0; i < argc; ++i) {
1051    arg_list.push_back(argv[i]);
1052  }
1053
1054  IsolationTestOptions options;
1055  if (PickOptions(arg_list, options) == false) {
1056    return 1;
1057  }
1058
1059  if (options.isolate == true) {
1060    // Set global variables.
1061    global_test_run_deadline_ms = options.test_deadline_ms;
1062    global_test_run_warnline_ms = options.test_warnline_ms;
1063    testing::GTEST_FLAG(color) = options.gtest_color.c_str();
1064    testing::GTEST_FLAG(print_time) = options.gtest_print_time;
1065    std::vector<TestCase> testcase_list;
1066
1067    argc = static_cast<int>(arg_list.size());
1068    arg_list.push_back(NULL);
1069    if (EnumerateTests(argc, arg_list.data(), testcase_list) == false) {
1070      return 1;
1071    }
1072    bool all_test_passed = RunTestInSeparateProc(argc, arg_list.data(), testcase_list,
1073                              options.gtest_repeat, options.job_count, options.gtest_output);
1074    return all_test_passed ? 0 : 1;
1075  } else {
1076    argc = static_cast<int>(arg_list.size());
1077    arg_list.push_back(NULL);
1078    testing::InitGoogleTest(&argc, arg_list.data());
1079    return RUN_ALL_TESTS();
1080  }
1081}
1082
1083//################################################################################
1084// Bionic gtest self test; run it with the --bionic-selftest option.
1085
1086TEST(bionic_selftest, test_success) {
1087  ASSERT_EQ(1, 1);
1088}
1089
1090TEST(bionic_selftest, test_fail) {
1091  ASSERT_EQ(0, 1);
1092}
1093
1094TEST(bionic_selftest, test_time_warn) {
1095  sleep(4);
1096}
1097
1098TEST(bionic_selftest, test_timeout) {
1099  while (1) {}
1100}
1101
1102TEST(bionic_selftest, test_signal_SEGV_terminated) {
1103  char* p = reinterpret_cast<char*>(static_cast<intptr_t>(atoi("0")));
1104  *p = 3;
1105}
1106
1107class bionic_selftest_DeathTest : public BionicDeathTest {};
1108
1109static void deathtest_helper_success() {
1110  ASSERT_EQ(1, 1);
1111  exit(0);
1112}
1113
1114TEST_F(bionic_selftest_DeathTest, success) {
1115  ASSERT_EXIT(deathtest_helper_success(), ::testing::ExitedWithCode(0), "");
1116}
1117
1118static void deathtest_helper_fail() {
1119  ASSERT_EQ(1, 0);
1120}
1121
1122TEST_F(bionic_selftest_DeathTest, fail) {
1123  ASSERT_EXIT(deathtest_helper_fail(), ::testing::ExitedWithCode(0), "");
1124}
1125