gtest_main.cpp revision 1d4c780a2e1fe81cf3ea35ba0641dd14744c142a
1/* 2 * Copyright (C) 2014 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include <gtest/gtest.h> 18 19#include <errno.h> 20#include <fcntl.h> 21#include <inttypes.h> 22#include <signal.h> 23#include <stdarg.h> 24#include <stdio.h> 25#include <string.h> 26#include <sys/wait.h> 27#include <time.h> 28#include <unistd.h> 29 30#include <string> 31#include <tuple> 32#include <utility> 33#include <vector> 34 35#include "BionicDeathTest.h" // For selftest. 36 37namespace testing { 38namespace internal { 39 40// Reuse of testing::internal::ColoredPrintf in gtest. 41enum GTestColor { 42 COLOR_DEFAULT, 43 COLOR_RED, 44 COLOR_GREEN, 45 COLOR_YELLOW 46}; 47 48void ColoredPrintf(GTestColor color, const char* fmt, ...); 49 50} // namespace internal 51} // namespace testing 52 53using testing::internal::GTestColor; 54using testing::internal::COLOR_DEFAULT; 55using testing::internal::COLOR_RED; 56using testing::internal::COLOR_GREEN; 57using testing::internal::COLOR_YELLOW; 58using testing::internal::ColoredPrintf; 59 60constexpr int DEFAULT_GLOBAL_TEST_RUN_DEADLINE_MS = 60000; 61constexpr int DEFAULT_GLOBAL_TEST_RUN_WARNLINE_MS = 2000; 62 63// The time each test can run before killed for the reason of timeout. 64// It takes effect only with --isolate option. 65static int global_test_run_deadline_ms = DEFAULT_GLOBAL_TEST_RUN_DEADLINE_MS; 66 67// The time each test can run before be warned for too much running time. 68// It takes effect only with --isolate option. 69static int global_test_run_warnline_ms = DEFAULT_GLOBAL_TEST_RUN_WARNLINE_MS; 70 71// Return deadline duration for a test, in ms. 72static int GetDeadlineInfo(const std::string& /*test_name*/) { 73 return global_test_run_deadline_ms; 74} 75 76// Return warnline duration for a test, in ms. 77static int GetWarnlineInfo(const std::string& /*test_name*/) { 78 return global_test_run_warnline_ms; 79} 80 81static void PrintHelpInfo() { 82 printf("Bionic Unit Test Options:\n" 83 " -j [JOB_COUNT] or -j[JOB_COUNT]\n" 84 " Run up to JOB_COUNT tests in parallel.\n" 85 " Use isolation mode, Run each test in a separate process.\n" 86 " If JOB_COUNT is not given, it is set to the count of available processors.\n" 87 " --no-isolate\n" 88 " Don't use isolation mode, run all tests in a single process.\n" 89 " --deadline=[TIME_IN_MS]\n" 90 " Run each test in no longer than [TIME_IN_MS] time.\n" 91 " It takes effect only in isolation mode. Deafult deadline is 60000 ms.\n" 92 " --warnline=[TIME_IN_MS]\n" 93 " Test running longer than [TIME_IN_MS] will be warned.\n" 94 " It takes effect only in isolation mode. Default warnline is 2000 ms.\n" 95 " --gtest-filter=POSITIVE_PATTERNS[-NEGATIVE_PATTERNS]\n" 96 " Used as a synonym for --gtest_filter option in gtest.\n" 97 "Default bionic unit test option is -j.\n" 98 "In isolation mode, you can send SIGQUIT to the parent process to show current\n" 99 "running tests, or send SIGINT to the parent process to stop testing and\n" 100 "clean up current running tests.\n" 101 "\n"); 102} 103 104enum TestResult { 105 TEST_SUCCESS = 0, 106 TEST_FAILED, 107 TEST_TIMEOUT 108}; 109 110class Test { 111 public: 112 Test() {} // For std::vector<Test>. 113 explicit Test(const char* name) : name_(name) {} 114 115 const std::string& GetName() const { return name_; } 116 117 void SetResult(TestResult result) { result_ = result; } 118 119 TestResult GetResult() const { return result_; } 120 121 void SetTestTime(int64_t elapsed_time_ns) { elapsed_time_ns_ = elapsed_time_ns; } 122 123 int64_t GetTestTime() const { return elapsed_time_ns_; } 124 125 void AppendFailureMessage(const std::string& s) { failure_message_ += s; } 126 127 const std::string& GetFailureMessage() const { return failure_message_; } 128 129 private: 130 const std::string name_; 131 TestResult result_; 132 int64_t elapsed_time_ns_; 133 std::string failure_message_; 134}; 135 136class TestCase { 137 public: 138 TestCase() {} // For std::vector<TestCase>. 139 explicit TestCase(const char* name) : name_(name) {} 140 141 const std::string& GetName() const { return name_; } 142 143 void AppendTest(const char* test_name) { 144 test_list_.push_back(Test(test_name)); 145 } 146 147 size_t TestCount() const { return test_list_.size(); } 148 149 std::string GetTestName(size_t test_id) const { 150 VerifyTestId(test_id); 151 return name_ + "." + test_list_[test_id].GetName(); 152 } 153 154 Test& GetTest(size_t test_id) { 155 VerifyTestId(test_id); 156 return test_list_[test_id]; 157 } 158 159 const Test& GetTest(size_t test_id) const { 160 VerifyTestId(test_id); 161 return test_list_[test_id]; 162 } 163 164 void SetTestResult(size_t test_id, TestResult result) { 165 VerifyTestId(test_id); 166 test_list_[test_id].SetResult(result); 167 } 168 169 TestResult GetTestResult(size_t test_id) const { 170 VerifyTestId(test_id); 171 return test_list_[test_id].GetResult(); 172 } 173 174 void SetTestTime(size_t test_id, int64_t elapsed_time_ns) { 175 VerifyTestId(test_id); 176 test_list_[test_id].SetTestTime(elapsed_time_ns); 177 } 178 179 int64_t GetTestTime(size_t test_id) const { 180 VerifyTestId(test_id); 181 return test_list_[test_id].GetTestTime(); 182 } 183 184 private: 185 void VerifyTestId(size_t test_id) const { 186 if(test_id >= test_list_.size()) { 187 fprintf(stderr, "test_id %zu out of range [0, %zu)\n", test_id, test_list_.size()); 188 exit(1); 189 } 190 } 191 192 private: 193 const std::string name_; 194 std::vector<Test> test_list_; 195}; 196 197// This is the file descriptor used by the child process to write failure message. 198// The parent process will collect the information and dump to stdout / xml file. 199static int child_output_fd; 200 201class TestResultPrinter : public testing::EmptyTestEventListener { 202 public: 203 TestResultPrinter() : pinfo_(NULL) {} 204 virtual void OnTestStart(const testing::TestInfo& test_info) { 205 pinfo_ = &test_info; // Record test_info for use in OnTestPartResult. 206 } 207 virtual void OnTestPartResult(const testing::TestPartResult& result); 208 209 private: 210 const testing::TestInfo* pinfo_; 211}; 212 213// Called after an assertion failure. 214void TestResultPrinter::OnTestPartResult(const testing::TestPartResult& result) { 215 // If the test part succeeded, we don't need to do anything. 216 if (result.type() == testing::TestPartResult::kSuccess) 217 return; 218 219 // Print failure message from the assertion (e.g. expected this and got that). 220 char buf[1024]; 221 snprintf(buf, sizeof(buf), "%s:(%d) Failure in test %s.%s\n%s\n", result.file_name(), 222 result.line_number(), 223 pinfo_->test_case_name(), 224 pinfo_->name(), 225 result.message()); 226 227 int towrite = strlen(buf); 228 char* p = buf; 229 while (towrite > 0) { 230 ssize_t bytes_written = TEMP_FAILURE_RETRY(write(child_output_fd, p, towrite)); 231 if (bytes_written == -1) { 232 fprintf(stderr, "failed to write child_output_fd: %s\n", strerror(errno)); 233 exit(1); 234 } else { 235 towrite -= bytes_written; 236 p += bytes_written; 237 } 238 } 239} 240 241static int64_t NanoTime() { 242 struct timespec t; 243 t.tv_sec = t.tv_nsec = 0; 244 clock_gettime(CLOCK_MONOTONIC, &t); 245 return static_cast<int64_t>(t.tv_sec) * 1000000000LL + t.tv_nsec; 246} 247 248static bool EnumerateTests(int argc, char** argv, std::vector<TestCase>& testcase_list) { 249 std::string command; 250 for (int i = 0; i < argc; ++i) { 251 command += argv[i]; 252 command += " "; 253 } 254 command += "--gtest_list_tests"; 255 FILE* fp = popen(command.c_str(), "r"); 256 if (fp == NULL) { 257 perror("popen"); 258 return false; 259 } 260 261 char buf[200]; 262 while (fgets(buf, sizeof(buf), fp) != NULL) { 263 char* p = buf; 264 265 while (*p != '\0' && isspace(*p)) { 266 ++p; 267 } 268 if (*p == '\0') continue; 269 char* start = p; 270 while (*p != '\0' && !isspace(*p)) { 271 ++p; 272 } 273 char* end = p; 274 while (*p != '\0' && isspace(*p)) { 275 ++p; 276 } 277 if (*p != '\0') { 278 // This is not we want, gtest must meet with some error when parsing the arguments. 279 fprintf(stderr, "argument error, check with --help\n"); 280 return false; 281 } 282 *end = '\0'; 283 if (*(end - 1) == '.') { 284 *(end - 1) = '\0'; 285 testcase_list.push_back(TestCase(start)); 286 } else { 287 testcase_list.back().AppendTest(start); 288 } 289 } 290 int result = pclose(fp); 291 return (result != -1 && WEXITSTATUS(result) == 0); 292} 293 294// Part of the following *Print functions are copied from external/gtest/src/gtest.cc: 295// PrettyUnitTestResultPrinter. The reason for copy is that PrettyUnitTestResultPrinter 296// is defined and used in gtest.cc, which is hard to reuse. 297static void OnTestIterationStartPrint(const std::vector<TestCase>& testcase_list, size_t iteration, 298 size_t iteration_count) { 299 if (iteration_count > 1) { 300 printf("\nRepeating all tests (iteration %zu) . . .\n\n", iteration); 301 } 302 ColoredPrintf(COLOR_GREEN, "[==========] "); 303 304 size_t testcase_count = testcase_list.size(); 305 size_t test_count = 0; 306 for (const auto& testcase : testcase_list) { 307 test_count += testcase.TestCount(); 308 } 309 310 printf("Running %zu %s from %zu %s.\n", 311 test_count, (test_count == 1) ? "test" : "tests", 312 testcase_count, (testcase_count == 1) ? "test case" : "test cases"); 313 fflush(stdout); 314} 315 316static void OnTestEndPrint(const TestCase& testcase, size_t test_id) { 317 TestResult result = testcase.GetTestResult(test_id); 318 if (result == TEST_SUCCESS) { 319 ColoredPrintf(COLOR_GREEN, "[ OK ] "); 320 } else if (result == TEST_FAILED) { 321 ColoredPrintf(COLOR_RED, "[ FAILED ] "); 322 } else if (result == TEST_TIMEOUT) { 323 ColoredPrintf(COLOR_RED, "[ TIMEOUT ] "); 324 } 325 326 printf("%s", testcase.GetTestName(test_id).c_str()); 327 if (testing::GTEST_FLAG(print_time)) { 328 printf(" (%" PRId64 " ms)\n", testcase.GetTestTime(test_id) / 1000000); 329 } else { 330 printf("\n"); 331 } 332 333 const std::string& failure_message = testcase.GetTest(test_id).GetFailureMessage(); 334 printf("%s", failure_message.c_str()); 335 fflush(stdout); 336} 337 338static void OnTestIterationEndPrint(const std::vector<TestCase>& testcase_list, size_t /*iteration*/, 339 int64_t elapsed_time_ns) { 340 341 std::vector<std::string> fail_test_name_list; 342 std::vector<std::pair<std::string, int64_t>> timeout_test_list; 343 344 // For tests run exceed warnline but not timeout. 345 std::vector<std::tuple<std::string, int64_t, int>> slow_test_list; 346 size_t testcase_count = testcase_list.size(); 347 size_t test_count = 0; 348 size_t success_test_count = 0; 349 350 for (const auto& testcase : testcase_list) { 351 test_count += testcase.TestCount(); 352 for (size_t i = 0; i < testcase.TestCount(); ++i) { 353 TestResult result = testcase.GetTestResult(i); 354 if (result == TEST_SUCCESS) { 355 ++success_test_count; 356 } else if (result == TEST_FAILED) { 357 fail_test_name_list.push_back(testcase.GetTestName(i)); 358 } else if (result == TEST_TIMEOUT) { 359 timeout_test_list.push_back(std::make_pair(testcase.GetTestName(i), 360 testcase.GetTestTime(i))); 361 } 362 if (result != TEST_TIMEOUT && 363 testcase.GetTestTime(i) / 1000000 >= GetWarnlineInfo(testcase.GetTestName(i))) { 364 slow_test_list.push_back(std::make_tuple(testcase.GetTestName(i), 365 testcase.GetTestTime(i), 366 GetWarnlineInfo(testcase.GetTestName(i)))); 367 } 368 } 369 } 370 371 ColoredPrintf(COLOR_GREEN, "[==========] "); 372 printf("%zu %s from %zu %s ran.", test_count, (test_count == 1) ? "test" : "tests", 373 testcase_count, (testcase_count == 1) ? "test case" : "test cases"); 374 if (testing::GTEST_FLAG(print_time)) { 375 printf(" (%" PRId64 " ms total)", elapsed_time_ns / 1000000); 376 } 377 printf("\n"); 378 ColoredPrintf(COLOR_GREEN, "[ PASS ] "); 379 printf("%zu %s.\n", success_test_count, (success_test_count == 1) ? "test" : "tests"); 380 381 // Print tests failed. 382 size_t fail_test_count = fail_test_name_list.size(); 383 if (fail_test_count > 0) { 384 ColoredPrintf(COLOR_RED, "[ FAIL ] "); 385 printf("%zu %s, listed below:\n", fail_test_count, (fail_test_count == 1) ? "test" : "tests"); 386 for (const auto& name : fail_test_name_list) { 387 ColoredPrintf(COLOR_RED, "[ FAIL ] "); 388 printf("%s\n", name.c_str()); 389 } 390 } 391 392 // Print tests run timeout. 393 size_t timeout_test_count = timeout_test_list.size(); 394 if (timeout_test_count > 0) { 395 ColoredPrintf(COLOR_RED, "[ TIMEOUT ] "); 396 printf("%zu %s, listed below:\n", timeout_test_count, (timeout_test_count == 1) ? "test" : "tests"); 397 for (const auto& timeout_pair : timeout_test_list) { 398 ColoredPrintf(COLOR_RED, "[ TIMEOUT ] "); 399 printf("%s (stopped at %" PRId64 " ms)\n", timeout_pair.first.c_str(), 400 timeout_pair.second / 1000000); 401 } 402 } 403 404 // Print tests run exceed warnline. 405 size_t slow_test_count = slow_test_list.size(); 406 if (slow_test_count > 0) { 407 ColoredPrintf(COLOR_YELLOW, "[ SLOW ] "); 408 printf("%zu %s, listed below:\n", slow_test_count, (slow_test_count == 1) ? "test" : "tests"); 409 for (const auto& slow_tuple : slow_test_list) { 410 ColoredPrintf(COLOR_YELLOW, "[ SLOW ] "); 411 printf("%s (%" PRId64 " ms, exceed warnline %d ms)\n", std::get<0>(slow_tuple).c_str(), 412 std::get<1>(slow_tuple) / 1000000, std::get<2>(slow_tuple)); 413 } 414 } 415 416 if (fail_test_count > 0) { 417 printf("\n%2zu FAILED %s\n", fail_test_count, (fail_test_count == 1) ? "TEST" : "TESTS"); 418 } 419 if (timeout_test_count > 0) { 420 printf("%2zu TIMEOUT %s\n", timeout_test_count, (timeout_test_count == 1) ? "TEST" : "TESTS"); 421 } 422 if (slow_test_count > 0) { 423 printf("%2zu SLOW %s\n", slow_test_count, (slow_test_count == 1) ? "TEST" : "TESTS"); 424 } 425 fflush(stdout); 426} 427 428// Output xml file when --gtest_output is used, write this function as we can't reuse 429// gtest.cc:XmlUnitTestResultPrinter. The reason is XmlUnitTestResultPrinter is totally 430// defined in gtest.cc and not expose to outside. What's more, as we don't run gtest in 431// the parent process, we don't have gtest classes which are needed by XmlUnitTestResultPrinter. 432void OnTestIterationEndXmlPrint(const std::string& xml_output_filename, 433 const std::vector<TestCase>& testcase_list, 434 time_t epoch_iteration_start_time, 435 int64_t elapsed_time_ns) { 436 FILE* fp = fopen(xml_output_filename.c_str(), "w"); 437 if (fp == NULL) { 438 fprintf(stderr, "failed to open '%s': %s\n", xml_output_filename.c_str(), strerror(errno)); 439 exit(1); 440 } 441 442 size_t total_test_count = 0; 443 size_t total_failed_count = 0; 444 std::vector<size_t> failed_count_list(testcase_list.size(), 0); 445 std::vector<int64_t> elapsed_time_list(testcase_list.size(), 0); 446 for (size_t i = 0; i < testcase_list.size(); ++i) { 447 auto& testcase = testcase_list[i]; 448 total_test_count += testcase.TestCount(); 449 for (size_t j = 0; j < testcase.TestCount(); ++j) { 450 if (testcase.GetTestResult(j) != TEST_SUCCESS) { 451 ++failed_count_list[i]; 452 } 453 elapsed_time_list[i] += testcase.GetTestTime(j); 454 } 455 total_failed_count += failed_count_list[i]; 456 } 457 458 const tm* time_struct = localtime(&epoch_iteration_start_time); 459 char timestamp[40]; 460 snprintf(timestamp, sizeof(timestamp), "%4d-%02d-%02dT%02d:%02d:%02d", 461 time_struct->tm_year + 1900, time_struct->tm_mon + 1, time_struct->tm_mday, 462 time_struct->tm_hour, time_struct->tm_min, time_struct->tm_sec); 463 464 fputs("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n", fp); 465 fprintf(fp, "<testsuites tests=\"%zu\" failures=\"%zu\" disabled=\"0\" errors=\"0\"", 466 total_test_count, total_failed_count); 467 fprintf(fp, " timestamp=\"%s\" time=\"%.3lf\" name=\"AllTests\">\n", timestamp, elapsed_time_ns / 1e9); 468 for (size_t i = 0; i < testcase_list.size(); ++i) { 469 auto& testcase = testcase_list[i]; 470 fprintf(fp, " <testsuite name=\"%s\" tests=\"%zu\" failures=\"%zu\" disabled=\"0\" errors=\"0\"", 471 testcase.GetName().c_str(), testcase.TestCount(), failed_count_list[i]); 472 fprintf(fp, " time=\"%.3lf\">\n", elapsed_time_list[i] / 1e9); 473 474 for (size_t j = 0; j < testcase.TestCount(); ++j) { 475 fprintf(fp, " <testcase name=\"%s\" status=\"run\" time=\"%.3lf\" classname=\"%s\"", 476 testcase.GetTest(j).GetName().c_str(), testcase.GetTestTime(j) / 1e9, 477 testcase.GetName().c_str()); 478 if (testcase.GetTestResult(j) == TEST_SUCCESS) { 479 fputs(" />\n", fp); 480 } else { 481 fputs(">\n", fp); 482 const std::string& failure_message = testcase.GetTest(j).GetFailureMessage(); 483 fprintf(fp, " <failure message=\"%s\" type=\"\">\n", failure_message.c_str()); 484 fputs(" </failure>\n", fp); 485 fputs(" </testcase>\n", fp); 486 } 487 } 488 489 fputs(" </testsuite>\n", fp); 490 } 491 fputs("</testsuites>\n", fp); 492 fclose(fp); 493} 494 495struct ChildProcInfo { 496 pid_t pid; 497 int64_t start_time_ns; 498 int64_t end_time_ns; 499 int64_t deadline_end_time_ns; // The time when the test is thought of as timeout. 500 size_t testcase_id, test_id; 501 bool finished; 502 bool timed_out; 503 int exit_status; 504 int child_read_fd; // File descriptor to read child test failure info. 505}; 506 507// Forked Child process, run the single test. 508static void ChildProcessFn(int argc, char** argv, const std::string& test_name) { 509 char** new_argv = new char*[argc + 2]; 510 memcpy(new_argv, argv, sizeof(char*) * argc); 511 512 char* filter_arg = new char [test_name.size() + 20]; 513 strcpy(filter_arg, "--gtest_filter="); 514 strcat(filter_arg, test_name.c_str()); 515 new_argv[argc] = filter_arg; 516 new_argv[argc + 1] = NULL; 517 518 int new_argc = argc + 1; 519 testing::InitGoogleTest(&new_argc, new_argv); 520 int result = RUN_ALL_TESTS(); 521 exit(result); 522} 523 524static ChildProcInfo RunChildProcess(const std::string& test_name, int testcase_id, int test_id, 525 sigset_t sigmask, int argc, char** argv) { 526 int pipefd[2]; 527 int ret = pipe2(pipefd, O_NONBLOCK); 528 if (ret == -1) { 529 perror("pipe2 in RunTestInSeparateProc"); 530 exit(1); 531 } 532 pid_t pid = fork(); 533 if (pid == -1) { 534 perror("fork in RunTestInSeparateProc"); 535 exit(1); 536 } else if (pid == 0) { 537 // In child process, run a single test. 538 close(pipefd[0]); 539 child_output_fd = pipefd[1]; 540 541 if (sigprocmask(SIG_SETMASK, &sigmask, NULL) == -1) { 542 perror("sigprocmask SIG_SETMASK"); 543 exit(1); 544 } 545 ChildProcessFn(argc, argv, test_name); 546 // Unreachable. 547 } 548 // In parent process, initialize child process info. 549 close(pipefd[1]); 550 ChildProcInfo child_proc; 551 child_proc.child_read_fd = pipefd[0]; 552 child_proc.pid = pid; 553 child_proc.start_time_ns = NanoTime(); 554 child_proc.deadline_end_time_ns = child_proc.start_time_ns + GetDeadlineInfo(test_name) * 1000000LL; 555 child_proc.testcase_id = testcase_id; 556 child_proc.test_id = test_id; 557 child_proc.finished = false; 558 return child_proc; 559} 560 561static void HandleSignals(std::vector<TestCase>& testcase_list, 562 std::vector<ChildProcInfo>& child_proc_list) { 563 sigset_t waiting_mask; 564 sigemptyset(&waiting_mask); 565 sigaddset(&waiting_mask, SIGINT); 566 sigaddset(&waiting_mask, SIGQUIT); 567 timespec timeout; 568 timeout.tv_sec = timeout.tv_nsec = 0; 569 while (true) { 570 int signo = TEMP_FAILURE_RETRY(sigtimedwait(&waiting_mask, NULL, &timeout)); 571 if (signo == -1) { 572 if (errno == EAGAIN) { 573 return; // Timeout, no pending signals. 574 } 575 perror("sigtimedwait"); 576 exit(1); 577 } else if (signo == SIGQUIT) { 578 // Print current running tests. 579 printf("List of current running tests:\n"); 580 for (auto& child_proc : child_proc_list) { 581 if (child_proc.pid != 0) { 582 std::string test_name = testcase_list[child_proc.testcase_id].GetTestName(child_proc.test_id); 583 int64_t current_time_ns = NanoTime(); 584 int64_t run_time_ms = (current_time_ns - child_proc.start_time_ns) / 1000000; 585 printf(" %s (%" PRId64 " ms)\n", test_name.c_str(), run_time_ms); 586 } 587 } 588 } else if (signo == SIGINT) { 589 // Kill current running tests. 590 for (auto& child_proc : child_proc_list) { 591 if (child_proc.pid != 0) { 592 // Send SIGKILL to ensure the child process can be killed unconditionally. 593 kill(child_proc.pid, SIGKILL); 594 } 595 } 596 // SIGINT kills the parent process as well. 597 exit(1); 598 } 599 } 600} 601 602static bool CheckChildProcExit(pid_t exit_pid, int exit_status, 603 std::vector<ChildProcInfo>& child_proc_list) { 604 for (size_t i = 0; i < child_proc_list.size(); ++i) { 605 if (child_proc_list[i].pid == exit_pid) { 606 child_proc_list[i].finished = true; 607 child_proc_list[i].timed_out = false; 608 child_proc_list[i].exit_status = exit_status; 609 child_proc_list[i].end_time_ns = NanoTime(); 610 return true; 611 } 612 } 613 return false; 614} 615 616static size_t CheckChildProcTimeout(std::vector<ChildProcInfo>& child_proc_list) { 617 int64_t current_time_ns = NanoTime(); 618 size_t timeout_child_count = 0; 619 for (size_t i = 0; i < child_proc_list.size(); ++i) { 620 if (child_proc_list[i].deadline_end_time_ns <= current_time_ns) { 621 child_proc_list[i].finished = true; 622 child_proc_list[i].timed_out = true; 623 child_proc_list[i].end_time_ns = current_time_ns; 624 ++timeout_child_count; 625 } 626 } 627 return timeout_child_count; 628} 629 630static void WaitChildProcs(std::vector<TestCase>& testcase_list, 631 std::vector<ChildProcInfo>& child_proc_list) { 632 size_t finished_child_count = 0; 633 while (true) { 634 int status; 635 pid_t result; 636 while ((result = TEMP_FAILURE_RETRY(waitpid(-1, &status, WNOHANG))) > 0) { 637 if (CheckChildProcExit(result, status, child_proc_list)) { 638 ++finished_child_count; 639 } 640 } 641 642 if (result == -1) { 643 if (errno == ECHILD) { 644 // This happens when we have no running child processes. 645 return; 646 } else { 647 perror("waitpid"); 648 exit(1); 649 } 650 } else if (result == 0) { 651 finished_child_count += CheckChildProcTimeout(child_proc_list); 652 } 653 654 if (finished_child_count > 0) { 655 return; 656 } 657 658 HandleSignals(testcase_list, child_proc_list); 659 660 // sleep 1 ms to avoid busy looping. 661 timespec sleep_time; 662 sleep_time.tv_sec = 0; 663 sleep_time.tv_nsec = 1000000; 664 nanosleep(&sleep_time, NULL); 665 } 666} 667 668static TestResult WaitForOneChild(pid_t pid) { 669 int exit_status; 670 pid_t result = TEMP_FAILURE_RETRY(waitpid(pid, &exit_status, 0)); 671 672 TestResult test_result = TEST_SUCCESS; 673 if (result != pid || WEXITSTATUS(exit_status) != 0) { 674 test_result = TEST_FAILED; 675 } 676 return test_result; 677} 678 679static void CollectChildTestResult(const ChildProcInfo& child_proc, TestCase& testcase) { 680 int test_id = child_proc.test_id; 681 testcase.SetTestTime(test_id, child_proc.end_time_ns - child_proc.start_time_ns); 682 if (child_proc.timed_out) { 683 // The child process marked as timed_out has not exited, and we should kill it manually. 684 kill(child_proc.pid, SIGKILL); 685 WaitForOneChild(child_proc.pid); 686 } 687 688 while (true) { 689 char buf[1024]; 690 ssize_t bytes_read = TEMP_FAILURE_RETRY(read(child_proc.child_read_fd, buf, sizeof(buf) - 1)); 691 if (bytes_read > 0) { 692 buf[bytes_read] = '\0'; 693 testcase.GetTest(test_id).AppendFailureMessage(buf); 694 } else if (bytes_read == 0) { 695 break; // Read end. 696 } else { 697 if (errno == EAGAIN) { 698 // No data is available. This rarely happens, only when the child process created other 699 // processes which have not exited so far. But the child process has already exited or 700 // been killed, so the test has finished, and we shouldn't wait further. 701 break; 702 } 703 perror("read child_read_fd in RunTestInSeparateProc"); 704 exit(1); 705 } 706 } 707 close(child_proc.child_read_fd); 708 709 if (child_proc.timed_out) { 710 testcase.SetTestResult(test_id, TEST_TIMEOUT); 711 char buf[1024]; 712 snprintf(buf, sizeof(buf), "%s killed because of timeout at %" PRId64 " ms.\n", 713 testcase.GetTestName(test_id).c_str(), testcase.GetTestTime(test_id) / 1000000); 714 testcase.GetTest(test_id).AppendFailureMessage(buf); 715 716 } else if (WIFSIGNALED(child_proc.exit_status)) { 717 // Record signal terminated test as failed. 718 testcase.SetTestResult(test_id, TEST_FAILED); 719 char buf[1024]; 720 snprintf(buf, sizeof(buf), "%s terminated by signal: %s.\n", 721 testcase.GetTestName(test_id).c_str(), strsignal(WTERMSIG(child_proc.exit_status))); 722 testcase.GetTest(test_id).AppendFailureMessage(buf); 723 724 } else { 725 testcase.SetTestResult(test_id, WEXITSTATUS(child_proc.exit_status) == 0 ? 726 TEST_SUCCESS : TEST_FAILED); 727 } 728} 729 730// We choose to use multi-fork and multi-wait here instead of multi-thread, because it always 731// makes deadlock to use fork in multi-thread. 732static void RunTestInSeparateProc(int argc, char** argv, std::vector<TestCase>& testcase_list, 733 size_t iteration_count, size_t job_count, 734 const std::string& xml_output_filename) { 735 // Stop default result printer to avoid environment setup/teardown information for each test. 736 testing::UnitTest::GetInstance()->listeners().Release( 737 testing::UnitTest::GetInstance()->listeners().default_result_printer()); 738 testing::UnitTest::GetInstance()->listeners().Append(new TestResultPrinter); 739 740 // Signals are blocked here as we want to handle them in HandleSignals() later. 741 sigset_t block_mask, orig_mask; 742 sigemptyset(&block_mask); 743 sigaddset(&block_mask, SIGINT); 744 sigaddset(&block_mask, SIGQUIT); 745 if (sigprocmask(SIG_BLOCK, &block_mask, &orig_mask) == -1) { 746 perror("sigprocmask SIG_BLOCK"); 747 exit(1); 748 } 749 750 for (size_t iteration = 1; iteration <= iteration_count; ++iteration) { 751 OnTestIterationStartPrint(testcase_list, iteration, iteration_count); 752 int64_t iteration_start_time_ns = NanoTime(); 753 time_t epoch_iteration_start_time = time(NULL); 754 755 // Run up to job_count tests in parallel, each test in a child process. 756 std::vector<ChildProcInfo> child_proc_list; 757 758 // Next test to run is [next_testcase_id:next_test_id]. 759 size_t next_testcase_id = 0; 760 size_t next_test_id = 0; 761 762 // Record how many tests are finished. 763 std::vector<size_t> finished_test_count_list(testcase_list.size(), 0); 764 size_t finished_testcase_count = 0; 765 766 while (finished_testcase_count < testcase_list.size()) { 767 // run up to job_count child processes. 768 while (child_proc_list.size() < job_count && next_testcase_id < testcase_list.size()) { 769 std::string test_name = testcase_list[next_testcase_id].GetTestName(next_test_id); 770 ChildProcInfo child_proc = RunChildProcess(test_name, next_testcase_id, next_test_id, 771 orig_mask, argc, argv); 772 child_proc_list.push_back(child_proc); 773 if (++next_test_id == testcase_list[next_testcase_id].TestCount()) { 774 next_test_id = 0; 775 ++next_testcase_id; 776 } 777 } 778 779 // Wait for any child proc finish or timeout. 780 WaitChildProcs(testcase_list, child_proc_list); 781 782 // Collect result. 783 auto it = child_proc_list.begin(); 784 while (it != child_proc_list.end()) { 785 auto& child_proc = *it; 786 if (child_proc.finished == true) { 787 size_t testcase_id = child_proc.testcase_id; 788 size_t test_id = child_proc.test_id; 789 TestCase& testcase = testcase_list[testcase_id]; 790 791 CollectChildTestResult(child_proc, testcase); 792 OnTestEndPrint(testcase, test_id); 793 794 if (++finished_test_count_list[testcase_id] == testcase.TestCount()) { 795 ++finished_testcase_count; 796 } 797 798 it = child_proc_list.erase(it); 799 } else { 800 ++it; 801 } 802 } 803 } 804 805 int64_t elapsed_time_ns = NanoTime() - iteration_start_time_ns; 806 OnTestIterationEndPrint(testcase_list, iteration, elapsed_time_ns); 807 if (!xml_output_filename.empty()) { 808 OnTestIterationEndXmlPrint(xml_output_filename, testcase_list, epoch_iteration_start_time, 809 elapsed_time_ns); 810 } 811 } 812 813 // Restore signal mask. 814 if (sigprocmask(SIG_SETMASK, &orig_mask, NULL) == -1) { 815 perror("sigprocmask SIG_SETMASK"); 816 exit(1); 817 } 818} 819 820static size_t GetProcessorCount() { 821 return static_cast<size_t>(sysconf(_SC_NPROCESSORS_ONLN)); 822} 823 824static void AddGtestFilterSynonym(std::vector<char*>& args) { 825 // Support --gtest-filter as a synonym for --gtest_filter. 826 for (size_t i = 1; i < args.size(); ++i) { 827 if (strncmp(args[i], "--gtest-filter", strlen("--gtest-filter")) == 0) { 828 args[i][7] = '_'; 829 } 830 } 831} 832 833struct IsolationTestOptions { 834 bool isolate; 835 size_t job_count; 836 int test_deadline_ms; 837 int test_warnline_ms; 838 std::string gtest_color; 839 bool gtest_print_time; 840 size_t gtest_repeat; 841 std::string gtest_output; 842}; 843 844// Pick options not for gtest: There are two parts in args, one part is used in isolation test mode 845// as described in PrintHelpInfo(), the other part is handled by testing::InitGoogleTest() in 846// gtest. PickOptions() picks the first part into IsolationTestOptions structure, leaving the second 847// part in args. 848// Arguments: 849// args is used to pass in all command arguments, and pass out only the part of options for gtest. 850// options is used to pass out test options in isolation mode. 851// Return false if there is error in arguments. 852static bool PickOptions(std::vector<char*>& args, IsolationTestOptions& options) { 853 for (size_t i = 1; i < args.size(); ++i) { 854 if (strcmp(args[i], "--help") == 0 || strcmp(args[i], "-h") == 0) { 855 PrintHelpInfo(); 856 options.isolate = false; 857 return true; 858 } 859 } 860 861 AddGtestFilterSynonym(args); 862 863 // if --bionic-selftest argument is used, only enable self tests, otherwise remove self tests. 864 bool enable_selftest = false; 865 for (size_t i = 1; i < args.size(); ++i) { 866 if (strcmp(args[i], "--bionic-selftest") == 0) { 867 // This argument is to enable "bionic_selftest*" for self test, and is not shown in help info. 868 // Don't remove this option from arguments. 869 enable_selftest = true; 870 } 871 } 872 std::string gtest_filter_str; 873 for (size_t i = args.size() - 1; i >= 1; --i) { 874 if (strncmp(args[i], "--gtest_filter=", strlen("--gtest_filter=")) == 0) { 875 gtest_filter_str = std::string(args[i]); 876 args.erase(args.begin() + i); 877 break; 878 } 879 } 880 if (enable_selftest == true) { 881 args.push_back(strdup("--gtest_filter=bionic_selftest*")); 882 } else { 883 if (gtest_filter_str == "") { 884 gtest_filter_str = "--gtest_filter=-bionic_selftest*"; 885 } else { 886 // Find if '-' for NEGATIVE_PATTERNS exists. 887 if (gtest_filter_str.find(":-") != std::string::npos) { 888 gtest_filter_str += ":bionic_selftest*"; 889 } else { 890 gtest_filter_str += ":-bionic_selftest*"; 891 } 892 } 893 args.push_back(strdup(gtest_filter_str.c_str())); 894 } 895 896 options.isolate = true; 897 // Parse arguments that make us can't run in isolation mode. 898 for (size_t i = 1; i < args.size(); ++i) { 899 if (strcmp(args[i], "--no-isolate") == 0) { 900 options.isolate = false; 901 } else if (strcmp(args[i], "--gtest_list_tests") == 0) { 902 options.isolate = false; 903 } 904 } 905 906 // Stop parsing if we will not run in isolation mode. 907 if (options.isolate == false) { 908 return true; 909 } 910 911 // Init default isolation test options. 912 options.job_count = GetProcessorCount(); 913 options.test_deadline_ms = DEFAULT_GLOBAL_TEST_RUN_DEADLINE_MS; 914 options.test_warnline_ms = DEFAULT_GLOBAL_TEST_RUN_WARNLINE_MS; 915 options.gtest_color = testing::GTEST_FLAG(color); 916 options.gtest_print_time = testing::GTEST_FLAG(print_time); 917 options.gtest_repeat = testing::GTEST_FLAG(repeat); 918 options.gtest_output = testing::GTEST_FLAG(output); 919 920 // Parse arguments speficied for isolation mode. 921 for (size_t i = 1; i < args.size(); ++i) { 922 if (strncmp(args[i], "-j", strlen("-j")) == 0) { 923 char* p = args[i] + strlen("-j"); 924 int count = 0; 925 if (*p != '\0') { 926 // Argument like -j5. 927 count = atoi(p); 928 } else if (args.size() > i + 1) { 929 // Arguments like -j 5. 930 count = atoi(args[i + 1]); 931 ++i; 932 } 933 if (count <= 0) { 934 fprintf(stderr, "invalid job count: %d\n", count); 935 return false; 936 } 937 options.job_count = static_cast<size_t>(count); 938 } else if (strncmp(args[i], "--deadline=", strlen("--deadline=")) == 0) { 939 int time_ms = atoi(args[i] + strlen("--deadline=")); 940 if (time_ms <= 0) { 941 fprintf(stderr, "invalid deadline: %d\n", time_ms); 942 return false; 943 } 944 options.test_deadline_ms = time_ms; 945 } else if (strncmp(args[i], "--warnline=", strlen("--warnline=")) == 0) { 946 int time_ms = atoi(args[i] + strlen("--warnline=")); 947 if (time_ms <= 0) { 948 fprintf(stderr, "invalid warnline: %d\n", time_ms); 949 return false; 950 } 951 options.test_warnline_ms = time_ms; 952 } else if (strncmp(args[i], "--gtest_color=", strlen("--gtest_color=")) == 0) { 953 options.gtest_color = args[i] + strlen("--gtest_color="); 954 } else if (strcmp(args[i], "--gtest_print_time=0") == 0) { 955 options.gtest_print_time = false; 956 } else if (strncmp(args[i], "--gtest_repeat=", strlen("--gtest_repeat=")) == 0) { 957 int repeat = atoi(args[i] + strlen("--gtest_repeat=")); 958 if (repeat < 0) { 959 fprintf(stderr, "invalid gtest_repeat count: %d\n", repeat); 960 return false; 961 } 962 options.gtest_repeat = repeat; 963 // Remove --gtest_repeat=xx from arguments, so child process only run one iteration for a single test. 964 args.erase(args.begin() + i); 965 --i; 966 } else if (strncmp(args[i], "--gtest_output=", strlen("--gtest_output=")) == 0) { 967 std::string output = args[i] + strlen("--gtest_output="); 968 // generate output xml file path according to the strategy in gtest. 969 bool success = true; 970 if (strncmp(output.c_str(), "xml:", strlen("xml:")) == 0) { 971 output = output.substr(strlen("xml:")); 972 if (output.size() == 0) { 973 success = false; 974 } 975 // Make absolute path. 976 if (success && output[0] != '/') { 977 char* cwd = getcwd(NULL, 0); 978 if (cwd != NULL) { 979 output = std::string(cwd) + "/" + output; 980 free(cwd); 981 } else { 982 success = false; 983 } 984 } 985 // Add file name if output is a directory. 986 if (success && output.back() == '/') { 987 output += "test_details.xml"; 988 } 989 } 990 if (success) { 991 options.gtest_output = output; 992 } else { 993 fprintf(stderr, "invalid gtest_output file: %s\n", args[i]); 994 return false; 995 } 996 997 // Remove --gtest_output=xxx from arguments, so child process will not write xml file. 998 args.erase(args.begin() + i); 999 --i; 1000 } 1001 } 1002 1003 // Add --no-isolate in args to prevent child process from running in isolation mode again. 1004 // As DeathTest will try to call execve(), this argument should always be added. 1005 args.insert(args.begin() + 1, strdup("--no-isolate")); 1006 return true; 1007} 1008 1009int main(int argc, char** argv) { 1010 std::vector<char*> arg_list; 1011 for (int i = 0; i < argc; ++i) { 1012 arg_list.push_back(argv[i]); 1013 } 1014 1015 IsolationTestOptions options; 1016 if (PickOptions(arg_list, options) == false) { 1017 return 1; 1018 } 1019 1020 if (options.isolate == true) { 1021 // Set global variables. 1022 global_test_run_deadline_ms = options.test_deadline_ms; 1023 global_test_run_warnline_ms = options.test_warnline_ms; 1024 testing::GTEST_FLAG(color) = options.gtest_color.c_str(); 1025 testing::GTEST_FLAG(print_time) = options.gtest_print_time; 1026 std::vector<TestCase> testcase_list; 1027 1028 argc = static_cast<int>(arg_list.size()); 1029 arg_list.push_back(NULL); 1030 if (EnumerateTests(argc, arg_list.data(), testcase_list) == false) { 1031 return 1; 1032 } 1033 RunTestInSeparateProc(argc, arg_list.data(), testcase_list, options.gtest_repeat, 1034 options.job_count, options.gtest_output); 1035 } else { 1036 argc = static_cast<int>(arg_list.size()); 1037 arg_list.push_back(NULL); 1038 testing::InitGoogleTest(&argc, arg_list.data()); 1039 return RUN_ALL_TESTS(); 1040 } 1041 return 0; 1042} 1043 1044//################################################################################ 1045// Bionic Gtest self test, run this by --bionic-selftest option. 1046 1047TEST(bionic_selftest, test_success) { 1048 ASSERT_EQ(1, 1); 1049} 1050 1051TEST(bionic_selftest, test_fail) { 1052 ASSERT_EQ(0, 1); 1053} 1054 1055TEST(bionic_selftest, test_time_warn) { 1056 sleep(4); 1057} 1058 1059TEST(bionic_selftest, test_timeout) { 1060 while (1) {} 1061} 1062 1063TEST(bionic_selftest, test_signal_SEGV_terminated) { 1064 char* p = reinterpret_cast<char*>(static_cast<intptr_t>(atoi("0"))); 1065 *p = 3; 1066} 1067 1068class bionic_selftest_DeathTest : public BionicDeathTest {}; 1069 1070static void deathtest_helper_success() { 1071 ASSERT_EQ(1, 1); 1072 exit(0); 1073} 1074 1075TEST_F(bionic_selftest_DeathTest, success) { 1076 ASSERT_EXIT(deathtest_helper_success(), ::testing::ExitedWithCode(0), ""); 1077} 1078 1079static void deathtest_helper_fail() { 1080 ASSERT_EQ(1, 0); 1081} 1082 1083TEST_F(bionic_selftest_DeathTest, fail) { 1084 ASSERT_EXIT(deathtest_helper_fail(), ::testing::ExitedWithCode(0), ""); 1085} 1086