gtest_main.cpp revision a456fae45f608e10499ac27fca8b37ef48378b34
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <limits.h>
#include <signal.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include <sys/wait.h>
#include <unistd.h>

#include <chrono>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

#ifndef TEMP_FAILURE_RETRY

/* Used to retry syscalls that can return EINTR. */
#define TEMP_FAILURE_RETRY(exp) ({         \
    __typeof__(exp) _rc;                   \
    do {                                   \
      _rc = (exp);                         \
    } while (_rc == -1 && errno == EINTR); \
    _rc; })

#endif

// Snapshot of the process invocation, recorded once at startup and exposed
// read-only through the accessors below (used by tests that re-exec this binary).
static std::string g_executable_path;
static int g_argc;
static char** g_argv;
static char** g_envp;

// Returns the absolute path this test binary was invoked with.
const std::string& get_executable_path() {
  return g_executable_path;
}

// Returns the original argc passed to main().
int get_argc() {
  return g_argc;
}

// Returns the original argv passed to main().
char** get_argv() {
  return g_argv;
}

// Returns the original envp passed to main().
char** get_envp() {
  return g_envp;
}

namespace testing {
namespace internal {

// Reuse of testing::internal::ColoredPrintf in gtest.
74enum GTestColor { 75 COLOR_DEFAULT, 76 COLOR_RED, 77 COLOR_GREEN, 78 COLOR_YELLOW 79}; 80 81void ColoredPrintf(GTestColor color, const char* fmt, ...); 82 83} // namespace internal 84} // namespace testing 85 86using testing::internal::GTestColor; 87using testing::internal::COLOR_DEFAULT; 88using testing::internal::COLOR_RED; 89using testing::internal::COLOR_GREEN; 90using testing::internal::COLOR_YELLOW; 91using testing::internal::ColoredPrintf; 92 93constexpr int DEFAULT_GLOBAL_TEST_RUN_DEADLINE_MS = 90000; 94constexpr int DEFAULT_GLOBAL_TEST_RUN_SLOW_THRESHOLD_MS = 2000; 95 96// The time each test can run before killed for the reason of timeout. 97// It takes effect only with --isolate option. 98static int global_test_run_deadline_ms = DEFAULT_GLOBAL_TEST_RUN_DEADLINE_MS; 99 100// The time each test can run before be warned for too much running time. 101// It takes effect only with --isolate option. 102static int global_test_run_slow_threshold_ms = DEFAULT_GLOBAL_TEST_RUN_SLOW_THRESHOLD_MS; 103 104// Return timeout duration for a test, in ms. 105static int GetTimeoutMs(const std::string& /*test_name*/) { 106 return global_test_run_deadline_ms; 107} 108 109// Return threshold for calling a test slow, in ms. 110static int GetSlowThresholdMs(const std::string& /*test_name*/) { 111 return global_test_run_slow_threshold_ms; 112} 113 114static void PrintHelpInfo() { 115 printf("Bionic Unit Test Options:\n" 116 " -j [JOB_COUNT] or -j[JOB_COUNT]\n" 117 " Run up to JOB_COUNT tests in parallel.\n" 118 " Use isolation mode, Run each test in a separate process.\n" 119 " If JOB_COUNT is not given, it is set to the count of available processors.\n" 120 " --no-isolate\n" 121 " Don't use isolation mode, run all tests in a single process.\n" 122 " --deadline=[TIME_IN_MS]\n" 123 " Run each test in no longer than [TIME_IN_MS] time.\n" 124 " Only valid in isolation mode. 
Default deadline is 90000 ms.\n" 125 " --slow-threshold=[TIME_IN_MS]\n" 126 " Test running longer than [TIME_IN_MS] will be called slow.\n" 127 " Only valid in isolation mode. Default slow threshold is 2000 ms.\n" 128 " --gtest-filter=POSITIVE_PATTERNS[-NEGATIVE_PATTERNS]\n" 129 " Used as a synonym for --gtest_filter option in gtest.\n" 130 "Default bionic unit test option is -j.\n" 131 "In isolation mode, you can send SIGQUIT to the parent process to show current\n" 132 "running tests, or send SIGINT to the parent process to stop testing and\n" 133 "clean up current running tests.\n" 134 "\n"); 135} 136 137enum TestResult { 138 TEST_SUCCESS = 0, 139 TEST_FAILED, 140 TEST_TIMEOUT 141}; 142 143class Test { 144 public: 145 Test() {} // For std::vector<Test>. 146 explicit Test(const char* name) : name_(name) {} 147 148 const std::string& GetName() const { return name_; } 149 150 void SetResult(TestResult result) { result_ = result; } 151 152 TestResult GetResult() const { return result_; } 153 154 void SetTestTime(int64_t elapsed_time_ns) { elapsed_time_ns_ = elapsed_time_ns; } 155 156 int64_t GetTestTime() const { return elapsed_time_ns_; } 157 158 void AppendTestOutput(const std::string& s) { output_ += s; } 159 160 const std::string& GetTestOutput() const { return output_; } 161 162 private: 163 const std::string name_; 164 TestResult result_; 165 int64_t elapsed_time_ns_; 166 std::string output_; 167}; 168 169class TestCase { 170 public: 171 TestCase() {} // For std::vector<TestCase>. 172 explicit TestCase(const char* name) : name_(name) {} 173 174 const std::string& GetName() const { return name_; } 175 176 void AppendTest(const char* test_name) { 177 test_list_.push_back(Test(test_name)); 178 } 179 180 size_t TestCount() const { return test_list_.size(); } 181 182 std::string GetTestName(size_t test_id) const { 183 VerifyTestId(test_id); 184 return name_ + "." 
+ test_list_[test_id].GetName(); 185 } 186 187 Test& GetTest(size_t test_id) { 188 VerifyTestId(test_id); 189 return test_list_[test_id]; 190 } 191 192 const Test& GetTest(size_t test_id) const { 193 VerifyTestId(test_id); 194 return test_list_[test_id]; 195 } 196 197 void SetTestResult(size_t test_id, TestResult result) { 198 VerifyTestId(test_id); 199 test_list_[test_id].SetResult(result); 200 } 201 202 TestResult GetTestResult(size_t test_id) const { 203 VerifyTestId(test_id); 204 return test_list_[test_id].GetResult(); 205 } 206 207 void SetTestTime(size_t test_id, int64_t elapsed_time_ns) { 208 VerifyTestId(test_id); 209 test_list_[test_id].SetTestTime(elapsed_time_ns); 210 } 211 212 int64_t GetTestTime(size_t test_id) const { 213 VerifyTestId(test_id); 214 return test_list_[test_id].GetTestTime(); 215 } 216 217 private: 218 void VerifyTestId(size_t test_id) const { 219 if(test_id >= test_list_.size()) { 220 fprintf(stderr, "test_id %zu out of range [0, %zu)\n", test_id, test_list_.size()); 221 exit(1); 222 } 223 } 224 225 private: 226 const std::string name_; 227 std::vector<Test> test_list_; 228}; 229 230class TestResultPrinter : public testing::EmptyTestEventListener { 231 public: 232 TestResultPrinter() : pinfo_(NULL) {} 233 virtual void OnTestStart(const testing::TestInfo& test_info) { 234 pinfo_ = &test_info; // Record test_info for use in OnTestPartResult. 235 } 236 virtual void OnTestPartResult(const testing::TestPartResult& result); 237 238 private: 239 const testing::TestInfo* pinfo_; 240}; 241 242// Called after an assertion failure. 243void TestResultPrinter::OnTestPartResult(const testing::TestPartResult& result) { 244 // If the test part succeeded, we don't need to do anything. 245 if (result.type() == testing::TestPartResult::kSuccess) 246 return; 247 248 // Print failure message from the assertion (e.g. expected this and got that). 
249 printf("%s:(%d) Failure in test %s.%s\n%s\n", result.file_name(), result.line_number(), 250 pinfo_->test_case_name(), pinfo_->name(), result.message()); 251 fflush(stdout); 252} 253 254static int64_t NanoTime() { 255 std::chrono::nanoseconds duration(std::chrono::steady_clock::now().time_since_epoch()); 256 return static_cast<int64_t>(duration.count()); 257} 258 259static bool EnumerateTests(int argc, char** argv, std::vector<TestCase>& testcase_list) { 260 std::string command; 261 for (int i = 0; i < argc; ++i) { 262 command += argv[i]; 263 command += " "; 264 } 265 command += "--gtest_list_tests"; 266 FILE* fp = popen(command.c_str(), "r"); 267 if (fp == NULL) { 268 perror("popen"); 269 return false; 270 } 271 272 char buf[200]; 273 while (fgets(buf, sizeof(buf), fp) != NULL) { 274 char* p = buf; 275 276 while (*p != '\0' && isspace(*p)) { 277 ++p; 278 } 279 if (*p == '\0') continue; 280 char* start = p; 281 while (*p != '\0' && !isspace(*p)) { 282 ++p; 283 } 284 char* end = p; 285 while (*p != '\0' && isspace(*p)) { 286 ++p; 287 } 288 if (*p != '\0' && *p != '#') { 289 // This is not we want, gtest must meet with some error when parsing the arguments. 290 fprintf(stderr, "argument error, check with --help\n"); 291 return false; 292 } 293 *end = '\0'; 294 if (*(end - 1) == '.') { 295 *(end - 1) = '\0'; 296 testcase_list.push_back(TestCase(start)); 297 } else { 298 testcase_list.back().AppendTest(start); 299 } 300 } 301 int result = pclose(fp); 302 return (result != -1 && WEXITSTATUS(result) == 0); 303} 304 305// Part of the following *Print functions are copied from external/gtest/src/gtest.cc: 306// PrettyUnitTestResultPrinter. The reason for copy is that PrettyUnitTestResultPrinter 307// is defined and used in gtest.cc, which is hard to reuse. 308static void OnTestIterationStartPrint(const std::vector<TestCase>& testcase_list, size_t iteration, 309 int iteration_count) { 310 if (iteration_count != 1) { 311 printf("\nRepeating all tests (iteration %zu) . . 
.\n\n", iteration); 312 } 313 ColoredPrintf(COLOR_GREEN, "[==========] "); 314 315 size_t testcase_count = testcase_list.size(); 316 size_t test_count = 0; 317 for (const auto& testcase : testcase_list) { 318 test_count += testcase.TestCount(); 319 } 320 321 printf("Running %zu %s from %zu %s.\n", 322 test_count, (test_count == 1) ? "test" : "tests", 323 testcase_count, (testcase_count == 1) ? "test case" : "test cases"); 324 fflush(stdout); 325} 326 327// bionic cts test needs gtest output format. 328#if defined(USING_GTEST_OUTPUT_FORMAT) 329 330static void OnTestEndPrint(const TestCase& testcase, size_t test_id) { 331 ColoredPrintf(COLOR_GREEN, "[ RUN ] "); 332 printf("%s\n", testcase.GetTestName(test_id).c_str()); 333 334 const std::string& test_output = testcase.GetTest(test_id).GetTestOutput(); 335 printf("%s", test_output.c_str()); 336 337 TestResult result = testcase.GetTestResult(test_id); 338 if (result == TEST_SUCCESS) { 339 ColoredPrintf(COLOR_GREEN, "[ OK ] "); 340 } else { 341 ColoredPrintf(COLOR_RED, "[ FAILED ] "); 342 } 343 printf("%s", testcase.GetTestName(test_id).c_str()); 344 if (testing::GTEST_FLAG(print_time)) { 345 printf(" (%" PRId64 " ms)", testcase.GetTestTime(test_id) / 1000000); 346 } 347 printf("\n"); 348 fflush(stdout); 349} 350 351#else // !defined(USING_GTEST_OUTPUT_FORMAT) 352 353static void OnTestEndPrint(const TestCase& testcase, size_t test_id) { 354 TestResult result = testcase.GetTestResult(test_id); 355 if (result == TEST_SUCCESS) { 356 ColoredPrintf(COLOR_GREEN, "[ OK ] "); 357 } else if (result == TEST_FAILED) { 358 ColoredPrintf(COLOR_RED, "[ FAILED ] "); 359 } else if (result == TEST_TIMEOUT) { 360 ColoredPrintf(COLOR_RED, "[ TIMEOUT ] "); 361 } 362 363 printf("%s", testcase.GetTestName(test_id).c_str()); 364 if (testing::GTEST_FLAG(print_time)) { 365 printf(" (%" PRId64 " ms)", testcase.GetTestTime(test_id) / 1000000); 366 } 367 printf("\n"); 368 369 const std::string& test_output = 
testcase.GetTest(test_id).GetTestOutput(); 370 printf("%s", test_output.c_str()); 371 fflush(stdout); 372} 373 374#endif // !defined(USING_GTEST_OUTPUT_FORMAT) 375 376static void OnTestIterationEndPrint(const std::vector<TestCase>& testcase_list, size_t /*iteration*/, 377 int64_t elapsed_time_ns) { 378 379 std::vector<std::string> fail_test_name_list; 380 std::vector<std::pair<std::string, int64_t>> timeout_test_list; 381 382 // For tests that were slow but didn't time out. 383 std::vector<std::tuple<std::string, int64_t, int>> slow_test_list; 384 size_t testcase_count = testcase_list.size(); 385 size_t test_count = 0; 386 size_t success_test_count = 0; 387 388 for (const auto& testcase : testcase_list) { 389 test_count += testcase.TestCount(); 390 for (size_t i = 0; i < testcase.TestCount(); ++i) { 391 TestResult result = testcase.GetTestResult(i); 392 if (result == TEST_SUCCESS) { 393 ++success_test_count; 394 } else if (result == TEST_FAILED) { 395 fail_test_name_list.push_back(testcase.GetTestName(i)); 396 } else if (result == TEST_TIMEOUT) { 397 timeout_test_list.push_back(std::make_pair(testcase.GetTestName(i), 398 testcase.GetTestTime(i))); 399 } 400 if (result != TEST_TIMEOUT && 401 testcase.GetTestTime(i) / 1000000 >= GetSlowThresholdMs(testcase.GetTestName(i))) { 402 slow_test_list.push_back(std::make_tuple(testcase.GetTestName(i), 403 testcase.GetTestTime(i), 404 GetSlowThresholdMs(testcase.GetTestName(i)))); 405 } 406 } 407 } 408 409 ColoredPrintf(COLOR_GREEN, "[==========] "); 410 printf("%zu %s from %zu %s ran.", test_count, (test_count == 1) ? "test" : "tests", 411 testcase_count, (testcase_count == 1) ? "test case" : "test cases"); 412 if (testing::GTEST_FLAG(print_time)) { 413 printf(" (%" PRId64 " ms total)", elapsed_time_ns / 1000000); 414 } 415 printf("\n"); 416 ColoredPrintf(COLOR_GREEN, "[ PASS ] "); 417 printf("%zu %s.\n", success_test_count, (success_test_count == 1) ? "test" : "tests"); 418 419 // Print tests that timed out. 
420 size_t timeout_test_count = timeout_test_list.size(); 421 if (timeout_test_count > 0) { 422 ColoredPrintf(COLOR_RED, "[ TIMEOUT ] "); 423 printf("%zu %s, listed below:\n", timeout_test_count, (timeout_test_count == 1) ? "test" : "tests"); 424 for (const auto& timeout_pair : timeout_test_list) { 425 ColoredPrintf(COLOR_RED, "[ TIMEOUT ] "); 426 printf("%s (stopped at %" PRId64 " ms)\n", timeout_pair.first.c_str(), 427 timeout_pair.second / 1000000); 428 } 429 } 430 431 // Print tests that were slow. 432 size_t slow_test_count = slow_test_list.size(); 433 if (slow_test_count > 0) { 434 ColoredPrintf(COLOR_YELLOW, "[ SLOW ] "); 435 printf("%zu %s, listed below:\n", slow_test_count, (slow_test_count == 1) ? "test" : "tests"); 436 for (const auto& slow_tuple : slow_test_list) { 437 ColoredPrintf(COLOR_YELLOW, "[ SLOW ] "); 438 printf("%s (%" PRId64 " ms, exceeded %d ms)\n", std::get<0>(slow_tuple).c_str(), 439 std::get<1>(slow_tuple) / 1000000, std::get<2>(slow_tuple)); 440 } 441 } 442 443 // Print tests that failed. 444 size_t fail_test_count = fail_test_name_list.size(); 445 if (fail_test_count > 0) { 446 ColoredPrintf(COLOR_RED, "[ FAIL ] "); 447 printf("%zu %s, listed below:\n", fail_test_count, (fail_test_count == 1) ? "test" : "tests"); 448 for (const auto& name : fail_test_name_list) { 449 ColoredPrintf(COLOR_RED, "[ FAIL ] "); 450 printf("%s\n", name.c_str()); 451 } 452 } 453 454 if (timeout_test_count > 0 || slow_test_count > 0 || fail_test_count > 0) { 455 printf("\n"); 456 } 457 458 if (timeout_test_count > 0) { 459 printf("%2zu TIMEOUT %s\n", timeout_test_count, (timeout_test_count == 1) ? "TEST" : "TESTS"); 460 } 461 if (slow_test_count > 0) { 462 printf("%2zu SLOW %s\n", slow_test_count, (slow_test_count == 1) ? "TEST" : "TESTS"); 463 } 464 if (fail_test_count > 0) { 465 printf("%2zu FAILED %s\n", fail_test_count, (fail_test_count == 1) ? 
"TEST" : "TESTS"); 466 } 467 fflush(stdout); 468} 469 470std::string XmlEscape(const std::string& xml) { 471 std::string escaped; 472 escaped.reserve(xml.size()); 473 474 for (auto c : xml) { 475 switch (c) { 476 case '<': 477 escaped.append("<"); 478 break; 479 case '>': 480 escaped.append(">"); 481 break; 482 case '&': 483 escaped.append("&"); 484 break; 485 case '\'': 486 escaped.append("'"); 487 break; 488 case '"': 489 escaped.append("""); 490 break; 491 default: 492 escaped.append(1, c); 493 break; 494 } 495 } 496 497 return escaped; 498} 499 500// Output xml file when --gtest_output is used, write this function as we can't reuse 501// gtest.cc:XmlUnitTestResultPrinter. The reason is XmlUnitTestResultPrinter is totally 502// defined in gtest.cc and not expose to outside. What's more, as we don't run gtest in 503// the parent process, we don't have gtest classes which are needed by XmlUnitTestResultPrinter. 504void OnTestIterationEndXmlPrint(const std::string& xml_output_filename, 505 const std::vector<TestCase>& testcase_list, 506 time_t epoch_iteration_start_time, 507 int64_t elapsed_time_ns) { 508 FILE* fp = fopen(xml_output_filename.c_str(), "w"); 509 if (fp == NULL) { 510 fprintf(stderr, "failed to open '%s': %s\n", xml_output_filename.c_str(), strerror(errno)); 511 exit(1); 512 } 513 514 size_t total_test_count = 0; 515 size_t total_failed_count = 0; 516 std::vector<size_t> failed_count_list(testcase_list.size(), 0); 517 std::vector<int64_t> elapsed_time_list(testcase_list.size(), 0); 518 for (size_t i = 0; i < testcase_list.size(); ++i) { 519 auto& testcase = testcase_list[i]; 520 total_test_count += testcase.TestCount(); 521 for (size_t j = 0; j < testcase.TestCount(); ++j) { 522 if (testcase.GetTestResult(j) != TEST_SUCCESS) { 523 ++failed_count_list[i]; 524 } 525 elapsed_time_list[i] += testcase.GetTestTime(j); 526 } 527 total_failed_count += failed_count_list[i]; 528 } 529 530 const tm* time_struct = localtime(&epoch_iteration_start_time); 531 char 
timestamp[40]; 532 snprintf(timestamp, sizeof(timestamp), "%4d-%02d-%02dT%02d:%02d:%02d", 533 time_struct->tm_year + 1900, time_struct->tm_mon + 1, time_struct->tm_mday, 534 time_struct->tm_hour, time_struct->tm_min, time_struct->tm_sec); 535 536 fputs("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n", fp); 537 fprintf(fp, "<testsuites tests=\"%zu\" failures=\"%zu\" disabled=\"0\" errors=\"0\"", 538 total_test_count, total_failed_count); 539 fprintf(fp, " timestamp=\"%s\" time=\"%.3lf\" name=\"AllTests\">\n", timestamp, elapsed_time_ns / 1e9); 540 for (size_t i = 0; i < testcase_list.size(); ++i) { 541 auto& testcase = testcase_list[i]; 542 fprintf(fp, " <testsuite name=\"%s\" tests=\"%zu\" failures=\"%zu\" disabled=\"0\" errors=\"0\"", 543 testcase.GetName().c_str(), testcase.TestCount(), failed_count_list[i]); 544 fprintf(fp, " time=\"%.3lf\">\n", elapsed_time_list[i] / 1e9); 545 546 for (size_t j = 0; j < testcase.TestCount(); ++j) { 547 fprintf(fp, " <testcase name=\"%s\" status=\"run\" time=\"%.3lf\" classname=\"%s\"", 548 testcase.GetTest(j).GetName().c_str(), testcase.GetTestTime(j) / 1e9, 549 testcase.GetName().c_str()); 550 if (testcase.GetTestResult(j) == TEST_SUCCESS) { 551 fputs(" />\n", fp); 552 } else { 553 fputs(">\n", fp); 554 const std::string& test_output = testcase.GetTest(j).GetTestOutput(); 555 const std::string escaped_test_output = XmlEscape(test_output); 556 fprintf(fp, " <failure message=\"%s\" type=\"\">\n", escaped_test_output.c_str()); 557 fputs(" </failure>\n", fp); 558 fputs(" </testcase>\n", fp); 559 } 560 } 561 562 fputs(" </testsuite>\n", fp); 563 } 564 fputs("</testsuites>\n", fp); 565 fclose(fp); 566} 567 568static bool sigint_flag; 569static bool sigquit_flag; 570 571static void signal_handler(int sig) { 572 if (sig == SIGINT) { 573 sigint_flag = true; 574 } else if (sig == SIGQUIT) { 575 sigquit_flag = true; 576 } 577} 578 579static bool RegisterSignalHandler() { 580 sigint_flag = false; 581 sigquit_flag = false; 582 sig_t ret = 
signal(SIGINT, signal_handler); 583 if (ret != SIG_ERR) { 584 ret = signal(SIGQUIT, signal_handler); 585 } 586 if (ret == SIG_ERR) { 587 perror("RegisterSignalHandler"); 588 return false; 589 } 590 return true; 591} 592 593static bool UnregisterSignalHandler() { 594 sig_t ret = signal(SIGINT, SIG_DFL); 595 if (ret != SIG_ERR) { 596 ret = signal(SIGQUIT, SIG_DFL); 597 } 598 if (ret == SIG_ERR) { 599 perror("UnregisterSignalHandler"); 600 return false; 601 } 602 return true; 603} 604 605struct ChildProcInfo { 606 pid_t pid; 607 int64_t start_time_ns; 608 int64_t end_time_ns; 609 int64_t deadline_end_time_ns; // The time when the test is thought of as timeout. 610 size_t testcase_id, test_id; 611 bool finished; 612 bool timed_out; 613 int exit_status; 614 int child_read_fd; // File descriptor to read child test failure info. 615}; 616 617// Forked Child process, run the single test. 618static void ChildProcessFn(int argc, char** argv, const std::string& test_name) { 619 char** new_argv = new char*[argc + 2]; 620 memcpy(new_argv, argv, sizeof(char*) * argc); 621 622 char* filter_arg = new char [test_name.size() + 20]; 623 strcpy(filter_arg, "--gtest_filter="); 624 strcat(filter_arg, test_name.c_str()); 625 new_argv[argc] = filter_arg; 626 new_argv[argc + 1] = NULL; 627 628 int new_argc = argc + 1; 629 testing::InitGoogleTest(&new_argc, new_argv); 630 int result = RUN_ALL_TESTS(); 631 exit(result); 632} 633 634static ChildProcInfo RunChildProcess(const std::string& test_name, int testcase_id, int test_id, 635 int argc, char** argv) { 636 int pipefd[2]; 637 if (pipe(pipefd) == -1) { 638 perror("pipe in RunTestInSeparateProc"); 639 exit(1); 640 } 641 if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) == -1) { 642 perror("fcntl in RunTestInSeparateProc"); 643 exit(1); 644 } 645 pid_t pid = fork(); 646 if (pid == -1) { 647 perror("fork in RunTestInSeparateProc"); 648 exit(1); 649 } else if (pid == 0) { 650 // In child process, run a single test. 
    // Child: route stdout/stderr into the pipe so the parent captures all output.
    close(pipefd[0]);
    close(STDOUT_FILENO);
    close(STDERR_FILENO);
    dup2(pipefd[1], STDOUT_FILENO);
    dup2(pipefd[1], STDERR_FILENO);

    // Restore default signal dispositions so SIGINT/SIGQUIT behave normally
    // inside the test child.
    if (!UnregisterSignalHandler()) {
      exit(1);
    }
    ChildProcessFn(argc, argv, test_name);
    // Unreachable.
  }
  // In parent process, initialize child process info.
  close(pipefd[1]);
  ChildProcInfo child_proc;
  child_proc.child_read_fd = pipefd[0];
  child_proc.pid = pid;
  child_proc.start_time_ns = NanoTime();
  child_proc.deadline_end_time_ns = child_proc.start_time_ns + GetTimeoutMs(test_name) * 1000000LL;
  child_proc.testcase_id = testcase_id;
  child_proc.test_id = test_id;
  child_proc.finished = false;
  return child_proc;
}

// Reacts to flags set by signal_handler(): SIGQUIT lists the currently running
// tests; SIGINT kills all children and exits.
static void HandleSignals(std::vector<TestCase>& testcase_list,
                          std::vector<ChildProcInfo>& child_proc_list) {
  if (sigquit_flag) {
    sigquit_flag = false;
    // Print current running tests.
    printf("List of current running tests:\n");
    for (const auto& child_proc : child_proc_list) {
      if (child_proc.pid != 0) {
        std::string test_name = testcase_list[child_proc.testcase_id].GetTestName(child_proc.test_id);
        int64_t current_time_ns = NanoTime();
        int64_t run_time_ms = (current_time_ns - child_proc.start_time_ns) / 1000000;
        printf("  %s (%" PRId64 " ms)\n", test_name.c_str(), run_time_ms);
      }
    }
  } else if (sigint_flag) {
    sigint_flag = false;
    // Kill current running tests.
    for (const auto& child_proc : child_proc_list) {
      if (child_proc.pid != 0) {
        // Send SIGKILL to ensure the child process can be killed unconditionally.
        kill(child_proc.pid, SIGKILL);
      }
    }
    // SIGINT kills the parent process as well.
700 exit(1); 701 } 702} 703 704static bool CheckChildProcExit(pid_t exit_pid, int exit_status, 705 std::vector<ChildProcInfo>& child_proc_list) { 706 for (size_t i = 0; i < child_proc_list.size(); ++i) { 707 if (child_proc_list[i].pid == exit_pid) { 708 child_proc_list[i].finished = true; 709 child_proc_list[i].timed_out = false; 710 child_proc_list[i].exit_status = exit_status; 711 child_proc_list[i].end_time_ns = NanoTime(); 712 return true; 713 } 714 } 715 return false; 716} 717 718static size_t CheckChildProcTimeout(std::vector<ChildProcInfo>& child_proc_list) { 719 int64_t current_time_ns = NanoTime(); 720 size_t timeout_child_count = 0; 721 for (size_t i = 0; i < child_proc_list.size(); ++i) { 722 if (child_proc_list[i].deadline_end_time_ns <= current_time_ns) { 723 child_proc_list[i].finished = true; 724 child_proc_list[i].timed_out = true; 725 child_proc_list[i].end_time_ns = current_time_ns; 726 ++timeout_child_count; 727 } 728 } 729 return timeout_child_count; 730} 731 732static void ReadChildProcOutput(std::vector<TestCase>& testcase_list, 733 std::vector<ChildProcInfo>& child_proc_list) { 734 for (const auto& child_proc : child_proc_list) { 735 TestCase& testcase = testcase_list[child_proc.testcase_id]; 736 int test_id = child_proc.test_id; 737 while (true) { 738 char buf[1024]; 739 ssize_t bytes_read = TEMP_FAILURE_RETRY(read(child_proc.child_read_fd, buf, sizeof(buf) - 1)); 740 if (bytes_read > 0) { 741 buf[bytes_read] = '\0'; 742 testcase.GetTest(test_id).AppendTestOutput(buf); 743 } else if (bytes_read == 0) { 744 break; // Read end. 
745 } else { 746 if (errno == EAGAIN) { 747 break; 748 } 749 perror("failed to read child_read_fd"); 750 exit(1); 751 } 752 } 753 } 754} 755 756static void WaitChildProcs(std::vector<TestCase>& testcase_list, 757 std::vector<ChildProcInfo>& child_proc_list) { 758 size_t finished_child_count = 0; 759 while (true) { 760 int status; 761 pid_t result; 762 while ((result = TEMP_FAILURE_RETRY(waitpid(-1, &status, WNOHANG))) > 0) { 763 if (CheckChildProcExit(result, status, child_proc_list)) { 764 ++finished_child_count; 765 } 766 } 767 768 if (result == -1) { 769 if (errno == ECHILD) { 770 // This happens when we have no running child processes. 771 return; 772 } else { 773 perror("waitpid"); 774 exit(1); 775 } 776 } else if (result == 0) { 777 finished_child_count += CheckChildProcTimeout(child_proc_list); 778 } 779 780 ReadChildProcOutput(testcase_list, child_proc_list); 781 if (finished_child_count > 0) { 782 return; 783 } 784 785 HandleSignals(testcase_list, child_proc_list); 786 787 // sleep 1 ms to avoid busy looping. 788 timespec sleep_time; 789 sleep_time.tv_sec = 0; 790 sleep_time.tv_nsec = 1000000; 791 nanosleep(&sleep_time, NULL); 792 } 793} 794 795static TestResult WaitForOneChild(pid_t pid) { 796 int exit_status; 797 pid_t result = TEMP_FAILURE_RETRY(waitpid(pid, &exit_status, 0)); 798 799 TestResult test_result = TEST_SUCCESS; 800 if (result != pid || WEXITSTATUS(exit_status) != 0) { 801 test_result = TEST_FAILED; 802 } 803 return test_result; 804} 805 806static void CollectChildTestResult(const ChildProcInfo& child_proc, TestCase& testcase) { 807 int test_id = child_proc.test_id; 808 testcase.SetTestTime(test_id, child_proc.end_time_ns - child_proc.start_time_ns); 809 if (child_proc.timed_out) { 810 // The child process marked as timed_out has not exited, and we should kill it manually. 
    kill(child_proc.pid, SIGKILL);
    WaitForOneChild(child_proc.pid);  // Reap it so no zombie is left behind.
  }
  close(child_proc.child_read_fd);

  if (child_proc.timed_out) {
    testcase.SetTestResult(test_id, TEST_TIMEOUT);
    char buf[1024];
    snprintf(buf, sizeof(buf), "%s killed because of timeout at %" PRId64 " ms.\n",
             testcase.GetTestName(test_id).c_str(), testcase.GetTestTime(test_id) / 1000000);
    testcase.GetTest(test_id).AppendTestOutput(buf);

  } else if (WIFSIGNALED(child_proc.exit_status)) {
    // Record signal terminated test as failed.
    testcase.SetTestResult(test_id, TEST_FAILED);
    char buf[1024];
    snprintf(buf, sizeof(buf), "%s terminated by signal: %s.\n",
             testcase.GetTestName(test_id).c_str(), strsignal(WTERMSIG(child_proc.exit_status)));
    testcase.GetTest(test_id).AppendTestOutput(buf);

  } else {
    // Normal exit: the child's exit code is RUN_ALL_TESTS()'s return value.
    int exitcode = WEXITSTATUS(child_proc.exit_status);
    testcase.SetTestResult(test_id, exitcode == 0 ? TEST_SUCCESS : TEST_FAILED);
    if (exitcode != 0) {
      char buf[1024];
      snprintf(buf, sizeof(buf), "%s exited with exitcode %d.\n",
               testcase.GetTestName(test_id).c_str(), exitcode);
      testcase.GetTest(test_id).AppendTestOutput(buf);
    }
  }
}

// We choose to use multi-fork and multi-wait here instead of multi-thread, because it always
// makes deadlock to use fork in multi-thread.
// Returns true if all tests run successfully, otherwise return false.
static bool RunTestInSeparateProc(int argc, char** argv, std::vector<TestCase>& testcase_list,
                                  int iteration_count, size_t job_count,
                                  const std::string& xml_output_filename) {
  // Stop default result printer to avoid environment setup/teardown information for each test.
  // Swap gtest's default printer for our failure-only printer (used in the children).
  testing::UnitTest::GetInstance()->listeners().Release(
      testing::UnitTest::GetInstance()->listeners().default_result_printer());
  testing::UnitTest::GetInstance()->listeners().Append(new TestResultPrinter);

  if (!RegisterSignalHandler()) {
    exit(1);
  }

  bool all_tests_passed = true;

  // iteration_count < 0 means repeat forever (see --gtest_repeat).
  for (size_t iteration = 1;
       iteration_count < 0 || iteration <= static_cast<size_t>(iteration_count);
       ++iteration) {
    OnTestIterationStartPrint(testcase_list, iteration, iteration_count);
    int64_t iteration_start_time_ns = NanoTime();
    time_t epoch_iteration_start_time = time(NULL);

    // Run up to job_count tests in parallel, each test in a child process.
    std::vector<ChildProcInfo> child_proc_list;

    // Next test to run is [next_testcase_id:next_test_id].
    size_t next_testcase_id = 0;
    size_t next_test_id = 0;

    // Record how many tests are finished.
    std::vector<size_t> finished_test_count_list(testcase_list.size(), 0);
    size_t finished_testcase_count = 0;

    while (finished_testcase_count < testcase_list.size()) {
      // Keep the worker pool full: run up to job_count child processes.
      while (child_proc_list.size() < job_count && next_testcase_id < testcase_list.size()) {
        std::string test_name = testcase_list[next_testcase_id].GetTestName(next_test_id);
        ChildProcInfo child_proc = RunChildProcess(test_name, next_testcase_id, next_test_id,
                                                   argc, argv);
        child_proc_list.push_back(child_proc);
        // Advance to the next test, rolling over into the next testcase.
        if (++next_test_id == testcase_list[next_testcase_id].TestCount()) {
          next_test_id = 0;
          ++next_testcase_id;
        }
      }

      // Wait for any child proc finish or timeout.
      WaitChildProcs(testcase_list, child_proc_list);

      // Collect result.
      // Harvest every finished child: record its result, print it, and drop it
      // from the pool so a new test can be scheduled in its place.
      auto it = child_proc_list.begin();
      while (it != child_proc_list.end()) {
        auto& child_proc = *it;
        if (child_proc.finished == true) {
          size_t testcase_id = child_proc.testcase_id;
          size_t test_id = child_proc.test_id;
          TestCase& testcase = testcase_list[testcase_id];

          CollectChildTestResult(child_proc, testcase);
          OnTestEndPrint(testcase, test_id);

          if (++finished_test_count_list[testcase_id] == testcase.TestCount()) {
            ++finished_testcase_count;
          }
          if (testcase.GetTestResult(test_id) != TEST_SUCCESS) {
            all_tests_passed = false;
          }

          it = child_proc_list.erase(it);
        } else {
          ++it;
        }
      }
    }

    int64_t elapsed_time_ns = NanoTime() - iteration_start_time_ns;
    OnTestIterationEndPrint(testcase_list, iteration, elapsed_time_ns);
    if (!xml_output_filename.empty()) {
      OnTestIterationEndXmlPrint(xml_output_filename, testcase_list, epoch_iteration_start_time,
                                 elapsed_time_ns);
    }
  }

  if (!UnregisterSignalHandler()) {
    exit(1);
  }

  return all_tests_passed;
}

// Default parallelism: one job per online processor.
static size_t GetDefaultJobCount() {
  return static_cast<size_t>(sysconf(_SC_NPROCESSORS_ONLN));
}

static void AddPathSeparatorInTestProgramPath(std::vector<char*>& args) {
  // To run DeathTest in threadsafe mode, gtest requires that the user must invoke the
  // test program via a valid path that contains at least one path separator.
  // The reason is that gtest uses clone() + execve() to run DeathTest in threadsafe mode,
  // and execve() doesn't read environment variable PATH, so execve() will not succeed
  // unless we specify the absolute path or relative path of the test program directly.
  if (strchr(args[0], '/') == nullptr) {
    args[0] = strdup(g_executable_path.c_str());
  }
}

static void AddGtestFilterSynonym(std::vector<char*>& args) {
  // Support --gtest-filter as a synonym for --gtest_filter.
  for (size_t i = 1; i < args.size(); ++i) {
    if (strncmp(args[i], "--gtest-filter", strlen("--gtest-filter")) == 0) {
      // Rewrite the '-' between "gtest" and "filter" (index 7) to '_' in place.
      args[i][7] = '_';
    }
  }
}

// All options consumed by isolation mode (everything gtest itself doesn't see).
struct IsolationTestOptions {
  bool isolate;
  size_t job_count;
  int test_deadline_ms;
  int test_slow_threshold_ms;
  std::string gtest_color;
  bool gtest_print_time;
  int gtest_repeat;
  std::string gtest_output;
};

// Pick options not for gtest: There are two parts in args, one part is used in isolation test mode
// as described in PrintHelpInfo(), the other part is handled by testing::InitGoogleTest() in
// gtest. PickOptions() picks the first part into IsolationTestOptions structure, leaving the second
// part in args.
// Arguments:
//   args is used to pass in all command arguments, and pass out only the part of options for gtest.
//   options is used to pass out test options in isolation mode.
// Return false if there is error in arguments.
static bool PickOptions(std::vector<char*>& args, IsolationTestOptions& options) {
  for (size_t i = 1; i < args.size(); ++i) {
    if (strcmp(args[i], "--help") == 0 || strcmp(args[i], "-h") == 0) {
      PrintHelpInfo();
      // Help short-circuits everything else and runs no tests.
      options.isolate = false;
      return true;
    }
  }

  AddPathSeparatorInTestProgramPath(args);
  AddGtestFilterSynonym(args);

  // if --bionic-selftest argument is used, only enable self tests, otherwise remove self tests.
  bool enable_selftest = false;
  for (size_t i = 1; i < args.size(); ++i) {
    if (strcmp(args[i], "--bionic-selftest") == 0) {
      // This argument is to enable "bionic_selftest*" for self test, and is not shown in help info.
      // Don't remove this option from arguments.
      enable_selftest = true;
    }
  }
  // Extract the last --gtest_filter=... argument (scanning from the end, so a
  // later flag wins) and remove it from args; it is re-added below with the
  // selftest pattern merged in.
  std::string gtest_filter_str;
  for (size_t i = args.size() - 1; i >= 1; --i) {
    if (strncmp(args[i], "--gtest_filter=", strlen("--gtest_filter=")) == 0) {
      gtest_filter_str = std::string(args[i]);
      args.erase(args.begin() + i);
      break;
    }
  }
  if (enable_selftest == true) {
    // Self-test mode: run only the bionic_selftest* tests.
    args.push_back(strdup("--gtest_filter=bionic_selftest*"));
  } else {
    if (gtest_filter_str == "") {
      gtest_filter_str = "--gtest_filter=-bionic_selftest*";
    } else {
      // Find if '-' for NEGATIVE_PATTERNS exists.
      // If negative patterns are already present, append to them; otherwise
      // start the negative-pattern section with ":-".
      if (gtest_filter_str.find(":-") != std::string::npos) {
        gtest_filter_str += ":bionic_selftest*";
      } else {
        gtest_filter_str += ":-bionic_selftest*";
      }
    }
    args.push_back(strdup(gtest_filter_str.c_str()));
  }

  options.isolate = true;
  // Parse arguments that prevent us from running in isolation mode.
  for (size_t i = 1; i < args.size(); ++i) {
    if (strcmp(args[i], "--no-isolate") == 0) {
      options.isolate = false;
    } else if (strcmp(args[i], "--gtest_list_tests") == 0) {
      options.isolate = false;
    }
  }

  // Stop parsing if we will not run in isolation mode.
  if (options.isolate == false) {
    return true;
  }

  // Init default isolation test options; the gtest_* fields start from the
  // current gtest flag values.
  options.job_count = GetDefaultJobCount();
  options.test_deadline_ms = DEFAULT_GLOBAL_TEST_RUN_DEADLINE_MS;
  options.test_slow_threshold_ms = DEFAULT_GLOBAL_TEST_RUN_SLOW_THRESHOLD_MS;
  options.gtest_color = testing::GTEST_FLAG(color);
  options.gtest_print_time = testing::GTEST_FLAG(print_time);
  options.gtest_repeat = testing::GTEST_FLAG(repeat);
  options.gtest_output = testing::GTEST_FLAG(output);

  // Parse arguments specified for isolation mode.
  for (size_t i = 1; i < args.size(); ++i) {
    if (strncmp(args[i], "-j", strlen("-j")) == 0) {
      char* p = args[i] + strlen("-j");
      int count = 0;
      if (*p != '\0') {
        // Argument like -j5.
        count = atoi(p);
      } else if (args.size() > i + 1) {
        // Arguments like -j 5: consume the next argument as the count.
        count = atoi(args[i + 1]);
        ++i;
      }
      // atoi() yields 0 on unparsable input, which is rejected here as well.
      if (count <= 0) {
        fprintf(stderr, "invalid job count: %d\n", count);
        return false;
      }
      options.job_count = static_cast<size_t>(count);
    } else if (strncmp(args[i], "--deadline=", strlen("--deadline=")) == 0) {
      int time_ms = atoi(args[i] + strlen("--deadline="));
      if (time_ms <= 0) {
        fprintf(stderr, "invalid deadline: %d\n", time_ms);
        return false;
      }
      options.test_deadline_ms = time_ms;
    } else if (strncmp(args[i], "--slow-threshold=", strlen("--slow-threshold=")) == 0) {
      int time_ms = atoi(args[i] + strlen("--slow-threshold="));
      if (time_ms <= 0) {
        fprintf(stderr, "invalid slow test threshold: %d\n", time_ms);
        return false;
      }
      options.test_slow_threshold_ms = time_ms;
    } else if (strncmp(args[i], "--gtest_color=", strlen("--gtest_color=")) == 0) {
      options.gtest_color = args[i] + strlen("--gtest_color=");
    } else if (strcmp(args[i], "--gtest_print_time=0") == 0) {
      options.gtest_print_time = false;
    } else if (strncmp(args[i], "--gtest_repeat=", strlen("--gtest_repeat=")) == 0) {
      // If the value of gtest_repeat is < 0, then it indicates the tests
      // should be repeated forever.
      options.gtest_repeat = atoi(args[i] + strlen("--gtest_repeat="));
      // Remove --gtest_repeat=xx from arguments, so child process only run one iteration for a single test.
      args.erase(args.begin() + i);
      // Compensate for the erase(): the loop's ++i then lands on the element
      // that shifted into this slot.
      --i;
    } else if (strncmp(args[i], "--gtest_output=", strlen("--gtest_output=")) == 0) {
      std::string output = args[i] + strlen("--gtest_output=");
      // Generate the output xml file path according to the strategy in gtest:
      // only "xml:" output is recognized; the path is made absolute, and a
      // default file name is appended when a directory is given.
      bool success = true;
      if (strncmp(output.c_str(), "xml:", strlen("xml:")) == 0) {
        output = output.substr(strlen("xml:"));
        if (output.size() == 0) {
          success = false;
        }
        // Make absolute path.
        if (success && output[0] != '/') {
          // getcwd(NULL, 0) allocates a buffer of the required size.
          char* cwd = getcwd(NULL, 0);
          if (cwd != NULL) {
            output = std::string(cwd) + "/" + output;
            free(cwd);
          } else {
            success = false;
          }
        }
        // Add file name if output is a directory.
        if (success && output.back() == '/') {
          output += "test_details.xml";
        }
      }
      if (success) {
        options.gtest_output = output;
      } else {
        fprintf(stderr, "invalid gtest_output file: %s\n", args[i]);
        return false;
      }

      // Remove --gtest_output=xxx from arguments, so child process will not write xml file.
      args.erase(args.begin() + i);
      --i;
    }
  }

  // Add --no-isolate in args to prevent child process from running in isolation mode again.
  // As DeathTest will try to call execve(), this argument should always be added.
  args.insert(args.begin() + 1, strdup("--no-isolate"));
  return true;
}

// Return the absolute path of the running executable, read from
// /proc/self/exe. Exits the process on failure or possible truncation.
static std::string get_proc_self_exe() {
  char path[PATH_MAX];
  // readlink() does not NUL-terminate; the returned byte count is used instead.
  ssize_t path_len = readlink("/proc/self/exe", path, sizeof(path));
  // path_len == sizeof(path) could mean the result was truncated, so it is
  // rejected along with errors.
  if (path_len <= 0 || path_len >= static_cast<ssize_t>(sizeof(path))) {
    perror("readlink");
    exit(1);
  }

  return std::string(path, path_len);
}

int main(int argc, char** argv, char** envp) {
  // Stash the original process parameters; they are exposed to tests via
  // get_executable_path()/get_argc()/get_argv()/get_envp().
  g_executable_path = get_proc_self_exe();
  g_argc = argc;
  g_argv = argv;
  g_envp = envp;
  std::vector<char*> arg_list;
  for (int i = 0; i < argc; ++i) {
    arg_list.push_back(argv[i]);
  }

  // Split arg_list into isolation-runner options and gtest arguments.
  IsolationTestOptions options;
  if (PickOptions(arg_list, options) == false) {
    return 1;
  }

  if (options.isolate == true) {
    // Set global variables.
    global_test_run_deadline_ms = options.test_deadline_ms;
    global_test_run_slow_threshold_ms = options.test_slow_threshold_ms;
    testing::GTEST_FLAG(color) = options.gtest_color.c_str();
    testing::GTEST_FLAG(print_time) = options.gtest_print_time;
    std::vector<TestCase> testcase_list;

    // Append a trailing NULL so arg_list.data() can be used as an argv array.
    argc = static_cast<int>(arg_list.size());
    arg_list.push_back(NULL);
    if (EnumerateTests(argc, arg_list.data(), testcase_list) == false) {
      return 1;
    }
    bool all_test_passed = RunTestInSeparateProc(argc, arg_list.data(), testcase_list,
                                                 options.gtest_repeat, options.job_count,
                                                 options.gtest_output);
    return all_test_passed ? 0 : 1;
  } else {
    // Non-isolation mode: hand everything to gtest and run in this process.
    argc = static_cast<int>(arg_list.size());
    arg_list.push_back(NULL);
    testing::InitGoogleTest(&argc, arg_list.data());
    return RUN_ALL_TESTS();
  }
}

//################################################################################
// Bionic Gtest self test, run this by --bionic-selftest option.
1185 1186TEST(bionic_selftest, test_success) { 1187 ASSERT_EQ(1, 1); 1188} 1189 1190TEST(bionic_selftest, test_fail) { 1191 ASSERT_EQ(0, 1); 1192} 1193 1194TEST(bionic_selftest, test_time_warn) { 1195 sleep(4); 1196} 1197 1198TEST(bionic_selftest, test_timeout) { 1199 while (1) {} 1200} 1201 1202TEST(bionic_selftest, test_signal_SEGV_terminated) { 1203 char* p = reinterpret_cast<char*>(static_cast<intptr_t>(atoi("0"))); 1204 *p = 3; 1205} 1206 1207class bionic_selftest_DeathTest : public ::testing::Test { 1208 protected: 1209 virtual void SetUp() { 1210 ::testing::FLAGS_gtest_death_test_style = "threadsafe"; 1211 } 1212}; 1213 1214static void deathtest_helper_success() { 1215 ASSERT_EQ(1, 1); 1216 exit(0); 1217} 1218 1219TEST_F(bionic_selftest_DeathTest, success) { 1220 ASSERT_EXIT(deathtest_helper_success(), ::testing::ExitedWithCode(0), ""); 1221} 1222 1223static void deathtest_helper_fail() { 1224 ASSERT_EQ(1, 0); 1225} 1226 1227TEST_F(bionic_selftest_DeathTest, fail) { 1228 ASSERT_EXIT(deathtest_helper_fail(), ::testing::ExitedWithCode(0), ""); 1229} 1230