gtest_main.cpp revision 7d15dc388a5fb3d3e6f5097232d82ab2b9fe7ad2
1/* 2 * Copyright (C) 2014 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include <gtest/gtest.h> 18 19#include <ctype.h> 20#include <errno.h> 21#include <fcntl.h> 22#include <inttypes.h> 23#include <libgen.h> 24#include <limits.h> 25#include <signal.h> 26#include <stdarg.h> 27#include <stdio.h> 28#include <string.h> 29#include <sys/wait.h> 30#include <unistd.h> 31 32#include <chrono> 33#include <string> 34#include <tuple> 35#include <utility> 36#include <vector> 37 38#ifndef TEMP_FAILURE_RETRY 39 40/* Used to retry syscalls that can return EINTR. 
 */
// Retry a syscall expression until it either succeeds or fails with an errno
// other than EINTR, i.e. don't treat signal interruption as a failure.
// Only defined here when the libc doesn't already provide it.
#define TEMP_FAILURE_RETRY(exp) ({         \
  __typeof__(exp) _rc;                     \
  do {                                     \
    _rc = (exp);                           \
  } while (_rc == -1 && errno == EINTR);   \
  _rc; })

#endif

// Saved copies of main()'s arguments, exposed to individual tests through the
// accessor functions below.
static std::string g_executable_path;
static int g_argc;
static char** g_argv;
static char** g_envp;

// Returns the path this test binary was invoked with (recorded before tests run).
const std::string& get_executable_path() {
  return g_executable_path;
}

// Canonicalizes |path| into |*real_path| using realpath(3).
// Returns false (leaving *real_path untouched) on failure.
bool get_realpath(const std::string& path, std::string* real_path) {
  char realpath_buf[PATH_MAX];
  if (realpath(path.c_str(), realpath_buf) != realpath_buf) {
    return false;
  }

  *real_path = realpath_buf;
  return true;
}

// Returns the directory component of |path|, papering over the signature
// difference between bionic's and glibc's dirname(3).
std::string get_dirname(const char* path) {
#if defined(__BIONIC__)
  return dirname(path);
#else
  // GLIBC does not have const char* dirname
  return dirname(const_cast<char*>(path));
#endif
}

// Accessors for the original command line, for tests that need to re-exec
// or inspect how the runner was started.
int get_argc() {
  return g_argc;
}

char** get_argv() {
  return g_argv;
}

char** get_envp() {
  return g_envp;
}

namespace testing {
namespace internal {

// Reuse of testing::internal::ColoredPrintf in gtest.
// These declarations mirror gtest's internal ones so we can link against its
// implementation without including gtest's internal headers.
enum GTestColor {
  COLOR_DEFAULT,
  COLOR_RED,
  COLOR_GREEN,
  COLOR_YELLOW
};

void ColoredPrintf(GTestColor color, const char* fmt, ...);

}  // namespace internal
}  // namespace testing

using testing::internal::GTestColor;
using testing::internal::COLOR_DEFAULT;
using testing::internal::COLOR_RED;
using testing::internal::COLOR_GREEN;
using testing::internal::COLOR_YELLOW;
using testing::internal::ColoredPrintf;

// Defaults for the --deadline and --slow-threshold options (see PrintHelpInfo()).
constexpr int DEFAULT_GLOBAL_TEST_RUN_DEADLINE_MS = 90000;
constexpr int DEFAULT_GLOBAL_TEST_RUN_SLOW_THRESHOLD_MS = 2000;

// The time each test can run before killed for the reason of timeout.
// It takes effect only with --isolate option.
static int global_test_run_deadline_ms = DEFAULT_GLOBAL_TEST_RUN_DEADLINE_MS;

// The time each test can run before be warned for too much running time.
// It takes effect only with --isolate option.
122static int global_test_run_slow_threshold_ms = DEFAULT_GLOBAL_TEST_RUN_SLOW_THRESHOLD_MS; 123 124// Return timeout duration for a test, in ms. 125static int GetTimeoutMs(const std::string& /*test_name*/) { 126 return global_test_run_deadline_ms; 127} 128 129// Return threshold for calling a test slow, in ms. 130static int GetSlowThresholdMs(const std::string& /*test_name*/) { 131 return global_test_run_slow_threshold_ms; 132} 133 134static void PrintHelpInfo() { 135 printf("Bionic Unit Test Options:\n" 136 " -j [JOB_COUNT] or -j[JOB_COUNT]\n" 137 " Run up to JOB_COUNT tests in parallel.\n" 138 " Use isolation mode, Run each test in a separate process.\n" 139 " If JOB_COUNT is not given, it is set to the count of available processors.\n" 140 " --no-isolate\n" 141 " Don't use isolation mode, run all tests in a single process.\n" 142 " --deadline=[TIME_IN_MS]\n" 143 " Run each test in no longer than [TIME_IN_MS] time.\n" 144 " Only valid in isolation mode. Default deadline is 90000 ms.\n" 145 " --slow-threshold=[TIME_IN_MS]\n" 146 " Test running longer than [TIME_IN_MS] will be called slow.\n" 147 " Only valid in isolation mode. Default slow threshold is 2000 ms.\n" 148 " --gtest-filter=POSITIVE_PATTERNS[-NEGATIVE_PATTERNS]\n" 149 " Used as a synonym for --gtest_filter option in gtest.\n" 150 "Default bionic unit test option is -j.\n" 151 "In isolation mode, you can send SIGQUIT to the parent process to show current\n" 152 "running tests, or send SIGINT to the parent process to stop testing and\n" 153 "clean up current running tests.\n" 154 "\n"); 155} 156 157enum TestResult { 158 TEST_SUCCESS = 0, 159 TEST_FAILED, 160 TEST_TIMEOUT 161}; 162 163class Test { 164 public: 165 Test() {} // For std::vector<Test>. 
166 explicit Test(const char* name) : name_(name) {} 167 168 const std::string& GetName() const { return name_; } 169 170 void SetResult(TestResult result) { result_ = result; } 171 172 TestResult GetResult() const { return result_; } 173 TestResult GetExpectedResult() const { 174 return GetName().find("xfail") == 0 ? TEST_FAILED : TEST_SUCCESS; 175 } 176 177 void SetTestTime(int64_t elapsed_time_ns) { elapsed_time_ns_ = elapsed_time_ns; } 178 179 int64_t GetTestTime() const { return elapsed_time_ns_; } 180 181 void AppendTestOutput(const std::string& s) { output_ += s; } 182 183 const std::string& GetTestOutput() const { return output_; } 184 185 private: 186 const std::string name_; 187 TestResult result_; 188 int64_t elapsed_time_ns_; 189 std::string output_; 190}; 191 192class TestCase { 193 public: 194 TestCase() {} // For std::vector<TestCase>. 195 explicit TestCase(const char* name) : name_(name) {} 196 197 const std::string& GetName() const { return name_; } 198 199 void AppendTest(const char* test_name) { 200 test_list_.push_back(Test(test_name)); 201 } 202 203 size_t TestCount() const { return test_list_.size(); } 204 205 std::string GetTestName(size_t test_id) const { 206 VerifyTestId(test_id); 207 return name_ + "." 
+ test_list_[test_id].GetName(); 208 } 209 210 Test& GetTest(size_t test_id) { 211 VerifyTestId(test_id); 212 return test_list_[test_id]; 213 } 214 215 const Test& GetTest(size_t test_id) const { 216 VerifyTestId(test_id); 217 return test_list_[test_id]; 218 } 219 220 void SetTestResult(size_t test_id, TestResult result) { 221 VerifyTestId(test_id); 222 test_list_[test_id].SetResult(result); 223 } 224 225 TestResult GetTestResult(size_t test_id) const { 226 VerifyTestId(test_id); 227 return test_list_[test_id].GetResult(); 228 } 229 230 TestResult GetExpectedTestResult(size_t test_id) const { 231 VerifyTestId(test_id); 232 return test_list_[test_id].GetExpectedResult(); 233 } 234 235 bool GetTestSuccess(size_t test_id) const { 236 return GetTestResult(test_id) == GetExpectedTestResult(test_id); 237 } 238 239 void SetTestTime(size_t test_id, int64_t elapsed_time_ns) { 240 VerifyTestId(test_id); 241 test_list_[test_id].SetTestTime(elapsed_time_ns); 242 } 243 244 int64_t GetTestTime(size_t test_id) const { 245 VerifyTestId(test_id); 246 return test_list_[test_id].GetTestTime(); 247 } 248 249 private: 250 void VerifyTestId(size_t test_id) const { 251 if(test_id >= test_list_.size()) { 252 fprintf(stderr, "test_id %zu out of range [0, %zu)\n", test_id, test_list_.size()); 253 exit(1); 254 } 255 } 256 257 private: 258 const std::string name_; 259 std::vector<Test> test_list_; 260}; 261 262class TestResultPrinter : public testing::EmptyTestEventListener { 263 public: 264 TestResultPrinter() : pinfo_(NULL) {} 265 virtual void OnTestStart(const testing::TestInfo& test_info) { 266 pinfo_ = &test_info; // Record test_info for use in OnTestPartResult. 267 } 268 virtual void OnTestPartResult(const testing::TestPartResult& result); 269 270 private: 271 const testing::TestInfo* pinfo_; 272}; 273 274// Called after an assertion failure. 
275void TestResultPrinter::OnTestPartResult(const testing::TestPartResult& result) { 276 // If the test part succeeded, we don't need to do anything. 277 if (result.type() == testing::TestPartResult::kSuccess) 278 return; 279 280 // Print failure message from the assertion (e.g. expected this and got that). 281 printf("%s:(%d) Failure in test %s.%s\n%s\n", result.file_name(), result.line_number(), 282 pinfo_->test_case_name(), pinfo_->name(), result.message()); 283 fflush(stdout); 284} 285 286static int64_t NanoTime() { 287 std::chrono::nanoseconds duration(std::chrono::steady_clock::now().time_since_epoch()); 288 return static_cast<int64_t>(duration.count()); 289} 290 291static bool EnumerateTests(int argc, char** argv, std::vector<TestCase>& testcase_list) { 292 std::string command; 293 for (int i = 0; i < argc; ++i) { 294 command += argv[i]; 295 command += " "; 296 } 297 command += "--gtest_list_tests"; 298 FILE* fp = popen(command.c_str(), "r"); 299 if (fp == NULL) { 300 perror("popen"); 301 return false; 302 } 303 304 char buf[200]; 305 while (fgets(buf, sizeof(buf), fp) != NULL) { 306 char* p = buf; 307 308 while (*p != '\0' && isspace(*p)) { 309 ++p; 310 } 311 if (*p == '\0') continue; 312 char* start = p; 313 while (*p != '\0' && !isspace(*p)) { 314 ++p; 315 } 316 char* end = p; 317 while (*p != '\0' && isspace(*p)) { 318 ++p; 319 } 320 if (*p != '\0' && *p != '#') { 321 // This is not we want, gtest must meet with some error when parsing the arguments. 322 fprintf(stderr, "argument error, check with --help\n"); 323 return false; 324 } 325 *end = '\0'; 326 if (*(end - 1) == '.') { 327 *(end - 1) = '\0'; 328 testcase_list.push_back(TestCase(start)); 329 } else { 330 testcase_list.back().AppendTest(start); 331 } 332 } 333 int result = pclose(fp); 334 return (result != -1 && WEXITSTATUS(result) == 0); 335} 336 337// Part of the following *Print functions are copied from external/gtest/src/gtest.cc: 338// PrettyUnitTestResultPrinter. 
The reason for copy is that PrettyUnitTestResultPrinter 339// is defined and used in gtest.cc, which is hard to reuse. 340static void OnTestIterationStartPrint(const std::vector<TestCase>& testcase_list, size_t iteration, 341 int iteration_count, size_t job_count) { 342 if (iteration_count != 1) { 343 printf("\nRepeating all tests (iteration %zu) . . .\n\n", iteration); 344 } 345 ColoredPrintf(COLOR_GREEN, "[==========] "); 346 347 size_t testcase_count = testcase_list.size(); 348 size_t test_count = 0; 349 for (const auto& testcase : testcase_list) { 350 test_count += testcase.TestCount(); 351 } 352 353 printf("Running %zu %s from %zu %s (%zu %s).\n", 354 test_count, (test_count == 1) ? "test" : "tests", 355 testcase_count, (testcase_count == 1) ? "test case" : "test cases", 356 job_count, (job_count == 1) ? "job" : "jobs"); 357 fflush(stdout); 358} 359 360// bionic cts test needs gtest output format. 361#if defined(USING_GTEST_OUTPUT_FORMAT) 362 363static void OnTestEndPrint(const TestCase& testcase, size_t test_id) { 364 ColoredPrintf(COLOR_GREEN, "[ RUN ] "); 365 printf("%s\n", testcase.GetTestName(test_id).c_str()); 366 367 const std::string& test_output = testcase.GetTest(test_id).GetTestOutput(); 368 printf("%s", test_output.c_str()); 369 370 TestResult result = testcase.GetTestResult(test_id); 371 if (result == testcase.GetExpectedTestResult(test_id)) { 372 ColoredPrintf(COLOR_GREEN, "[ OK ] "); 373 } else { 374 ColoredPrintf(COLOR_RED, "[ FAILED ] "); 375 } 376 printf("%s", testcase.GetTestName(test_id).c_str()); 377 if (testing::GTEST_FLAG(print_time)) { 378 printf(" (%" PRId64 " ms)", testcase.GetTestTime(test_id) / 1000000); 379 } 380 printf("\n"); 381 fflush(stdout); 382} 383 384#else // !defined(USING_GTEST_OUTPUT_FORMAT) 385 386static void OnTestEndPrint(const TestCase& testcase, size_t test_id) { 387 TestResult result = testcase.GetTestResult(test_id); 388 TestResult expected = testcase.GetExpectedTestResult(test_id); 389 if (result == TEST_SUCCESS) 
{ 390 if (expected == TEST_SUCCESS) { 391 ColoredPrintf(COLOR_GREEN, "[ OK ] "); 392 } else if (expected == TEST_FAILED) { 393 ColoredPrintf(COLOR_RED, "[ XPASS ] "); 394 } 395 } else if (result == TEST_FAILED) { 396 if (expected == TEST_SUCCESS) { 397 ColoredPrintf(COLOR_RED, "[ FAILED ] "); 398 } else if (expected == TEST_FAILED) { 399 ColoredPrintf(COLOR_YELLOW, "[ XFAIL ] "); 400 } 401 } else if (result == TEST_TIMEOUT) { 402 ColoredPrintf(COLOR_RED, "[ TIMEOUT ] "); 403 } 404 405 printf("%s", testcase.GetTestName(test_id).c_str()); 406 if (testing::GTEST_FLAG(print_time)) { 407 printf(" (%" PRId64 " ms)", testcase.GetTestTime(test_id) / 1000000); 408 } 409 printf("\n"); 410 411 const std::string& test_output = testcase.GetTest(test_id).GetTestOutput(); 412 printf("%s", test_output.c_str()); 413 fflush(stdout); 414} 415 416#endif // !defined(USING_GTEST_OUTPUT_FORMAT) 417 418static void OnTestIterationEndPrint(const std::vector<TestCase>& testcase_list, size_t /*iteration*/, 419 int64_t elapsed_time_ns) { 420 421 std::vector<std::string> fail_test_name_list; 422 std::vector<std::string> xpass_test_name_list; 423 std::vector<std::pair<std::string, int64_t>> timeout_test_list; 424 425 // For tests that were slow but didn't time out. 
426 std::vector<std::tuple<std::string, int64_t, int>> slow_test_list; 427 size_t testcase_count = testcase_list.size(); 428 size_t test_count = 0; 429 size_t success_test_count = 0; 430 size_t expected_failure_count = 0; 431 432 for (const auto& testcase : testcase_list) { 433 test_count += testcase.TestCount(); 434 for (size_t i = 0; i < testcase.TestCount(); ++i) { 435 TestResult result = testcase.GetTestResult(i); 436 TestResult expected = testcase.GetExpectedTestResult(i); 437 if (result == TEST_TIMEOUT) { 438 timeout_test_list.push_back( 439 std::make_pair(testcase.GetTestName(i), testcase.GetTestTime(i))); 440 } else if (result == expected) { 441 if (result == TEST_SUCCESS) { 442 ++success_test_count; 443 } else { 444 ++expected_failure_count; 445 } 446 } else { 447 if (result == TEST_FAILED) { 448 fail_test_name_list.push_back(testcase.GetTestName(i)); 449 } else { 450 xpass_test_name_list.push_back(testcase.GetTestName(i)); 451 } 452 } 453 if (result != TEST_TIMEOUT && 454 testcase.GetTestTime(i) / 1000000 >= GetSlowThresholdMs(testcase.GetTestName(i))) { 455 slow_test_list.push_back(std::make_tuple(testcase.GetTestName(i), 456 testcase.GetTestTime(i), 457 GetSlowThresholdMs(testcase.GetTestName(i)))); 458 } 459 } 460 } 461 462 ColoredPrintf(COLOR_GREEN, "[==========] "); 463 printf("%zu %s from %zu %s ran.", test_count, (test_count == 1) ? "test" : "tests", 464 testcase_count, (testcase_count == 1) ? "test case" : "test cases"); 465 if (testing::GTEST_FLAG(print_time)) { 466 printf(" (%" PRId64 " ms total)", elapsed_time_ns / 1000000); 467 } 468 printf("\n"); 469 ColoredPrintf(COLOR_GREEN, "[ PASS ] "); 470 printf("%zu %s.", success_test_count, (success_test_count == 1) ? "test" : "tests"); 471 if (expected_failure_count > 0) { 472 printf(" (%zu expected failure%s)", expected_failure_count, 473 (expected_failure_count == 1) ? "" : "s"); 474 } 475 printf("\n"); 476 477 // Print tests that timed out. 
478 size_t timeout_test_count = timeout_test_list.size(); 479 if (timeout_test_count > 0) { 480 ColoredPrintf(COLOR_RED, "[ TIMEOUT ] "); 481 printf("%zu %s, listed below:\n", timeout_test_count, (timeout_test_count == 1) ? "test" : "tests"); 482 for (const auto& timeout_pair : timeout_test_list) { 483 ColoredPrintf(COLOR_RED, "[ TIMEOUT ] "); 484 printf("%s (stopped at %" PRId64 " ms)\n", timeout_pair.first.c_str(), 485 timeout_pair.second / 1000000); 486 } 487 } 488 489 // Print tests that were slow. 490 size_t slow_test_count = slow_test_list.size(); 491 if (slow_test_count > 0) { 492 ColoredPrintf(COLOR_YELLOW, "[ SLOW ] "); 493 printf("%zu %s, listed below:\n", slow_test_count, (slow_test_count == 1) ? "test" : "tests"); 494 for (const auto& slow_tuple : slow_test_list) { 495 ColoredPrintf(COLOR_YELLOW, "[ SLOW ] "); 496 printf("%s (%" PRId64 " ms, exceeded %d ms)\n", std::get<0>(slow_tuple).c_str(), 497 std::get<1>(slow_tuple) / 1000000, std::get<2>(slow_tuple)); 498 } 499 } 500 501 // Print tests that failed. 502 size_t fail_test_count = fail_test_name_list.size(); 503 if (fail_test_count > 0) { 504 ColoredPrintf(COLOR_RED, "[ FAIL ] "); 505 printf("%zu %s, listed below:\n", fail_test_count, (fail_test_count == 1) ? "test" : "tests"); 506 for (const auto& name : fail_test_name_list) { 507 ColoredPrintf(COLOR_RED, "[ FAIL ] "); 508 printf("%s\n", name.c_str()); 509 } 510 } 511 512 // Print tests that should have failed. 513 size_t xpass_test_count = xpass_test_name_list.size(); 514 if (xpass_test_count > 0) { 515 ColoredPrintf(COLOR_RED, "[ XPASS ] "); 516 printf("%zu %s, listed below:\n", xpass_test_count, (xpass_test_count == 1) ? 
"test" : "tests"); 517 for (const auto& name : xpass_test_name_list) { 518 ColoredPrintf(COLOR_RED, "[ XPASS ] "); 519 printf("%s\n", name.c_str()); 520 } 521 } 522 523 if (timeout_test_count > 0 || slow_test_count > 0 || fail_test_count > 0 || xpass_test_count > 0) { 524 printf("\n"); 525 } 526 527 if (timeout_test_count > 0) { 528 printf("%2zu TIMEOUT %s\n", timeout_test_count, (timeout_test_count == 1) ? "TEST" : "TESTS"); 529 } 530 if (slow_test_count > 0) { 531 printf("%2zu SLOW %s\n", slow_test_count, (slow_test_count == 1) ? "TEST" : "TESTS"); 532 } 533 if (fail_test_count > 0) { 534 printf("%2zu FAILED %s\n", fail_test_count, (fail_test_count == 1) ? "TEST" : "TESTS"); 535 } 536 if (xpass_test_count > 0) { 537 printf("%2zu SHOULD HAVE FAILED %s\n", xpass_test_count, (xpass_test_count == 1) ? "TEST" : "TESTS"); 538 } 539 540 fflush(stdout); 541} 542 543std::string XmlEscape(const std::string& xml) { 544 std::string escaped; 545 escaped.reserve(xml.size()); 546 547 for (auto c : xml) { 548 switch (c) { 549 case '<': 550 escaped.append("<"); 551 break; 552 case '>': 553 escaped.append(">"); 554 break; 555 case '&': 556 escaped.append("&"); 557 break; 558 case '\'': 559 escaped.append("'"); 560 break; 561 case '"': 562 escaped.append("""); 563 break; 564 default: 565 escaped.append(1, c); 566 break; 567 } 568 } 569 570 return escaped; 571} 572 573// Output xml file when --gtest_output is used, write this function as we can't reuse 574// gtest.cc:XmlUnitTestResultPrinter. The reason is XmlUnitTestResultPrinter is totally 575// defined in gtest.cc and not expose to outside. What's more, as we don't run gtest in 576// the parent process, we don't have gtest classes which are needed by XmlUnitTestResultPrinter. 
577void OnTestIterationEndXmlPrint(const std::string& xml_output_filename, 578 const std::vector<TestCase>& testcase_list, 579 time_t epoch_iteration_start_time, 580 int64_t elapsed_time_ns) { 581 FILE* fp = fopen(xml_output_filename.c_str(), "w"); 582 if (fp == NULL) { 583 fprintf(stderr, "failed to open '%s': %s\n", xml_output_filename.c_str(), strerror(errno)); 584 exit(1); 585 } 586 587 size_t total_test_count = 0; 588 size_t total_failed_count = 0; 589 std::vector<size_t> failed_count_list(testcase_list.size(), 0); 590 std::vector<int64_t> elapsed_time_list(testcase_list.size(), 0); 591 for (size_t i = 0; i < testcase_list.size(); ++i) { 592 auto& testcase = testcase_list[i]; 593 total_test_count += testcase.TestCount(); 594 for (size_t j = 0; j < testcase.TestCount(); ++j) { 595 if (!testcase.GetTestSuccess(j)) { 596 ++failed_count_list[i]; 597 } 598 elapsed_time_list[i] += testcase.GetTestTime(j); 599 } 600 total_failed_count += failed_count_list[i]; 601 } 602 603 const tm* time_struct = localtime(&epoch_iteration_start_time); 604 char timestamp[40]; 605 snprintf(timestamp, sizeof(timestamp), "%4d-%02d-%02dT%02d:%02d:%02d", 606 time_struct->tm_year + 1900, time_struct->tm_mon + 1, time_struct->tm_mday, 607 time_struct->tm_hour, time_struct->tm_min, time_struct->tm_sec); 608 609 fputs("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n", fp); 610 fprintf(fp, "<testsuites tests=\"%zu\" failures=\"%zu\" disabled=\"0\" errors=\"0\"", 611 total_test_count, total_failed_count); 612 fprintf(fp, " timestamp=\"%s\" time=\"%.3lf\" name=\"AllTests\">\n", timestamp, elapsed_time_ns / 1e9); 613 for (size_t i = 0; i < testcase_list.size(); ++i) { 614 auto& testcase = testcase_list[i]; 615 fprintf(fp, " <testsuite name=\"%s\" tests=\"%zu\" failures=\"%zu\" disabled=\"0\" errors=\"0\"", 616 testcase.GetName().c_str(), testcase.TestCount(), failed_count_list[i]); 617 fprintf(fp, " time=\"%.3lf\">\n", elapsed_time_list[i] / 1e9); 618 619 for (size_t j = 0; j < testcase.TestCount(); 
++j) { 620 fprintf(fp, " <testcase name=\"%s\" status=\"run\" time=\"%.3lf\" classname=\"%s\"", 621 testcase.GetTest(j).GetName().c_str(), testcase.GetTestTime(j) / 1e9, 622 testcase.GetName().c_str()); 623 if (!testcase.GetTestSuccess(j)) { 624 fputs(" />\n", fp); 625 } else { 626 fputs(">\n", fp); 627 const std::string& test_output = testcase.GetTest(j).GetTestOutput(); 628 const std::string escaped_test_output = XmlEscape(test_output); 629 fprintf(fp, " <failure message=\"%s\" type=\"\">\n", escaped_test_output.c_str()); 630 fputs(" </failure>\n", fp); 631 fputs(" </testcase>\n", fp); 632 } 633 } 634 635 fputs(" </testsuite>\n", fp); 636 } 637 fputs("</testsuites>\n", fp); 638 fclose(fp); 639} 640 641static bool sigint_flag; 642static bool sigquit_flag; 643 644static void signal_handler(int sig) { 645 if (sig == SIGINT) { 646 sigint_flag = true; 647 } else if (sig == SIGQUIT) { 648 sigquit_flag = true; 649 } 650} 651 652static bool RegisterSignalHandler() { 653 sigint_flag = false; 654 sigquit_flag = false; 655 sig_t ret = signal(SIGINT, signal_handler); 656 if (ret != SIG_ERR) { 657 ret = signal(SIGQUIT, signal_handler); 658 } 659 if (ret == SIG_ERR) { 660 perror("RegisterSignalHandler"); 661 return false; 662 } 663 return true; 664} 665 666static bool UnregisterSignalHandler() { 667 sig_t ret = signal(SIGINT, SIG_DFL); 668 if (ret != SIG_ERR) { 669 ret = signal(SIGQUIT, SIG_DFL); 670 } 671 if (ret == SIG_ERR) { 672 perror("UnregisterSignalHandler"); 673 return false; 674 } 675 return true; 676} 677 678struct ChildProcInfo { 679 pid_t pid; 680 int64_t start_time_ns; 681 int64_t end_time_ns; 682 int64_t deadline_end_time_ns; // The time when the test is thought of as timeout. 683 size_t testcase_id, test_id; 684 bool finished; 685 bool timed_out; 686 int exit_status; 687 int child_read_fd; // File descriptor to read child test failure info. 688}; 689 690// Forked Child process, run the single test. 
691static void ChildProcessFn(int argc, char** argv, const std::string& test_name) { 692 char** new_argv = new char*[argc + 2]; 693 memcpy(new_argv, argv, sizeof(char*) * argc); 694 695 char* filter_arg = new char [test_name.size() + 20]; 696 strcpy(filter_arg, "--gtest_filter="); 697 strcat(filter_arg, test_name.c_str()); 698 new_argv[argc] = filter_arg; 699 new_argv[argc + 1] = NULL; 700 701 int new_argc = argc + 1; 702 testing::InitGoogleTest(&new_argc, new_argv); 703 int result = RUN_ALL_TESTS(); 704 exit(result); 705} 706 707static ChildProcInfo RunChildProcess(const std::string& test_name, int testcase_id, int test_id, 708 int argc, char** argv) { 709 int pipefd[2]; 710 if (pipe(pipefd) == -1) { 711 perror("pipe in RunTestInSeparateProc"); 712 exit(1); 713 } 714 if (fcntl(pipefd[0], F_SETFL, O_NONBLOCK) == -1) { 715 perror("fcntl in RunTestInSeparateProc"); 716 exit(1); 717 } 718 pid_t pid = fork(); 719 if (pid == -1) { 720 perror("fork in RunTestInSeparateProc"); 721 exit(1); 722 } else if (pid == 0) { 723 // In child process, run a single test. 724 close(pipefd[0]); 725 close(STDOUT_FILENO); 726 close(STDERR_FILENO); 727 dup2(pipefd[1], STDOUT_FILENO); 728 dup2(pipefd[1], STDERR_FILENO); 729 730 if (!UnregisterSignalHandler()) { 731 exit(1); 732 } 733 ChildProcessFn(argc, argv, test_name); 734 // Unreachable. 735 } 736 // In parent process, initialize child process info. 
737 close(pipefd[1]); 738 ChildProcInfo child_proc; 739 child_proc.child_read_fd = pipefd[0]; 740 child_proc.pid = pid; 741 child_proc.start_time_ns = NanoTime(); 742 child_proc.deadline_end_time_ns = child_proc.start_time_ns + GetTimeoutMs(test_name) * 1000000LL; 743 child_proc.testcase_id = testcase_id; 744 child_proc.test_id = test_id; 745 child_proc.finished = false; 746 return child_proc; 747} 748 749static void HandleSignals(std::vector<TestCase>& testcase_list, 750 std::vector<ChildProcInfo>& child_proc_list) { 751 if (sigquit_flag) { 752 sigquit_flag = false; 753 // Print current running tests. 754 printf("List of current running tests:\n"); 755 for (const auto& child_proc : child_proc_list) { 756 if (child_proc.pid != 0) { 757 std::string test_name = testcase_list[child_proc.testcase_id].GetTestName(child_proc.test_id); 758 int64_t current_time_ns = NanoTime(); 759 int64_t run_time_ms = (current_time_ns - child_proc.start_time_ns) / 1000000; 760 printf(" %s (%" PRId64 " ms)\n", test_name.c_str(), run_time_ms); 761 } 762 } 763 } else if (sigint_flag) { 764 sigint_flag = false; 765 // Kill current running tests. 766 for (const auto& child_proc : child_proc_list) { 767 if (child_proc.pid != 0) { 768 // Send SIGKILL to ensure the child process can be killed unconditionally. 769 kill(child_proc.pid, SIGKILL); 770 } 771 } 772 // SIGINT kills the parent process as well. 
773 exit(1); 774 } 775} 776 777static bool CheckChildProcExit(pid_t exit_pid, int exit_status, 778 std::vector<ChildProcInfo>& child_proc_list) { 779 for (size_t i = 0; i < child_proc_list.size(); ++i) { 780 if (child_proc_list[i].pid == exit_pid) { 781 child_proc_list[i].finished = true; 782 child_proc_list[i].timed_out = false; 783 child_proc_list[i].exit_status = exit_status; 784 child_proc_list[i].end_time_ns = NanoTime(); 785 return true; 786 } 787 } 788 return false; 789} 790 791static size_t CheckChildProcTimeout(std::vector<ChildProcInfo>& child_proc_list) { 792 int64_t current_time_ns = NanoTime(); 793 size_t timeout_child_count = 0; 794 for (size_t i = 0; i < child_proc_list.size(); ++i) { 795 if (child_proc_list[i].deadline_end_time_ns <= current_time_ns) { 796 child_proc_list[i].finished = true; 797 child_proc_list[i].timed_out = true; 798 child_proc_list[i].end_time_ns = current_time_ns; 799 ++timeout_child_count; 800 } 801 } 802 return timeout_child_count; 803} 804 805static void ReadChildProcOutput(std::vector<TestCase>& testcase_list, 806 std::vector<ChildProcInfo>& child_proc_list) { 807 for (const auto& child_proc : child_proc_list) { 808 TestCase& testcase = testcase_list[child_proc.testcase_id]; 809 int test_id = child_proc.test_id; 810 while (true) { 811 char buf[1024]; 812 ssize_t bytes_read = TEMP_FAILURE_RETRY(read(child_proc.child_read_fd, buf, sizeof(buf) - 1)); 813 if (bytes_read > 0) { 814 buf[bytes_read] = '\0'; 815 testcase.GetTest(test_id).AppendTestOutput(buf); 816 } else if (bytes_read == 0) { 817 break; // Read end. 
818 } else { 819 if (errno == EAGAIN) { 820 break; 821 } 822 perror("failed to read child_read_fd"); 823 exit(1); 824 } 825 } 826 } 827} 828 829static void WaitChildProcs(std::vector<TestCase>& testcase_list, 830 std::vector<ChildProcInfo>& child_proc_list) { 831 size_t finished_child_count = 0; 832 while (true) { 833 int status; 834 pid_t result; 835 while ((result = TEMP_FAILURE_RETRY(waitpid(-1, &status, WNOHANG))) > 0) { 836 if (CheckChildProcExit(result, status, child_proc_list)) { 837 ++finished_child_count; 838 } 839 } 840 841 if (result == -1) { 842 if (errno == ECHILD) { 843 // This happens when we have no running child processes. 844 return; 845 } else { 846 perror("waitpid"); 847 exit(1); 848 } 849 } else if (result == 0) { 850 finished_child_count += CheckChildProcTimeout(child_proc_list); 851 } 852 853 ReadChildProcOutput(testcase_list, child_proc_list); 854 if (finished_child_count > 0) { 855 return; 856 } 857 858 HandleSignals(testcase_list, child_proc_list); 859 860 // sleep 1 ms to avoid busy looping. 861 timespec sleep_time; 862 sleep_time.tv_sec = 0; 863 sleep_time.tv_nsec = 1000000; 864 nanosleep(&sleep_time, NULL); 865 } 866} 867 868static TestResult WaitForOneChild(pid_t pid) { 869 int exit_status; 870 pid_t result = TEMP_FAILURE_RETRY(waitpid(pid, &exit_status, 0)); 871 872 TestResult test_result = TEST_SUCCESS; 873 if (result != pid || WEXITSTATUS(exit_status) != 0) { 874 test_result = TEST_FAILED; 875 } 876 return test_result; 877} 878 879static void CollectChildTestResult(const ChildProcInfo& child_proc, TestCase& testcase) { 880 int test_id = child_proc.test_id; 881 testcase.SetTestTime(test_id, child_proc.end_time_ns - child_proc.start_time_ns); 882 if (child_proc.timed_out) { 883 // The child process marked as timed_out has not exited, and we should kill it manually. 
    // The child is still alive past its deadline (or another terminal condition):
    // force-kill it and reap the zombie before reading its result.
    kill(child_proc.pid, SIGKILL);
    WaitForOneChild(child_proc.pid);
  }
  close(child_proc.child_read_fd);

  if (child_proc.timed_out) {
    testcase.SetTestResult(test_id, TEST_TIMEOUT);
    char buf[1024];
    // NOTE(review): the /1000000 suggests GetTestTime() is in nanoseconds and is
    // being reported in milliseconds — confirm against GetTestTime()'s definition.
    snprintf(buf, sizeof(buf), "%s killed because of timeout at %" PRId64 " ms.\n",
             testcase.GetTestName(test_id).c_str(), testcase.GetTestTime(test_id) / 1000000);
    testcase.GetTest(test_id).AppendTestOutput(buf);

  } else if (WIFSIGNALED(child_proc.exit_status)) {
    // Record signal terminated test as failed.
    testcase.SetTestResult(test_id, TEST_FAILED);
    char buf[1024];
    snprintf(buf, sizeof(buf), "%s terminated by signal: %s.\n",
             testcase.GetTestName(test_id).c_str(), strsignal(WTERMSIG(child_proc.exit_status)));
    testcase.GetTest(test_id).AppendTestOutput(buf);

  } else {
    // Normal exit: a zero exit code is a pass, anything else is a failure.
    int exitcode = WEXITSTATUS(child_proc.exit_status);
    testcase.SetTestResult(test_id, exitcode == 0 ? TEST_SUCCESS : TEST_FAILED);
    if (exitcode != 0) {
      char buf[1024];
      snprintf(buf, sizeof(buf), "%s exited with exitcode %d.\n",
               testcase.GetTestName(test_id).c_str(), exitcode);
      testcase.GetTest(test_id).AppendTestOutput(buf);
    }
  }
}

// We choose to use multi-fork and multi-wait here instead of multi-thread, because it always
// makes deadlock to use fork in multi-thread.
// Returns true if all tests run successfully, otherwise return false.
//
// Arguments:
//   argc/argv            - the (already filtered) argument vector forwarded to each child.
//   testcase_list        - all test cases to run; results are recorded in place.
//   iteration_count      - number of whole-suite repetitions; negative means repeat forever.
//   job_count            - maximum number of concurrently running child processes.
//   xml_output_filename  - if non-empty, an XML report is written after each iteration.
static bool RunTestInSeparateProc(int argc, char** argv, std::vector<TestCase>& testcase_list,
                                  int iteration_count, size_t job_count,
                                  const std::string& xml_output_filename) {
  // Stop default result printer to avoid environment setup/teardown information for each test.
  // Release() returns ownership of the default printer to us; it is intentionally leaked
  // (never deleted) because this runner lives for the rest of the process.
  testing::UnitTest::GetInstance()->listeners().Release(
      testing::UnitTest::GetInstance()->listeners().default_result_printer());
  testing::UnitTest::GetInstance()->listeners().Append(new TestResultPrinter);

  if (!RegisterSignalHandler()) {
    exit(1);
  }

  bool all_tests_passed = true;

  // iteration_count < 0 makes the loop condition permanently true (repeat forever).
  for (size_t iteration = 1;
       iteration_count < 0 || iteration <= static_cast<size_t>(iteration_count);
       ++iteration) {
    OnTestIterationStartPrint(testcase_list, iteration, iteration_count, job_count);
    int64_t iteration_start_time_ns = NanoTime();
    time_t epoch_iteration_start_time = time(NULL);

    // Run up to job_count tests in parallel, each test in a child process.
    std::vector<ChildProcInfo> child_proc_list;

    // Next test to run is [next_testcase_id:next_test_id].
    size_t next_testcase_id = 0;
    size_t next_test_id = 0;

    // Record how many tests are finished.
    std::vector<size_t> finished_test_count_list(testcase_list.size(), 0);
    size_t finished_testcase_count = 0;

    while (finished_testcase_count < testcase_list.size()) {
      // run up to job_count child processes.
      while (child_proc_list.size() < job_count && next_testcase_id < testcase_list.size()) {
        std::string test_name = testcase_list[next_testcase_id].GetTestName(next_test_id);
        ChildProcInfo child_proc = RunChildProcess(test_name, next_testcase_id, next_test_id,
                                                   argc, argv);
        child_proc_list.push_back(child_proc);
        // Advance the [testcase:test] cursor, rolling over to the next test case
        // when the current one is exhausted.
        if (++next_test_id == testcase_list[next_testcase_id].TestCount()) {
          next_test_id = 0;
          ++next_testcase_id;
        }
      }

      // Wait for any child proc finish or timeout.
      WaitChildProcs(testcase_list, child_proc_list);

      // Collect result. Finished children are erased while iterating, so the
      // erase()/++it pattern below must not be simplified into a range-for.
      auto it = child_proc_list.begin();
      while (it != child_proc_list.end()) {
        auto& child_proc = *it;
        if (child_proc.finished == true) {
          size_t testcase_id = child_proc.testcase_id;
          size_t test_id = child_proc.test_id;
          TestCase& testcase = testcase_list[testcase_id];

          CollectChildTestResult(child_proc, testcase);
          OnTestEndPrint(testcase, test_id);

          // A test case counts as done only when every one of its tests has finished.
          if (++finished_test_count_list[testcase_id] == testcase.TestCount()) {
            ++finished_testcase_count;
          }
          if (!testcase.GetTestSuccess(test_id)) {
            all_tests_passed = false;
          }

          it = child_proc_list.erase(it);
        } else {
          ++it;
        }
      }
    }

    int64_t elapsed_time_ns = NanoTime() - iteration_start_time_ns;
    OnTestIterationEndPrint(testcase_list, iteration, elapsed_time_ns);
    if (!xml_output_filename.empty()) {
      OnTestIterationEndXmlPrint(xml_output_filename, testcase_list, epoch_iteration_start_time,
                                 elapsed_time_ns);
    }
  }

  if (!UnregisterSignalHandler()) {
    exit(1);
  }

  return all_tests_passed;
}

// Default parallelism: one job per currently-online CPU core.
static size_t GetDefaultJobCount() {
  return static_cast<size_t>(sysconf(_SC_NPROCESSORS_ONLN));
}

static void AddPathSeparatorInTestProgramPath(std::vector<char*>& args) {
  // To run DeathTest in threadsafe mode, gtest requires that the user must invoke the
  // test program via a valid path that contains at least one path separator.
  // The reason is that gtest uses clone() + execve() to run DeathTest in threadsafe mode,
  // and execve() doesn't read environment variable PATH, so execve() will not success
  // until we specify the absolute path or relative path of the test program directly.
  if (strchr(args[0], '/') == nullptr) {
    // strdup() is intentionally leaked: args[0] must stay valid for the whole run.
    args[0] = strdup(g_executable_path.c_str());
  }
}

static void AddGtestFilterSynonym(std::vector<char*>& args) {
  // Support --gtest-filter as a synonym for --gtest_filter.
  for (size_t i = 1; i < args.size(); ++i) {
    if (strncmp(args[i], "--gtest-filter", strlen("--gtest-filter")) == 0) {
      // Index 7 is the '-' between "gtest" and "filter"; rewrite it in place to '_'.
      args[i][7] = '_';
    }
  }
}

// Options consumed by the isolation-mode runner itself (as opposed to options
// forwarded to gtest). Filled in by PickOptions().
struct IsolationTestOptions {
  bool isolate;                // run each test in its own child process?
  size_t job_count;            // max concurrent child processes
  int test_deadline_ms;        // per-test kill deadline
  int test_slow_threshold_ms;  // per-test "slow" warning threshold
  std::string gtest_color;
  bool gtest_print_time;
  int gtest_repeat;
  std::string gtest_output;
};

// Pick options not for gtest: There are two parts in args, one part is used in isolation test mode
// as described in PrintHelpInfo(), the other part is handled by testing::InitGoogleTest() in
// gtest. PickOptions() picks the first part into IsolationTestOptions structure, leaving the second
// part in args.
// Arguments:
//   args is used to pass in all command arguments, and pass out only the part of options for gtest.
//   options is used to pass out test options in isolation mode.
// Return false if there is error in arguments.
static bool PickOptions(std::vector<char*>& args, IsolationTestOptions& options) {
  // --help / -h short-circuits everything: print help and fall through to gtest-off mode.
  for (size_t i = 1; i < args.size(); ++i) {
    if (strcmp(args[i], "--help") == 0 || strcmp(args[i], "-h") == 0) {
      PrintHelpInfo();
      options.isolate = false;
      return true;
    }
  }

  AddPathSeparatorInTestProgramPath(args);
  AddGtestFilterSynonym(args);

  // if --bionic-selftest argument is used, only enable self tests, otherwise remove self tests.
  bool enable_selftest = false;
  for (size_t i = 1; i < args.size(); ++i) {
    if (strcmp(args[i], "--bionic-selftest") == 0) {
      // This argument is to enable "bionic_selftest*" for self test, and is not shown in help info.
      // Don't remove this option from arguments.
      enable_selftest = true;
    }
  }
  // Extract the last --gtest_filter=... (scanning from the end so the last one wins),
  // removing it from args; it is re-added below with selftest patterns spliced in.
  std::string gtest_filter_str;
  for (size_t i = args.size() - 1; i >= 1; --i) {
    if (strncmp(args[i], "--gtest_filter=", strlen("--gtest_filter=")) == 0) {
      gtest_filter_str = args[i] + strlen("--gtest_filter=");
      args.erase(args.begin() + i);
      break;
    }
  }
  if (enable_selftest == true) {
    gtest_filter_str = "bionic_selftest*";
  } else {
    if (gtest_filter_str.empty()) {
      gtest_filter_str = "-bionic_selftest*";
    } else {
      // Find if '-' for NEGATIVE_PATTERNS exists.
      if (gtest_filter_str.find("-") != std::string::npos) {
        // A negative section already exists; append to it.
        gtest_filter_str += ":bionic_selftest*";
      } else {
        // No negative section yet; start one excluding the self tests.
        gtest_filter_str += ":-bionic_selftest*";
      }
    }
  }
  gtest_filter_str = "--gtest_filter=" + gtest_filter_str;
  // strdup() is intentionally leaked: args entries must outlive this function.
  args.push_back(strdup(gtest_filter_str.c_str()));

  options.isolate = true;
  // Parse arguments that make us can't run in isolation mode.
  for (size_t i = 1; i < args.size(); ++i) {
    if (strcmp(args[i], "--no-isolate") == 0) {
      options.isolate = false;
    } else if (strcmp(args[i], "--gtest_list_tests") == 0) {
      options.isolate = false;
    }
  }

  // Stop parsing if we will not run in isolation mode.
  if (options.isolate == false) {
    return true;
  }

  // Init default isolation test options.
  options.job_count = GetDefaultJobCount();
  options.test_deadline_ms = DEFAULT_GLOBAL_TEST_RUN_DEADLINE_MS;
  options.test_slow_threshold_ms = DEFAULT_GLOBAL_TEST_RUN_SLOW_THRESHOLD_MS;
  options.gtest_color = testing::GTEST_FLAG(color);
  options.gtest_print_time = testing::GTEST_FLAG(print_time);
  options.gtest_repeat = testing::GTEST_FLAG(repeat);
  options.gtest_output = testing::GTEST_FLAG(output);

  // Parse arguments speficied for isolation mode.
  // NOTE: this loop erases args entries in place and compensates with --i; do not
  // convert to a range-for or cache args.size().
  for (size_t i = 1; i < args.size(); ++i) {
    if (strncmp(args[i], "-j", strlen("-j")) == 0) {
      char* p = args[i] + strlen("-j");
      int count = 0;
      if (*p != '\0') {
        // Argument like -j5.
        count = atoi(p);
      } else if (args.size() > i + 1) {
        // Arguments like -j 5.
        count = atoi(args[i + 1]);
        ++i;
      }
      if (count <= 0) {
        fprintf(stderr, "invalid job count: %d\n", count);
        return false;
      }
      options.job_count = static_cast<size_t>(count);
    } else if (strncmp(args[i], "--deadline=", strlen("--deadline=")) == 0) {
      int time_ms = atoi(args[i] + strlen("--deadline="));
      if (time_ms <= 0) {
        fprintf(stderr, "invalid deadline: %d\n", time_ms);
        return false;
      }
      options.test_deadline_ms = time_ms;
    } else if (strncmp(args[i], "--slow-threshold=", strlen("--slow-threshold=")) == 0) {
      int time_ms = atoi(args[i] + strlen("--slow-threshold="));
      if (time_ms <= 0) {
        fprintf(stderr, "invalid slow test threshold: %d\n", time_ms);
        return false;
      }
      options.test_slow_threshold_ms = time_ms;
    } else if (strncmp(args[i], "--gtest_color=", strlen("--gtest_color=")) == 0) {
      options.gtest_color = args[i] + strlen("--gtest_color=");
    } else if (strcmp(args[i], "--gtest_print_time=0") == 0) {
      options.gtest_print_time = false;
    } else if (strncmp(args[i], "--gtest_repeat=", strlen("--gtest_repeat=")) == 0) {
      // If the value of gtest_repeat is < 0, then it indicates the tests
      // should be repeated forever.
      options.gtest_repeat = atoi(args[i] + strlen("--gtest_repeat="));
      // Remove --gtest_repeat=xx from arguments, so child process only run one iteration for a single test.
      args.erase(args.begin() + i);
      --i;
    } else if (strncmp(args[i], "--gtest_output=", strlen("--gtest_output=")) == 0) {
      std::string output = args[i] + strlen("--gtest_output=");
      // generate output xml file path according to the strategy in gtest.
      bool success = true;
      if (strncmp(output.c_str(), "xml:", strlen("xml:")) == 0) {
        output = output.substr(strlen("xml:"));
        if (output.size() == 0) {
          success = false;
        }
        // Make absolute path.
        if (success && output[0] != '/') {
          char* cwd = getcwd(NULL, 0);
          if (cwd != NULL) {
            output = std::string(cwd) + "/" + output;
            free(cwd);
          } else {
            success = false;
          }
        }
        // Add file name if output is a directory.
        if (success && output.back() == '/') {
          output += "test_details.xml";
        }
      }
      if (success) {
        options.gtest_output = output;
      } else {
        fprintf(stderr, "invalid gtest_output file: %s\n", args[i]);
        return false;
      }

      // Remove --gtest_output=xxx from arguments, so child process will not write xml file.
      args.erase(args.begin() + i);
      --i;
    }
  }

  // Add --no-isolate in args to prevent child process from running in isolation mode again.
  // As DeathTest will try to call execve(), this argument should always be added.
  args.insert(args.begin() + 1, strdup("--no-isolate"));
  return true;
}

// Returns the absolute path of the running binary via /proc/self/exe.
// Exits the process on failure or truncation (readlink does not NUL-terminate,
// hence the explicit (path, path_len) string construction).
static std::string get_proc_self_exe() {
  char path[PATH_MAX];
  ssize_t path_len = readlink("/proc/self/exe", path, sizeof(path));
  if (path_len <= 0 || path_len >= static_cast<ssize_t>(sizeof(path))) {
    perror("readlink");
    exit(1);
  }

  return std::string(path, path_len);
}

int main(int argc, char** argv, char** envp) {
  // Stash process-level state for tests that need it (see get_argc()/get_argv()/get_envp()).
  g_executable_path = get_proc_self_exe();
  g_argc = argc;
  g_argv = argv;
  g_envp = envp;
  std::vector<char*> arg_list;
  for (int i = 0; i < argc; ++i) {
    arg_list.push_back(argv[i]);
  }

  IsolationTestOptions options;
  if (PickOptions(arg_list, options) == false) {
    return 1;
  }

  if (options.isolate == true) {
    // Set global variables.
    global_test_run_deadline_ms = options.test_deadline_ms;
    global_test_run_slow_threshold_ms = options.test_slow_threshold_ms;
    testing::GTEST_FLAG(color) = options.gtest_color.c_str();
    testing::GTEST_FLAG(print_time) = options.gtest_print_time;
    std::vector<TestCase> testcase_list;

    // argv must stay NULL-terminated for the execve() done by death tests.
    argc = static_cast<int>(arg_list.size());
    arg_list.push_back(NULL);
    if (EnumerateTests(argc, arg_list.data(), testcase_list) == false) {
      return 1;
    }
    bool all_test_passed = RunTestInSeparateProc(argc, arg_list.data(), testcase_list,
                             options.gtest_repeat, options.job_count, options.gtest_output);
    return all_test_passed ? 0 : 1;
  } else {
    // Non-isolation mode: hand everything to stock gtest.
    argc = static_cast<int>(arg_list.size());
    arg_list.push_back(NULL);
    testing::InitGoogleTest(&argc, arg_list.data());
    return RUN_ALL_TESTS();
  }
}

//################################################################################
// Bionic Gtest self test, run this by --bionic-selftest option.

// Exercises the TEST_SUCCESS path of the isolation runner.
TEST(bionic_selftest, test_success) {
  ASSERT_EQ(1, 1);
}

// Exercises the TEST_FAILED path (intentionally failing assertion).
TEST(bionic_selftest, test_fail) {
  ASSERT_EQ(0, 1);
}

// Sleeps past the default slow threshold (2000 ms) to exercise the
// "slow test" warning path, while staying well under the kill deadline.
TEST(bionic_selftest, test_time_warn) {
  sleep(4);
}

// Spins forever so the runner's deadline logic must SIGKILL it (TEST_TIMEOUT path).
TEST(bionic_selftest, test_timeout) {
  while (1) {}
}

// Exercises the "terminated by signal" path by dereferencing a null pointer.
// The pointer is built from atoi("0") rather than nullptr so the compiler
// cannot constant-fold or diagnose the deliberate null dereference away.
TEST(bionic_selftest, test_signal_SEGV_terminated) {
  char* p = reinterpret_cast<char*>(static_cast<intptr_t>(atoi("0")));
  *p = 3;
}

// Fixture forcing the threadsafe death-test style, which makes gtest
// re-exec the test binary (see AddPathSeparatorInTestProgramPath above).
class bionic_selftest_DeathTest : public ::testing::Test {
 protected:
  virtual void SetUp() {
    ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  }
};

// Death-test body that passes its assertion and exits cleanly with code 0.
static void deathtest_helper_success() {
  ASSERT_EQ(1, 1);
  exit(0);
}

TEST_F(bionic_selftest_DeathTest, success) {
  ASSERT_EXIT(deathtest_helper_success(), ::testing::ExitedWithCode(0), "");
}

// Death-test body whose assertion fails, so the child never reaches exit(0)
// and the surrounding ASSERT_EXIT (expecting exit code 0) fails as intended.
static void deathtest_helper_fail() {
  ASSERT_EQ(1, 0);
}

TEST_F(bionic_selftest_DeathTest, fail) {
  ASSERT_EXIT(deathtest_helper_fail(), ::testing::ExitedWithCode(0), "");
}