1 /*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <errno.h>
18 #include <fcntl.h>
19 #include <inttypes.h>
20 #include <poll.h>
21 #include <signal.h>
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <unistd.h>
26
27 #include <atomic>
28 #include <memory>
29 #include <string>
30 #include <tuple>
31 #include <vector>
32
33 #include <gtest/gtest.h>
34
35 #include "Color.h"
36 #include "Isolate.h"
37 #include "Log.h"
38 #include "NanoTime.h"
39 #include "Test.h"
40
41 namespace android {
42 namespace gtest_extras {
43
// Last signal delivered to SignalHandler; 0 when none is pending. Read and
// cleared (via exchange) by Isolate::HandleSignals in the main loop.
static std::atomic_int g_signal;

// Async-signal-safe handler: only records the signal number for later
// processing on the main loop.
static void SignalHandler(int sig) {
  g_signal = sig;
}
49
RegisterSignalHandler()50 static void RegisterSignalHandler() {
51 auto ret = signal(SIGINT, SignalHandler);
52 if (ret == SIG_ERR) {
53 FATAL_PLOG("Setting up SIGINT handler failed");
54 }
55 ret = signal(SIGQUIT, SignalHandler);
56 if (ret == SIG_ERR) {
57 FATAL_PLOG("Setting up SIGQUIT handler failed");
58 }
59 }
60
UnregisterSignalHandler()61 static void UnregisterSignalHandler() {
62 auto ret = signal(SIGINT, SIG_DFL);
63 if (ret == SIG_ERR) {
64 FATAL_PLOG("Disabling SIGINT handler failed");
65 }
66 ret = signal(SIGQUIT, SIG_DFL);
67 if (ret == SIG_ERR) {
68 FATAL_PLOG("Disabling SIGQUIT handler failed");
69 }
70 }
71
// Builds "<value><name>" and appends a plural suffix ('s', or 'S' when
// uppercase is set) whenever value is not exactly one.
// e.g. PluralizeString(2, " test") -> "2 tests".
static std::string PluralizeString(size_t value, const char* name, bool uppercase = false) {
  std::string result = std::to_string(value);
  result += name;
  if (value != 1) {
    result.push_back(uppercase ? 'S' : 's');
  }
  return result;
}
83
// Returns true if |str| begins with the googletest "DISABLED_" marker used
// to tag disabled suites and tests.
inline static bool StartsWithDisabled(const std::string& str) {
  static constexpr char kPrefix[] = "DISABLED_";
  // rfind with pos 0 only matches at the very start of the string.
  return str.rfind(kPrefix, 0) == 0;
}
89
// Runs the test binary with --gtest_list_tests (plus any --gtest_filter) and
// parses its output to populate tests_, total_tests_, total_suites_ and
// total_disable_tests_. When sharding is enabled, keeps only every
// total_shards-th listed test, starting at shard_index.
void Isolate::EnumerateTests() {
  // Only apply --gtest_filter if present. This is the only option that changes
  // what tests are listed.
  std::string command(child_args_[0]);
  if (!options_.filter().empty()) {
    command += " --gtest_filter=" + options_.filter();
  }
  command += " --gtest_list_tests";
#if defined(__BIONIC__)
  // Only bionic is guaranteed to support the 'e' option.
  FILE* fp = popen(command.c_str(), "re");
#else
  FILE* fp = popen(command.c_str(), "r");
#endif
  if (fp == nullptr) {
    FATAL_PLOG("Unexpected failure from popen");
  }

  size_t total_shards = options_.total_shards();
  bool sharded = total_shards > 1;
  size_t test_count = 0;
  if (sharded) {
    // Seed the countdown so the first kept test is shard_index tests into
    // the listing; thereafter one test in every total_shards is kept.
    test_count = options_.shard_index() + 1;
  }

  // Listing format: suite names start at column 0, test names are indented
  // by exactly two spaces.
  bool skip_until_next_suite = false;
  std::string suite_name;
  char* buffer = nullptr;
  size_t buffer_len = 0;
  bool new_suite = false;
  while (getline(&buffer, &buffer_len, fp) > 0) {
    if (buffer[0] != ' ') {
      // This is the case name.
      suite_name = buffer;
      auto space_index = suite_name.find(' ');
      if (space_index != std::string::npos) {
        // Drop trailing annotations (e.g. "# TypeParam = ...").
        suite_name.erase(space_index);
      }
      if (suite_name.back() == '\n') {
        suite_name.resize(suite_name.size() - 1);
      }

      if (!options_.allow_disabled_tests() && StartsWithDisabled(suite_name)) {
        // This whole set of tests have been disabled, skip them all.
        skip_until_next_suite = true;
      } else {
        new_suite = true;
        skip_until_next_suite = false;
      }
    } else if (buffer[0] == ' ' && buffer[1] == ' ') {
      if (!skip_until_next_suite) {
        std::string test_name = &buffer[2];
        auto space_index = test_name.find(' ');
        if (space_index != std::string::npos) {
          test_name.erase(space_index);
        }
        if (test_name.back() == '\n') {
          test_name.resize(test_name.size() - 1);
        }
        if (options_.allow_disabled_tests() || !StartsWithDisabled(test_name)) {
          if (!sharded || --test_count == 0) {
            tests_.push_back(std::make_tuple(suite_name, test_name));
            total_tests_++;
            if (new_suite) {
              // Only increment the number of suites when we find at least one test
              // for the suites.
              total_suites_++;
              new_suite = false;
            }
            if (sharded) {
              // Restart the countdown for the next kept test.
              test_count = total_shards;
            }
          }
        } else {
          total_disable_tests_++;
        }
      } else {
        total_disable_tests_++;
      }
    } else {
      printf("Unexpected output from test listing.\nCommand:\n%s\nLine:\n%s\n", command.c_str(),
             buffer);
      exit(1);
    }
  }
  free(buffer);
  if (pclose(fp) == -1) {
    FATAL_PLOG("Unexpected failure from pclose");
  }
}
180
ChildProcessFn(const std::tuple<std::string,std::string> & test)181 int Isolate::ChildProcessFn(const std::tuple<std::string, std::string>& test) {
182 // Make sure the filter is only coming from our command-line option.
183 unsetenv("GTEST_FILTER");
184
185 // Add the filter argument.
186 std::vector<char*> args(child_args_);
187 std::string filter("--gtest_filter=" + GetTestName(test));
188 args.push_back(filter.data());
189
190 int argc = args.size();
191 // Add the null terminator.
192 args.push_back(nullptr);
193 ::testing::InitGoogleTest(&argc, args.data());
194 return RUN_ALL_TESTS();
195 }
196
// Creates a close-on-exec pipe, returning the read end in *read_fd and the
// write end in *write_fd. Returns false on failure with no fds left open.
static bool Pipe(int* read_fd, int* write_fd) {
  int fds[2];

#if defined(__linux__)
  // pipe2 sets CLOEXEC atomically, avoiding a race with a concurrent fork+exec.
  if (pipe2(fds, O_CLOEXEC) != 0) {
    return false;
  }
#else  // defined(__APPLE__)
  if (pipe(fds) != 0) {
    return false;
  }
  // No pipe2 here: set CLOEXEC on both ends after the fact.
  if (fcntl(fds[0], F_SETFD, FD_CLOEXEC) != 0 || fcntl(fds[1], F_SETFD, FD_CLOEXEC) != 0) {
    close(fds[0]);
    close(fds[1]);
    return false;
  }
#endif

  *read_fd = fds[0];
  *write_fd = fds[1];
  return true;
}
219
// Forks one child per free run slot until either all slots are busy or every
// test has been launched. Each child's stdout/stderr is redirected into a
// pipe whose read end the parent watches via running_pollfds_.
void Isolate::LaunchTests() {
  while (!running_indices_.empty() && cur_test_index_ < tests_.size()) {
    int read_fd, write_fd;
    if (!Pipe(&read_fd, &write_fd)) {
      FATAL_PLOG("Unexpected failure from pipe");
    }
    // Non-blocking reads let the parent drain output without stalling.
    if (fcntl(read_fd, F_SETFL, O_NONBLOCK) == -1) {
      FATAL_PLOG("Unexpected failure from fcntl");
    }

    pid_t pid = fork();
    if (pid == -1) {
      FATAL_PLOG("Unexpected failure from fork");
    }
    if (pid == 0) {
      // Child: route both stdout and stderr through the pipe to the parent.
      close(read_fd);
      close(STDOUT_FILENO);
      close(STDERR_FILENO);
      if (dup2(write_fd, STDOUT_FILENO) == -1) {
        exit(1);
      }
      if (dup2(write_fd, STDERR_FILENO) == -1) {
        exit(1);
      }
      close(write_fd);
      // Children must not react to SIGINT/SIGQUIT; the parent handles them.
      UnregisterSignalHandler();
      exit(ChildProcessFn(tests_[cur_test_index_]));
    }

    // Parent: claim a run slot and start tracking the new child.
    size_t run_index = running_indices_.back();
    running_indices_.pop_back();
    // Ownership of the raw Test is taken by running_by_pid_.
    Test* test = new Test(tests_[cur_test_index_], cur_test_index_, run_index, read_fd);
    running_by_pid_.emplace(pid, test);
    running_[run_index] = test;
    running_by_test_index_[cur_test_index_] = test;

    pollfd* pollfd = &running_pollfds_[run_index];
    pollfd->fd = test->fd();
    pollfd->events = POLLIN;
    cur_test_index_++;
    // The parent keeps only the read end of the pipe.
    close(write_fd);
  }
}
263
ReadTestsOutput()264 void Isolate::ReadTestsOutput() {
265 int ready = poll(running_pollfds_.data(), running_pollfds_.size(), 0);
266 if (ready <= 0) {
267 return;
268 }
269
270 for (size_t i = 0; i < running_pollfds_.size(); i++) {
271 pollfd* pfd = &running_pollfds_[i];
272 if (pfd->revents & POLLIN) {
273 Test* test = running_[i];
274 if (!test->Read()) {
275 test->CloseFd();
276 pfd->fd = 0;
277 pfd->events = 0;
278 }
279 }
280 pfd->revents = 0;
281 }
282 }
283
CheckTestsFinished()284 size_t Isolate::CheckTestsFinished() {
285 size_t finished_tests = 0;
286 int status;
287 pid_t pid;
288 while ((pid = TEMP_FAILURE_RETRY(waitpid(-1, &status, WNOHANG))) > 0) {
289 if (pid == -1) {
290 FATAL_PLOG("Unexpected failure from waitpid");
291 }
292 auto entry = running_by_pid_.find(pid);
293 if (entry == running_by_pid_.end()) {
294 FATAL_LOG("Found process not spawned by the isolation framework");
295 }
296
297 std::unique_ptr<Test>& test_ptr = entry->second;
298 Test* test = test_ptr.get();
299 test->Stop();
300
301 // Read any leftover data.
302 test->ReadUntilClosed();
303 if (test->result() == TEST_NONE) {
304 if (WIFSIGNALED(status)) {
305 std::string output(test->name() + " terminated by signal: " + strsignal(WTERMSIG(status)) +
306 ".\n");
307 test->AppendOutput(output);
308 test->set_result(TEST_FAIL);
309 } else {
310 int exit_code = WEXITSTATUS(status);
311 if (exit_code != 0) {
312 std::string output(test->name() + " exited with exitcode " + std::to_string(exit_code) +
313 ".\n");
314 test->AppendOutput(output);
315 test->set_result(TEST_FAIL);
316 } else {
317 // Set the result based on the output, since skipped tests and
318 // passing tests have the same exit status.
319 test->SetResultFromOutput();
320 }
321 }
322 } else if (test->result() == TEST_TIMEOUT) {
323 uint64_t time_ms = options_.deadline_threshold_ms();
324 std::string timeout_str(test->name() + " killed because of timeout at " +
325 std::to_string(time_ms) + " ms.\n");
326 test->AppendOutput(timeout_str);
327 }
328
329 if (test->ExpectFail()) {
330 if (test->result() == TEST_FAIL) {
331 // The test is expected to fail, it failed.
332 test->set_result(TEST_XFAIL);
333 } else if (test->result() == TEST_PASS) {
334 // The test is expected to fail, it passed.
335 test->set_result(TEST_XPASS);
336 }
337 }
338
339 test->Print();
340
341 switch (test->result()) {
342 case TEST_PASS:
343 total_pass_tests_++;
344 if (test->slow()) {
345 total_slow_tests_++;
346 }
347 break;
348 case TEST_XPASS:
349 total_xpass_tests_++;
350 break;
351 case TEST_FAIL:
352 total_fail_tests_++;
353 break;
354 case TEST_TIMEOUT:
355 total_timeout_tests_++;
356 break;
357 case TEST_XFAIL:
358 total_xfail_tests_++;
359 break;
360 case TEST_SKIPPED:
361 total_skipped_tests_++;
362 break;
363 case TEST_NONE:
364 FATAL_LOG("Test result is TEST_NONE, this should not be possible");
365 }
366 finished_tests++;
367 size_t test_index = test->test_index();
368 finished_.emplace(test_index, test_ptr.release());
369 running_indices_.push_back(test->run_index());
370
371 // Remove it from all of the running indices.
372 size_t run_index = test->run_index();
373 if (running_by_pid_.erase(pid) != 1) {
374 printf("Internal error: Erasing pid %d from running_by_pid_ incorrect\n", pid);
375 }
376 if (running_by_test_index_.erase(test_index) == 0) {
377 printf("Internal error: Erasing test_index %zu from running_by_pid_ incorrect\n", test_index);
378 }
379 running_[run_index] = nullptr;
380 running_pollfds_[run_index] = {};
381 }
382
383 // The only valid error case is if ECHILD is returned because there are
384 // no more processes left running.
385 if (pid == -1 && errno != ECHILD) {
386 FATAL_PLOG("Unexpected failure from waitpid");
387 }
388 return finished_tests;
389 }
390
CheckTestsTimeout()391 void Isolate::CheckTestsTimeout() {
392 for (auto& entry : running_by_pid_) {
393 Test* test = entry.second.get();
394 if (test->result() == TEST_TIMEOUT) {
395 continue;
396 }
397
398 if (NanoTime() > test->start_ns() + deadline_threshold_ns_) {
399 test->set_result(TEST_TIMEOUT);
400 // Do not mark this as slow and timed out.
401 test->set_slow(false);
402 // Test gets cleaned up in CheckTestsFinished.
403 kill(entry.first, SIGKILL);
404 } else if (!test->slow() && NanoTime() > test->start_ns() + slow_threshold_ns_) {
405 // Mark the test as running slow.
406 test->set_slow(true);
407 }
408 }
409 }
410
HandleSignals()411 void Isolate::HandleSignals() {
412 int signal = g_signal.exchange(0);
413 if (signal == SIGINT) {
414 printf("Terminating due to signal...\n");
415 for (auto& entry : running_by_pid_) {
416 kill(entry.first, SIGKILL);
417 }
418 exit(1);
419 } else if (signal == SIGQUIT) {
420 printf("List of current running tests:\n");
421 for (const auto& entry : running_by_test_index_) {
422 const Test* test = entry.second;
423 uint64_t run_time_ms = (NanoTime() - test->start_ns()) / kNsPerMs;
424 printf(" %s (elapsed time %" PRId64 " ms)\n", test->name().c_str(), run_time_ms);
425 }
426 }
427 }
428
RunAllTests()429 void Isolate::RunAllTests() {
430 total_pass_tests_ = 0;
431 total_xpass_tests_ = 0;
432 total_fail_tests_ = 0;
433 total_xfail_tests_ = 0;
434 total_timeout_tests_ = 0;
435 total_slow_tests_ = 0;
436 total_skipped_tests_ = 0;
437
438 running_by_test_index_.clear();
439
440 size_t job_count = options_.job_count();
441 running_.clear();
442 running_.resize(job_count);
443 running_pollfds_.resize(job_count);
444 memset(running_pollfds_.data(), 0, running_pollfds_.size() * sizeof(pollfd));
445 running_indices_.clear();
446 for (size_t i = 0; i < job_count; i++) {
447 running_indices_.push_back(i);
448 }
449
450 finished_.clear();
451
452 size_t finished = 0;
453 cur_test_index_ = 0;
454 while (finished < tests_.size()) {
455 LaunchTests();
456
457 ReadTestsOutput();
458
459 finished += CheckTestsFinished();
460
461 CheckTestsTimeout();
462
463 HandleSignals();
464
465 usleep(MIN_USECONDS_WAIT);
466 }
467 }
468
PrintResults(size_t total,const ResultsType & results,std::string * footer)469 void Isolate::PrintResults(size_t total, const ResultsType& results, std::string* footer) {
470 ColoredPrintf(results.color, results.prefix);
471 if (results.list_desc != nullptr) {
472 printf(" %s %s, listed below:\n", PluralizeString(total, " test").c_str(), results.list_desc);
473 } else {
474 printf(" %s, listed below:\n", PluralizeString(total, " test").c_str());
475 }
476 for (const auto& entry : finished_) {
477 const Test* test = entry.second.get();
478 if (results.match_func(*test)) {
479 ColoredPrintf(results.color, results.prefix);
480 printf(" %s", test->name().c_str());
481 if (results.print_func != nullptr) {
482 results.print_func(options_, *test);
483 }
484 printf("\n");
485 }
486 }
487
488 if (results.title == nullptr) {
489 return;
490 }
491
492 if (total < 10) {
493 *footer += ' ';
494 }
495 *footer +=
496 PluralizeString(total, (std::string(" ") + results.title + " TEST").c_str(), true) + '\n';
497 }
498
// Static tables driving PrintResults: each describes one summary section
// (color, line prefix, optional descriptions, which tests belong to it and
// any extra per-test detail to print).

// Tests that completed but exceeded the slow threshold.
Isolate::ResultsType Isolate::SlowResults = {
    .color = COLOR_YELLOW,
    .prefix = "[ SLOW ]",
    .list_desc = nullptr,
    .title = "SLOW",
    .match_func = [](const Test& test) { return test.slow(); },
    .print_func =
        [](const Options& options, const Test& test) {
          printf(" (%" PRIu64 " ms, exceeded %" PRIu64 " ms)", test.RunTimeNs() / kNsPerMs,
                 options.slow_threshold_ms());
        },
};

// Tests that passed even though they were expected to fail.
Isolate::ResultsType Isolate::XpassFailResults = {
    .color = COLOR_RED,
    .prefix = "[ FAILED ]",
    .list_desc = "should have failed",
    .title = "SHOULD HAVE FAILED",
    .match_func = [](const Test& test) { return test.result() == TEST_XPASS; },
    .print_func = nullptr,
};

// Tests that failed outright.
Isolate::ResultsType Isolate::FailResults = {
    .color = COLOR_RED,
    .prefix = "[ FAILED ]",
    .list_desc = nullptr,
    .title = "FAILED",
    .match_func = [](const Test& test) { return test.result() == TEST_FAIL; },
    .print_func = nullptr,
};

// Tests killed for exceeding the deadline threshold.
Isolate::ResultsType Isolate::TimeoutResults = {
    .color = COLOR_RED,
    .prefix = "[ TIMEOUT ]",
    .list_desc = nullptr,
    .title = "TIMEOUT",
    .match_func = [](const Test& test) { return test.result() == TEST_TIMEOUT; },
    .print_func =
        [](const Options&, const Test& test) {
          printf(" (stopped at %" PRIu64 " ms)", test.RunTimeNs() / kNsPerMs);
        },
};

// Tests that reported themselves skipped. No title: skips are listed but
// not summarized in the footer.
Isolate::ResultsType Isolate::SkippedResults = {
    .color = COLOR_GREEN,
    .prefix = "[ SKIPPED ]",
    .list_desc = nullptr,
    .title = nullptr,
    .match_func = [](const Test& test) { return test.result() == TEST_SKIPPED; },
    .print_func = nullptr,
};
550
PrintFooter(uint64_t elapsed_time_ns)551 void Isolate::PrintFooter(uint64_t elapsed_time_ns) {
552 ColoredPrintf(COLOR_GREEN, "[==========]");
553 printf(" %s from %s ran. (%" PRId64 " ms total)\n",
554 PluralizeString(total_tests_, " test").c_str(),
555 PluralizeString(total_suites_, " test suite").c_str(), elapsed_time_ns / kNsPerMs);
556
557 ColoredPrintf(COLOR_GREEN, "[ PASSED ]");
558 printf(" %s.", PluralizeString(total_pass_tests_ + total_xfail_tests_, " test").c_str());
559 if (total_xfail_tests_ != 0) {
560 printf(" (%s)", PluralizeString(total_xfail_tests_, " expected failure").c_str());
561 }
562 printf("\n");
563
564 std::string footer;
565
566 // Tests that were skipped.
567 if (total_skipped_tests_ != 0) {
568 PrintResults(total_skipped_tests_, SkippedResults, &footer);
569 }
570
571 // Tests that ran slow.
572 if (total_slow_tests_ != 0) {
573 PrintResults(total_slow_tests_, SlowResults, &footer);
574 }
575
576 // Tests that passed but should have failed.
577 if (total_xpass_tests_ != 0) {
578 PrintResults(total_xpass_tests_, XpassFailResults, &footer);
579 }
580
581 // Tests that timed out.
582 if (total_timeout_tests_ != 0) {
583 PrintResults(total_timeout_tests_, TimeoutResults, &footer);
584 }
585
586 // Tests that failed.
587 if (total_fail_tests_ != 0) {
588 PrintResults(total_fail_tests_, FailResults, &footer);
589 }
590
591 if (!footer.empty()) {
592 printf("\n%s", footer.c_str());
593 }
594
595 if (total_disable_tests_ != 0) {
596 if (footer.empty()) {
597 printf("\n");
598 }
599 ColoredPrintf(COLOR_YELLOW, " YOU HAVE %s\n\n",
600 PluralizeString(total_disable_tests_, " DISABLED TEST", true).c_str());
601 }
602
603 fflush(stdout);
604 }
605
// Returns |xml| with the five XML special characters replaced by their
// predefined entities, so the string can be embedded safely in an XML
// attribute value or text node. The original appended each special
// character unchanged (the entity text had been lost), producing invalid,
// unescaped XML output.
std::string XmlEscape(const std::string& xml) {
  std::string escaped;
  escaped.reserve(xml.size());

  for (auto c : xml) {
    switch (c) {
      case '<':
        escaped.append("&lt;");
        break;
      case '>':
        escaped.append("&gt;");
        break;
      case '&':
        escaped.append("&amp;");
        break;
      case '\'':
        escaped.append("&apos;");
        break;
      case '"':
        escaped.append("&quot;");
        break;
      default:
        escaped.append(1, c);
        break;
    }
  }

  return escaped;
}
635
636 class TestResultPrinter : public ::testing::EmptyTestEventListener {
637 public:
TestResultPrinter()638 TestResultPrinter() : pinfo_(nullptr) {}
OnTestStart(const::testing::TestInfo & test_info)639 virtual void OnTestStart(const ::testing::TestInfo& test_info) {
640 pinfo_ = &test_info; // Record test_info for use in OnTestPartResult.
641 }
642 virtual void OnTestPartResult(const ::testing::TestPartResult& result);
643
644 private:
645 const ::testing::TestInfo* pinfo_;
646 };
647
// Called after an assertion failure or skip. Prints the location and
// message; successful parts produce no output.
void TestResultPrinter::OnTestPartResult(const ::testing::TestPartResult& result) {
  // If the test part succeeded, we don't need to do anything.
  if (result.type() == ::testing::TestPartResult::kSuccess) {
    return;
  }

  if (result.type() == ::testing::TestPartResult::kSkip) {
    printf("%s:(%d) Skipped\n", result.file_name(), result.line_number());
    // Only print the message when one was supplied.
    if (*result.message()) {
      printf("%s\n", result.message());
    }
  } else {
    // Print failure message from the assertion (e.g. expected this and got that).
    printf("%s:(%d) Failure in test %s.%s\n%s\n", result.file_name(), result.line_number(),
           pinfo_->test_suite_name(), pinfo_->name(), result.message());
  }
  // Flush so the parent sees the output through the pipe promptly.
  fflush(stdout);
}
667
// Output xml file when --gtest_output is used, write this function as we can't reuse
// gtest.cc:XmlUnitTestResultPrinter. The reason is XmlUnitTestResultPrinter is totally
// defined in gtest.cc and not expose to outside. What's more, as we don't run gtest in
// the parent process, we don't have gtest classes which are needed by XmlUnitTestResultPrinter.
void Isolate::WriteXmlResults(uint64_t elapsed_time_ns, time_t start_time) {
  FILE* fp = fopen(options_.xml_file().c_str(), "w");
  if (fp == nullptr) {
    printf("Cannot open xml file '%s': %s\n", options_.xml_file().c_str(), strerror(errno));
    exit(1);
  }

  // Format the run's start time as an ISO-8601-style local timestamp.
  const tm* time_struct = localtime(&start_time);
  if (time_struct == nullptr) {
    FATAL_PLOG("Unexpected failure from localtime");
  }
  char timestamp[40];
  snprintf(timestamp, sizeof(timestamp), "%4d-%02d-%02dT%02d:%02d:%02d",
           time_struct->tm_year + 1900, time_struct->tm_mon + 1, time_struct->tm_mday,
           time_struct->tm_hour, time_struct->tm_min, time_struct->tm_sec);

  fputs("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n", fp);
  fprintf(fp, "<testsuites tests=\"%zu\" failures=\"%zu\" disabled=\"0\" errors=\"0\"",
          tests_.size(), total_fail_tests_ + total_timeout_tests_ + total_xpass_tests_);
  // NOTE(review): this divides by kNsPerMs, so time= is in milliseconds;
  // gtest's own XML reports seconds here — confirm this is intentional.
  fprintf(fp, " timestamp=\"%s\" time=\"%.3lf\" name=\"AllTests\">\n", timestamp,
          double(elapsed_time_ns) / kNsPerMs);

  // Construct the suite information.
  struct SuiteInfo {
    std::string suite_name;
    size_t fails = 0;         // Number of non-passing tests in the suite.
    double elapsed_ms = 0;    // Sum of the suite's test run times.
    std::vector<const Test*> tests;
  };
  // Grouping relies on finished_ listing each suite's tests contiguously
  // (it is keyed by test_index, which follows enumeration order).
  std::string last_suite_name;
  std::vector<SuiteInfo> suites;
  SuiteInfo* info = nullptr;
  for (const auto& entry : finished_) {
    const Test* test = entry.second.get();
    const std::string& suite_name = test->suite_name();
    if (test->result() == TEST_XFAIL) {
      // Skip XFAIL tests.
      continue;
    }
    if (last_suite_name != suite_name) {
      // Strip the trailing '.' from the suite name for the XML output.
      SuiteInfo suite_info{.suite_name = suite_name.substr(0, suite_name.size() - 1)};
      last_suite_name = suite_name;
      suites.push_back(suite_info);
      info = &suites.back();
    }
    info->tests.push_back(test);
    info->elapsed_ms += double(test->RunTimeNs()) / kNsPerMs;
    if (test->result() != TEST_PASS) {
      info->fails++;
    }
  }

  // Emit one <testsuite> element per suite, with a <testcase> per test and
  // a <failure> child for any non-passing test.
  for (auto& suite_entry : suites) {
    fprintf(fp,
            " <testsuite name=\"%s\" tests=\"%zu\" failures=\"%zu\" disabled=\"0\" errors=\"0\"",
            suite_entry.suite_name.c_str(), suite_entry.tests.size(), suite_entry.fails);
    fprintf(fp, " time=\"%.3lf\">\n", suite_entry.elapsed_ms);

    for (auto test : suite_entry.tests) {
      fprintf(fp, " <testcase name=\"%s\" status=\"run\" time=\"%.3lf\" classname=\"%s\"",
              test->test_name().c_str(), double(test->RunTimeNs()) / kNsPerMs,
              suite_entry.suite_name.c_str());
      if (test->result() == TEST_PASS) {
        fputs(" />\n", fp);
      } else {
        fputs(">\n", fp);
        const std::string escaped_output = XmlEscape(test->output());
        fprintf(fp, " <failure message=\"%s\" type=\"\">\n", escaped_output.c_str());
        fputs(" </failure>\n", fp);
        fputs(" </testcase>\n", fp);
      }
    }
    fputs(" </testsuite>\n", fp);
  }
  fputs("</testsuites>\n", fp);
  fclose(fp);
}
749
// Top-level driver: validates sharding options, enumerates the tests,
// installs the signal handlers and the minimal result printer, then runs
// all tests for the requested number of iterations. Returns 0 when every
// iteration had only pass/skip/expected-fail results, 1 otherwise.
int Isolate::Run() {
  slow_threshold_ns_ = options_.slow_threshold_ms() * kNsPerMs;
  deadline_threshold_ns_ = options_.deadline_threshold_ms() * kNsPerMs;

  bool sharding_enabled = options_.total_shards() > 1;
  if (sharding_enabled &&
      (options_.shard_index() < 0 || options_.shard_index() >= options_.total_shards())) {
    ColoredPrintf(COLOR_RED,
                  "Invalid environment variables: we require 0 <= GTEST_SHARD_INDEX < "
                  "GTEST_TOTAL_SHARDS, but you have GTEST_SHARD_INDEX=%" PRId64
                  ", GTEST_TOTAL_SHARDS=%" PRId64,
                  options_.shard_index(), options_.total_shards());
    printf("\n");
    return 1;
  }

  if (!options_.filter().empty()) {
    ColoredPrintf(COLOR_YELLOW, "Note: Google Test filter = %s", options_.filter().c_str());
    printf("\n");
  }

  if (sharding_enabled) {
    ColoredPrintf(COLOR_YELLOW, "Note: This is test shard %" PRId64 " of %" PRId64,
                  options_.shard_index() + 1, options_.total_shards());
    printf("\n");
  }

  EnumerateTests();

  // Stop default result printer to avoid environment setup/teardown information for each test.
  delete ::testing::UnitTest::GetInstance()->listeners().Release(
      ::testing::UnitTest::GetInstance()->listeners().default_result_printer());
  ::testing::UnitTest::GetInstance()->listeners().Append(new TestResultPrinter);
  RegisterSignalHandler();

  std::string job_info("Running " + PluralizeString(total_tests_, " test") + " from " +
                       PluralizeString(total_suites_, " test suite") + " (" +
                       PluralizeString(options_.job_count(), " job") + ").");

  int exit_code = 0;
  // A negative num_iterations() means repeat forever.
  for (int i = 0; options_.num_iterations() < 0 || i < options_.num_iterations(); i++) {
    if (i > 0) {
      printf("\nRepeating all tests (iteration %d) . . .\n\n", i + 1);
    }
    ColoredPrintf(COLOR_GREEN, "[==========]");
    printf(" %s\n", job_info.c_str());
    fflush(stdout);

    time_t start_time = time(nullptr);
    uint64_t time_ns = NanoTime();
    RunAllTests();
    time_ns = NanoTime() - time_ns;

    PrintFooter(time_ns);

    if (!options_.xml_file().empty()) {
      WriteXmlResults(time_ns, start_time);
    }

    // Anything other than pass/skip/expected-failure marks the run failed.
    if (total_pass_tests_ + total_skipped_tests_ + total_xfail_tests_ != tests_.size()) {
      exit_code = 1;
    }
  }

  return exit_code;
}
816
817 } // namespace gtest_extras
818 } // namespace android
819