# execution.py revision 109988c7ccb6f3fd1a58574fa3dfb88beaef6632
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
#       copyright notice, this list of conditions and the following
#       disclaimer in the documentation and/or other materials provided
#       with the distribution.
#     * Neither the name of Google Inc. nor the names of its
#       contributors may be used to endorse or promote products derived
#       from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


import collections
import os
import re
import shutil
import sys
import time

from pool import Pool
from . import commands
from . import perfdata
from . import statusfile
from . import testsuite
from . import utils
from ..objects import output


# Base dir of the v8 checkout.
46BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname( 47 os.path.abspath(__file__))))) 48TEST_DIR = os.path.join(BASE_DIR, "test") 49 50 51class Instructions(object): 52 def __init__(self, command, dep_command, test_id, timeout, verbose): 53 self.command = command 54 self.dep_command = dep_command 55 self.id = test_id 56 self.timeout = timeout 57 self.verbose = verbose 58 59 60# Structure that keeps global information per worker process. 61ProcessContext = collections.namedtuple( 62 "process_context", ["suites", "context"]) 63 64 65def MakeProcessContext(context): 66 """Generate a process-local context. 67 68 This reloads all suites per process and stores the global context. 69 70 Args: 71 context: The global context from the test runner. 72 """ 73 suite_paths = utils.GetSuitePaths(TEST_DIR) 74 suites = {} 75 for root in suite_paths: 76 # Don't reinitialize global state as this is concurrently called from 77 # different processes. 78 suite = testsuite.TestSuite.LoadTestSuite( 79 os.path.join(TEST_DIR, root), global_init=False) 80 if suite: 81 suites[suite.name] = suite 82 return ProcessContext(suites, context) 83 84 85def GetCommand(test, context): 86 d8testflag = [] 87 shell = test.shell() 88 if shell == "d8": 89 d8testflag = ["--test"] 90 if utils.IsWindows(): 91 shell += ".exe" 92 if context.random_seed: 93 d8testflag += ["--random-seed=%s" % context.random_seed] 94 cmd = (context.command_prefix + 95 [os.path.abspath(os.path.join(context.shell_dir, shell))] + 96 d8testflag + 97 test.suite.GetFlagsForTestCase(test, context) + 98 context.extra_flags) 99 return cmd 100 101 102def _GetInstructions(test, context): 103 command = GetCommand(test, context) 104 timeout = context.timeout 105 if ("--stress-opt" in test.flags or 106 "--stress-opt" in context.mode_flags or 107 "--stress-opt" in context.extra_flags): 108 timeout *= 4 109 if "--noenable-vfp3" in context.extra_flags: 110 timeout *= 2 111 # FIXME(machenbach): Make this more OO. 
Don't expose default outcomes or 112 # the like. 113 if statusfile.IsSlow(test.outcomes or [statusfile.PASS]): 114 timeout *= 2 115 if test.dependency is not None: 116 dep_command = [ c.replace(test.path, test.dependency) for c in command ] 117 else: 118 dep_command = None 119 return Instructions( 120 command, dep_command, test.id, timeout, context.verbose) 121 122 123class Job(object): 124 """Stores data to be sent over the multi-process boundary. 125 126 All contained fields will be pickled/unpickled. 127 """ 128 129 def Run(self, process_context): 130 """Executes the job. 131 132 Args: 133 process_context: Process-local information that is initialized by the 134 executing worker. 135 """ 136 raise NotImplementedError() 137 138 139def SetupProblem(exception, test): 140 stderr = ">>> EXCEPTION: %s\n" % exception 141 match = re.match(r"^.*No such file or directory: '(.*)'$", str(exception)) 142 if match: 143 # Extra debuging information when files are claimed missing. 144 f = match.group(1) 145 stderr += ">>> File %s exists? -> %s\n" % (f, os.path.exists(f)) 146 return test.id, output.Output(1, False, "", stderr), 0 147 148 149class TestJob(Job): 150 def __init__(self, test): 151 self.test = test 152 153 def Run(self, process_context): 154 try: 155 # Retrieve a new suite object on the worker-process side. The original 156 # suite object isn't pickled. 157 self.test.SetSuiteObject(process_context.suites) 158 instr = _GetInstructions(self.test, process_context.context) 159 except Exception, e: 160 return SetupProblem(e, self.test) 161 162 start_time = time.time() 163 if instr.dep_command is not None: 164 dep_output = commands.Execute( 165 instr.dep_command, instr.verbose, instr.timeout) 166 # TODO(jkummerow): We approximate the test suite specific function 167 # IsFailureOutput() by just checking the exit code here. Currently 168 # only cctests define dependencies, for which this simplification is 169 # correct. 
170 if dep_output.exit_code != 0: 171 return (instr.id, dep_output, time.time() - start_time) 172 output = commands.Execute(instr.command, instr.verbose, instr.timeout) 173 return (instr.id, output, time.time() - start_time) 174 175 176def RunTest(job, process_context): 177 return job.Run(process_context) 178 179 180class Runner(object): 181 182 def __init__(self, suites, progress_indicator, context): 183 self.datapath = os.path.join("out", "testrunner_data") 184 self.perf_data_manager = perfdata.GetPerfDataManager( 185 context, self.datapath) 186 self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode) 187 self.perf_failures = False 188 self.printed_allocations = False 189 self.tests = [ t for s in suites for t in s.tests ] 190 if not context.no_sorting: 191 for t in self.tests: 192 t.duration = self.perfdata.FetchPerfData(t) or 1.0 193 slow_key = lambda t: statusfile.IsSlow(t.outcomes) 194 self.tests.sort(key=slow_key, reverse=True) 195 self.tests.sort(key=lambda t: t.duration, reverse=True) 196 self._CommonInit(suites, progress_indicator, context) 197 198 def _CommonInit(self, suites, progress_indicator, context): 199 self.total = 0 200 for s in suites: 201 for t in s.tests: 202 t.id = self.total 203 self.total += 1 204 self.indicator = progress_indicator 205 progress_indicator.SetRunner(self) 206 self.context = context 207 self.succeeded = 0 208 self.remaining = self.total 209 self.failed = [] 210 self.crashed = 0 211 self.reran_tests = 0 212 213 def _RunPerfSafe(self, fun): 214 try: 215 fun() 216 except Exception, e: 217 print("PerfData exception: %s" % e) 218 self.perf_failures = True 219 220 def _MaybeRerun(self, pool, test): 221 if test.run <= self.context.rerun_failures_count: 222 # Possibly rerun this test if its run count is below the maximum per 223 # test. <= as the flag controls reruns not including the first run. 224 if test.run == 1: 225 # Count the overall number of reran tests on the first rerun. 
226 if self.reran_tests < self.context.rerun_failures_max: 227 self.reran_tests += 1 228 else: 229 # Don't rerun this if the overall number of rerun tests has been 230 # reached. 231 return 232 if test.run >= 2 and test.duration > self.context.timeout / 20.0: 233 # Rerun slow tests at most once. 234 return 235 236 # Rerun this test. 237 test.duration = None 238 test.output = None 239 test.run += 1 240 pool.add([TestJob(test)]) 241 self.remaining += 1 242 self.total += 1 243 244 def _ProcessTestNormal(self, test, result, pool): 245 self.indicator.AboutToRun(test) 246 test.output = result[1] 247 test.duration = result[2] 248 has_unexpected_output = test.suite.HasUnexpectedOutput(test) 249 if has_unexpected_output: 250 self.failed.append(test) 251 if test.output.HasCrashed(): 252 self.crashed += 1 253 else: 254 self.succeeded += 1 255 self.remaining -= 1 256 # For the indicator, everything that happens after the first run is treated 257 # as unexpected even if it flakily passes in order to include it in the 258 # output. 259 self.indicator.HasRun(test, has_unexpected_output or test.run > 1) 260 if has_unexpected_output: 261 # Rerun test failures after the indicator has processed the results. 262 self._VerbosePrint("Attempting to rerun test after failure.") 263 self._MaybeRerun(pool, test) 264 # Update the perf database if the test succeeded. 265 return not has_unexpected_output 266 267 def _ProcessTestPredictable(self, test, result, pool): 268 def HasDifferentAllocations(output1, output2): 269 def AllocationStr(stdout): 270 for line in reversed((stdout or "").splitlines()): 271 if line.startswith("### Allocations = "): 272 self.printed_allocations = True 273 return line 274 return "" 275 return (AllocationStr(output1.stdout) != AllocationStr(output2.stdout)) 276 277 # Always pass the test duration for the database update. 
278 test.duration = result[2] 279 if test.run == 1 and result[1].HasTimedOut(): 280 # If we get a timeout in the first run, we are already in an 281 # unpredictable state. Just report it as a failure and don't rerun. 282 self.indicator.AboutToRun(test) 283 test.output = result[1] 284 self.remaining -= 1 285 self.failed.append(test) 286 self.indicator.HasRun(test, True) 287 if test.run > 1 and HasDifferentAllocations(test.output, result[1]): 288 # From the second run on, check for different allocations. If a 289 # difference is found, call the indicator twice to report both tests. 290 # All runs of each test are counted as one for the statistic. 291 self.indicator.AboutToRun(test) 292 self.remaining -= 1 293 self.failed.append(test) 294 self.indicator.HasRun(test, True) 295 self.indicator.AboutToRun(test) 296 test.output = result[1] 297 self.indicator.HasRun(test, True) 298 elif test.run >= 3: 299 # No difference on the third run -> report a success. 300 self.indicator.AboutToRun(test) 301 self.remaining -= 1 302 self.succeeded += 1 303 test.output = result[1] 304 self.indicator.HasRun(test, False) 305 else: 306 # No difference yet and less than three runs -> add another run and 307 # remember the output for comparison. 308 test.run += 1 309 test.output = result[1] 310 pool.add([TestJob(test)]) 311 # Always update the perf database. 312 return True 313 314 def Run(self, jobs): 315 self.indicator.Starting() 316 self._RunInternal(jobs) 317 self.indicator.Done() 318 if self.failed: 319 return 1 320 elif self.remaining: 321 return 2 322 return 0 323 324 def _RunInternal(self, jobs): 325 pool = Pool(jobs) 326 test_map = {} 327 queued_exception = [None] 328 def gen_tests(): 329 for test in self.tests: 330 assert test.id >= 0 331 test_map[test.id] = test 332 try: 333 yield [TestJob(test)] 334 except Exception, e: 335 # If this failed, save the exception and re-raise it later (after 336 # all other tests have had a chance to run). 
337 queued_exception[0] = e 338 continue 339 try: 340 it = pool.imap_unordered( 341 fn=RunTest, 342 gen=gen_tests(), 343 process_context_fn=MakeProcessContext, 344 process_context_args=[self.context], 345 ) 346 for result in it: 347 if result.heartbeat: 348 self.indicator.Heartbeat() 349 continue 350 test = test_map[result.value[0]] 351 if self.context.predictable: 352 update_perf = self._ProcessTestPredictable(test, result.value, pool) 353 else: 354 update_perf = self._ProcessTestNormal(test, result.value, pool) 355 if update_perf: 356 self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test)) 357 finally: 358 self._VerbosePrint("Closing process pool.") 359 pool.terminate() 360 self._VerbosePrint("Closing database connection.") 361 self._RunPerfSafe(lambda: self.perf_data_manager.close()) 362 if self.perf_failures: 363 # Nuke perf data in case of failures. This might not work on windows as 364 # some files might still be open. 365 print "Deleting perf test data due to db corruption." 366 shutil.rmtree(self.datapath) 367 if queued_exception[0]: 368 raise queued_exception[0] 369 370 # Make sure that any allocations were printed in predictable mode (if we 371 # ran any tests). 372 assert ( 373 not self.total or 374 not self.context.predictable or 375 self.printed_allocations 376 ) 377 378 def _VerbosePrint(self, text): 379 if self.context.verbose: 380 print text 381 sys.stdout.flush() 382 383 384class BreakNowException(Exception): 385 def __init__(self, value): 386 self.value = value 387 def __str__(self): 388 return repr(self.value) 389