#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Performance Test Bisect Tool

This script bisects a series of changelists using binary search. It starts at
a bad revision where a performance metric has regressed, and asks for a last
known-good revision. It will then binary search across this revision range by
syncing, building, and running a performance test. If the change is
suspected to occur as a result of WebKit/V8 changes, the script will
further bisect changes to those depots and attempt to narrow down the revision
range.

Example usage using SVN revisions:

./tools/bisect_perf_regression.py -c\
"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
-g 168222 -b 168232 -m shutdown/simple-user-quit

Be aware that if you're using the git workflow and specify an SVN revision,
the script will attempt to find the git SHA1 where SVN changes up to that
revision were merged in.

Example usage using git hashes:

./tools/bisect_perf_regression.py -c\
"out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
-g 1f6e67861535121c5c819c16a666f2436c207e7b\
-b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
-m shutdown/simple-user-quit
"""

import copy
import datetime
import errno
import hashlib
import optparse
import os
import re
import shlex
import shutil
import StringIO
import sys
import time
import zipfile

sys.path.append(os.path.join(
    os.path.dirname(__file__), os.path.pardir, 'telemetry'))

from bisect_results import BisectResults
import bisect_utils
import builder
import math_utils
import request_build
import source_control as source_control_module
from telemetry.util import cloud_storage

# Below is the map of "depot" names to information about each depot. Each depot
# is a repository, and in the process of bisecting, revision ranges in these
# repositories may also be bisected.
#
# Each depot information dictionary may contain:
#   src: Path to the working directory.
#   recurse: True if this repository will get bisected.
#   depends: A list of other repositories that are actually part of the same
#       repository in svn. If the repository has any dependent repositories
#       (e.g. skia/src needs skia/include and skia/gyp to be updated), then
#       they are specified here.
#   svn: URL of SVN repository. Needed for git workflow to resolve hashes to
#       SVN revisions.
#   from: Parent depot that must be bisected before this is bisected.
#   deps_var: Key name in vars variable in DEPS file that has revision
#       information.
DEPOT_DEPS_NAME = {
  'chromium': {
    'src': 'src',
    'recurse': True,
    'depends': None,
    'from': ['cros', 'android-chrome'],
    'viewvc':
        'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
    'deps_var': 'chromium_rev'
  },
  'webkit': {
    'src': 'src/third_party/WebKit',
    'recurse': True,
    'depends': None,
    'from': ['chromium'],
    'viewvc':
        'http://src.chromium.org/viewvc/blink?view=revision&revision=',
    'deps_var': 'webkit_revision'
  },
  'angle': {
    'src': 'src/third_party/angle',
    'src_old': 'src/third_party/angle_dx11',
    'recurse': True,
    'depends': None,
    'from': ['chromium'],
    'platform': 'nt',
    'deps_var': 'angle_revision'
  },
  'v8': {
    'src': 'src/v8',
    'recurse': True,
    'depends': None,
    'from': ['chromium'],
    'custom_deps': bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'v8_bleeding_edge': {
    'src': 'src/v8_bleeding_edge',
    'recurse': True,
    'depends': None,
    'svn': 'https://v8.googlecode.com/svn/branches/bleeding_edge',
    'from': ['v8'],
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'skia/src': {
    'src': 'src/third_party/skia/src',
    'recurse': True,
    'svn': 'http://skia.googlecode.com/svn/trunk/src',
    'depends': ['skia/include', 'skia/gyp'],
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'skia_revision'
  },
  'skia/include': {
    'src': 'src/third_party/skia/include',
    'recurse': False,
    'svn': 'http://skia.googlecode.com/svn/trunk/include',
    'depends': None,
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  },
  'skia/gyp': {
    'src': 'src/third_party/skia/gyp',
    'recurse': False,
    'svn': 'http://skia.googlecode.com/svn/trunk/gyp',
    'depends': None,
    'from': ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  }
}

DEPOT_NAMES = DEPOT_DEPS_NAME.keys()

# The script is in chromium/src/tools/auto_bisect. Throughout this script,
# we use paths to other things in the chromium/src repository.

CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'

# Possible return values from BisectPerformanceMetrics.RunTest.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2

# Maximum time in seconds to wait after posting a build request to the try
# server.
# TODO: Change these values based on the actual time taken by buildbots on
# the try server.
MAX_MAC_BUILD_TIME = 14400
MAX_WIN_BUILD_TIME = 14400
MAX_LINUX_BUILD_TIME = 14400

# The confidence percentage at which confidence can be considered "high".
HIGH_CONFIDENCE = 95

# Patch template to add a new file, DEPS.sha, under the src folder. This file
# contains the SHA1 of the DEPS changes made while bisecting dependency
# repositories. This patch is sent along with the DEPS patch to the try server.
# When a build request is posted with a patch, the bisect builders on the try
# server read the SHA value from this file once the build is produced and
# append it to the build archive filename.
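# For example (hypothetical), if the DEPS patch hashes to 'f00d...', the
# resulting archive would be named full-build-<platform>_<revision>_f00d....zip
# (see GetZipFileName below).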
DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha
new file mode 100644
--- /dev/null
+++ src/DEPS.sha
@@ -0,0 +1 @@
+%(deps_sha)s
"""

# The possible values of the --bisect_mode flag, which determines what to
# use when classifying a revision as "good" or "bad".
BISECT_MODE_MEAN = 'mean'
BISECT_MODE_STD_DEV = 'std_dev'
BISECT_MODE_RETURN_CODE = 'return_code'

# The perf dashboard looks for a string like "Estimated Confidence: 95%"
# to decide whether or not to cc the author(s). If you change this, please
# update the perf dashboard as well.
RESULTS_BANNER = """
===== BISECT JOB RESULTS =====
Status: %(status)s

Test Command: %(command)s
Test Metric: %(metrics)s
Relative Change: %(change)s
Estimated Confidence: %(confidence).02f%%"""

# The perf dashboard specifically looks for the string
# "Author : " to parse out who to cc on a bug. If you change the
# formatting here, please update the perf dashboard as well.
RESULTS_REVISION_INFO = """
===== SUSPECTED CL(s) =====
Subject : %(subject)s
Author : %(author)s%(email_info)s%(commit_info)s
Commit : %(cl)s
Date : %(cl_date)s"""

REPRO_STEPS_LOCAL = """
==== INSTRUCTIONS TO REPRODUCE ====
To run locally:
 - Use the test command given under 'BISECT JOB RESULTS' above.
 - Consider using a profiler. Pass --profiler=list to list available profilers.
"""

REPRO_STEPS_TRYJOB = """
To reproduce on a performance try bot:
 1. Edit run-perf-test.cfg
 2. Upload your patch with: $ git cl upload --bypass-hooks
 3. Send to the try server: $ git cl try -m tryserver.chromium.perf -b <bot>

Notes:
 a) Follow the in-file instructions in run-perf-test.cfg.
 b) run-perf-test.cfg is under tools/ or under third_party/WebKit/Tools.
 c) Do your edits preferably under a new git branch.
 d) --browser=release and --browser=android-chromium-testshell are supported
    depending on the platform (desktop|android).
 e) Strip any src/ directories from the head of relative path names.
 f) Make sure to use the appropriate bot on step 3.

For more details please visit
https://sites.google.com/a/chromium.org/dev/developers/performance-try-bots"""

REPRO_STEPS_TRYJOB_TELEMETRY = """
To reproduce on a performance try bot:
%(command)s
(Where <bot-name> comes from tools/perf/run_benchmark --browser=list)

For more details please visit
https://sites.google.com/a/chromium.org/dev/developers/performance-try-bots
"""

RESULTS_THANKYOU = """
===== THANK YOU FOR CHOOSING BISECT AIRLINES =====
Visit http://www.chromium.org/developers/core-principles for Chrome's policy
on perf regressions.
Contact chrome-perf-dashboard-team with any questions or suggestions about
bisecting.
.                  .-----.
.                 .---.  \  \==)
.                 |PERF\  \  \\
.                 | ---------'-------'-----------.
.                 . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 |_`-.
.                 \_____________.-------._______________)
.                /  /
.               /  /
.              /  /==)
.             ._____."""


def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables."""
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  DEPOT_DEPS_NAME = dict(DEPOT_DEPS_NAME.items() + depot_info.items())
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()


def GetSHA1HexDigest(contents):
  """Returns the SHA1 hex digest of the given string."""
  return hashlib.sha1(contents).hexdigest()


def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None):
  """Gets the archive file name for the given revision."""
  def PlatformName():
    """Returns a string to be used in paths for the platform."""
    if bisect_utils.IsWindowsHost():
      # Build archive for x64 is still stored with the "win32" suffix.
      # See chromium_utils.PlatformName().
      if bisect_utils.Is64BitWindows() and target_arch == 'x64':
        return 'win32'
      return 'win32'
    if bisect_utils.IsLinuxHost():
      # Android builds are also archived with the "full-build-linux" prefix.
      return 'linux'
    if bisect_utils.IsMacHost():
      return 'mac'
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)

  base_name = 'full-build-%s' % PlatformName()
  if not build_revision:
    return base_name
  if patch_sha:
    build_revision = '%s_%s' % (build_revision, patch_sha)
  return '%s_%s.zip' % (base_name, build_revision)


def GetRemoteBuildPath(build_revision, target_platform='chromium',
                       target_arch='ia32', patch_sha=None):
  """Returns the URL to download the build from."""
  def GetGSRootFolderName(target_platform):
    """Returns the Google Cloud Storage root folder name."""
    if bisect_utils.IsWindowsHost():
      if bisect_utils.Is64BitWindows() and target_arch == 'x64':
        return 'Win x64 Builder'
      return 'Win Builder'
    if bisect_utils.IsLinuxHost():
      if target_platform == 'android':
        return 'android_perf_rel'
      return 'Linux Builder'
    if bisect_utils.IsMacHost():
      return 'Mac Builder'
    raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

  base_filename = GetZipFileName(
      build_revision, target_arch, patch_sha)
  builder_folder = GetGSRootFolderName(target_platform)
  return '%s/%s' % (builder_folder, base_filename)


def FetchFromCloudStorage(bucket_name, source_path, destination_path):
  """Fetches file(s) from Google Cloud Storage.

  Args:
    bucket_name: Google Storage bucket name.
    source_path: Source file path.
    destination_path: Destination file path.

  Returns:
    Downloaded file path if it exists, otherwise None.
  """
  target_file = os.path.join(destination_path, os.path.basename(source_path))
  try:
    if cloud_storage.Exists(bucket_name, source_path):
      print 'Fetching file from gs://%s/%s ...' % (bucket_name, source_path)
      cloud_storage.Get(bucket_name, source_path, destination_path)
      if os.path.exists(target_file):
        return target_file
    else:
      print ('File gs://%s/%s not found in cloud storage.' % (
          bucket_name, source_path))
  except Exception as e:
    print 'Something went wrong while fetching file from cloud: %s' % e
    if os.path.exists(target_file):
      os.remove(target_file)
  return None


# This is copied from build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
  """Creates an entire path, if it doesn't already exist."""
  file_path = os.path.join(*path)
  try:
    os.makedirs(file_path)
  except OSError as e:
    if e.errno != errno.EEXIST:
      return False
  return True


# This was copied from build/scripts/common/chromium_utils.py.
def ExtractZip(filename, output_dir, verbose=True):
  """Extracts the zip archive in the output directory."""
  MaybeMakeDirectory(output_dir)

  # On Linux and Mac, we use the unzip command because it handles links and
  # file bits (executable), which is much easier than trying to do that with
  # ZipInfo options.
  #
  # The Mac version of unzip unfortunately does not support Zip64, whereas
  # the Python module does, so we have to fall back to the Python zip module
  # on Mac if the file size is greater than 4GB.
  #
  # On Windows, try to use 7z if it is installed, otherwise fall back to the
  # Python zip module and pray we don't have files larger than 512MB to unzip.
  unzip_cmd = None
  if ((bisect_utils.IsMacHost()
       and os.path.getsize(filename) < 4 * 1024 * 1024 * 1024)
      or bisect_utils.IsLinuxHost()):
    unzip_cmd = ['unzip', '-o']
  elif (bisect_utils.IsWindowsHost()
        and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe')):
    unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']

  if unzip_cmd:
    # Make sure the path is absolute before changing directories.
    filepath = os.path.abspath(filename)
    saved_dir = os.getcwd()
    os.chdir(output_dir)
    command = unzip_cmd + [filepath]
    result = bisect_utils.RunProcess(command)
    os.chdir(saved_dir)
    if result:
      raise IOError('unzip failed: %s => %s' % (str(command), result))
  else:
    assert bisect_utils.IsWindowsHost() or bisect_utils.IsMacHost()
    zf = zipfile.ZipFile(filename)
    for name in zf.namelist():
      if verbose:
        print 'Extracting %s' % name
      zf.extract(name, output_dir)
      if bisect_utils.IsMacHost():
        # Restore permission bits.
        os.chmod(os.path.join(output_dir, name),
                 zf.getinfo(name).external_attr >> 16L)


def WriteStringToFile(text, file_name):
  """Writes text to a file, raising a RuntimeError on failure."""
  try:
    with open(file_name, 'wb') as f:
      f.write(text)
  except IOError:
    raise RuntimeError('Error writing to file [%s]' % file_name)


def ReadStringFromFile(file_name):
  """Reads text from a file, raising a RuntimeError on failure."""
  try:
    with open(file_name) as f:
      return f.read()
  except IOError:
    raise RuntimeError('Error reading file [%s]' % file_name)


def ChangeBackslashToSlashInPatch(diff_text):
  """Formats file paths in the given patch text to Unix-style paths."""
  if not diff_text:
    return None
  diff_lines = diff_text.split('\n')
  for i in range(len(diff_lines)):
    line = diff_lines[i]
    if line.startswith('--- ') or line.startswith('+++ '):
      diff_lines[i] = line.replace('\\', '/')
  return '\n'.join(diff_lines)


def _ParseRevisionsFromDEPSFileManually(deps_file_contents):
  """Parses the vars section of the DEPS file using regular expressions.

  Args:
    deps_file_contents: The DEPS file contents as a string.

  Returns:
    A dictionary in the format {depot: revision} if successful, otherwise None.
  """
  # We'll parse the "vars" section of the DEPS file.
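  # A hypothetical excerpt of the section this matches might look like:
  #   vars = {
  #     'webkit_revision': '<40-character git hash>',
  #     'v8_revision': '<40-character git hash>',
  #   }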
  rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE)
  re_results = rxp.search(deps_file_contents)

  if not re_results:
    return None

  # We should be left with a series of entries in the vars component of
  # the DEPS file with the following format:
  # 'depot_name': 'revision',
  vars_body = re_results.group('vars_body')
  rxp = re.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
                   re.MULTILINE)
  re_results = rxp.findall(vars_body)

  return dict(re_results)


def _WaitUntilBuildIsReady(
    fetch_build, bot_name, builder_host, builder_port, build_request_id,
    max_timeout):
  """Waits until the build is produced by the bisect builder on the try server.

  Args:
    fetch_build: Function to check and download build from cloud storage.
    bot_name: Builder bot name on the try server.
    builder_host: Try server host name.
    builder_port: Try server port.
    build_request_id: A unique ID of the build request posted to the try
        server.
    max_timeout: Maximum time to wait for the build.

  Returns:
    A tuple (downloaded archive file path or None, message describing the
    result).
  """
  # Build number on the try server.
  build_num = None
  # Interval to check build on cloud storage.
  poll_interval = 60
  # Interval to check build status on the try server, in seconds.
  status_check_interval = 600
  last_status_check = time.time()
  start_time = time.time()
  while True:
    # Check for the build on gs://chrome-perf and download it if it exists.
    res = fetch_build()
    if res:
      return (res, 'Build successfully found')
    elapsed_status_check = time.time() - last_status_check
    # To avoid overloading the try server with status check requests, we only
    # check the build status every 10 minutes.
    if elapsed_status_check > status_check_interval:
      last_status_check = time.time()
      if not build_num:
        # Get the build number on the try server for the current build.
        build_num = request_build.GetBuildNumFromBuilder(
            build_request_id, bot_name, builder_host, builder_port)
      # Check the status of the build using the build number.
      # Note: The build is treated as PENDING if the build number is not found
      # on the try server.
      build_status, status_link = request_build.GetBuildStatus(
          build_num, bot_name, builder_host, builder_port)
      if build_status == request_build.FAILED:
        return (None, 'Failed to produce build, log: %s' % status_link)
    elapsed_time = time.time() - start_time
    if elapsed_time > max_timeout:
      return (None, 'Timed out: %ss without build' % max_timeout)

    print 'Time elapsed: %ss without build.' % elapsed_time
    time.sleep(poll_interval)
    # For some reason, Mac bisect bots were not flushing stdout periodically,
    # and as a result the buildbot command timed out. Flush stdout on all
    # platforms while waiting for the build.
    sys.stdout.flush()


def _UpdateV8Branch(deps_content):
  """Updates the V8 branch in the DEPS file to process v8_bleeding_edge.

  Checks for "v8_branch" in the DEPS file and, if it exists, updates its value
  to the v8_bleeding_edge branch. Note: "v8_branch" was added to the DEPS
  vars at DEPS revision 254916, so to support prior DEPS revisions we check
  for "src/v8": <v8 source path> in DEPS and update that instead.

  Args:
    deps_content: DEPS file contents to be modified.

  Returns:
    Modified DEPS file contents as a string.
543 """ 544 new_branch = r'branches/bleeding_edge' 545 v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")') 546 if re.search(v8_branch_pattern, deps_content): 547 deps_content = re.sub(v8_branch_pattern, new_branch, deps_content) 548 else: 549 # Replaces the branch assigned to "src/v8" key in DEPS file. 550 # Format of "src/v8" in DEPS: 551 # "src/v8": 552 # (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"), 553 # So, "/trunk@" is replace with "/branches/bleeding_edge@" 554 v8_src_pattern = re.compile( 555 r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE) 556 if re.search(v8_src_pattern, deps_content): 557 deps_content = re.sub(v8_src_pattern, new_branch, deps_content) 558 return deps_content 559 560 561def _UpdateDEPSForAngle(revision, depot, deps_file): 562 """Updates DEPS file with new revision for Angle repository. 563 564 This is a hack for Angle depot case because, in DEPS file "vars" dictionary 565 variable contains "angle_revision" key that holds git hash instead of 566 SVN revision. 567 568 And sometimes "angle_revision" key is not specified in "vars" variable, 569 in such cases check "deps" dictionary variable that matches 570 angle.git@[a-fA-F0-9]{40}$ and replace git hash. 571 """ 572 deps_var = DEPOT_DEPS_NAME[depot]['deps_var'] 573 try: 574 deps_contents = ReadStringFromFile(deps_file) 575 # Check whether the depot and revision pattern in DEPS file vars variable 576 # e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa". 577 angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' % 578 deps_var, re.MULTILINE) 579 match = re.search(angle_rev_pattern % deps_var, deps_contents) 580 if match: 581 # Update the revision information for the given depot 582 new_data = re.sub(angle_rev_pattern, revision, deps_contents) 583 else: 584 # Check whether the depot and revision pattern in DEPS file deps 585 # variable. e.g., 586 # "src/third_party/angle": Var("chromium_git") + 587 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",. 588 angle_rev_pattern = re.compile( 589 r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE) 590 match = re.search(angle_rev_pattern, deps_contents) 591 if not match: 592 print 'Could not find angle revision information in DEPS file.' 593 return False 594 new_data = re.sub(angle_rev_pattern, revision, deps_contents) 595 # Write changes to DEPS file 596 WriteStringToFile(new_data, deps_file) 597 return True 598 except IOError, e: 599 print 'Something went wrong while updating DEPS file, %s' % e 600 return False 601 602 603def _TryParseHistogramValuesFromOutput(metric, text): 604 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>. 605 606 Args: 607 metric: The metric as a list of [<trace>, <value>] strings. 608 text: The text to parse the metric values from. 609 610 Returns: 611 A list of floating point numbers found, [] if none were found. 612 """ 613 metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1]) 614 615 text_lines = text.split('\n') 616 values_list = [] 617 618 for current_line in text_lines: 619 if metric_formatted in current_line: 620 current_line = current_line[len(metric_formatted):] 621 622 try: 623 histogram_values = eval(current_line) 624 625 for b in histogram_values['buckets']: 626 average_for_bucket = float(b['high'] + b['low']) * 0.5 627 # Extends the list with N-elements with the average for that bucket. 
          values_list.extend([average_for_bucket] * b['count'])
      except Exception:
        pass

  return values_list


def _TryParseResultValuesFromOutput(metric, text):
  """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...

  Args:
    metric: The metric as a list of [<trace>, <value>] string pairs.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  # Format is: RESULT <graph>: <trace>= <value> <units>
  metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= <value>
  single_result_re = re.compile(
      metric_re + '\s*(?P<VALUE>[-]?\d*(\.\d*)?)')

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
  multi_results_re = re.compile(
      metric_re + '\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
  mean_stddev_re = re.compile(
      metric_re +
      '\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')

  text_lines = text.split('\n')
  values_list = []
  for current_line in text_lines:
    # Parse the output from the performance test for the metric we're
    # interested in.
    single_result_match = single_result_re.search(current_line)
    multi_results_match = multi_results_re.search(current_line)
    mean_stddev_match = mean_stddev_re.search(current_line)
    if (single_result_match is not None and
        single_result_match.group('VALUE')):
      values_list += [single_result_match.group('VALUE')]
    elif (multi_results_match is not None and
          multi_results_match.group('VALUES')):
      metric_values = multi_results_match.group('VALUES')
      values_list += metric_values.split(',')
    elif (mean_stddev_match is not None and
          mean_stddev_match.group('MEAN')):
      values_list += [mean_stddev_match.group('MEAN')]

  values_list = [float(v) for v in values_list
                 if bisect_utils.IsStringFloat(v)]

  # If the metric is times/t, we need to sum the timings in order to get
  # similar regression results as the try-bots.
  metrics_to_sum = [
      ['times', 't'],
      ['times', 'page_load_time'],
      ['cold_times', 'page_load_time'],
      ['warm_times', 'page_load_time'],
  ]

  if metric in metrics_to_sum:
    if values_list:
      values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]

  return values_list


def _ParseMetricValuesFromOutput(metric, text):
  """Parses output from performance_ui_tests and retrieves the results for
  a given metric.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  metric_values = _TryParseResultValuesFromOutput(metric, text)

  if not metric_values:
    metric_values = _TryParseHistogramValuesFromOutput(metric, text)

  return metric_values


def _GenerateProfileIfNecessary(command_args):
  """Checks the command line of the performance test for dependencies on
  profile generation, and runs tools/perf/generate_profile as necessary.

  Args:
    command_args: Command line being passed to performance test, as a list.

  Returns:
    False if profile generation was necessary and failed, otherwise True.
730 """ 731 if '--profile-dir' in ' '.join(command_args): 732 # If we were using python 2.7+, we could just use the argparse 733 # module's parse_known_args to grab --profile-dir. Since some of the 734 # bots still run 2.6, have to grab the arguments manually. 735 arg_dict = {} 736 args_to_parse = ['--profile-dir', '--browser'] 737 738 for arg_to_parse in args_to_parse: 739 for i, current_arg in enumerate(command_args): 740 if arg_to_parse in current_arg: 741 current_arg_split = current_arg.split('=') 742 743 # Check 2 cases, --arg=<val> and --arg <val> 744 if len(current_arg_split) == 2: 745 arg_dict[arg_to_parse] = current_arg_split[1] 746 elif i + 1 < len(command_args): 747 arg_dict[arg_to_parse] = command_args[i+1] 748 749 path_to_generate = os.path.join('tools', 'perf', 'generate_profile') 750 751 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'): 752 profile_path, profile_type = os.path.split(arg_dict['--profile-dir']) 753 return not bisect_utils.RunProcess(['python', path_to_generate, 754 '--profile-type-to-generate', profile_type, 755 '--browser', arg_dict['--browser'], '--output-dir', profile_path]) 756 return False 757 return True 758 759 760def _AddRevisionsIntoRevisionData(revisions, depot, sort, revision_data): 761 """Adds new revisions to the revision_data dictionary and initializes them. 762 763 Args: 764 revisions: List of revisions to add. 765 depot: Depot that's currently in use (src, webkit, etc...) 766 sort: Sorting key for displaying revisions. 767 revision_data: A dictionary to add the new revisions into. 768 Existing revisions will have their sort keys adjusted. 769 """ 770 num_depot_revisions = len(revisions) 771 772 for _, v in revision_data.iteritems(): 773 if v['sort'] > sort: 774 v['sort'] += num_depot_revisions 775 776 for i in xrange(num_depot_revisions): 777 r = revisions[i] 778 revision_data[r] = { 779 'revision' : r, 780 'depot' : depot, 781 'value' : None, 782 'perf_time' : 0, 783 'build_time' : 0, 784 'passed' : '?', 785 'sort' : i + sort + 1, 786 } 787 788 789def _PrintThankYou(): 790 print RESULTS_THANKYOU 791 792 793def _PrintTableRow(column_widths, row_data): 794 """Prints out a row in a formatted table that has columns aligned. 795 796 Args: 797 column_widths: A list of column width numbers. 798 row_data: A list of items for each column in this row. 799 """ 800 assert len(column_widths) == len(row_data) 801 text = '' 802 for i in xrange(len(column_widths)): 803 current_row_data = row_data[i].center(column_widths[i], ' ') 804 text += ('%%%ds' % column_widths[i]) % current_row_data 805 print text 806 807 808def _PrintStepTime(revision_data_sorted): 809 """Prints information about how long various steps took. 

  Args:
    revision_data_sorted: The sorted list of revision data dictionaries.
  """
  step_perf_time_avg = 0.0
  step_build_time_avg = 0.0
  step_count = 0.0
  for _, current_data in revision_data_sorted:
    if current_data['value']:
      step_perf_time_avg += current_data['perf_time']
      step_build_time_avg += current_data['build_time']
      step_count += 1
  if step_count:
    step_perf_time_avg = step_perf_time_avg / step_count
    step_build_time_avg = step_build_time_avg / step_count
  print
  print 'Average build time : %s' % datetime.timedelta(
      seconds=int(step_build_time_avg))
  print 'Average test time : %s' % datetime.timedelta(
      seconds=int(step_perf_time_avg))


class DepotDirectoryRegistry(object):

  def __init__(self, src_cwd):
    self.depot_cwd = {}
    for depot in DEPOT_NAMES:
      # The working directory of each depot is just the path to the depot, but
      # since we're already in 'src', we can skip that part.
      path_in_src = DEPOT_DEPS_NAME[depot]['src'][4:]
      self.AddDepot(depot, os.path.join(src_cwd, path_in_src))

    self.AddDepot('chromium', src_cwd)
    self.AddDepot('cros', os.path.join(src_cwd, 'tools', 'cros'))

  def AddDepot(self, depot_name, depot_dir):
    self.depot_cwd[depot_name] = depot_dir

  def GetDepotDir(self, depot_name):
    if depot_name in self.depot_cwd:
      return self.depot_cwd[depot_name]
    else:
      assert False, ('Unknown depot [ %s ] encountered. Possibly a new one '
                     'was added without proper support?' % depot_name)

  def ChangeToDepotDir(self, depot_name):
    """Given a depot, changes to the appropriate working directory.

    Args:
      depot_name: The name of the depot (see DEPOT_NAMES).
    """
    os.chdir(self.GetDepotDir(depot_name))


class BisectPerformanceMetrics(object):
  """This class contains functionality to perform a bisection of a range of
  revisions to narrow down where performance regressions may have occurred.

  The main entry-point is the Run method.
  """

  def __init__(self, source_control, opts):
    super(BisectPerformanceMetrics, self).__init__()

    self.opts = opts
    self.source_control = source_control

    # The src directory here is NOT the src/ directory for the repository
    # where the bisect script is running from. Instead, it's the src/ directory
    # inside the bisect/ directory which is created before running.
    self.src_cwd = os.getcwd()

    self.depot_registry = DepotDirectoryRegistry(self.src_cwd)
    self.cleanup_commands = []
    self.warnings = []
    self.builder = builder.Builder.FromOpts(opts)

  def PerformCleanup(self):
    """Performs cleanup when the script is finished."""
    os.chdir(self.src_cwd)
    for c in self.cleanup_commands:
      if c[0] == 'mv':
        shutil.move(c[1], c[2])
      else:
        assert False, 'Invalid cleanup command.'

  def GetRevisionList(self, depot, bad_revision, good_revision):
    """Retrieves a list of all the commits between the bad revision and
    last known good revision."""

    revision_work_list = []

    if depot == 'cros':
      revision_range_start = good_revision
      revision_range_end = bad_revision

      cwd = os.getcwd()
      self.depot_registry.ChangeToDepotDir('cros')

      # Print the commit timestamps for every commit in the revision time
      # range. We'll sort them and bisect by that. There is a remote chance
      # that 2 (or more) commits will share the exact same timestamp, but it's
      # probably safe to ignore that case.
      cmd = ['repo', 'forall', '-c',
             'git log --format=%%ct --before=%d --after=%d' % (
                 revision_range_end, revision_range_start)]
      output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd)

      assert not return_code, ('An error occurred while running '
                               '"%s"' % ' '.join(cmd))

      os.chdir(cwd)

      revision_work_list = list(set(
          [int(o) for o in output.split('\n') if bisect_utils.IsStringInt(o)]))
      revision_work_list = sorted(revision_work_list, reverse=True)
    else:
      cwd = self.depot_registry.GetDepotDir(depot)
      revision_work_list = self.source_control.GetRevisionList(bad_revision,
          good_revision, cwd=cwd)

    return revision_work_list

  def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
    commit_position = self.source_control.GetCommitPosition(revision)

    if bisect_utils.IsStringInt(commit_position):
      # V8 is tricky to bisect, in that there are only a few instances when
      # we can dive into bleeding_edge and get back a meaningful result.
      # Try to detect a V8 "business as usual" case, which is when:
      #  1. trunk revision N has description "Version X.Y.Z"
      #  2. bleeding_edge revision (N-1) has description "Prepare push to
      #     trunk. Now working on X.Y.(Z+1)."
      #
      # As of 01/24/2014, V8 trunk descriptions are formatted:
      # "Version 3.X.Y (based on bleeding_edge revision rZ)"
      # So we can just try parsing that out first and fall back to the old way.
      v8_dir = self.depot_registry.GetDepotDir('v8')
      v8_bleeding_edge_dir = self.depot_registry.GetDepotDir('v8_bleeding_edge')

      revision_info = self.source_control.QueryRevisionInfo(revision,
                                                            cwd=v8_dir)

      version_re = re.compile("Version (?P<values>[0-9,.]+)")

      regex_results = version_re.search(revision_info['subject'])

      if regex_results:
        git_revision = None

        # Look for "based on bleeding_edge" and parse out the revision.
        if 'based on bleeding_edge' in revision_info['subject']:
          try:
            bleeding_edge_revision = revision_info['subject'].split(
                'bleeding_edge revision r')[1]
            bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
            git_revision = self.source_control.ResolveToRevision(
                bleeding_edge_revision, 'v8_bleeding_edge', DEPOT_DEPS_NAME, 1,
                cwd=v8_bleeding_edge_dir)
            return git_revision
          except (IndexError, ValueError):
            pass

        if not git_revision:
          # That wasn't successful, so try the old way of looking for
          # "Prepare push to".
          git_revision = self.source_control.ResolveToRevision(
              int(commit_position) - 1, 'v8_bleeding_edge', DEPOT_DEPS_NAME, -1,
              cwd=v8_bleeding_edge_dir)

          if git_revision:
            revision_info = self.source_control.QueryRevisionInfo(git_revision,
                cwd=v8_bleeding_edge_dir)

            if 'Prepare push to trunk' in revision_info['subject']:
              return git_revision
    return None

  def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
    cwd = self.depot_registry.GetDepotDir('v8')
    cmd = ['log', '--format=%ct', '-1', revision]
    output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
    commit_time = int(output)
    commits = []

    if search_forward:
      cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
             'origin/master']
      output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
      output = output.split()
      commits = output
      commits = reversed(commits)
    else:
      cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
             'origin/master']
      output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
      output = output.split()
      commits = output

    bleeding_edge_revision = None

    for c in commits:
      bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)
      if bleeding_edge_revision:
        break

    return bleeding_edge_revision

  def _ParseRevisionsFromDEPSFile(self, depot):
    """Parses the local DEPS file to determine blink/skia/v8 revisions which
    may be needed if the bisect recurses into those depots later.

    Args:
      depot: Name of depot being bisected.

    Returns:
      A dict in the format {depot: revision} if successful, otherwise None.
    """
    try:
      deps_data = {
          'Var': lambda _: deps_data["vars"][_],
          'From': lambda *args: None,
      }

      deps_file = bisect_utils.FILE_DEPS_GIT
      if not os.path.exists(deps_file):
        deps_file = bisect_utils.FILE_DEPS
      execfile(deps_file, {}, deps_data)
      deps_data = deps_data['deps']

      rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
      results = {}
      for depot_name, depot_data in DEPOT_DEPS_NAME.iteritems():
        if (depot_data.get('platform') and
            depot_data.get('platform') != os.name):
          continue

        if depot_data.get('recurse') and depot in depot_data.get('from'):
          depot_data_src = depot_data.get('src') or depot_data.get('src_old')
          src_dir = deps_data.get(depot_data_src)
          if src_dir:
            self.depot_registry.AddDepot(depot_name, os.path.join(
                self.src_cwd, depot_data_src[4:]))
            re_results = rxp.search(src_dir)
            if re_results:
              results[depot_name] = re_results.group('revision')
            else:
              warning_text = ('Could not parse revision for %s while '
                              'bisecting %s' % (depot_name, depot))
              if not warning_text in self.warnings:
                self.warnings.append(warning_text)
          else:
            results[depot_name] = None
      return results
    except ImportError:
      deps_file_contents = ReadStringFromFile(deps_file)
      parse_results = _ParseRevisionsFromDEPSFileManually(deps_file_contents)
      results = {}
      for depot_name, depot_revision in parse_results.iteritems():
        depot_revision = depot_revision.strip('@')
        print depot_name, depot_revision
        for current_name, current_data in DEPOT_DEPS_NAME.iteritems():
          if (current_data.has_key('deps_var') and
              current_data['deps_var'] == depot_name):
            src_name = current_name
            results[src_name] = depot_revision
            break
      return results

  def _Get3rdPartyRevisions(self, depot):
    """Parses the DEPS file to determine WebKit/v8/etc... versions.

    Args:
      depot: A depot name. Should be in the DEPOT_NAMES list.

    Returns:
      A dict in the format {depot: revision} if successful, otherwise None.
1085 """ 1086 cwd = os.getcwd() 1087 self.depot_registry.ChangeToDepotDir(depot) 1088 1089 results = {} 1090 1091 if depot == 'chromium' or depot == 'android-chrome': 1092 results = self._ParseRevisionsFromDEPSFile(depot) 1093 os.chdir(cwd) 1094 1095 if depot == 'cros': 1096 cmd = [ 1097 bisect_utils.CROS_SDK_PATH, 1098 '--', 1099 'portageq-%s' % self.opts.cros_board, 1100 'best_visible', 1101 '/build/%s' % self.opts.cros_board, 1102 'ebuild', 1103 CROS_CHROMEOS_PATTERN 1104 ] 1105 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd) 1106 1107 assert not return_code, ('An error occurred while running ' 1108 '"%s"' % ' '.join(cmd)) 1109 1110 if len(output) > CROS_CHROMEOS_PATTERN: 1111 output = output[len(CROS_CHROMEOS_PATTERN):] 1112 1113 if len(output) > 1: 1114 output = output.split('_')[0] 1115 1116 if len(output) > 3: 1117 contents = output.split('.') 1118 1119 version = contents[2] 1120 1121 if contents[3] != '0': 1122 warningText = ('Chrome version: %s.%s but using %s.0 to bisect.' % 1123 (version, contents[3], version)) 1124 if not warningText in self.warnings: 1125 self.warnings.append(warningText) 1126 1127 cwd = os.getcwd() 1128 self.depot_registry.ChangeToDepotDir('chromium') 1129 cmd = ['log', '-1', '--format=%H', 1130 '--author=chrome-release@google.com', 1131 '--grep=to %s' % version, 'origin/master'] 1132 return_code = bisect_utils.CheckRunGit(cmd) 1133 os.chdir(cwd) 1134 1135 results['chromium'] = output.strip() 1136 1137 if depot == 'v8': 1138 # We can't try to map the trunk revision to bleeding edge yet, because 1139 # we don't know which direction to try to search in. Have to wait until 1140 # the bisect has narrowed the results down to 2 v8 rolls. 1141 results['v8_bleeding_edge'] = None 1142 1143 return results 1144 1145 def BackupOrRestoreOutputDirectory(self, restore=False, build_type='Release'): 1146 """Backs up or restores build output directory based on restore argument. 1147 1148 Args: 1149 restore: Indicates whether to restore or backup. Default is False(Backup) 1150 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.) 1151 1152 Returns: 1153 Path to backup or restored location as string. otherwise None if it fails. 1154 """ 1155 build_dir = os.path.abspath( 1156 builder.GetBuildOutputDirectory(self.opts, self.src_cwd)) 1157 source_dir = os.path.join(build_dir, build_type) 1158 destination_dir = os.path.join(build_dir, '%s.bak' % build_type) 1159 if restore: 1160 source_dir, destination_dir = destination_dir, source_dir 1161 if os.path.exists(source_dir): 1162 RmTreeAndMkDir(destination_dir, skip_makedir=True) 1163 shutil.move(source_dir, destination_dir) 1164 return destination_dir 1165 return None 1166 1167 def GetBuildArchiveForRevision(self, revision, gs_bucket, target_arch, 1168 patch_sha, out_dir): 1169 """Checks and downloads build archive for a given revision. 1170 1171 Checks for build archive with Git hash or SVN revision. If either of the 1172 file exists, then downloads the archive file. 1173 1174 Args: 1175 revision: A Git hash revision. 1176 gs_bucket: Cloud storage bucket name 1177 target_arch: 32 or 64 bit build target 1178 patch: A DEPS patch (used while bisecting 3rd party repositories). 1179 out_dir: Build output directory where downloaded file is stored. 1180 1181 Returns: 1182 Downloaded archive file path if exists, otherwise None. 1183 """ 1184 # Source archive file path on cloud storage using Git revision. 
    source_file = GetRemoteBuildPath(
        revision, self.opts.target_platform, target_arch, patch_sha)
    downloaded_archive = FetchFromCloudStorage(gs_bucket, source_file, out_dir)
    if not downloaded_archive:
      # Get the commit position for the given SHA.
      commit_position = self.source_control.GetCommitPosition(revision)
      if commit_position:
        # Source archive file path on cloud storage using SVN revision.
        source_file = GetRemoteBuildPath(
            commit_position, self.opts.target_platform, target_arch, patch_sha)
        return FetchFromCloudStorage(gs_bucket, source_file, out_dir)
    return downloaded_archive

  def DownloadCurrentBuild(self, revision, build_type='Release', patch=None):
    """Downloads the build archive for the given revision.

    Args:
      revision: The Git revision to download or build.
      build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
      patch: A DEPS patch (used while bisecting 3rd party repositories).

    Returns:
      True if the download succeeds, otherwise False.
    """
    patch_sha = None
    if patch:
      # Get the SHA of the DEPS changes patch.
      patch_sha = GetSHA1HexDigest(patch)

      # Update the DEPS changes patch with a patch to create a new file named
      # 'DEPS.sha' containing the patch_sha evaluated above.
      patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})

    # Get the build output directory.
    abs_build_dir = os.path.abspath(
        builder.GetBuildOutputDirectory(self.opts, self.src_cwd))

    fetch_build_func = lambda: self.GetBuildArchiveForRevision(
        revision, self.opts.gs_bucket, self.opts.target_arch,
        patch_sha, abs_build_dir)

    # Downloaded archive file path; downloads the build archive for the given
    # revision.
    downloaded_file = fetch_build_func()

    # When the build archive doesn't exist, post a build request to the try
    # server and wait for the build to be produced.
    if not downloaded_file:
      downloaded_file = self.PostBuildRequestAndWait(
          revision, fetch_build=fetch_build_func, patch=patch)
      if not downloaded_file:
        return False

    # Generic name for the archive, created when the archive file is extracted.
    output_dir = os.path.join(
        abs_build_dir, GetZipFileName(target_arch=self.opts.target_arch))
    # Unzip the build archive directory.
    try:
      RmTreeAndMkDir(output_dir, skip_makedir=True)
      self.BackupOrRestoreOutputDirectory(restore=False)
      # Build output directory based on target (e.g. out/Release, out/Debug).
      target_build_output_dir = os.path.join(abs_build_dir, build_type)
      ExtractZip(downloaded_file, abs_build_dir)
      if not os.path.exists(output_dir):
        # Due to recipe changes, the build's extracted folder contains
        # out/Release instead of full-build-<platform>/Release.
        if os.path.exists(os.path.join(abs_build_dir, 'out', build_type)):
          output_dir = os.path.join(abs_build_dir, 'out', build_type)
        else:
          raise IOError('Missing extracted folder %s ' % output_dir)

      print 'Moving build from %s to %s' % (
          output_dir, target_build_output_dir)
      shutil.move(output_dir, target_build_output_dir)
      return True
    except Exception as e:
      print 'Something went wrong while extracting archive file: %s' % e
      self.BackupOrRestoreOutputDirectory(restore=True)
      # Clean up any leftovers from unzipping.
      if os.path.exists(output_dir):
        RmTreeAndMkDir(output_dir, skip_makedir=True)
    finally:
      # Delete the downloaded archive.
      if os.path.exists(downloaded_file):
        os.remove(downloaded_file)
    return False

  def PostBuildRequestAndWait(self, git_revision, fetch_build, patch=None):
    """POSTs the build request job to the try server instance.

    A try job build request is posted to the tryserver.chromium.perf master,
    and this waits for the binaries to be produced and archived on cloud
    storage. Once the build is ready and stored in the cloud, the build
    archive is downloaded into the output folder.

    Args:
      git_revision: A Git hash revision.
      fetch_build: Function to check and download build from cloud storage.
      patch: A DEPS patch (used while bisecting 3rd party repositories).

    Returns:
      Downloaded archive file path when the requested build exists and the
      download is successful, otherwise None.
    """
    def GetBuilderNameAndBuildTime(target_platform, target_arch='ia32'):
      """Gets builder bot name and build time in seconds based on platform."""
      # Bot names should match the ones listed in tryserver.chromium's
      # master.cfg which produces builds for bisect.
      if bisect_utils.IsWindowsHost():
        if bisect_utils.Is64BitWindows() and target_arch == 'x64':
          return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
        return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
      if bisect_utils.IsLinuxHost():
        if target_platform == 'android':
          return ('android_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
        return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
      if bisect_utils.IsMacHost():
        return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME)
      raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

    if not fetch_build:
      return False

    bot_name, build_timeout = GetBuilderNameAndBuildTime(
        self.opts.target_platform, self.opts.target_arch)
    builder_host = self.opts.builder_host
    builder_port = self.opts.builder_port
    # Create a unique ID for each build request posted to try server builders.
    # This ID is added to the "Reason" property of the build.
    build_request_id = GetSHA1HexDigest(
        '%s-%s-%s' % (git_revision, patch, time.time()))

    # Create a try job description.
    # Always use the Git hash to post the build request, since commit
    # positions are not supported by the builders.
    job_args = {
        'revision': 'src@%s' % git_revision,
        'bot': bot_name,
        'name': build_request_id,
    }
    # Update patch information if supplied.
    if patch:
      job_args['patch'] = patch
    # Post the job to build the revision on the server.
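    # A typical job_args payload might look like (hypothetical values):
    #   {'revision': 'src@<git hash>', 'bot': 'linux_perf_bisect_builder',
    #    'name': '<sha1 of revision, patch and timestamp>', 'patch': '<diff>'}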
    if request_build.PostTryJob(builder_host, builder_port, job_args):
      target_file, error_msg = _WaitUntilBuildIsReady(
          fetch_build, bot_name, builder_host, builder_port, build_request_id,
          build_timeout)
      if not target_file:
        print '%s [revision: %s]' % (error_msg, git_revision)
        return None
      return target_file
    print 'Failed to post build request for revision: [%s]' % git_revision
    return None

  def IsDownloadable(self, depot):
    """Checks if the build can be downloaded based on target platform and
    depot."""
    if (self.opts.target_platform in ['chromium', 'android'] and
        self.opts.gs_bucket):
      return (depot == 'chromium' or
              'chromium' in DEPOT_DEPS_NAME[depot]['from'] or
              'v8' in DEPOT_DEPS_NAME[depot]['from'])
    return False

  def UpdateDepsContents(self, deps_contents, depot, git_revision, deps_key):
    """Returns a modified version of the DEPS file contents.

    Args:
      deps_contents: DEPS file content.
      depot: Current depot being bisected.
      git_revision: A git hash to be updated in DEPS.
      deps_key: Key in the vars section of the DEPS file to be searched.

    Returns:
      Updated DEPS content as a string if the deps key is found, otherwise
      None.
    """
    # Check for the depot and revision pattern in the DEPS file vars,
    # e.g. for webkit the format is "webkit_revision": "12345".
    deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_key,
                               re.MULTILINE)
    new_data = None
    if re.search(deps_revision, deps_contents):
      commit_position = self.source_control.GetCommitPosition(
          git_revision, self.depot_registry.GetDepotDir(depot))
      if not commit_position:
        print 'Could not determine commit position for %s' % git_revision
        return None
      # Update the revision information for the given depot.
      new_data = re.sub(deps_revision, str(commit_position), deps_contents)
    else:
      # Check for the depot and revision pattern in the DEPS file vars,
      # e.g. for webkit the format is "webkit_revision": "559a6d4ab7a84c539..".
      deps_revision = re.compile(
          r'(?<=["\']%s["\']: ["\'])([a-fA-F0-9]{40})(?=["\'])' % deps_key,
          re.MULTILINE)
      if re.search(deps_revision, deps_contents):
        new_data = re.sub(deps_revision, git_revision, deps_contents)
    if new_data:
      # For v8_bleeding_edge revisions, change the V8 branch in order
      # to fetch the bleeding edge revision.
      if depot == 'v8_bleeding_edge':
        new_data = _UpdateV8Branch(new_data)
        if not new_data:
          return None
    return new_data

  def UpdateDeps(self, revision, depot, deps_file):
    """Updates the DEPS file with a new revision for the dependency repository.

    This method searches DEPS for a particular pattern in which the depot
    revision is specified (e.g. "webkit_revision": "123456"). If a match is
    found, it resolves the given git hash to an SVN revision and replaces it
    in the DEPS file.

    Args:
      revision: A git hash revision of the dependency repository.
      depot: Current depot being bisected.
      deps_file: Path to the DEPS file.

    Returns:
      True if the DEPS file is modified successfully, otherwise False.
    """
    if not os.path.exists(deps_file):
      return False

    deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
    # Don't update the DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
    if not deps_var:
      print 'DEPS update not supported for depot: %s' % depot
      return False

    # Hack for the Angle repository. In the DEPS file, the "vars" dictionary
    # variable contains an "angle_revision" key that holds a git hash instead
    # of an SVN revision.
    # Sometimes the "angle_revision" key is not specified in the "vars"
    # variable at all; in such cases, check the "deps" dictionary variable for
    # an entry matching angle.git@[a-fA-F0-9]{40}$ and replace the git hash.
    if depot == 'angle':
      return _UpdateDEPSForAngle(revision, depot, deps_file)

    try:
      deps_contents = ReadStringFromFile(deps_file)
      updated_deps_content = self.UpdateDepsContents(
          deps_contents, depot, revision, deps_var)
      # Write the changes to the DEPS file.
      if updated_deps_content:
        WriteStringToFile(updated_deps_content, deps_file)
        return True
    except IOError, e:
      print 'Something went wrong while updating DEPS file. [%s]' % e
    return False

  def CreateDEPSPatch(self, depot, revision):
    """Modifies DEPS and returns the diff as text.

    Args:
      depot: Current depot being bisected.
      revision: A git hash revision of the dependency repository.

    Returns:
      A tuple with the git hash of the chromium revision and the DEPS patch
      text.
    """
    deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
    if not os.path.exists(deps_file_path):
      raise RuntimeError('DEPS file does not exist. [%s]' % deps_file_path)
    # Get the current chromium revision (git hash).
    cmd = ['rev-parse', 'HEAD']
    chromium_sha = bisect_utils.CheckRunGit(cmd).strip()
    if not chromium_sha:
      raise RuntimeError('Failed to determine Chromium revision for %s' %
                         revision)
    if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or
        'v8' in DEPOT_DEPS_NAME[depot]['from']):
      # Checkout the DEPS file for the current chromium revision.
      if self.source_control.CheckoutFileAtRevision(
          bisect_utils.FILE_DEPS, chromium_sha, cwd=self.src_cwd):
        if self.UpdateDeps(revision, depot, deps_file_path):
          diff_command = [
              'diff',
              '--src-prefix=src/',
              '--dst-prefix=src/',
              '--no-ext-diff',
              bisect_utils.FILE_DEPS,
          ]
          diff_text = bisect_utils.CheckRunGit(diff_command, cwd=self.src_cwd)
          return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
        else:
          raise RuntimeError(
              'Failed to update DEPS file for chromium: [%s]' % chromium_sha)
      else:
        raise RuntimeError(
            'DEPS checkout failed for chromium revision: [%s]' % chromium_sha)
    return (None, None)

  def BuildCurrentRevision(self, depot, revision=None):
    """Builds chrome and performance_ui_tests on the current revision.

    Returns:
      True if the build was successful.
    """
    if self.opts.debug_ignore_build:
      return True

    build_success = False
    cwd = os.getcwd()
    os.chdir(self.src_cwd)
    # Fetch the build archive for the given revision from cloud storage when
    # the storage bucket is passed.
    if self.IsDownloadable(depot) and revision:
      deps_patch = None
      if depot != 'chromium':
        # Create a DEPS patch with the new revision for the dependency
        # repository.
        revision, deps_patch = self.CreateDEPSPatch(depot, revision)
      if self.DownloadCurrentBuild(revision, patch=deps_patch):
        if deps_patch:
          # Revert the changes to the DEPS file.
          self.source_control.CheckoutFileAtRevision(
              bisect_utils.FILE_DEPS, revision, cwd=self.src_cwd)
        build_success = True
    else:
      # This code path is executed when the bisect bots build binaries locally.
      build_success = self.builder.Build(depot, self.opts)
    os.chdir(cwd)
    return build_success

  def RunGClientHooks(self):
    """Runs gclient with the runhooks command.

    Returns:
      True if gclient reports no errors.
    """
    if self.opts.debug_ignore_build:
      return True
    return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)

  def _IsBisectModeUsingMetric(self):
    return self.opts.bisect_mode in [BISECT_MODE_MEAN, BISECT_MODE_STD_DEV]

  def _IsBisectModeReturnCode(self):
    return self.opts.bisect_mode in [BISECT_MODE_RETURN_CODE]

  def _IsBisectModeStandardDeviation(self):
    return self.opts.bisect_mode in [BISECT_MODE_STD_DEV]

  def GetCompatibleCommand(self, command_to_run, revision, depot):
    """Returns a possibly modified test command, depending on the revision.

    Prior to crrev.com/274857, *only* android-chromium-testshell works. From
    then until crrev.com/276628, *both* android-chromium-testshell and
    android-chrome-shell work. After revision 276628, *only*
    android-chrome-shell works. The bisect_perf_regression.py script should
    handle these cases and set the appropriate browser type based on revision.
    """
    if self.opts.target_platform in ['android']:
      # When it's a third_party depot, get the chromium revision.
      if depot != 'chromium':
        revision = bisect_utils.CheckRunGit(
            ['rev-parse', 'HEAD'], cwd=self.src_cwd).strip()
      commit_position = self.source_control.GetCommitPosition(revision,
                                                              cwd=self.src_cwd)
      if not commit_position:
        return command_to_run
      cmd_re = re.compile('--browser=(?P<browser_type>\S+)')
      matches = cmd_re.search(command_to_run)
      if bisect_utils.IsStringInt(commit_position) and matches:
        cmd_browser = matches.group('browser_type')
        if commit_position <= 274857 and cmd_browser == 'android-chrome-shell':
          return command_to_run.replace(cmd_browser,
                                        'android-chromium-testshell')
        elif (commit_position >= 276628 and
              cmd_browser == 'android-chromium-testshell'):
          return command_to_run.replace(cmd_browser,
                                        'android-chrome-shell')
    return command_to_run

  def RunPerformanceTestAndParseResults(
      self, command_to_run, metric, reset_on_first_run=False,
      upload_on_last_run=False, results_label=None):
    """Runs a performance test on the current revision and parses the results.

    Args:
      command_to_run: The command to be run to execute the performance test.
      metric: The metric to parse out from the results of the performance
          test. This is the result chart name and trace name, separated by
          slash. May be None for perf try jobs.
      reset_on_first_run: If True, pass the flag --reset-results on first run.
      upload_on_last_run: If True, pass the flag --upload-results on last run.
      results_label: A value for the option flag --results-label.
          The arguments reset_on_first_run, upload_on_last_run and
          results_label are all ignored if the test is not a Telemetry test.

    Returns:
      (values dict, 0) if --debug_ignore_perf_test was passed.
      (values dict, 0, test output) if the test was run successfully.
      (error message, -1) if the test couldn't be run.
      (error message, -1, test output) if the test ran but there was an error.
1577 """ 1578 success_code, failure_code = 0, -1 1579 1580 if self.opts.debug_ignore_perf_test: 1581 fake_results = { 1582 'mean': 0.0, 1583 'std_err': 0.0, 1584 'std_dev': 0.0, 1585 'values': [0.0] 1586 } 1587 return (fake_results, success_code) 1588 1589 # For Windows platform set posix=False, to parse windows paths correctly. 1590 # On Windows, path separators '\' or '\\' are replace by '' when posix=True, 1591 # refer to http://bugs.python.org/issue1724822. By default posix=True. 1592 args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost()) 1593 1594 if not _GenerateProfileIfNecessary(args): 1595 err_text = 'Failed to generate profile for performance test.' 1596 return (err_text, failure_code) 1597 1598 # If running a Telemetry test for Chrome OS, insert the remote IP and 1599 # identity parameters. 1600 is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run) 1601 if self.opts.target_platform == 'cros' and is_telemetry: 1602 args.append('--remote=%s' % self.opts.cros_remote_ip) 1603 args.append('--identity=%s' % bisect_utils.CROS_TEST_KEY_PATH) 1604 1605 start_time = time.time() 1606 1607 metric_values = [] 1608 output_of_all_runs = '' 1609 for i in xrange(self.opts.repeat_test_count): 1610 # Can ignore the return code since if the tests fail, it won't return 0. 1611 current_args = copy.copy(args) 1612 if is_telemetry: 1613 if i == 0 and reset_on_first_run: 1614 current_args.append('--reset-results') 1615 elif i == self.opts.repeat_test_count - 1 and upload_on_last_run: 1616 current_args.append('--upload-results') 1617 if results_label: 1618 current_args.append('--results-label=%s' % results_label) 1619 try: 1620 output, return_code = bisect_utils.RunProcessAndRetrieveOutput( 1621 current_args, cwd=self.src_cwd) 1622 except OSError, e: 1623 if e.errno == errno.ENOENT: 1624 err_text = ('Something went wrong running the performance test. ' 1625 'Please review the command line:\n\n') 1626 if 'src/' in ' '.join(args): 1627 err_text += ('Check that you haven\'t accidentally specified a ' 1628 'path with src/ in the command.\n\n') 1629 err_text += ' '.join(args) 1630 err_text += '\n' 1631 1632 return (err_text, failure_code) 1633 raise 1634 1635 output_of_all_runs += output 1636 if self.opts.output_buildbot_annotations: 1637 print output 1638 1639 if metric and self._IsBisectModeUsingMetric(): 1640 metric_values += _ParseMetricValuesFromOutput(metric, output) 1641 # If we're bisecting on a metric (ie, changes in the mean or 1642 # standard deviation) and no metric values are produced, bail out. 1643 if not metric_values: 1644 break 1645 elif self._IsBisectModeReturnCode(): 1646 metric_values.append(return_code) 1647 1648 elapsed_minutes = (time.time() - start_time) / 60.0 1649 if elapsed_minutes >= self.opts.max_time_minutes: 1650 break 1651 1652 if metric and len(metric_values) == 0: 1653 err_text = 'Metric %s was not found in the test output.' % metric 1654 # TODO(qyearsley): Consider also getting and displaying a list of metrics 1655 # that were found in the output here. 1656 return (err_text, failure_code, output_of_all_runs) 1657 1658 # If we're bisecting on return codes, we're really just looking for zero vs 1659 # non-zero. 1660 values = {} 1661 if self._IsBisectModeReturnCode(): 1662 # If any of the return codes is non-zero, output 1. 
1663 overall_return_code = 0 if ( 1664 all(current_value == 0 for current_value in metric_values)) else 1 1665 1666 values = { 1667 'mean': overall_return_code, 1668 'std_err': 0.0, 1669 'std_dev': 0.0, 1670 'values': metric_values, 1671 } 1672 1673 print 'Results of performance test: Command returned with %d' % ( 1674 overall_return_code) 1675 print 1676 elif metric: 1677 # Need to get the average value if there were multiple values. 1678 truncated_mean = math_utils.TruncatedMean( 1679 metric_values, self.opts.truncate_percent) 1680 standard_err = math_utils.StandardError(metric_values) 1681 standard_dev = math_utils.StandardDeviation(metric_values) 1682 1683 if self._IsBisectModeStandardDeviation(): 1684 metric_values = [standard_dev] 1685 1686 values = { 1687 'mean': truncated_mean, 1688 'std_err': standard_err, 1689 'std_dev': standard_dev, 1690 'values': metric_values, 1691 } 1692 1693 print 'Results of performance test: %12f %12f' % ( 1694 truncated_mean, standard_err) 1695 print 1696 return (values, success_code, output_of_all_runs) 1697 1698 def _FindAllRevisionsToSync(self, revision, depot): 1699 """Finds all dependent revisions and depots that need to be synced. 1700 1701 For example skia is broken up into 3 git mirrors over skia/src, 1702 skia/gyp, and skia/include. To sync skia/src properly, one has to find 1703 the proper revisions in skia/gyp and skia/include. 1704 1705 This is only useful in the git workflow, as an SVN depot may be split into 1706 multiple mirrors. 1707 1708 Args: 1709 revision: The revision to sync to. 1710 depot: The depot in use at the moment (probably skia). 1711 1712 Returns: 1713 A list of [depot, revision] pairs that need to be synced. 1714 """ 1715 revisions_to_sync = [[depot, revision]] 1716 1717 is_base = ((depot == 'chromium') or (depot == 'cros') or 1718 (depot == 'android-chrome')) 1719 1720 # Some SVN depots were split into multiple git depots, so we need to 1721 # figure out for each mirror which git revision to grab. There's no 1722 # guarantee that the SVN revision will exist for each of the dependent 1723 # depots, so we have to grep the git logs and grab the next earlier one. 1724 if (not is_base 1725 and DEPOT_DEPS_NAME[depot]['depends'] 1726 and self.source_control.IsGit()): 1727 commit_position = self.source_control.GetCommitPosition(revision) 1728 1729 for d in DEPOT_DEPS_NAME[depot]['depends']: 1730 self.depot_registry.ChangeToDepotDir(d) 1731 1732 dependant_rev = self.source_control.ResolveToRevision( 1733 commit_position, d, DEPOT_DEPS_NAME, -1000) 1734 1735 if dependant_rev: 1736 revisions_to_sync.append([d, dependant_rev]) 1737 1738 num_resolved = len(revisions_to_sync) 1739 num_needed = len(DEPOT_DEPS_NAME[depot]['depends']) 1740 1741 self.depot_registry.ChangeToDepotDir(depot) 1742 1743 if not ((num_resolved - 1) == num_needed): 1744 return None 1745 1746 return revisions_to_sync 1747 1748 def PerformPreBuildCleanup(self): 1749 """Performs cleanup between runs.""" 1750 print 'Cleaning up between runs.' 1751 print 1752 1753 # Leaving these .pyc files around between runs may disrupt some perf tests. 1754 for (path, _, files) in os.walk(self.src_cwd): 1755 for cur_file in files: 1756 if cur_file.endswith('.pyc'): 1757 path_to_file = os.path.join(path, cur_file) 1758 os.remove(path_to_file) 1759 1760 def PerformCrosChrootCleanup(self): 1761 """Deletes the chroot. 1762 1763 Returns: 1764 True if successful. 
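    This is roughly equivalent to running, from the cros depot directory:
      cros_sdk --delete
    (assuming bisect_utils.CROS_SDK_PATH points at the cros_sdk wrapper; see
    the command constructed below).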
1765 """ 1766 cwd = os.getcwd() 1767 self.depot_registry.ChangeToDepotDir('cros') 1768 cmd = [bisect_utils.CROS_SDK_PATH, '--delete'] 1769 return_code = bisect_utils.RunProcess(cmd) 1770 os.chdir(cwd) 1771 return not return_code 1772 1773 def CreateCrosChroot(self): 1774 """Creates a new chroot. 1775 1776 Returns: 1777 True if successful. 1778 """ 1779 cwd = os.getcwd() 1780 self.depot_registry.ChangeToDepotDir('cros') 1781 cmd = [bisect_utils.CROS_SDK_PATH, '--create'] 1782 return_code = bisect_utils.RunProcess(cmd) 1783 os.chdir(cwd) 1784 return not return_code 1785 1786 def _PerformPreSyncCleanup(self, depot): 1787 """Performs any necessary cleanup before syncing. 1788 1789 Args: 1790 depot: Depot name. 1791 1792 Returns: 1793 True if successful. 1794 """ 1795 if depot == 'chromium' or depot == 'android-chrome': 1796 # Removes third_party/libjingle. At some point, libjingle was causing 1797 # issues syncing when using the git workflow (crbug.com/266324). 1798 os.chdir(self.src_cwd) 1799 if not bisect_utils.RemoveThirdPartyDirectory('libjingle'): 1800 return False 1801 # Removes third_party/skia. At some point, skia was causing 1802 # issues syncing when using the git workflow (crbug.com/377951). 1803 if not bisect_utils.RemoveThirdPartyDirectory('skia'): 1804 return False 1805 elif depot == 'cros': 1806 return self.PerformCrosChrootCleanup() 1807 return True 1808 1809 def _RunPostSync(self, depot): 1810 """Performs any work after syncing. 1811 1812 Args: 1813 depot: Depot name. 1814 1815 Returns: 1816 True if successful. 1817 """ 1818 if self.opts.target_platform == 'android': 1819 if not builder.SetupAndroidBuildEnvironment(self.opts, 1820 path_to_src=self.src_cwd): 1821 return False 1822 1823 if depot == 'cros': 1824 return self.CreateCrosChroot() 1825 else: 1826 return self.RunGClientHooks() 1827 return True 1828 1829 def ShouldSkipRevision(self, depot, revision): 1830 """Checks whether a particular revision can be safely skipped. 1831 1832 Some commits can be safely skipped (such as a DEPS roll), since the tool 1833 is git based those changes would have no effect. 1834 1835 Args: 1836 depot: The depot being bisected. 1837 revision: Current revision we're synced to. 1838 1839 Returns: 1840 True if we should skip building/testing this revision. 1841 """ 1842 if depot == 'chromium': 1843 if self.source_control.IsGit(): 1844 cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision] 1845 output = bisect_utils.CheckRunGit(cmd) 1846 1847 files = output.splitlines() 1848 1849 if len(files) == 1 and files[0] == 'DEPS': 1850 return True 1851 1852 return False 1853 1854 def RunTest(self, revision, depot, command, metric, skippable=False): 1855 """Performs a full sync/build/run of the specified revision. 1856 1857 Args: 1858 revision: The revision to sync to. 1859 depot: The depot that's being used at the moment (src, webkit, etc.) 1860 command: The command to execute the performance test. 1861 metric: The performance metric being tested. 1862 1863 Returns: 1864 On success, a tuple containing the results of the performance test. 1865 Otherwise, a tuple with the error message. 1866 """ 1867 # Decide which sync program to use. 1868 sync_client = None 1869 if depot == 'chromium' or depot == 'android-chrome': 1870 sync_client = 'gclient' 1871 elif depot == 'cros': 1872 sync_client = 'repo' 1873 1874 # Decide what depots will need to be synced to what revisions. 
1875 revisions_to_sync = self._FindAllRevisionsToSync(revision, depot) 1876 if not revisions_to_sync: 1877 return ('Failed to resolve dependent depots.', BUILD_RESULT_FAIL) 1878 1879 if not self._PerformPreSyncCleanup(depot): 1880 return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL) 1881 1882 # Do the syncing for all depots. 1883 if not self.opts.debug_ignore_sync: 1884 if not self._SyncAllRevisions(revisions_to_sync, sync_client): 1885 return ('Failed to sync: [%s]' % str(revision), BUILD_RESULT_FAIL) 1886 1887 # Try to do any post-sync steps. This may include "gclient runhooks". 1888 if not self._RunPostSync(depot): 1889 return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL) 1890 1891 # Skip this revision if it can be skipped. 1892 if skippable and self.ShouldSkipRevision(depot, revision): 1893 return ('Skipped revision: [%s]' % str(revision), 1894 BUILD_RESULT_SKIPPED) 1895 1896 # Obtain a build for this revision. This may be done by requesting a build 1897 # from another builder, waiting for it and downloading it. 1898 start_build_time = time.time() 1899 build_success = self.BuildCurrentRevision(depot, revision) 1900 if not build_success: 1901 return ('Failed to build revision: [%s]' % str(revision), 1902 BUILD_RESULT_FAIL) 1903 after_build_time = time.time() 1904 1905 # Possibly alter the command. 1906 command = self.GetCompatibleCommand(command, revision, depot) 1907 1908 # Run the command and get the results. 1909 results = self.RunPerformanceTestAndParseResults(command, metric) 1910 1911 # Restore build output directory once the tests are done, to avoid 1912 # any discrepancies. 1913 if self.IsDownloadable(depot) and revision: 1914 self.BackupOrRestoreOutputDirectory(restore=True) 1915 1916 # A value other than 0 indicates that the test couldn't be run, and results 1917 # should also include an error message. 1918 if results[1] != 0: 1919 return results 1920 1921 external_revisions = self._Get3rdPartyRevisions(depot) 1922 1923 if not external_revisions is None: 1924 return (results[0], results[1], external_revisions, 1925 time.time() - after_build_time, after_build_time - 1926 start_build_time) 1927 else: 1928 return ('Failed to parse DEPS file for external revisions.', 1929 BUILD_RESULT_FAIL) 1930 1931 def _SyncAllRevisions(self, revisions_to_sync, sync_client): 1932 """Syncs multiple depots to particular revisions. 1933 1934 Args: 1935 revisions_to_sync: A list of (depot, revision) pairs to be synced. 1936 sync_client: Program used to sync, e.g. "gclient", "repo". Can be None. 1937 1938 Returns: 1939 True if successful, False otherwise. 1940 """ 1941 for depot, revision in revisions_to_sync: 1942 self.depot_registry.ChangeToDepotDir(depot) 1943 1944 if sync_client: 1945 self.PerformPreBuildCleanup() 1946 1947 # When using gclient to sync, you need to specify the depot you 1948 # want so that all the dependencies sync properly as well. 1949 # i.e. gclient sync src@<SHA1> 1950 if sync_client == 'gclient': 1951 revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'], revision) 1952 1953 sync_success = self.source_control.SyncToRevision(revision, sync_client) 1954 if not sync_success: 1955 return False 1956 1957 return True 1958 1959 def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value): 1960 """Given known good and bad values, decide if the current_value passed 1961 or failed. 1962 1963 Args: 1964 current_value: The value of the metric being checked. 1965 known_bad_value: The reference value for a "failed" run. 
1966 known_good_value: The reference value for a "passed" run. 1967 1968 Returns: 1969 True if the current_value is closer to the known_good_value than the 1970 known_bad_value. 1971 """ 1972 if self.opts.bisect_mode == BISECT_MODE_STD_DEV: 1973 dist_to_good_value = abs(current_value['std_dev'] - 1974 known_good_value['std_dev']) 1975 dist_to_bad_value = abs(current_value['std_dev'] - 1976 known_bad_value['std_dev']) 1977 else: 1978 dist_to_good_value = abs(current_value['mean'] - known_good_value['mean']) 1979 dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean']) 1980 1981 return dist_to_good_value < dist_to_bad_value 1982 1983 def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data): 1984 r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'], 1985 search_forward=True) 1986 r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'], 1987 search_forward=False) 1988 min_revision_data['external']['v8_bleeding_edge'] = r1 1989 max_revision_data['external']['v8_bleeding_edge'] = r2 1990 1991 if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable( 1992 min_revision_data['revision']) 1993 or not self._GetV8BleedingEdgeFromV8TrunkIfMappable( 1994 max_revision_data['revision'])): 1995 self.warnings.append( 1996 'Trunk revisions in V8 did not map directly to bleeding_edge. ' 1997 'Attempted to expand the range to find V8 rolls which did map ' 1998 'directly to bleeding_edge revisions, but results might not be ' 1999 'valid.') 2000 2001 def _FindNextDepotToBisect( 2002 self, current_depot, min_revision_data, max_revision_data): 2003 """Decides which depot the script should dive into next (if any). 2004 2005 Args: 2006 current_depot: Current depot being bisected. 2007 min_revision_data: Data about the earliest revision in the bisect range. 2008 max_revision_data: Data about the latest revision in the bisect range. 2009 2010 Returns: 2011 Name of the depot to bisect next, or None. 2012 """ 2013 external_depot = None 2014 for next_depot in DEPOT_NAMES: 2015 if DEPOT_DEPS_NAME[next_depot].has_key('platform'): 2016 if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name: 2017 continue 2018 2019 if not (DEPOT_DEPS_NAME[next_depot]['recurse'] 2020 and min_revision_data['depot'] 2021 in DEPOT_DEPS_NAME[next_depot]['from']): 2022 continue 2023 2024 if current_depot == 'v8': 2025 # We grab the bleeding_edge info here rather than earlier because we 2026 # finally have the revision range. From that we can search forwards and 2027 # backwards to try to match trunk revisions to bleeding_edge. 2028 self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data) 2029 2030 if (min_revision_data['external'].get(next_depot) == 2031 max_revision_data['external'].get(next_depot)): 2032 continue 2033 2034 if (min_revision_data['external'].get(next_depot) and 2035 max_revision_data['external'].get(next_depot)): 2036 external_depot = next_depot 2037 break 2038 2039 return external_depot 2040 2041 def PrepareToBisectOnDepot( 2042 self, current_depot, end_revision, start_revision, previous_revision): 2043 """Changes to the appropriate directory and gathers a list of revisions 2044 to bisect between |start_revision| and |end_revision|. 2045 2046 Args: 2047 current_depot: The depot we want to bisect. 2048 end_revision: End of the revision range. 2049 start_revision: Start of the revision range. 2050 previous_revision: The last revision we synced to on |previous_depot|. 
2051 2052 Returns: 2053 A list containing the revisions between |start_revision| and 2054 |end_revision| inclusive. 2055 """ 2056 # Change into working directory of external library to run 2057 # subsequent commands. 2058 self.depot_registry.ChangeToDepotDir(current_depot) 2059 2060 # V8 (and possibly others) is merged in periodically. Bisecting 2061 # this directory directly won't give much good info. 2062 if DEPOT_DEPS_NAME[current_depot].has_key('custom_deps'): 2063 config_path = os.path.join(self.src_cwd, '..') 2064 if bisect_utils.RunGClientAndCreateConfig(self.opts, 2065 DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path): 2066 return [] 2067 if bisect_utils.RunGClient( 2068 ['sync', '--revision', previous_revision], cwd=self.src_cwd): 2069 return [] 2070 2071 if current_depot == 'v8_bleeding_edge': 2072 self.depot_registry.ChangeToDepotDir('chromium') 2073 2074 shutil.move('v8', 'v8.bak') 2075 shutil.move('v8_bleeding_edge', 'v8') 2076 2077 self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge']) 2078 self.cleanup_commands.append(['mv', 'v8.bak', 'v8']) 2079 2080 self.depot_registry.AddDepot('v8_bleeding_edge', 2081 os.path.join(self.src_cwd, 'v8')) 2082 self.depot_registry.AddDepot('v8', os.path.join(self.src_cwd, 'v8.bak')) 2083 2084 self.depot_registry.ChangeToDepotDir(current_depot) 2085 2086 depot_revision_list = self.GetRevisionList(current_depot, 2087 end_revision, 2088 start_revision) 2089 2090 self.depot_registry.ChangeToDepotDir('chromium') 2091 2092 return depot_revision_list 2093 2094 def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot): 2095 """Gathers reference values by running the performance tests on the 2096 known good and bad revisions. 2097 2098 Args: 2099 good_rev: The last known good revision where the performance regression 2100 has not occurred yet. 2101 bad_rev: A revision where the performance regression has already occurred. 2102 cmd: The command to execute the performance test. 2103 metric: The metric being tested for regression. 2104 2105 Returns: 2106 A tuple with the results of building and running each revision. 2107 """ 2108 bad_run_results = self.RunTest(bad_rev, target_depot, cmd, metric) 2109 2110 good_run_results = None 2111 2112 if not bad_run_results[1]: 2113 good_run_results = self.RunTest(good_rev, target_depot, cmd, metric) 2114 2115 return (bad_run_results, good_run_results) 2116 2117 def PrintRevisionsToBisectMessage(self, revision_list, depot): 2118 if self.opts.output_buildbot_annotations: 2119 step_name = 'Bisection Range: [%s - %s]' % ( 2120 revision_list[len(revision_list)-1], revision_list[0]) 2121 bisect_utils.OutputAnnotationStepStart(step_name) 2122 2123 print 2124 print 'Revisions to bisect on [%s]:' % depot 2125 for revision_id in revision_list: 2126 print ' -> %s' % (revision_id, ) 2127 print 2128 2129 if self.opts.output_buildbot_annotations: 2130 bisect_utils.OutputAnnotationStepClosed() 2131 2132 def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision, 2133 good_svn_revision=None): 2134 """Checks to see if changes to DEPS file occurred, and that the revision 2135 range also includes the change to .DEPS.git. If it doesn't, attempts to 2136 expand the revision range to include it. 2137 2138 Args: 2139 bad_revision: First known bad git revision. 2140 good_revision: Last known good git revision. 2141 good_svn_revision: Last known good svn revision. 2142 2143 Returns: 2144 A tuple with the new bad and good revisions. 
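      For example (hypothetical): if a DEPS change near the bad end of the
      range has its matching .DEPS.git commit land just outside the range,
      the returned tuple becomes (<sha1 of that .DEPS.git commit>,
      good_revision); otherwise the original (bad_revision, good_revision)
      pair is returned unchanged.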
2145 """ 2146 # DONOT perform nudge because at revision 291563 .DEPS.git was removed 2147 # and source contain only DEPS file for dependency changes. 2148 if good_svn_revision >= 291563: 2149 return (bad_revision, good_revision) 2150 2151 if self.source_control.IsGit() and self.opts.target_platform == 'chromium': 2152 changes_to_deps = self.source_control.QueryFileRevisionHistory( 2153 bisect_utils.FILE_DEPS, good_revision, bad_revision) 2154 2155 if changes_to_deps: 2156 # DEPS file was changed, search from the oldest change to DEPS file to 2157 # bad_revision to see if there are matching .DEPS.git changes. 2158 oldest_deps_change = changes_to_deps[-1] 2159 changes_to_gitdeps = self.source_control.QueryFileRevisionHistory( 2160 bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision) 2161 2162 if len(changes_to_deps) != len(changes_to_gitdeps): 2163 # Grab the timestamp of the last DEPS change 2164 cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]] 2165 output = bisect_utils.CheckRunGit(cmd) 2166 commit_time = int(output) 2167 2168 # Try looking for a commit that touches the .DEPS.git file in the 2169 # next 15 minutes after the DEPS file change. 2170 cmd = ['log', '--format=%H', '-1', 2171 '--before=%d' % (commit_time + 900), '--after=%d' % commit_time, 2172 'origin/master', '--', bisect_utils.FILE_DEPS_GIT] 2173 output = bisect_utils.CheckRunGit(cmd) 2174 output = output.strip() 2175 if output: 2176 self.warnings.append('Detected change to DEPS and modified ' 2177 'revision range to include change to .DEPS.git') 2178 return (output, good_revision) 2179 else: 2180 self.warnings.append('Detected change to DEPS but couldn\'t find ' 2181 'matching change to .DEPS.git') 2182 return (bad_revision, good_revision) 2183 2184 def CheckIfRevisionsInProperOrder( 2185 self, target_depot, good_revision, bad_revision): 2186 """Checks that |good_revision| is an earlier revision than |bad_revision|. 2187 2188 Args: 2189 good_revision: Number/tag of the known good revision. 2190 bad_revision: Number/tag of the known bad revision. 2191 2192 Returns: 2193 True if the revisions are in the proper order (good earlier than bad). 2194 """ 2195 if self.source_control.IsGit() and target_depot != 'cros': 2196 cwd = self.depot_registry.GetDepotDir(target_depot) 2197 2198 cmd = ['log', '--format=%ct', '-1', good_revision] 2199 output = bisect_utils.CheckRunGit(cmd, cwd=cwd) 2200 good_commit_time = int(output) 2201 2202 cmd = ['log', '--format=%ct', '-1', bad_revision] 2203 output = bisect_utils.CheckRunGit(cmd, cwd=cwd) 2204 bad_commit_time = int(output) 2205 2206 return good_commit_time <= bad_commit_time 2207 else: 2208 # CrOS and SVN use integers. 2209 return int(good_revision) <= int(bad_revision) 2210 2211 def CanPerformBisect(self, good_revision, bad_revision): 2212 """Checks whether a given revision is bisectable. 2213 2214 Checks for following: 2215 1. Non-bisectable revsions for android bots (refer to crbug.com/385324). 2216 2. Non-bisectable revsions for Windows bots (refer to crbug.com/405274). 2217 2218 Args: 2219 good_revision: Known good revision. 2220 bad_revision: Known bad revision. 2221 2222 Returns: 2223 A dictionary indicating the result. If revision is not bisectable, 2224 this will contain the field "error", otherwise None. 
2225 """ 2226 if self.opts.target_platform == 'android': 2227 good_revision = self.source_control.GetCommitPosition(good_revision) 2228 if (bisect_utils.IsStringInt(good_revision) 2229 and good_revision < 265549): 2230 return {'error': ( 2231 'Bisect cannot continue for the given revision range.\n' 2232 'It is impossible to bisect Android regressions ' 2233 'prior to r265549, which allows the bisect bot to ' 2234 'rely on Telemetry to do apk installation of the most recently ' 2235 'built local ChromeShell(refer to crbug.com/385324).\n' 2236 'Please try bisecting revisions greater than or equal to r265549.')} 2237 2238 if bisect_utils.IsWindowsHost(): 2239 good_revision = self.source_control.GetCommitPosition(good_revision) 2240 bad_revision = self.source_control.GetCommitPosition(bad_revision) 2241 if (bisect_utils.IsStringInt(good_revision) and 2242 bisect_utils.IsStringInt(bad_revision)): 2243 if (289987 <= good_revision < 290716 or 2244 289987 <= bad_revision < 290716): 2245 return {'error': ('Oops! Revision between r289987 and r290716 are ' 2246 'marked as dead zone for Windows due to ' 2247 'crbug.com/405274. Please try another range.')} 2248 2249 return None 2250 2251 def Run(self, command_to_run, bad_revision_in, good_revision_in, metric): 2252 """Given known good and bad revisions, run a binary search on all 2253 intermediate revisions to determine the CL where the performance regression 2254 occurred. 2255 2256 Args: 2257 command_to_run: Specify the command to execute the performance test. 2258 good_revision: Number/tag of the known good revision. 2259 bad_revision: Number/tag of the known bad revision. 2260 metric: The performance metric to monitor. 2261 2262 Returns: 2263 A BisectResults object. 2264 """ 2265 results = BisectResults(self.depot_registry, self.source_control) 2266 2267 # Choose depot to bisect first 2268 target_depot = 'chromium' 2269 if self.opts.target_platform == 'cros': 2270 target_depot = 'cros' 2271 elif self.opts.target_platform == 'android-chrome': 2272 target_depot = 'android-chrome' 2273 2274 cwd = os.getcwd() 2275 self.depot_registry.ChangeToDepotDir(target_depot) 2276 2277 # If they passed SVN revisions, we can try match them to git SHA1 hashes. 2278 bad_revision = self.source_control.ResolveToRevision( 2279 bad_revision_in, target_depot, DEPOT_DEPS_NAME, 100) 2280 good_revision = self.source_control.ResolveToRevision( 2281 good_revision_in, target_depot, DEPOT_DEPS_NAME, -100) 2282 2283 os.chdir(cwd) 2284 if bad_revision is None: 2285 results.error = 'Couldn\'t resolve [%s] to SHA1.' % bad_revision_in 2286 return results 2287 2288 if good_revision is None: 2289 results.error = 'Couldn\'t resolve [%s] to SHA1.' % good_revision_in 2290 return results 2291 2292 # Check that they didn't accidentally swap good and bad revisions. 2293 if not self.CheckIfRevisionsInProperOrder( 2294 target_depot, good_revision, bad_revision): 2295 results.error = ('bad_revision < good_revision, did you swap these ' 2296 'by mistake?') 2297 return results 2298 bad_revision, good_revision = self.NudgeRevisionsIfDEPSChange( 2299 bad_revision, good_revision, good_revision_in) 2300 if self.opts.output_buildbot_annotations: 2301 bisect_utils.OutputAnnotationStepStart('Gathering Revisions') 2302 2303 cannot_bisect = self.CanPerformBisect(good_revision, bad_revision) 2304 if cannot_bisect: 2305 results.error = cannot_bisect.get('error') 2306 return results 2307 2308 print 'Gathering revision range for bisection.' 2309 # Retrieve a list of revisions to do bisection on. 
2310 src_revision_list = self.GetRevisionList( 2311 target_depot, bad_revision, good_revision) 2312 2313 if self.opts.output_buildbot_annotations: 2314 bisect_utils.OutputAnnotationStepClosed() 2315 2316 if src_revision_list: 2317 # revision_data will store information about a revision such as the 2318 # depot it came from, the webkit/V8 revision at that time, 2319 # performance timing, build state, etc... 2320 revision_data = results.revision_data 2321 2322 # revision_list is the list we're binary searching through at the moment. 2323 revision_list = [] 2324 2325 sort_key_ids = 0 2326 2327 for current_revision_id in src_revision_list: 2328 sort_key_ids += 1 2329 2330 revision_data[current_revision_id] = { 2331 'value' : None, 2332 'passed' : '?', 2333 'depot' : target_depot, 2334 'external' : None, 2335 'perf_time' : 0, 2336 'build_time' : 0, 2337 'sort' : sort_key_ids, 2338 } 2339 revision_list.append(current_revision_id) 2340 2341 min_revision = 0 2342 max_revision = len(revision_list) - 1 2343 2344 self.PrintRevisionsToBisectMessage(revision_list, target_depot) 2345 2346 if self.opts.output_buildbot_annotations: 2347 bisect_utils.OutputAnnotationStepStart('Gathering Reference Values') 2348 2349 print 'Gathering reference values for bisection.' 2350 2351 # Perform the performance tests on the good and bad revisions, to get 2352 # reference values. 2353 bad_results, good_results = self.GatherReferenceValues(good_revision, 2354 bad_revision, 2355 command_to_run, 2356 metric, 2357 target_depot) 2358 2359 if self.opts.output_buildbot_annotations: 2360 bisect_utils.OutputAnnotationStepClosed() 2361 2362 if bad_results[1]: 2363 results.error = ('An error occurred while building and running ' 2364 'the \'bad\' reference value. The bisect cannot continue without ' 2365 'a working \'bad\' revision to start from.\n\nError: %s' % 2366 bad_results[0]) 2367 return results 2368 2369 if good_results[1]: 2370 results.error = ('An error occurred while building and running ' 2371 'the \'good\' reference value. The bisect cannot continue without ' 2372 'a working \'good\' revision to start from.\n\nError: %s' % 2373 good_results[0]) 2374 return results 2375 2376 2377 # We need these reference values to determine if later runs should be 2378 # classified as pass or fail. 2379 known_bad_value = bad_results[0] 2380 known_good_value = good_results[0] 2381 2382 # Can just mark the good and bad revisions explicitly here since we 2383 # already know the results. 
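      # At this point every revision_data entry still has the placeholder
      # shape built above, e.g. {'value': None, 'passed': '?',
      # 'depot': target_depot, 'external': None, ...}; only the two endpoints
      # are filled in below with the reference results.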
2384 bad_revision_data = revision_data[revision_list[0]] 2385 bad_revision_data['external'] = bad_results[2] 2386 bad_revision_data['perf_time'] = bad_results[3] 2387 bad_revision_data['build_time'] = bad_results[4] 2388 bad_revision_data['passed'] = False 2389 bad_revision_data['value'] = known_bad_value 2390 2391 good_revision_data = revision_data[revision_list[max_revision]] 2392 good_revision_data['external'] = good_results[2] 2393 good_revision_data['perf_time'] = good_results[3] 2394 good_revision_data['build_time'] = good_results[4] 2395 good_revision_data['passed'] = True 2396 good_revision_data['value'] = known_good_value 2397 2398 next_revision_depot = target_depot 2399 2400 while True: 2401 if not revision_list: 2402 break 2403 2404 min_revision_data = revision_data[revision_list[min_revision]] 2405 max_revision_data = revision_data[revision_list[max_revision]] 2406 2407 if max_revision - min_revision <= 1: 2408 current_depot = min_revision_data['depot'] 2409 if min_revision_data['passed'] == '?': 2410 next_revision_index = min_revision 2411 elif max_revision_data['passed'] == '?': 2412 next_revision_index = max_revision 2413 elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']: 2414 previous_revision = revision_list[min_revision] 2415 # If there were changes to any of the external libraries we track, 2416 # should bisect the changes there as well. 2417 external_depot = self._FindNextDepotToBisect( 2418 current_depot, min_revision_data, max_revision_data) 2419 # If there was no change in any of the external depots, the search 2420 # is over. 2421 if not external_depot: 2422 if current_depot == 'v8': 2423 self.warnings.append('Unfortunately, V8 bisection couldn\'t ' 2424 'continue any further. The script can only bisect into ' 2425 'V8\'s bleeding_edge repository if both the current and ' 2426 'previous revisions in trunk map directly to revisions in ' 2427 'bleeding_edge.') 2428 break 2429 2430 earliest_revision = max_revision_data['external'][external_depot] 2431 latest_revision = min_revision_data['external'][external_depot] 2432 2433 new_revision_list = self.PrepareToBisectOnDepot( 2434 external_depot, latest_revision, earliest_revision, 2435 previous_revision) 2436 2437 if not new_revision_list: 2438 results.error = ('An error occurred attempting to retrieve ' 2439 'revision range: [%s..%s]' % 2440 (earliest_revision, latest_revision)) 2441 return results 2442 2443 _AddRevisionsIntoRevisionData( 2444 new_revision_list, external_depot, min_revision_data['sort'], 2445 revision_data) 2446 2447 # Reset the bisection and perform it on the newly inserted 2448 # changelists. 2449 revision_list = new_revision_list 2450 min_revision = 0 2451 max_revision = len(revision_list) - 1 2452 sort_key_ids += len(revision_list) 2453 2454 print ('Regression in metric %s appears to be the result of ' 2455 'changes in [%s].' 
% (metric, external_depot)) 2456 2457 self.PrintRevisionsToBisectMessage(revision_list, external_depot) 2458 2459 continue 2460 else: 2461 break 2462 else: 2463 next_revision_index = (int((max_revision - min_revision) / 2) + 2464 min_revision) 2465 2466 next_revision_id = revision_list[next_revision_index] 2467 next_revision_data = revision_data[next_revision_id] 2468 next_revision_depot = next_revision_data['depot'] 2469 2470 self.depot_registry.ChangeToDepotDir(next_revision_depot) 2471 2472 if self.opts.output_buildbot_annotations: 2473 step_name = 'Working on [%s]' % next_revision_id 2474 bisect_utils.OutputAnnotationStepStart(step_name) 2475 2476 print 'Working on revision: [%s]' % next_revision_id 2477 2478 run_results = self.RunTest( 2479 next_revision_id, next_revision_depot, command_to_run, metric, 2480 skippable=True) 2481 2482 # If the build is successful, check whether or not the metric 2483 # had regressed. 2484 if not run_results[1]: 2485 if len(run_results) > 2: 2486 next_revision_data['external'] = run_results[2] 2487 next_revision_data['perf_time'] = run_results[3] 2488 next_revision_data['build_time'] = run_results[4] 2489 2490 passed_regression = self._CheckIfRunPassed(run_results[0], 2491 known_good_value, 2492 known_bad_value) 2493 2494 next_revision_data['passed'] = passed_regression 2495 next_revision_data['value'] = run_results[0] 2496 2497 if passed_regression: 2498 max_revision = next_revision_index 2499 else: 2500 min_revision = next_revision_index 2501 else: 2502 if run_results[1] == BUILD_RESULT_SKIPPED: 2503 next_revision_data['passed'] = 'Skipped' 2504 elif run_results[1] == BUILD_RESULT_FAIL: 2505 next_revision_data['passed'] = 'Build Failed' 2506 2507 print run_results[0] 2508 2509 # If the build is broken, remove it and redo search. 2510 revision_list.pop(next_revision_index) 2511 2512 max_revision -= 1 2513 2514 if self.opts.output_buildbot_annotations: 2515 self._PrintPartialResults(results) 2516 bisect_utils.OutputAnnotationStepClosed() 2517 else: 2518 # Weren't able to sync and retrieve the revision range. 2519 results.error = ('An error occurred attempting to retrieve revision ' 2520 'range: [%s..%s]' % (good_revision, bad_revision)) 2521 2522 return results 2523 2524 def _PrintPartialResults(self, results): 2525 results_dict = results.GetResultsDict() 2526 self._PrintTestedCommitsTable(results_dict['revision_data_sorted'], 2527 results_dict['first_working_revision'], 2528 results_dict['last_broken_revision'], 2529 100, final_step=False) 2530 2531 def _ConfidenceLevelStatus(self, results_dict): 2532 if not results_dict['confidence']: 2533 return None 2534 confidence_status = 'Successful with %(level)s confidence%(warning)s.' 
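    # Produces status strings such as "Successful with high confidence." or
    # "Successful with low confidence and warnings." (illustrative examples).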
2535 if results_dict['confidence'] >= HIGH_CONFIDENCE: 2536 level = 'high' 2537 else: 2538 level = 'low' 2539 warning = ' and warnings' 2540 if not self.warnings: 2541 warning = '' 2542 return confidence_status % {'level': level, 'warning': warning} 2543 2544 def _GetViewVCLinkFromDepotAndHash(self, cl, depot): 2545 info = self.source_control.QueryRevisionInfo(cl, 2546 self.depot_registry.GetDepotDir(depot)) 2547 if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'): 2548 try: 2549 # Format is "git-svn-id: svn://....@123456 <other data>" 2550 svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i] 2551 svn_revision = svn_line[0].split('@') 2552 svn_revision = svn_revision[1].split(' ')[0] 2553 return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision 2554 except IndexError: 2555 return '' 2556 return '' 2557 2558 def _PrintRevisionInfo(self, cl, info, depot=None): 2559 email_info = '' 2560 if not info['email'].startswith(info['author']): 2561 email_info = '\nEmail : %s' % info['email'] 2562 commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot) 2563 if commit_link: 2564 commit_info = '\nLink : %s' % commit_link 2565 else: 2566 commit_info = ('\nFailed to parse SVN revision from body:\n%s' % 2567 info['body']) 2568 print RESULTS_REVISION_INFO % { 2569 'subject': info['subject'], 2570 'author': info['author'], 2571 'email_info': email_info, 2572 'commit_info': commit_info, 2573 'cl': cl, 2574 'cl_date': info['date'] 2575 } 2576 2577 def _PrintTestedCommitsHeader(self): 2578 if self.opts.bisect_mode == BISECT_MODE_MEAN: 2579 _PrintTableRow( 2580 [20, 70, 14, 12, 13], 2581 ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State']) 2582 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV: 2583 _PrintTableRow( 2584 [20, 70, 14, 12, 13], 2585 ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State']) 2586 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE: 2587 _PrintTableRow( 2588 [20, 70, 14, 13], 2589 ['Depot', 'Commit SHA', 'Return Code', 'State']) 2590 else: 2591 assert False, 'Invalid bisect_mode specified.' 2592 2593 def _PrintTestedCommitsEntry(self, current_data, cl_link, state_str): 2594 if self.opts.bisect_mode == BISECT_MODE_MEAN: 2595 std_error = '+-%.02f' % current_data['value']['std_err'] 2596 mean = '%.02f' % current_data['value']['mean'] 2597 _PrintTableRow( 2598 [20, 70, 12, 14, 13], 2599 [current_data['depot'], cl_link, mean, std_error, state_str]) 2600 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV: 2601 std_error = '+-%.02f' % current_data['value']['std_err'] 2602 mean = '%.02f' % current_data['value']['mean'] 2603 _PrintTableRow( 2604 [20, 70, 12, 14, 13], 2605 [current_data['depot'], cl_link, std_error, mean, state_str]) 2606 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE: 2607 mean = '%d' % current_data['value']['mean'] 2608 _PrintTableRow( 2609 [20, 70, 14, 13], 2610 [current_data['depot'], cl_link, mean, state_str]) 2611 2612 def _PrintTestedCommitsTable( 2613 self, revision_data_sorted, first_working_revision, last_broken_revision, 2614 confidence, final_step=True): 2615 print 2616 if final_step: 2617 print '===== TESTED COMMITS =====' 2618 else: 2619 print '===== PARTIAL RESULTS =====' 2620 self._PrintTestedCommitsHeader() 2621 state = 0 2622 for current_id, current_data in revision_data_sorted: 2623 if current_data['value']: 2624 if (current_id == last_broken_revision or 2625 current_id == first_working_revision): 2626 # If confidence is too low, don't add this empty line since it's 2627 # used to put focus on a suspected CL. 
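          # (When it is printed, the blank line appears at both boundary
          # rows: the last broken revision and the first working one.)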
2628 if confidence and final_step: 2629 print 2630 state += 1 2631 if state == 2 and not final_step: 2632 # Just want a separation between "bad" and "good" cl's. 2633 print 2634 2635 state_str = 'Bad' 2636 if state == 1 and final_step: 2637 state_str = 'Suspected CL' 2638 elif state == 2: 2639 state_str = 'Good' 2640 2641 # If confidence is too low, don't bother outputting good/bad. 2642 if not confidence: 2643 state_str = '' 2644 state_str = state_str.center(13, ' ') 2645 2646 cl_link = self._GetViewVCLinkFromDepotAndHash(current_id, 2647 current_data['depot']) 2648 if not cl_link: 2649 cl_link = current_id 2650 self._PrintTestedCommitsEntry(current_data, cl_link, state_str) 2651 2652 def _PrintReproSteps(self): 2653 """Prints out a section of the results explaining how to run the test. 2654 2655 This message includes the command used to run the test. 2656 """ 2657 command = '$ ' + self.opts.command 2658 if bisect_utils.IsTelemetryCommand(self.opts.command): 2659 command += ('\nAlso consider passing --profiler=list to see available ' 2660 'profilers.') 2661 print REPRO_STEPS_LOCAL 2662 if bisect_utils.IsTelemetryCommand(self.opts.command): 2663 telemetry_command = re.sub(r'--browser=[^\s]+', 2664 '--browser=<bot-name>', 2665 command) 2666 print REPRO_STEPS_TRYJOB_TELEMETRY % {'command': telemetry_command} 2667 else: 2668 print REPRO_STEPS_TRYJOB 2669 2670 def _PrintOtherRegressions(self, other_regressions, revision_data): 2671 """Prints a section of the results about other potential regressions.""" 2672 print 2673 print 'Other regressions may have occurred:' 2674 print ' %8s %70s %10s' % ('Depot'.center(8, ' '), 2675 'Range'.center(70, ' '), 'Confidence'.center(10, ' ')) 2676 for regression in other_regressions: 2677 current_id, previous_id, confidence = regression 2678 current_data = revision_data[current_id] 2679 previous_data = revision_data[previous_id] 2680 2681 current_link = self._GetViewVCLinkFromDepotAndHash(current_id, 2682 current_data['depot']) 2683 previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id, 2684 previous_data['depot']) 2685 2686 # If we can't map it to a viewable URL, at least show the original hash. 2687 if not current_link: 2688 current_link = current_id 2689 if not previous_link: 2690 previous_link = previous_id 2691 2692 print ' %8s %70s %s' % ( 2693 current_data['depot'], current_link, 2694 ('%d%%' % confidence).center(10, ' ')) 2695 print ' %8s %70s' % ( 2696 previous_data['depot'], previous_link) 2697 print 2698 2699 def _CheckForWarnings(self, results_dict): 2700 if len(results_dict['culprit_revisions']) > 1: 2701 self.warnings.append('Due to build errors, regression range could ' 2702 'not be narrowed down to a single commit.') 2703 if self.opts.repeat_test_count == 1: 2704 self.warnings.append('Tests were only set to run once. This may ' 2705 'be insufficient to get meaningful results.') 2706 if 0 < results_dict['confidence'] < HIGH_CONFIDENCE: 2707 self.warnings.append('Confidence is not high. Try bisecting again ' 2708 'with increased repeat_count, larger range, or ' 2709 'on another metric.') 2710 if not results_dict['confidence']: 2711 self.warnings.append('Confidence score is 0%. Try bisecting again on ' 2712 'another platform or another metric.') 2713 2714 def FormatAndPrintResults(self, bisect_results): 2715 """Prints the results from a bisection run in a readable format. 2716 2717 Args: 2718 bisect_results: The results from a bisection test run. 
2719 """ 2720 results_dict = bisect_results.GetResultsDict() 2721 2722 self._CheckForWarnings(results_dict) 2723 2724 if self.opts.output_buildbot_annotations: 2725 bisect_utils.OutputAnnotationStepStart('Build Status Per Revision') 2726 2727 print 2728 print 'Full results of bisection:' 2729 for current_id, current_data in results_dict['revision_data_sorted']: 2730 build_status = current_data['passed'] 2731 2732 if type(build_status) is bool: 2733 if build_status: 2734 build_status = 'Good' 2735 else: 2736 build_status = 'Bad' 2737 2738 print ' %20s %40s %s' % (current_data['depot'], 2739 current_id, build_status) 2740 print 2741 2742 if self.opts.output_buildbot_annotations: 2743 bisect_utils.OutputAnnotationStepClosed() 2744 # The perf dashboard scrapes the "results" step in order to comment on 2745 # bugs. If you change this, please update the perf dashboard as well. 2746 bisect_utils.OutputAnnotationStepStart('Results') 2747 2748 self._PrintBanner(results_dict) 2749 self._PrintWarnings() 2750 2751 if results_dict['culprit_revisions'] and results_dict['confidence']: 2752 for culprit in results_dict['culprit_revisions']: 2753 cl, info, depot = culprit 2754 self._PrintRevisionInfo(cl, info, depot) 2755 if results_dict['other_regressions']: 2756 self._PrintOtherRegressions(results_dict['other_regressions'], 2757 results_dict['revision_data']) 2758 self._PrintTestedCommitsTable(results_dict['revision_data_sorted'], 2759 results_dict['first_working_revision'], 2760 results_dict['last_broken_revision'], 2761 results_dict['confidence']) 2762 _PrintStepTime(results_dict['revision_data_sorted']) 2763 self._PrintReproSteps() 2764 _PrintThankYou() 2765 if self.opts.output_buildbot_annotations: 2766 bisect_utils.OutputAnnotationStepClosed() 2767 2768 def _PrintBanner(self, results_dict): 2769 if self._IsBisectModeReturnCode(): 2770 metrics = 'N/A' 2771 change = 'Yes' 2772 else: 2773 metrics = '/'.join(self.opts.metric) 2774 change = '%.02f%% (+/-%.02f%%)' % ( 2775 results_dict['regression_size'], results_dict['regression_std_err']) 2776 2777 if results_dict['culprit_revisions'] and results_dict['confidence']: 2778 status = self._ConfidenceLevelStatus(results_dict) 2779 else: 2780 status = 'Failure, could not reproduce.' 2781 change = 'Bisect could not reproduce a change.' 2782 2783 print RESULTS_BANNER % { 2784 'status': status, 2785 'command': self.opts.command, 2786 'metrics': metrics, 2787 'change': change, 2788 'confidence': results_dict['confidence'], 2789 } 2790 2791 def _PrintWarnings(self): 2792 """Prints a list of warning strings if there are any.""" 2793 if not self.warnings: 2794 return 2795 print 2796 print 'WARNINGS:' 2797 for w in set(self.warnings): 2798 print ' ! %s' % w 2799 2800 2801def _IsPlatformSupported(): 2802 """Checks that this platform and build system are supported. 2803 2804 Args: 2805 opts: The options parsed from the command line. 2806 2807 Returns: 2808 True if the platform and build system are supported. 2809 """ 2810 # Haven't tested the script out on any other platforms yet. 2811 supported = ['posix', 'nt'] 2812 return os.name in supported 2813 2814 2815def RmTreeAndMkDir(path_to_dir, skip_makedir=False): 2816 """Removes the directory tree specified, and then creates an empty 2817 directory in the same location (if not specified to skip). 2818 2819 Args: 2820 path_to_dir: Path to the directory tree. 2821 skip_makedir: Whether to skip creating empty directory, default is False. 2822 2823 Returns: 2824 True if successful, False if an error occurred. 
2825 """ 2826 try: 2827 if os.path.exists(path_to_dir): 2828 shutil.rmtree(path_to_dir) 2829 except OSError, e: 2830 if e.errno != errno.ENOENT: 2831 return False 2832 2833 if not skip_makedir: 2834 return MaybeMakeDirectory(path_to_dir) 2835 2836 return True 2837 2838 2839def RemoveBuildFiles(build_type): 2840 """Removes build files from previous runs.""" 2841 if RmTreeAndMkDir(os.path.join('out', build_type)): 2842 if RmTreeAndMkDir(os.path.join('build', build_type)): 2843 return True 2844 return False 2845 2846 2847class BisectOptions(object): 2848 """Options to be used when running bisection.""" 2849 def __init__(self): 2850 super(BisectOptions, self).__init__() 2851 2852 self.target_platform = 'chromium' 2853 self.build_preference = None 2854 self.good_revision = None 2855 self.bad_revision = None 2856 self.use_goma = None 2857 self.goma_dir = None 2858 self.cros_board = None 2859 self.cros_remote_ip = None 2860 self.repeat_test_count = 20 2861 self.truncate_percent = 25 2862 self.max_time_minutes = 20 2863 self.metric = None 2864 self.command = None 2865 self.output_buildbot_annotations = None 2866 self.no_custom_deps = False 2867 self.working_directory = None 2868 self.extra_src = None 2869 self.debug_ignore_build = None 2870 self.debug_ignore_sync = None 2871 self.debug_ignore_perf_test = None 2872 self.gs_bucket = None 2873 self.target_arch = 'ia32' 2874 self.target_build_type = 'Release' 2875 self.builder_host = None 2876 self.builder_port = None 2877 self.bisect_mode = BISECT_MODE_MEAN 2878 2879 @staticmethod 2880 def _CreateCommandLineParser(): 2881 """Creates a parser with bisect options. 2882 2883 Returns: 2884 An instance of optparse.OptionParser. 2885 """ 2886 usage = ('%prog [options] [-- chromium-options]\n' 2887 'Perform binary search on revision history to find a minimal ' 2888 'range of revisions where a performance metric regressed.\n') 2889 2890 parser = optparse.OptionParser(usage=usage) 2891 2892 group = optparse.OptionGroup(parser, 'Bisect options') 2893 group.add_option('-c', '--command', 2894 type='str', 2895 help='A command to execute your performance test at' + 2896 ' each point in the bisection.') 2897 group.add_option('-b', '--bad_revision', 2898 type='str', 2899 help='A bad revision to start bisection. ' + 2900 'Must be later than good revision. May be either a git' + 2901 ' or svn revision.') 2902 group.add_option('-g', '--good_revision', 2903 type='str', 2904 help='A revision to start bisection where performance' + 2905 ' test is known to pass. Must be earlier than the ' + 2906 'bad revision. May be either a git or svn revision.') 2907 group.add_option('-m', '--metric', 2908 type='str', 2909 help='The desired metric to bisect on. For example ' + 2910 '"vm_rss_final_b/vm_rss_f_b"') 2911 group.add_option('-r', '--repeat_test_count', 2912 type='int', 2913 default=20, 2914 help='The number of times to repeat the performance ' 2915 'test. Values will be clamped to range [1, 100]. ' 2916 'Default value is 20.') 2917 group.add_option('--max_time_minutes', 2918 type='int', 2919 default=20, 2920 help='The maximum time (in minutes) to take running the ' 2921 'performance tests. The script will run the performance ' 2922 'tests according to --repeat_test_count, so long as it ' 2923 'doesn\'t exceed --max_time_minutes. Values will be ' 2924 'clamped to range [1, 60].' 2925 'Default value is 20.') 2926 group.add_option('-t', '--truncate_percent', 2927 type='int', 2928 default=25, 2929 help='The highest/lowest % are discarded to form a ' 2930 'truncated mean. 
Values will be clamped to range [0, ' 2931 '25]. Default value is 25 (highest/lowest 25% will be ' 2932 'discarded).') 2933 group.add_option('--bisect_mode', 2934 type='choice', 2935 choices=[BISECT_MODE_MEAN, BISECT_MODE_STD_DEV, 2936 BISECT_MODE_RETURN_CODE], 2937 default=BISECT_MODE_MEAN, 2938 help='The bisect mode. Choices are to bisect on the ' 2939 'difference in mean, std_dev, or return_code.') 2940 parser.add_option_group(group) 2941 2942 group = optparse.OptionGroup(parser, 'Build options') 2943 group.add_option('-w', '--working_directory', 2944 type='str', 2945 help='Path to the working directory where the script ' 2946 'will do an initial checkout of the chromium depot. The ' 2947 'files will be placed in a subdirectory "bisect" under ' 2948 'working_directory and that will be used to perform the ' 2949 'bisection. This parameter is optional, if it is not ' 2950 'supplied, the script will work from the current depot.') 2951 group.add_option('--build_preference', 2952 type='choice', 2953 choices=['msvs', 'ninja', 'make'], 2954 help='The preferred build system to use. On linux/mac ' 2955 'the options are make/ninja. On Windows, the options ' 2956 'are msvs/ninja.') 2957 group.add_option('--target_platform', 2958 type='choice', 2959 choices=['chromium', 'cros', 'android', 'android-chrome'], 2960 default='chromium', 2961 help='The target platform. Choices are "chromium" ' 2962 '(current platform), "cros", or "android". If you ' 2963 'specify something other than "chromium", you must be ' 2964 'properly set up to build that platform.') 2965 group.add_option('--no_custom_deps', 2966 dest='no_custom_deps', 2967 action='store_true', 2968 default=False, 2969 help='Run the script with custom_deps or not.') 2970 group.add_option('--extra_src', 2971 type='str', 2972 help='Path to a script which can be used to modify ' 2973 'the bisect script\'s behavior.') 2974 group.add_option('--cros_board', 2975 type='str', 2976 help='The cros board type to build.') 2977 group.add_option('--cros_remote_ip', 2978 type='str', 2979 help='The remote machine to image to.') 2980 group.add_option('--use_goma', 2981 action='store_true', 2982 help='Add a bunch of extra threads for goma, and enable ' 2983 'goma') 2984 group.add_option('--goma_dir', 2985 help='Path to goma tools (or system default if not ' 2986 'specified).') 2987 group.add_option('--output_buildbot_annotations', 2988 action='store_true', 2989 help='Add extra annotation output for buildbot.') 2990 group.add_option('--gs_bucket', 2991 default='', 2992 dest='gs_bucket', 2993 type='str', 2994 help=('Name of Google Storage bucket to upload or ' 2995 'download build. e.g., chrome-perf')) 2996 group.add_option('--target_arch', 2997 type='choice', 2998 choices=['ia32', 'x64', 'arm'], 2999 default='ia32', 3000 dest='target_arch', 3001 help=('The target build architecture. Choices are "ia32" ' 3002 '(default), "x64" or "arm".')) 3003 group.add_option('--target_build_type', 3004 type='choice', 3005 choices=['Release', 'Debug'], 3006 default='Release', 3007 help='The target build type. 
Choices are "Release" ' 3008 '(default), or "Debug".') 3009 group.add_option('--builder_host', 3010 dest='builder_host', 3011 type='str', 3012 help=('Host address of server to produce build by posting' 3013 ' try job request.')) 3014 group.add_option('--builder_port', 3015 dest='builder_port', 3016 type='int', 3017 help=('HTTP port of the server to produce build by posting' 3018 ' try job request.')) 3019 parser.add_option_group(group) 3020 3021 group = optparse.OptionGroup(parser, 'Debug options') 3022 group.add_option('--debug_ignore_build', 3023 action='store_true', 3024 help='DEBUG: Don\'t perform builds.') 3025 group.add_option('--debug_ignore_sync', 3026 action='store_true', 3027 help='DEBUG: Don\'t perform syncs.') 3028 group.add_option('--debug_ignore_perf_test', 3029 action='store_true', 3030 help='DEBUG: Don\'t perform performance tests.') 3031 parser.add_option_group(group) 3032 return parser 3033 3034 def ParseCommandLine(self): 3035 """Parses the command line for bisect options.""" 3036 parser = self._CreateCommandLineParser() 3037 opts, _ = parser.parse_args() 3038 3039 try: 3040 if not opts.command: 3041 raise RuntimeError('missing required parameter: --command') 3042 3043 if not opts.good_revision: 3044 raise RuntimeError('missing required parameter: --good_revision') 3045 3046 if not opts.bad_revision: 3047 raise RuntimeError('missing required parameter: --bad_revision') 3048 3049 if not opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE: 3050 raise RuntimeError('missing required parameter: --metric') 3051 3052 if opts.gs_bucket: 3053 if not cloud_storage.List(opts.gs_bucket): 3054 raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket) 3055 if not opts.builder_host: 3056 raise RuntimeError('Must specify try server host name using ' 3057 '--builder_host when gs_bucket is used.') 3058 if not opts.builder_port: 3059 raise RuntimeError('Must specify try server port number using ' 3060 '--builder_port when gs_bucket is used.') 3061 if opts.target_platform == 'cros': 3062 # Run sudo up front to make sure credentials are cached for later. 3063 print 'Sudo is required to build cros:' 3064 print 3065 bisect_utils.RunProcess(['sudo', 'true']) 3066 3067 if not opts.cros_board: 3068 raise RuntimeError('missing required parameter: --cros_board') 3069 3070 if not opts.cros_remote_ip: 3071 raise RuntimeError('missing required parameter: --cros_remote_ip') 3072 3073 if not opts.working_directory: 3074 raise RuntimeError('missing required parameter: --working_directory') 3075 3076 if opts.bisect_mode != BISECT_MODE_RETURN_CODE: 3077 metric_values = opts.metric.split('/') 3078 if len(metric_values) != 2: 3079 raise RuntimeError('Invalid metric specified: [%s]' % opts.metric) 3080 opts.metric = metric_values 3081 3082 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100) 3083 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60) 3084 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25) 3085 opts.truncate_percent = opts.truncate_percent / 100.0 3086 3087 for k, v in opts.__dict__.iteritems(): 3088 assert hasattr(self, k), 'Invalid %s attribute in BisectOptions.' 
% k 3089 setattr(self, k, v) 3090 except RuntimeError, e: 3091 output_string = StringIO.StringIO() 3092 parser.print_help(file=output_string) 3093 error_message = '%s\n\n%s' % (e.message, output_string.getvalue()) 3094 output_string.close() 3095 raise RuntimeError(error_message) 3096 3097 @staticmethod 3098 def FromDict(values): 3099 """Creates an instance of BisectOptions from a dictionary. 3100 3101 Args: 3102 values: a dict containing options to set. 3103 3104 Returns: 3105 An instance of BisectOptions. 3106 """ 3107 opts = BisectOptions() 3108 for k, v in values.iteritems(): 3109 assert hasattr(opts, k), 'Invalid %s attribute in BisectOptions.' % k 3110 setattr(opts, k, v) 3111 3112 if opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE: 3113 metric_values = opts.metric.split('/') 3114 if len(metric_values) != 2: 3115 raise RuntimeError('Invalid metric specified: [%s]' % opts.metric) 3116 opts.metric = metric_values 3117 3118 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100) 3119 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60) 3120 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25) 3121 opts.truncate_percent = opts.truncate_percent / 100.0 3122 3123 return opts 3124 3125 3126def main(): 3127 3128 try: 3129 opts = BisectOptions() 3130 opts.ParseCommandLine() 3131 3132 if opts.extra_src: 3133 extra_src = bisect_utils.LoadExtraSrc(opts.extra_src) 3134 if not extra_src: 3135 raise RuntimeError('Invalid or missing --extra_src.') 3136 _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo()) 3137 3138 if opts.working_directory: 3139 custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS 3140 if opts.no_custom_deps: 3141 custom_deps = None 3142 bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps) 3143 3144 os.chdir(os.path.join(os.getcwd(), 'src')) 3145 3146 if not RemoveBuildFiles(opts.target_build_type): 3147 raise RuntimeError('Something went wrong removing the build files.') 3148 3149 if not _IsPlatformSupported(): 3150 raise RuntimeError('Sorry, this platform isn\'t supported yet.') 3151 3152 # Check what source control method is being used, and create a 3153 # SourceControl object if possible. 3154 source_control = source_control_module.DetermineAndCreateSourceControl(opts) 3155 3156 if not source_control: 3157 raise RuntimeError( 3158 'Sorry, only the git workflow is supported at the moment.') 3159 3160 # gClient sync seems to fail if you're not in master branch. 3161 if (not source_control.IsInProperBranch() and 3162 not opts.debug_ignore_sync and 3163 not opts.working_directory): 3164 raise RuntimeError('You must switch to master branch to run bisection.') 3165 bisect_test = BisectPerformanceMetrics(source_control, opts) 3166 try: 3167 bisect_results = bisect_test.Run(opts.command, 3168 opts.bad_revision, 3169 opts.good_revision, 3170 opts.metric) 3171 if bisect_results.error: 3172 raise RuntimeError(bisect_results.error) 3173 bisect_test.FormatAndPrintResults(bisect_results) 3174 return 0 3175 finally: 3176 bisect_test.PerformCleanup() 3177 except RuntimeError, e: 3178 if opts.output_buildbot_annotations: 3179 # The perf dashboard scrapes the "results" step in order to comment on 3180 # bugs. If you change this, please update the perf dashboard as well. 3181 bisect_utils.OutputAnnotationStepStart('Results') 3182 print 'Error: %s' % e.message 3183 if opts.output_buildbot_annotations: 3184 bisect_utils.OutputAnnotationStepClosed() 3185 return 1 3186 3187 3188if __name__ == '__main__': 3189 sys.exit(main()) 3190