#!/usr/bin/python

"""Run layout tests using Android emulator and instrumentation.

First, you need to get an SD card or sdcard image that has layout tests on it.
Layout tests are in following directory:
    /sdcard/android/layout_tests
For example, /sdcard/android/layout_tests/fast

Usage:
  Run all tests under fast/ directory:
    run_layout_tests.py, or
    run_layout_tests.py fast

  Run all tests under a sub directory:
    run_layout_tests.py fast/dom

  Run a single test:
    run_layout_tests.py fast/dom/

After a merge, if there are changes of layout tests in SD card, you need to
use --refresh-test-list option *once* to re-generate test list on the card.

Some other options are:
  --rebaseline generates expected layout tests results under
        /sdcard/android/expected_result/
  --time-out-ms (default is 30000 millis) for each test
  --adb-options="-e" passes option string to adb
  --results-directory=..., (default is ./layout-test-results) directory name
        under which results are stored.
  --js-engine the JavaScript engine currently in use, determines which set of
        Android-specific expected results we should use, should be 'jsc'
        or 'v8'
"""

import logging
import optparse
import os
import subprocess
import sys
import time


def CountLineNumber(filename):
    """Compute the number of lines in a given file.

    Args:
        filename: a file name related to the current directory.

    Returns:
        The number of lines in the file, as an int.
    """
    # 'with' guarantees the handle is closed even if reading raises.
    with open(os.path.abspath(filename), "r") as fp:
        return sum(1 for _ in fp)


def DumpRenderTreeFinished(adb_cmd):
    """Check if DumpRenderTree finished running tests.

    The device-side runner writes "#DONE" to
    /sdcard/android/running_test.txt once the whole run is complete.

    Args:
        adb_cmd: the adb command string, e.g. "adb -e".

    Returns:
        True if the runner reported completion, False otherwise.
    """
    # Pull /sdcard/android/running_test.txt; "#DONE" means the run finished.
    shell_cmd_str = adb_cmd + " shell cat /sdcard/android/running_test.txt"
    adb_output = subprocess.Popen(shell_cmd_str, shell=True,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE).communicate()[0]
    return adb_output.strip() == "#DONE"


def DiffResults(marker, new_results, old_results, diff_results, strip_reason,
                new_count_first=True):
    """Given two result files, generate diff and append it to diff_results.

    All file arguments are absolute paths.

    Args:
        marker: label written at the top of this section of the diff.
        new_results: path of the newly produced result file.
        old_results: path of the reference result file.
        diff_results: path of the file the diff is appended to.
        strip_reason: if True, drop everything after the first space on each
            result line (the failure reason) before comparing.
        new_count_first: if True, lines new in new_results are listed before
            lines missing from it; otherwise the reverse.
    """
    with open(old_results, "r") as old_file:
        old_lines = old_file.readlines()
    with open(new_results, "r") as new_file:
        new_lines = new_file.readlines()

    # Strip the failure reason so only the test name is compared.
    if strip_reason:
        new_lines = [line.split(' ')[0] + "\n" for line in new_lines]
        old_lines = [line.split(' ')[0] + "\n" for line in old_lines]

    with open(diff_results, "a") as diff_file:
        # Write marker to diff file.
        diff_file.write(marker + "\n")
        diff_file.write("###############\n")

        # For each direction, emit lines present in one file but not the
        # other, prefixed "+" (new) or "-" (missing), and count them.
        # Membership is tested against a set for O(1) lookups; the
        # comparison semantics are identical to a list scan.
        sides = {
            "new": (new_lines, frozenset(old_lines), "+"),
            "miss": (old_lines, frozenset(new_lines), "-"),
        }
        order = ["new", "miss"] if new_count_first else ["miss", "new"]

        counts = {}
        for key in order:
            lines, other, prefix = sides[key]
            counts[key] = 0
            for line in lines:
                if line not in other:
                    # The last line of a file may lack a trailing newline.
                    if not line.endswith("\n"):
                        line += "\n"
                    diff_file.write(prefix + line)
                    counts[key] += 1

        logging.info(marker + " >>> " + str(counts["new"]) + " new, " +
                     str(counts["miss"]) + " misses")

        diff_file.write("\n\n")


def CompareResults(ref_dir, results_dir):
    """Compare results in two directories.

    Args:
        ref_dir: the reference directory having layout results as references.
        results_dir: the results directory.
    """
    logging.info("Comparing results to " + ref_dir)

    # Start from a fresh diff file for this run.
    diff_result = os.path.join(results_dir, "layout_tests_diff.txt")
    if os.path.exists(diff_result):
        os.remove(diff_result)

    # For "passed" lists, misses (regressions) are listed first; for the
    # other categories, new entries come first.
    categories = ["crashed", "failed", "passed", "nontext"]
    for category in categories:
        result_file_name = "layout_tests_" + category + ".txt"
        DiffResults(category, os.path.join(results_dir, result_file_name),
                    os.path.join(ref_dir, result_file_name), diff_result,
                    False, category != "passed")
    logging.info("Detailed diffs are in " + diff_result)


def main(options, args):
    """Run the tests. Will call sys.exit when complete.

    Args:
        options: a dictionary of command line options
        args: a list of sub directories or files to test
    """
    # Set up logging format.
    log_level = logging.INFO
    if options.verbose:
        log_level = logging.DEBUG
    logging.basicConfig(level=log_level, format='%(message)s')

    # Include all tests if none are specified.
    if not args:
        path = '/'
    else:
        path = ' '.join(args)

    adb_cmd = "adb "
    if options.adb_options:
        adb_cmd += options.adb_options

    # Re-generate the test list if --refresh-test-list is on.
    if options.refresh_test_list:
        logging.info("Generating test list.")
        generate_test_list_cmd_str = (
            adb_cmd + " shell am instrument -e class "
            "com.android.dumprendertree.LayoutTestsAutoTest#generateTestList "
            "-e path \"" + path + "\" "
            "-w com.android.dumprendertree/.LayoutTestsAutoRunner")
        adb_output = subprocess.Popen(generate_test_list_cmd_str, shell=True,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE).communicate()[0]

        if adb_output.find('Process crashed') != -1:
            logging.info("Aborting because cannot generate test list.\n"
                         + adb_output)
            sys.exit(1)

    logging.info("Running tests")

    # Names of tests the device-side runner crashed on, for later reporting.
    crashed_tests = []

    timeout_ms = '30000'
    if options.time_out_ms:
        timeout_ms = options.time_out_ms

    # Build the instrumentation command; the test class/method is inserted
    # between prefix and postfix for each start/resume call below.
    run_layout_test_cmd_prefix = adb_cmd + " shell am instrument"
    run_layout_test_cmd_postfix = (" -e path \"" + path + "\" -e timeout "
                                   + timeout_ms)
    if options.rebaseline:
        run_layout_test_cmd_postfix += " -e rebaseline true"

    # If the JS engine is not specified on the command line, try reading the
    # JS_ENGINE environment variable, which is used by the build system in
    # external/webkit/Android.mk.
    js_engine = options.js_engine
    if not js_engine and 'JS_ENGINE' in os.environ:
        js_engine = os.environ['JS_ENGINE']
    if js_engine:
        run_layout_test_cmd_postfix += " -e jsengine " + js_engine

    run_layout_test_cmd_postfix += \
        " -w com.android.dumprendertree/.LayoutTestsAutoRunner"

    # Call LayoutTestsAutoTest::startLayoutTests.
    run_layout_test_cmd = (
        run_layout_test_cmd_prefix + " -e class "
        "com.android.dumprendertree.LayoutTestsAutoTest#startLayoutTests"
        + run_layout_test_cmd_postfix)

    adb_output = subprocess.Popen(run_layout_test_cmd, shell=True,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.PIPE).communicate()[0]
    while not DumpRenderTreeFinished(adb_cmd):
        # The runner exited without writing "#DONE": it crashed mid-run.
        logging.error("DumpRenderTree crashed, output:\n" + adb_output)

        # running_test.txt names the test that was in progress when the
        # crash happened. Retry the read: the device may be temporarily
        # offline right after a crash.
        shell_cmd_str = (adb_cmd +
                         " shell cat /sdcard/android/running_test.txt")
        crashed_test = ""
        while not crashed_test:
            (crashed_test, err) = subprocess.Popen(
                shell_cmd_str, shell=True, stdout=subprocess.PIPE,
                stderr=subprocess.PIPE).communicate()
            crashed_test = crashed_test.strip()
            if not crashed_test:
                logging.error('Cannot get crashed test name, device offline?')
                logging.error('stderr: ' + err)
                logging.error('retrying in 10s...')
                time.sleep(10)

        logging.info(crashed_test + " CRASHED")
        crashed_tests.append(crashed_test)

        logging.info("Resuming layout test runner...")
        # Call LayoutTestsAutoTest::resumeLayoutTests
        run_layout_test_cmd = (
            run_layout_test_cmd_prefix + " -e class "
            "com.android.dumprendertree.LayoutTestsAutoTest#resumeLayoutTests"
            + run_layout_test_cmd_postfix)

        adb_output = subprocess.Popen(run_layout_test_cmd, shell=True,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE).communicate()[0]

    if adb_output.find('INSTRUMENTATION_FAILED') != -1:
        logging.error("Error happened : " + adb_output)
        sys.exit(1)

    logging.debug(adb_output)
    logging.info("Done\n")

    # Pull results from /sdcard.
    results_dir = options.results_directory
    if not os.path.exists(results_dir):
        os.makedirs(results_dir)
    if not os.path.isdir(results_dir):
        logging.error("Cannot create results dir: " + results_dir)
        sys.exit(1)

    result_files = ["/sdcard/layout_tests_passed.txt",
                    "/sdcard/layout_tests_failed.txt",
                    "/sdcard/layout_tests_ignored.txt",
                    "/sdcard/layout_tests_nontext.txt"]
    for result_file in result_files:
        shell_cmd_str = adb_cmd + " pull " + result_file + " " + results_dir
        adb_output = subprocess.Popen(shell_cmd_str, shell=True,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE).communicate()[0]
        logging.debug(adb_output)

    # Create the crash list.
    with open(results_dir + "/layout_tests_crashed.txt", "w") as fp:
        for crashed_test in crashed_tests:
            fp.write(crashed_test + '\n')

    # Count the number of tests in each category.
    passed_tests = CountLineNumber(results_dir + "/layout_tests_passed.txt")
    logging.info(str(passed_tests) + " passed")
    failed_tests = CountLineNumber(results_dir + "/layout_tests_failed.txt")
    logging.info(str(failed_tests) + " failed")
    ignored_tests = CountLineNumber(results_dir + "/layout_tests_ignored.txt")
    logging.info(str(ignored_tests) + " ignored results")
    crashed_count = CountLineNumber(results_dir + "/layout_tests_crashed.txt")
    logging.info(str(crashed_count) + " crashed")
    nontext_tests = CountLineNumber(results_dir + "/layout_tests_nontext.txt")
    logging.info(str(nontext_tests) + " no dumpAsText")
    logging.info(str(passed_tests + failed_tests + ignored_tests +
                     crashed_count + nontext_tests) + " TOTAL")

    logging.info("Results are stored under: " + results_dir + "\n")

    # Comparing results to references to find new fixes and regressions.
    results_dir = os.path.abspath(options.results_directory)
    ref_dir = options.ref_directory

    # If ref_dir is null, canonify ref_dir to the "results" directory next
    # to this script.
    if not ref_dir:
        script_self = sys.argv[0]
        script_dir = os.path.dirname(script_self)
        ref_dir = os.path.join(script_dir, "results")

    ref_dir = os.path.abspath(ref_dir)

    CompareResults(ref_dir, results_dir)


if '__main__' == __name__:
    option_parser = optparse.OptionParser()
    option_parser.add_option("", "--rebaseline", action="store_true",
                             default=False,
                             help="generate expected results for those tests "
                                  "not having one")
    option_parser.add_option("", "--time-out-ms",
                             default=None,
                             help="set the timeout for each test")
    option_parser.add_option("", "--verbose", action="store_true",
                             default=False,
                             help="include debug-level logging")
    option_parser.add_option("", "--refresh-test-list", action="store_true",
                             default=False,
                             help="re-generate test list, it may take some "
                                  "time.")
    option_parser.add_option("", "--adb-options",
                             default=None,
                             help="pass options to adb, such as -d -e, etc")
    option_parser.add_option("", "--results-directory",
                             default="layout-test-results",
                             help="directory which results are stored.")
    option_parser.add_option("", "--ref-directory",
                             default=None,
                             dest="ref_directory",
                             help="directory where reference results are "
                                  "stored.")
    option_parser.add_option("", "--js-engine",
                             default=None,
                             help="The JavaScript engine currently in use, "
                                  "which determines which set of "
                                  "Android-specific expected results we "
                                  "should use. Should be 'jsc' or 'v8'.")

    options, args = option_parser.parse_args()
    main(options, args)