# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Run WebKit's performance tests and report the results."""
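
# This module is typically driven by Tools/Scripts/run-perf-tests, which
# forwards its command-line arguments to PerfTestsRunner below.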

import datetime
import json
import logging
import optparse
import os
import time

from webkitpy.common import find_files
from webkitpy.common.checkout.scm.detection import SCMDetector
from webkitpy.common.config.urls import view_source_url
from webkitpy.common.host import Host
from webkitpy.common.net.file_uploader import FileUploader
from webkitpy.performance_tests.perftest import PerfTestFactory
from webkitpy.performance_tests.perftest import DEFAULT_TEST_RUNNER_COUNT


_log = logging.getLogger(__name__)


class PerfTestsRunner(object):
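    """Discovers WebKit's performance tests, runs them, and writes the
    results as JSON, optionally merging with and uploading earlier runs.
    """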
    _default_branch = 'webkit-trunk'
    EXIT_CODE_BAD_BUILD = -1
    EXIT_CODE_BAD_SOURCE_JSON = -2
    EXIT_CODE_BAD_MERGE = -3
    EXIT_CODE_FAILED_UPLOADING = -4
    EXIT_CODE_BAD_PREPARATION = -5

    _DEFAULT_JSON_FILENAME = 'PerformanceTestsResults.json'

    def __init__(self, args=None, port=None):
        self._options, self._args = PerfTestsRunner._parse_args(args)
        if port:
            self._port = port
            self._host = self._port.host
        else:
            self._host = Host()
            self._port = self._host.port_factory.get(self._options.platform, self._options)
        self._host.initialize_scm()
        self._webkit_base_dir_len = len(self._port.webkit_base())
        self._base_path = self._port.perf_tests_dir()
        self._timestamp = time.time()
        self._utc_timestamp = datetime.datetime.utcnow()

    @staticmethod
    def _parse_args(args=None):
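        """Parse the command-line arguments and return the (options, args)
        pair produced by optparse."""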
        def _expand_path(option, opt_str, value, parser):
            path = os.path.expandvars(os.path.expanduser(value))
            setattr(parser.values, option.dest, path)
        perf_option_list = [
            optparse.make_option('--debug', action='store_const', const='Debug', dest="configuration",
                help='Set the configuration to Debug'),
            optparse.make_option('--release', action='store_const', const='Release', dest="configuration",
                help='Set the configuration to Release'),
            optparse.make_option("--platform",
                help="Specify port/platform being tested (e.g. mac)"),
            optparse.make_option("--chromium",
                action="store_const", const='chromium', dest='platform', help='Alias for --platform=chromium'),
            optparse.make_option("--android",
                action="store_const", const='android', dest='platform', help='Alias for --platform=android'),
            optparse.make_option("--builder-name",
                help="The name of the builder shown on the waterfall running this script, e.g. google-mac-2."),
            optparse.make_option("--build-number",
                help="The build number of the builder running this script."),
            optparse.make_option("--build", dest="build", action="store_true", default=True,
                help="Check to ensure the DumpRenderTree build is up to date (default)."),
            optparse.make_option("--no-build", dest="build", action="store_false",
                help="Don't check to see if the DumpRenderTree build is up to date."),
            optparse.make_option("--build-directory",
                help="Path to the directory under which build files are kept (should not include configuration)."),
            optparse.make_option("--time-out-ms", default=600 * 1000,
                help="Set the timeout for each test, in milliseconds."),
            optparse.make_option("--no-results", action="store_false", dest="generate_results", default=True,
                help="Don't generate the results JSON file and results page."),
            optparse.make_option("--output-json-path", action='callback', callback=_expand_path, type="str",
                help="Path to generate a JSON file at; may contain previous results if it already exists."),
            optparse.make_option("--reset-results", action="store_true",
                help="Clear the contents of the generated JSON file before adding the results."),
            optparse.make_option("--slave-config-json-path", action='callback', callback=_expand_path, type="str",
                help="Only used on bots. Path to a slave configuration file."),
            optparse.make_option("--description",
                help="Add a description to the output JSON file if one is generated."),
            optparse.make_option("--no-show-results", action="store_false", default=True, dest="show_results",
                help="Don't launch a browser with the results after the tests are done."),
            optparse.make_option("--test-results-server",
                help="Upload the generated JSON file to the specified server when --output-json-path is present."),
            optparse.make_option("--force", dest="use_skipped_list", action="store_false", default=True,
                help="Run all tests, including the ones in the Skipped list."),
            optparse.make_option("--profile", action="store_true",
                help="Output per-test profile information."),
            optparse.make_option("--profiler", action="store",
                help="Output per-test profile information, using the specified profiler."),
            optparse.make_option("--additional-drt-flag", action="append",
                default=[], help="Additional command line flag to pass to DumpRenderTree. "
                     "Specify multiple times to add multiple flags."),
            optparse.make_option("--driver-name", type="string",
                help="Alternative DumpRenderTree binary to use."),
            optparse.make_option("--content-shell", action="store_true",
                help="Use Content Shell instead of DumpRenderTree."),
            optparse.make_option("--repeat", default=1, type="int",
                help="Specify the number of times to run the test set (default: 1)."),
            optparse.make_option("--test-runner-count", default=DEFAULT_TEST_RUNNER_COUNT, type="int",
                help="Specify the number of times to invoke the test runner for each performance test."),
            ]
        return optparse.OptionParser(option_list=perf_option_list).parse_args(args)

    def _collect_tests(self):
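        """Collect the .html and .svg tests under the PerformanceTests
        directory (or under the paths passed on the command line).

        Tests in the port's Skipped list are dropped unless --force was
        given or the test was named explicitly."""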
        test_extensions = ['.html', '.svg']

        def _is_test_file(filesystem, dirname, filename):
            return filesystem.splitext(filename)[1] in test_extensions

        filesystem = self._host.filesystem

        paths = []
        for arg in self._args:
            if filesystem.exists(filesystem.join(self._base_path, arg)):
                paths.append(arg)
            else:
                relpath = filesystem.relpath(arg, self._base_path)
                if filesystem.exists(filesystem.join(self._base_path, relpath)):
                    paths.append(filesystem.normpath(relpath))
                else:
                    _log.warning('Path was not found: ' + arg)

        skipped_directories = set(['.svn', 'resources'])
        test_files = find_files.find(filesystem, self._base_path, paths, skipped_directories, _is_test_file)
        tests = []
        for path in test_files:
            relative_path = filesystem.relpath(path, self._base_path).replace('\\', '/')
            if self._options.use_skipped_list and self._port.skips_perf_test(relative_path) and filesystem.normpath(relative_path) not in paths:
                continue
            test = PerfTestFactory.create_perf_test(self._port, relative_path, path, test_runner_count=self._options.test_runner_count)
            tests.append(test)

        return tests

    def _start_http_servers(self):
        self._port.acquire_http_lock()
        self._port.start_http_server(number_of_servers=2)

    def _stop_http_servers(self):
        self._port.stop_http_server()
        self._port.release_http_lock()

    def run(self):
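        """Build-check, collect, and run the performance tests, then
        generate, upload, and show the results as requested.

        Returns a negative EXIT_CODE_* constant on an infrastructure
        failure, otherwise the number of tests that failed (0 on success)."""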
        needs_http = self._port.requires_http_server()

        if not self._port.check_build(needs_http=needs_http):
            _log.error("Build not up to date for %s" % self._port._path_to_driver())
            return self.EXIT_CODE_BAD_BUILD

        run_count = 0
        repeat = self._options.repeat
        while run_count < repeat:
            run_count += 1

            tests = self._collect_tests()
            runs = ' (Run %d of %d)' % (run_count, repeat) if repeat > 1 else ''
            _log.info("Running %d tests%s" % (len(tests), runs))

            for test in tests:
                if not test.prepare(self._options.time_out_ms):
                    return self.EXIT_CODE_BAD_PREPARATION

            try:
                if needs_http:
                    self._start_http_servers()
                unexpected = self._run_tests_set(sorted(tests, key=lambda test: test.test_name()))

            finally:
                if needs_http:
                    self._stop_http_servers()

            if self._options.generate_results and not self._options.profile:
                exit_code = self._generate_results()
                if exit_code:
                    return exit_code

        if self._options.generate_results and not self._options.profile:
            test_results_server = self._options.test_results_server
            if test_results_server and not self._upload_json(test_results_server, self._output_json_path()):
                return self.EXIT_CODE_FAILED_UPLOADING

            if self._options.show_results:
                self._port.show_results_html_file(self._results_page_path())

        return unexpected

    def _output_json_path(self):
        output_json_path = self._options.output_json_path
        if output_json_path:
            return output_json_path
        return self._host.filesystem.join(self._port.perf_results_directory(), self._DEFAULT_JSON_FILENAME)

    def _results_page_path(self):
        return self._host.filesystem.splitext(self._output_json_path())[0] + '.html'

    def _generate_results(self):
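        """Write the results JSON file and render the HTML results page.

        Returns a non-zero EXIT_CODE_* value on failure, or None on
        success."""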
        options = self._options
        output_json_path = self._output_json_path()
        output = self._generate_results_dict(self._timestamp, options.description, options.platform, options.builder_name, options.build_number)

        if options.slave_config_json_path:
            output = self._merge_slave_config_json(options.slave_config_json_path, output)
            if not output:
                return self.EXIT_CODE_BAD_SOURCE_JSON

        output = self._merge_outputs_if_needed(output_json_path, output)
        if not output:
            return self.EXIT_CODE_BAD_MERGE

        filesystem = self._host.filesystem
        json_output = json.dumps(output)
        filesystem.write_text_file(output_json_path, json_output)

        template_path = filesystem.join(self._port.perf_tests_dir(), 'resources/results-template.html')
        template = filesystem.read_text_file(template_path)

        absolute_path_to_trunk = filesystem.dirname(self._port.perf_tests_dir())
        results_page = template.replace('%AbsolutePathToWebKitTrunk%', absolute_path_to_trunk)
        # The misspelled placeholder below must match the token used in
        # results-template.html; keep the two spellings in sync.
        results_page = results_page.replace('%PeformanceTestsResultsJSON%', json_output)

        filesystem.write_text_file(self._results_page_path(), results_page)

    def _generate_results_dict(self, timestamp, description, platform, builder_name, build_number):
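        """Build the dictionary that is serialized into the results JSON file.

        Only truthy metadata values are included, and metrics whose values
        are not lists of individual measurements are skipped. The result
        looks roughly like this (illustrative test names and values):

            {
                'buildTime': '2013-02-08T15:30:00.123456',
                'platform': 'chromium',
                'revisions': {'WebKit': {'revision': '140000', 'timestamp': '...'}},
                'tests': {
                    'Bindings': {
                        'url': '<view-source URL for PerformanceTests/Bindings>',
                        'tests': {
                            'event-target-wrapper': {
                                'url': '<view-source URL for the test file>',
                                'metrics': {'Time': {'current': [1486.0, 1471.0]}},
                            },
                        },
                    },
                },
            }
        """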
        revisions = {}
        for (name, path) in self._port.repository_paths():
            scm = SCMDetector(self._host.filesystem, self._host.executive).detect_scm_system(path) or self._host.scm()
            revision = scm.svn_revision(path)
            revisions[name] = {'revision': revision, 'timestamp': scm.timestamp_of_revision(path, revision)}

        meta_info = {
            'description': description,
            'buildTime': self._datetime_in_ES5_compatible_iso_format(self._utc_timestamp),
            'platform': platform,
            'revisions': revisions,
            'builderName': builder_name,
            'buildNumber': int(build_number) if build_number else None}

        contents = {'tests': {}}
        for key, value in meta_info.items():
            if value:
                contents[key] = value

        for test, metrics in self._results:
            for metric_name, iteration_values in metrics.iteritems():
                if not isinstance(iteration_values, list):  # We can't report results without individual measurements.
                    continue

                tests = contents['tests']
                path = test.test_name_without_file_extension().split('/')
                for i in range(0, len(path)):
                    is_last_token = i + 1 == len(path)
                    url = view_source_url('PerformanceTests/' + (test.test_name() if is_last_token else '/'.join(path[0:i + 1])))
                    tests.setdefault(path[i], {'url': url})
                    current_test = tests[path[i]]
                    if is_last_token:
                        current_test.setdefault('metrics', {})
                        assert metric_name not in current_test['metrics']
                        current_test['metrics'][metric_name] = {'current': iteration_values}
                    else:
                        current_test.setdefault('tests', {})
                        tests = current_test['tests']

        return contents

    @staticmethod
    def _datetime_in_ES5_compatible_iso_format(datetime):
        return datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')

    def _merge_slave_config_json(self, slave_config_json_path, contents):
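        """Fold the bot (slave) configuration JSON into the results dictionary.

        Each key in the configuration file is stored as 'builder<Key>', e.g.
        a 'gpu' entry becomes 'builderGpu'. Returns None if the file is
        missing or cannot be parsed."""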
        if not self._host.filesystem.isfile(slave_config_json_path):
            _log.error("Missing slave configuration JSON file: %s" % slave_config_json_path)
            return None

        try:
            slave_config_json = self._host.filesystem.open_text_file_for_reading(slave_config_json_path)
            slave_config = json.load(slave_config_json)
            for key in slave_config:
                contents['builder' + key.capitalize()] = slave_config[key]
            return contents
        except Exception as error:
            _log.error("Failed to merge slave configuration JSON file %s: %s" % (slave_config_json_path, error))
        return None

    def _merge_outputs_if_needed(self, output_json_path, output):
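        """Append this run's output to any results already in the JSON file.

        Returns a list of result dictionaries, or None if the existing file
        cannot be parsed. With --reset-results the existing contents are
        discarded."""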
        if self._options.reset_results or not self._host.filesystem.isfile(output_json_path):
            return [output]
        try:
            existing_outputs = json.loads(self._host.filesystem.read_text_file(output_json_path))
            return existing_outputs + [output]
        except Exception as error:
            _log.error("Failed to merge output JSON file %s: %s" % (output_json_path, error))
        return None

    def _upload_json(self, test_results_server, json_path, host_path="/api/report", file_uploader=FileUploader):
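        """POST the results JSON file to the test results server over HTTPS.

        Returns True on success; the server is expected to reply with either
        a plain 'OK' or a JSON object whose 'status' field is 'OK'."""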
        url = "https://%s%s" % (test_results_server, host_path)
        uploader = file_uploader(url, 120)
        try:
            response = uploader.upload_single_text_file(self._host.filesystem, 'application/json', json_path)
        except Exception as error:
            _log.error("Failed to upload JSON file to %s in 120s: %s" % (url, error))
            return False

        response_body = [line.strip('\n') for line in response]
        if response_body != ['OK']:
            try:
                parsed_response = json.loads('\n'.join(response_body))
            except ValueError:
                _log.error("Uploaded JSON to %s but got a bad response:" % url)
                for line in response_body:
                    _log.error(line)
                return False
            if parsed_response.get('status') != 'OK':
                _log.error("Uploaded JSON to %s but got an error:" % url)
                _log.error(json.dumps(parsed_response, indent=4))
                return False

        _log.info("JSON file uploaded to %s." % url)
        return True

    def _run_tests_set(self, tests):
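        """Run each test once, recording (test, metrics) pairs in
        self._results, and return the number of tests that failed to
        produce metrics."""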
        failures = 0
        self._results = []

        for i, test in enumerate(tests):
            _log.info('Running %s (%d of %d)' % (test.test_name(), i + 1, len(tests)))
            start_time = time.time()
            metrics = test.run(self._options.time_out_ms)
            if metrics:
                self._results.append((test, metrics))
            else:
                failures += 1
                _log.error('FAILED')

            _log.info('Finished: %f s' % (time.time() - start_time))
            _log.info('')

        return failures