# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Class for running instrumentation tests on a single device."""

import logging
import os
import re
import sys
import time


sys.path.append(os.path.join(sys.path[0],
                             os.pardir, os.pardir, 'build', 'util', 'lib',
                             'common'))
import perf_tests_results_helper

from pylib import android_commands
from pylib import constants
from pylib import flag_changer
from pylib import valgrind_tools
from pylib.base import base_test_result
from pylib.base import base_test_runner
from pylib.instrumentation import json_perf_parser

import test_result


_PERF_TEST_ANNOTATION = 'PerfTest'


def _GetDataFilesForTestSuite(suite_basename):
  """Returns a list of data files/dirs needed by the test suite.

  Args:
    suite_basename: The test suite basename for which to return file paths.

  Returns:
    A list of test file and directory paths.
  """
  test_files = []
  if suite_basename in ['ChromeTest', 'ContentShellTest']:
    test_files += [
        'net/data/ssl/certificates/',
    ]
  return test_files


class TestRunner(base_test_runner.BaseTestRunner):
  """Responsible for running a series of tests connected to a single device."""

  _DEVICE_DATA_DIR = 'chrome/test/data'
  _DEVICE_COVERAGE_DIR = 'chrome/test/coverage'
  _HOSTMACHINE_PERF_OUTPUT_FILE = '/tmp/chrome-profile'
  _DEVICE_PERF_OUTPUT_SEARCH_PREFIX = (constants.DEVICE_PERF_OUTPUT_DIR +
                                       '/chrome-profile*')
  _DEVICE_HAS_TEST_FILES = {}

  def __init__(self, test_options, device, shard_index, test_pkg,
               additional_flags=None):
    """Creates a new TestRunner.

    Args:
      test_options: An InstrumentationOptions object.
      device: Attached android device.
      shard_index: Shard index.
      test_pkg: A TestPackage object.
      additional_flags: A list of additional flags to add to the command line.
    """
    super(TestRunner, self).__init__(device, test_options.tool,
                                     test_options.push_deps,
                                     test_options.cleanup_test_files)
    self._lighttp_port = constants.LIGHTTPD_RANDOM_PORT_FIRST + shard_index

    self.options = test_options
    self.test_pkg = test_pkg
    self.coverage_dir = test_options.coverage_dir
    # Use the correct command line file for the package under test.
    cmdline_file = [a.cmdline_file for a in constants.PACKAGE_INFO.itervalues()
                    if a.test_package == self.test_pkg.GetPackageName()]
    assert len(cmdline_file) < 2, 'Multiple packages have the same test package'
    if cmdline_file and cmdline_file[0]:
      self.flags = flag_changer.FlagChanger(self.adb, cmdline_file[0])
    else:
      self.flags = flag_changer.FlagChanger(self.adb)
    if additional_flags:
      self.flags.AddFlags(additional_flags)

  #override
  def InstallTestPackage(self):
    self.test_pkg.Install(self.adb)

  #override
  def PushDataDeps(self):
    # TODO(frankf): Implement a general approach for copying/installing
    # once across test runners.
    if TestRunner._DEVICE_HAS_TEST_FILES.get(self.device, False):
      logging.warning('Already copied test files to device %s, skipping.',
                      self.device)
      return

    test_data = _GetDataFilesForTestSuite(self.test_pkg.GetApkName())
    if test_data:
      # Make sure the SD card is ready.
      self.adb.WaitForSdCardReady(20)
      for p in test_data:
        self.adb.PushIfNeeded(
            os.path.join(constants.DIR_SOURCE_ROOT, p),
            os.path.join(self.adb.GetExternalStorage(), p))

    # TODO(frankf): Specify test data in this file as opposed to passing
    # it on the command line.
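    # Each entry has the form <device data subdir>:<host path relative to the
    # source root>.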
    for dest_host_pair in self.options.test_data:
      dst_src = dest_host_pair.split(':', 1)
      dst_layer = dst_src[0]
      host_src = dst_src[1]
      host_test_files_path = '%s/%s' % (constants.DIR_SOURCE_ROOT, host_src)
      if os.path.exists(host_test_files_path):
        self.adb.PushIfNeeded(host_test_files_path, '%s/%s/%s' % (
            self.adb.GetExternalStorage(), TestRunner._DEVICE_DATA_DIR,
            dst_layer))
    self.tool.CopyFiles()
    TestRunner._DEVICE_HAS_TEST_FILES[self.device] = True

  def _GetInstrumentationArgs(self):
    ret = {}
    if self.options.wait_for_debugger:
      ret['debug'] = 'true'
    if self.coverage_dir:
      ret['coverage'] = 'true'
      ret['coverageFile'] = self.coverage_device_file

    return ret

  def _TakeScreenshot(self, test):
    """Takes a screenshot from the device."""
    screenshot_name = os.path.join(constants.SCREENSHOTS_DIR, '%s.png' % test)
    logging.info('Taking screenshot named %s', screenshot_name)
    self.adb.TakeScreenshot(screenshot_name)

  def SetUp(self):
    """Sets up the test harness and device before all tests are run."""
    super(TestRunner, self).SetUp()
    if not self.adb.IsRootEnabled():
      logging.warning('Unable to enable java asserts for %s, non-rooted device',
                      self.device)
    else:
      if self.adb.SetJavaAssertsEnabled(True):
        self.adb.Reboot(full_reboot=False)

    # Launch the HTTP server on a port derived from the shard index to avoid
    # the race that occurs when multiple shards try to launch lighttpd on the
    # same port at the same time.
    self.LaunchTestHttpServer(constants.DIR_SOURCE_ROOT, self._lighttp_port)
    self.flags.AddFlags(['--disable-fre', '--enable-test-intents'])

  def TearDown(self):
    """Cleans up the test harness and saves outstanding data from the run."""
    self.flags.Restore()
    super(TestRunner, self).TearDown()

  def TestSetup(self, test):
    """Sets up the test harness for running a particular test.

    Args:
      test: The name of the test that will be run.
    """
    self.SetupPerfMonitoringIfNeeded(test)
    self._SetupIndividualTestTimeoutScale(test)
    self.tool.SetupEnvironment()

    # Make sure the forwarder is still running.
    self._RestartHttpServerForwarderIfNecessary()

    if self.coverage_dir:
      coverage_basename = '%s.ec' % test
      self.coverage_device_file = '%s/%s/%s' % (self.adb.GetExternalStorage(),
                                                TestRunner._DEVICE_COVERAGE_DIR,
                                                coverage_basename)
      self.coverage_host_file = os.path.join(
          self.coverage_dir, coverage_basename)

  def _IsPerfTest(self, test):
    """Determines whether a test is a performance test.

    Args:
      test: The name of the test to be checked.

    Returns:
      Whether the test is annotated as a performance test.
    """
    return _PERF_TEST_ANNOTATION in self.test_pkg.GetTestAnnotations(test)

  def SetupPerfMonitoringIfNeeded(self, test):
    """Sets up performance monitoring if the specified test requires it.

    Args:
      test: The name of the test to be run.
    """
    if not self._IsPerfTest(test):
      return
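    # Clear out any perf output left over from a previous run before starting
    # to monitor logcat for new results.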
    self.adb.Adb().SendCommand('shell rm ' +
                               TestRunner._DEVICE_PERF_OUTPUT_SEARCH_PREFIX)
    self.adb.StartMonitoringLogcat()

  def TestTeardown(self, test, raw_result):
    """Cleans up the test harness after running a particular test.

    Depending on the options of this TestRunner this might handle performance
    tracking.  This method will only be called if the test passed.

    Args:
      test: The name of the test that was just run.
      raw_result: The raw result for this test.
    """

    self.tool.CleanUpEnvironment()

    # The logic below relies on the test passing.
    if not raw_result or raw_result.GetStatusCode():
      return

    self.TearDownPerfMonitoring(test)

    if self.coverage_dir:
      self.adb.Adb().Pull(self.coverage_device_file, self.coverage_host_file)
      self.adb.RunShellCommand('rm -f %s' % self.coverage_device_file)

  def TearDownPerfMonitoring(self, test):
    """Cleans up performance monitoring if the specified test required it.

    Args:
      test: The name of the test that was just run.
    Raises:
      Exception: if there's anything wrong with the perf data.
    """
    if not self._IsPerfTest(test):
      return
    raw_test_name = test.split('#')[1]

    # Wait for and grab the annotation data so we can figure out which traces
    # to parse.
    match = self.adb.WaitForLogMatch(re.compile(r'\*\*PERFANNOTATION\(' +
                                                raw_test_name +
                                                r'\):(.*)'), None)

    # If the test is set to run on a specific device type only (i.e. only a
    # tablet or only a phone) and it is being run on the wrong device, the
    # test just quits and does not do anything.  The java test harness will
    # still print the appropriate annotation for us, but will add --NORUN--
    # so we know to ignore the results.
    # The --NORUN-- tag is managed by MainActivityTestBase.java
    if match.group(1) != '--NORUN--':

      # Obtain the relevant perf data.  The data is dumped to a
      # JSON-formatted file.
      json_string = self.adb.GetProtectedFileContents(
          '/data/data/com.google.android.apps.chrome/files/PerfTestData.txt')

      if json_string:
        json_string = '\n'.join(json_string)
      else:
        raise Exception('Perf file does not exist or is empty')

      if self.options.save_perf_json:
        json_local_file = '/tmp/chromium-android-perf-json-' + raw_test_name
        with open(json_local_file, 'w') as f:
          f.write(json_string)
        logging.info('Saving Perf UI JSON from test %s to %s',
                     test, json_local_file)

      raw_perf_data = match.group(1).split(';')

      for raw_perf_set in raw_perf_data:
        if raw_perf_set:
          perf_set = raw_perf_set.split(',')
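          # Expect three comma-separated tokens: the name of the run in the
          # JSON data, followed by two labels for the reported result.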
          if len(perf_set) != 3:
            raise Exception('Unexpected number of tokens in perf annotation '
                            'string: ' + raw_perf_set)

          # Process the performance data.
          result = json_perf_parser.GetAverageRunInfoFromJSONString(json_string,
                                                                    perf_set[0])
          perf_tests_results_helper.PrintPerfResult(perf_set[1], perf_set[2],
                                                    [result['average']],
                                                    result['units'])

  def _SetupIndividualTestTimeoutScale(self, test):
    timeout_scale = self._GetIndividualTestTimeoutScale(test)
    valgrind_tools.SetChromeTimeoutScale(self.adb, timeout_scale)

  def _GetIndividualTestTimeoutScale(self, test):
    """Returns the timeout scale for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
    timeout_scale = 1
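    # An annotation of the form 'TimeoutScale:<n>' overrides the default scale.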
    if 'TimeoutScale' in annotations:
      for annotation in annotations:
        scale_match = re.match('TimeoutScale:([0-9]+)', annotation)
        if scale_match:
          timeout_scale = int(scale_match.group(1))
    if self.options.wait_for_debugger:
      timeout_scale *= 100
    return timeout_scale

  def _GetIndividualTestTimeoutSecs(self, test):
    """Returns the timeout in seconds for the given |test|."""
    annotations = self.test_pkg.GetTestAnnotations(test)
    if 'Manual' in annotations:
      return 600 * 60
    if 'External' in annotations:
      return 10 * 60
    if 'LargeTest' in annotations or _PERF_TEST_ANNOTATION in annotations:
      return 5 * 60
    if 'MediumTest' in annotations:
      return 3 * 60
    return 1 * 60

  def _RunTest(self, test, timeout):
    try:
      return self.adb.RunInstrumentationTest(
          test, self.test_pkg.GetPackageName(),
          self._GetInstrumentationArgs(), timeout)
    except android_commands.errors.WaitForResponseTimedOutError:
      logging.info('Ran the test with timeout of %ds.', timeout)
      raise

  #override
  def RunTest(self, test):
    raw_result = None
    start_date_ms = None
    results = base_test_result.TestRunResults()
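    # The effective timeout is the annotation-based base timeout multiplied by
    # the per-test and tool timeout scales.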
    timeout = (self._GetIndividualTestTimeoutSecs(test) *
               self._GetIndividualTestTimeoutScale(test) *
               self.tool.GetTimeoutScale())
    try:
      self.TestSetup(test)
      start_date_ms = int(time.time() * 1000)
      raw_result = self._RunTest(test, timeout)
      duration_ms = int(time.time() * 1000) - start_date_ms
      status_code = raw_result.GetStatusCode()
      if status_code:
        if self.options.screenshot_failures:
          self._TakeScreenshot(test)
        log = raw_result.GetFailureReason()
        if not log:
          log = 'No information.'
        result_type = base_test_result.ResultType.FAIL
        package = self.adb.DismissCrashDialogIfNeeded()
        # Assume the test package convention of a ".test" suffix.
        if package and package in self.test_pkg.GetPackageName():
          result_type = base_test_result.ResultType.CRASH
        result = test_result.InstrumentationTestResult(
            test, result_type, start_date_ms, duration_ms, log=log)
      else:
        result = test_result.InstrumentationTestResult(
            test, base_test_result.ResultType.PASS, start_date_ms, duration_ms)
      results.AddResult(result)
    # Catch exceptions thrown by StartInstrumentation().
    # See ../../third_party/android/testrunner/adb_interface.py
    except (android_commands.errors.WaitForResponseTimedOutError,
            android_commands.errors.DeviceUnresponsiveError,
            android_commands.errors.InstrumentationError), e:
      if start_date_ms:
        duration_ms = int(time.time() * 1000) - start_date_ms
      else:
        start_date_ms = int(time.time() * 1000)
        duration_ms = 0
      message = str(e)
      if not message:
        message = 'No information.'
      results.AddResult(test_result.InstrumentationTestResult(
          test, base_test_result.ResultType.CRASH, start_date_ms, duration_ms,
          log=message))
      raw_result = None
    self.TestTeardown(test, raw_result)
    return (results, None if results.DidRunPass() else test)