test.py revision 5019d69a182f2a7b8616d9cbcab54ffeaf3efe66
# Shell class for a test, inherited by all individual tests
#
# Methods:
#       __init__        initialise
#       initialize      run once for each job
#       setup           run once for each new version of the test installed
#       run             run the test (wrapped by job.run_test())
#
# Data:
#       job             backreference to the job this test instance is part of
#       outputdir       eg. results/<job>/<testname.tag>
#       resultsdir      eg. results/<job>/<testname.tag>/results
#       profdir         eg. results/<job>/<testname.tag>/profiling
#       debugdir        eg. results/<job>/<testname.tag>/debug
#       bindir          eg. tests/<test>
#       src             eg. tests/<test>/src
#       tmpdir          eg. tmp/<tempname>_<testname.tag>

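# Example (illustrative sketch of how this class is typically used; the test
# name 'sleeptest' and its argument are hypothetical): a client-side test is a
# subclass of base_test placed in tests/<test>/<test>.py, e.g.
#
#   from autotest_lib.client.bin import test
#
#   class sleeptest(test.test):
#       version = 1
#
#       def run_once(self, seconds=1):
#           import time
#           time.sleep(seconds)
#
# job.run_test('sleeptest', seconds=2) then instantiates the class with
# (job, bindir, outputdir) and drives it through initialize/setup/execute.
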
#pylint: disable=C0111

import fcntl
import json
import logging
import os
import re
import shutil
import stat
import sys
import tempfile
import time
import traceback

from autotest_lib.client.bin import utils
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib import utils as client_utils

try:
    from chromite.lib import metrics
except ImportError:
    metrics = client_utils.metrics_mock


class base_test(object):
    preserve_srcdir = False

    def __init__(self, job, bindir, outputdir):
        self.job = job
        self.pkgmgr = job.pkgmgr
        self.autodir = job.autodir
        self.outputdir = outputdir
        self.tagged_testname = os.path.basename(self.outputdir)
        self.resultsdir = os.path.join(self.outputdir, 'results')
        os.mkdir(self.resultsdir)
        self.profdir = os.path.join(self.outputdir, 'profiling')
        os.mkdir(self.profdir)
        self.debugdir = os.path.join(self.outputdir, 'debug')
        os.mkdir(self.debugdir)
        # TODO(ericli): figure out how the autotest crash handler works with
        # cros. Once the crash handler is re-enabled, import getpass and
        # restore the code below. crosbug.com/31232
        # if getpass.getuser() == 'root':
        #     self.configure_crash_handler()
        # else:
        self.crash_handling_enabled = False
        self.bindir = bindir
        self.srcdir = os.path.join(self.bindir, 'src')
        self.tmpdir = tempfile.mkdtemp("_" + self.tagged_testname,
                                       dir=job.tmpdir)
        self._keyvals = []
        self._new_keyval = False
        self.failed_constraints = []
        self.iteration = 0
        self.before_iteration_hooks = []
        self.after_iteration_hooks = []

        # Flag to indicate if the test has succeeded or failed.
        self.success = False


    def configure_crash_handler(self):
        pass


    def crash_handler_report(self):
        pass


    def assert_(self, expr, msg='Assertion failed.'):
        if not expr:
            raise error.TestError(msg)


    def write_test_keyval(self, attr_dict):
        utils.write_keyval(self.outputdir, attr_dict)


    @staticmethod
    def _append_type_to_keys(dictionary, typename):
        new_dict = {}
        for key, value in dictionary.iteritems():
            new_key = "%s{%s}" % (key, typename)
            new_dict[new_key] = value
        return new_dict


    def output_perf_value(self, description, value, units=None,
                          higher_is_better=None, graph=None,
                          replacement='_', replace_existing_values=False):
        """
        Records a measured performance value in an output file.

        The output file will subsequently be parsed by the TKO parser to have
        the information inserted into the results database.

        @param description: A string describing the measured perf value. Must
                be maximum length 256, and may only contain letters, numbers,
                periods, dashes, and underscores.  For example:
                "page_load_time", "scrolling-frame-rate".
        @param value: A number representing the measured perf value, or a list
                of measured values if a test takes multiple measurements.
                Measured perf values can be either ints or floats.
        @param units: A string describing the units associated with the
                measured perf value. Must be maximum length 32, and may only
                contain letters, numbers, periods, dashes, and underscores.
                For example: "msec", "fps", "score", "runs_per_second".
        @param higher_is_better: A boolean indicating whether or not a "higher"
                measured perf value is considered to be better. If False, it is
                assumed that a "lower" measured value is considered to be
                better. This impacts dashboard plotting and email notification.
                Pure autotests are expected to specify either True or False!
                This value can be set to "None" to indicate that the perf
                dashboard should apply the rules encoded via Chromium
                unit-info.json. This is only used for tracking Chromium based
                tests (in particular telemetry).
        @param graph: A string indicating the name of the graph on which
                the perf value will subsequently be displayed on the chrome
                perf dashboard. This allows multiple metrics to be grouped
                together on the same graph. Defaults to None, indicating that
                the perf value should be displayed individually on a separate
                graph.
        @param replacement: String to replace illegal characters in
                |description| and |units| with.
        @param replace_existing_values: A boolean indicating whether or not a
                newly added perf value should replace existing perf values.
        """
        if len(description) > 256:
            raise ValueError('The description must be at most 256 characters.')
        if units and len(units) > 32:
            raise ValueError('The units must be at most 32 characters.')

        # If |replacement| is legal, replace illegal characters with it.
        string_regex = re.compile(r'[^-\.\w]')
        if replacement is None or re.search(string_regex, replacement):
            raise ValueError('Invalid replacement string to mask illegal '
                             'characters. May only contain letters, numbers, '
                             'periods, dashes, and underscores. '
                             'replacement: %s' % replacement)
        description = re.sub(string_regex, replacement, description)
        units = re.sub(string_regex, replacement, units) if units else None

        charts = {}
        output_file = os.path.join(self.resultsdir, 'results-chart.json')
        if os.path.isfile(output_file):
            with open(output_file, 'r') as fp:
                contents = fp.read()
                if contents:
                    charts = json.loads(contents)

        if graph:
            first_level = graph
            second_level = description
        else:
            first_level = description
            second_level = 'summary'

        direction = 'up' if higher_is_better else 'down'

        # All input should be numeric, but at times strings representing
        # numbers are logged; attempt to convert them to numbers. If a
        # non-numeric string is logged, an exception will be raised.
        if isinstance(value, list):
            value = map(float, value)
        else:
            value = float(value)

        result_type = 'scalar'
        value_key = 'value'
        result_value = value

        # The chart json spec go/telemetry-json differentiates between a single
        # value and a list of values.  Lists of values get extra processing in
        # the chromeperf dashboard (mean, standard deviation etc).
        # Tests can log one or more values for the same metric. To adhere
        # strictly to the specification, the first value logged is a scalar,
        # but if another value is logged the results become a list of scalars.
        # TODO: Figure out if there would be any difference in always using a
        # list of scalars, even if there is just one item in the list.
        if isinstance(value, list):
            result_type = 'list_of_scalar_values'
            value_key = 'values'
            if first_level in charts and second_level in charts[first_level]:
                if 'values' in charts[first_level][second_level]:
                    result_value = charts[first_level][second_level]['values']
                elif 'value' in charts[first_level][second_level]:
                    result_value = [charts[first_level][second_level]['value']]
                if replace_existing_values:
                    result_value = value
                else:
                    result_value.extend(value)
            else:
                result_value = value
        elif (first_level in charts and second_level in charts[first_level] and
              not replace_existing_values):
            result_type = 'list_of_scalar_values'
            value_key = 'values'
            if 'values' in charts[first_level][second_level]:
                result_value = charts[first_level][second_level]['values']
                result_value.append(value)
            else:
                result_value = [charts[first_level][second_level]['value'],
                                value]

        test_data = {
            second_level: {
                'type': result_type,
                'units': units,
                value_key: result_value,
                'improvement_direction': direction
            }
        }

        if first_level in charts:
            charts[first_level].update(test_data)
        else:
            charts.update({first_level: test_data})

        with open(output_file, 'w') as fp:
            fp.write(json.dumps(charts, indent=2))

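    # Example (illustrative sketch): a test calling output_perf_value from its
    # postprocess_iteration(). The metric name, graph name and numbers below
    # are hypothetical.
    #
    #   self.output_perf_value(description='page_load_time', value=[1.2, 1.4],
    #                          units='sec', higher_is_better=False,
    #                          graph='loading')
    #
    # Starting from an empty results-chart.json, this produces roughly:
    #
    #   {
    #     "loading": {
    #       "page_load_time": {
    #         "type": "list_of_scalar_values",
    #         "units": "sec",
    #         "values": [1.2, 1.4],
    #         "improvement_direction": "down"
    #       }
    #     }
    #   }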

    def write_perf_keyval(self, perf_dict):
        self.write_iteration_keyval({}, perf_dict)


    def write_attr_keyval(self, attr_dict):
        self.write_iteration_keyval(attr_dict, {})


    def write_iteration_keyval(self, attr_dict, perf_dict):
        # append the dictionaries before they have the {perf} and {attr} added
        self._keyvals.append({'attr':attr_dict, 'perf':perf_dict})
        self._new_keyval = True

        if attr_dict:
            attr_dict = self._append_type_to_keys(attr_dict, "attr")
            utils.write_keyval(self.resultsdir, attr_dict, type_tag="attr")

        if perf_dict:
            perf_dict = self._append_type_to_keys(perf_dict, "perf")
            utils.write_keyval(self.resultsdir, perf_dict, type_tag="perf")

        keyval_path = os.path.join(self.resultsdir, "keyval")
        print >> open(keyval_path, "a"), ""

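    # Example (illustrative sketch): recording per-iteration results. The keys
    # and values below are hypothetical.
    #
    #   self.write_perf_keyval({'throughput_mb_per_sec': 93.5})
    #   self.write_attr_keyval({'kernel_version': '4.14'})
    #
    # Each write_iteration_keyval call appends entries such as
    # "throughput_mb_per_sec{perf}=93.5" or "kernel_version{attr}=4.14" to
    # <resultsdir>/keyval, plus a blank separator line used by the TKO parser.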

    def analyze_perf_constraints(self, constraints):
        if not self._new_keyval:
            return

        # create a dict from the keyvals suitable as an environment for eval
        keyval_env = self._keyvals[-1]['perf'].copy()
        keyval_env['__builtins__'] = None
        self._new_keyval = False
        failures = []

        # evaluate each constraint using the current keyvals
        for constraint in constraints:
            logging.info('___________________ constraint = %s', constraint)
            logging.info('___________________ keyvals = %s', keyval_env)

            try:
                if not eval(constraint, keyval_env):
                    failures.append('%s: constraint was not met' % constraint)
            except:
                failures.append('could not evaluate constraint: %s'
                                % constraint)

        # keep track of the errors for each iteration
        self.failed_constraints.append(failures)


    def process_failed_constraints(self):
        msg = ''
        for i, failures in enumerate(self.failed_constraints):
            if failures:
                msg += 'iteration %d:%s  ' % (i, ','.join(failures))

        if msg:
            raise error.TestFail(msg)


    def register_before_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register a before_iteration_hook.
        This adds the method to the list of hooks which are executed
        before each iteration.

        @param iteration_hook: Method to run before each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
        """
        self.before_iteration_hooks.append(iteration_hook)


    def register_after_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register an after_iteration_hook.
        This adds the method to the list of hooks which are executed
        after each iteration. Hooks are executed starting with the most-
        recently registered, in stack fashion.

        @param iteration_hook: Method to run after each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
        """
        self.after_iteration_hooks.append(iteration_hook)

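    # Example (illustrative sketch): collecting extra logs around every
    # iteration. The hook below is hypothetical; hooks receive the test object
    # itself, so they can use attributes such as resultsdir and iteration.
    #
    #   def _dump_meminfo(test_obj):
    #       shutil.copy('/proc/meminfo',
    #                   os.path.join(test_obj.resultsdir,
    #                                'meminfo.%d' % test_obj.iteration))
    #
    #   self.register_after_iteration_hook(_dump_meminfo)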

    def initialize(self):
        pass


    def setup(self):
        pass


    def warmup(self, *args, **dargs):
        pass


    def drop_caches_between_iterations(self):
        if self.job.drop_caches_between_iterations:
            utils.drop_caches()

    def _call_run_once_with_retry(self, constraints, profile_only,
                                  postprocess_profiled_run, args, dargs):
        """Thin wrapper around _call_run_once that retries unsuccessful tests.

        If the job object's test_retry attribute is > 0, retry any test that
        ran unsuccessfully, up to test_retry times.
        *Note: this does not completely re-initialize the test, it only
            re-executes code once all the initial job set up (packages,
            sysinfo, etc) is complete.
        """
        if self.job.test_retry != 0:
            logging.info('Test will be retried a maximum of %d times',
                         self.job.test_retry)

        max_runs = self.job.test_retry
        for retry_run in xrange(0, max_runs+1):
            try:
                self._call_run_once(constraints, profile_only,
                                    postprocess_profiled_run, args, dargs)
                break
            except error.TestFailRetry as err:
                if retry_run == max_runs:
                    raise
                self.job.record('INFO', None, None, 'Run %s failed with %s' % (
                        retry_run, err))
        if retry_run > 0:
            self.write_test_keyval({'test_retries_before_success': retry_run})


    def _call_run_once(self, constraints, profile_only,
                       postprocess_profiled_run, args, dargs):
        self.drop_caches_between_iterations()
        # execute iteration hooks
        logging.debug('Starting before_iteration_hooks for %s',
                      self.tagged_testname)
        with metrics.SecondsTimer(
                'chromeos/autotest/job/before_iteration_hook_duration'):
            for hook in self.before_iteration_hooks:
                hook(self)
        logging.debug('before_iteration_hooks completed')

        try:
            if profile_only:
                if not self.job.profilers.present():
                    self.job.record('WARN', None, None,
                                    'No profilers have been added but '
                                    'profile_only is set - nothing '
                                    'will be run')
                self.run_once_profiling(postprocess_profiled_run,
                                        *args, **dargs)
            else:
                self.before_run_once()
                logging.debug('starting test(run_once()), test details follow'
                              '\n%r', args)
                self.run_once(*args, **dargs)
                logging.debug('The test has completed successfully')
                self.after_run_once()

            self.postprocess_iteration()
            self.analyze_perf_constraints(constraints)
        # Catch and re-raise to let after_iteration_hooks see the exception.
        except Exception as e:
            logging.debug('Test failed due to %s. Exception log follows the '
                          'after_iteration_hooks.', str(e))
            raise
        finally:
            logging.debug('Starting after_iteration_hooks for %s',
                          self.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/after_iteration_hook_duration'):
                for hook in reversed(self.after_iteration_hooks):
                    hook(self)
            logging.debug('after_iteration_hooks completed')


    def execute(self, iterations=None, test_length=None, profile_only=None,
                _get_time=time.time, postprocess_profiled_run=None,
                constraints=(), *args, **dargs):
        """
        This is the basic execute method for the tests inherited from base_test.
        If you want to implement a benchmark test, it's better to implement
        the run_once function, to cope with the profiling infrastructure. For
        other tests, you can just override the default implementation.

        @param test_length: The minimum test length in seconds. We'll run the
            run_once function for a number of times large enough to cover the
            minimum test length.

        @param iterations: The number of times to run the run_once function.
            This parameter is incompatible with test_length and will be
            ignored if you specify both.

        @param profile_only: If True, run all iterations with profilers
            enabled. If False, run all iterations and one additional iteration
            with profiling, if profilers are present. If None, default to the
            value of job.default_profile_only.

        @param _get_time: [time.time] Used for unit test time injection.

        @param postprocess_profiled_run: Run the postprocessing for the
            profiled run.
        """

        # For our special class of tests, the benchmarks, we don't want
        # profilers to run during the test iterations. Let's reserve only
        # the last iteration for profiling, if needed. So let's stop
        # all profilers if they are present and active.
        profilers = self.job.profilers
        if profilers.active():
            profilers.stop(self)
        if profile_only is None:
            profile_only = self.job.default_profile_only
        # If the user called this test in an odd way (specified both iterations
        # and test_length), let's warn them.
        if iterations and test_length:
            logging.debug('Iterations parameter ignored (timed execution)')
        if test_length:
            test_start = _get_time()
            time_elapsed = 0
            timed_counter = 0
            logging.debug('Test started. Specified %d s as the minimum test '
                          'length', test_length)
            while time_elapsed < test_length:
                timed_counter = timed_counter + 1
                if time_elapsed == 0:
                    logging.debug('Executing iteration %d', timed_counter)
                elif time_elapsed > 0:
                    logging.debug('Executing iteration %d, time_elapsed %d s',
                                  timed_counter, time_elapsed)
                self._call_run_once_with_retry(constraints, profile_only,
                                               postprocess_profiled_run, args,
                                               dargs)
                test_iteration_finish = _get_time()
                time_elapsed = test_iteration_finish - test_start
            logging.debug('Test finished after %d iterations, '
                          'time elapsed: %d s', timed_counter, time_elapsed)
        else:
            if iterations is None:
                iterations = 1
            if iterations > 1:
                logging.debug('Test started. Specified %d iterations',
                              iterations)
            for self.iteration in xrange(1, iterations + 1):
                if iterations > 1:
                    logging.debug('Executing iteration %d of %d',
                                  self.iteration, iterations)
                self._call_run_once_with_retry(constraints, profile_only,
                                               postprocess_profiled_run, args,
                                               dargs)

        if not profile_only:
            self.iteration += 1
            self.run_once_profiling(postprocess_profiled_run, *args, **dargs)

        # Do any postprocessing, normally extracting performance keyvals, etc
        self.postprocess()
        self.process_failed_constraints()

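    # Example (illustrative sketch): how a control file typically reaches
    # execute() through job.run_test. The test name and keyword arguments are
    # hypothetical; any parameter that execute() does not consume is cherry
    # picked into run_once().
    #
    #   job.run_test('disk_bench', iterations=5,
    #                constraints=['throughput_mb_per_sec > 50'],
    #                blocksize=4096)
    #
    # Here execute() runs run_once(blocksize=4096) five times, checks each
    # iteration's perf keyvals against the constraint, and finishes with a
    # profiled run if any profilers are attached.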

    def run_once_profiling(self, postprocess_profiled_run, *args, **dargs):
        profilers = self.job.profilers
        # Do a profiling run if necessary
        if profilers.present():
            self.drop_caches_between_iterations()
            profilers.before_start(self)

            self.before_run_once()
            profilers.start(self)
            logging.debug('Profilers present. Profiling run started')

            try:
                self.run_once(*args, **dargs)

                # Give priority to the run_once() argument over the attribute.
                postprocess_attribute = getattr(self,
                                                'postprocess_profiled_run',
                                                False)

                if (postprocess_profiled_run or
                    (postprocess_profiled_run is None and
                     postprocess_attribute)):
                    self.postprocess_iteration()

            finally:
                profilers.stop(self)
                profilers.report(self)

            self.after_run_once()


    def postprocess(self):
        pass


    def postprocess_iteration(self):
        pass


    def cleanup(self):
        pass


    def before_run_once(self):
        """
        Override in tests that need it, will be called before any run_once()
        call including the profiling run (when it's called before starting
        the profilers).
        """
        pass


    def after_run_once(self):
        """
        Called after every run_once (including from a profiled run when it's
        called after stopping the profilers).
        """
        pass


    @staticmethod
    def _make_writable_to_others(directory):
        mode = os.stat(directory).st_mode
        mode = mode | stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH
        os.chmod(directory, mode)


    def _exec(self, args, dargs):
        self.job.logging.tee_redirect_debug_dir(self.debugdir,
                                                log_name=self.tagged_testname)
        try:
            # write out the test attributes into a keyval
            dargs   = dargs.copy()
            run_cleanup = dargs.pop('run_cleanup', self.job.run_test_cleanup)
            keyvals = dargs.pop('test_attributes', {}).copy()
            keyvals['version'] = self.version
            for i, arg in enumerate(args):
                keyvals['param-%d' % i] = repr(arg)
            for name, arg in dargs.iteritems():
                keyvals['param-%s' % name] = repr(arg)
            self.write_test_keyval(keyvals)

            _validate_args(args, dargs, self.initialize, self.setup,
                           self.execute, self.cleanup)

            try:
                # Make resultsdir and tmpdir accessible to everyone. We may
                # output data to these directories as other users, e.g.,
                # chronos.
                self._make_writable_to_others(self.tmpdir)
                self._make_writable_to_others(self.resultsdir)

                # Initialize:
                _cherry_pick_call(self.initialize, *args, **dargs)

                lockfile = open(os.path.join(self.job.tmpdir, '.testlock'), 'w')
                try:
                    fcntl.flock(lockfile, fcntl.LOCK_EX)
                    # Setup: (compile and install the test, if needed)
                    p_args, p_dargs = _cherry_pick_args(self.setup, args, dargs)
                    utils.update_version(self.srcdir, self.preserve_srcdir,
                                         self.version, self.setup,
                                         *p_args, **p_dargs)
                finally:
                    fcntl.flock(lockfile, fcntl.LOCK_UN)
                    lockfile.close()

                # Execute:
                os.chdir(self.outputdir)

                # call self.warmup cherry picking the arguments it accepts and
                # translate exceptions if needed
                _call_test_function(_cherry_pick_call, self.warmup,
                                    *args, **dargs)

                if hasattr(self, 'run_once'):
                    p_args, p_dargs = _cherry_pick_args(self.run_once,
                                                        args, dargs)
                    # pull in any non-* and non-** args from self.execute
                    for param in _get_nonstar_args(self.execute):
                        if param in dargs:
                            p_dargs[param] = dargs[param]
                else:
                    p_args, p_dargs = _cherry_pick_args(self.execute,
                                                        args, dargs)

                _call_test_function(self.execute, *p_args, **p_dargs)
            except Exception:
                # Save the exception while we run our cleanup() before
                # reraising it, but log it so the actual time of the error
                # is known.
                exc_info = sys.exc_info()
                logging.warning('The test failed with the following exception',
                                exc_info=True)

                try:
                    try:
                        if run_cleanup:
                            logging.debug('Running cleanup for test.')
                            _cherry_pick_call(self.cleanup, *args, **dargs)
                    except Exception:
                        logging.error('Ignoring exception during cleanup() '
                                      'phase:')
                        traceback.print_exc()
                        logging.error('Now raising the earlier %s error',
                                      exc_info[0])
                    self.crash_handler_report()
                finally:
                    # Raise exception after running cleanup, reporting crash,
                    # and restoring job's logging, even if the first two
                    # actions fail.
                    self.job.logging.restore()
                    try:
                        raise exc_info[0], exc_info[1], exc_info[2]
                    finally:
                        # http://docs.python.org/library/sys.html#sys.exc_info
                        # Be nice and prevent a circular reference.
                        del exc_info
            else:
                try:
                    if run_cleanup:
                        _cherry_pick_call(self.cleanup, *args, **dargs)
                    self.crash_handler_report()
                finally:
                    self.job.logging.restore()
        except error.AutotestError:
            # Pass already-categorized errors on up.
            raise
        except Exception, e:
            # Anything else is an ERROR in our own code, not execute().
            raise error.UnhandledTestError(e)

    def runsubtest(self, url, *args, **dargs):
        """
        Execute another autotest test from inside the current test's scope.

        @param url: Url of new test.
        @param tag: Tag added to test name.
        @param args: Args for subtest.
        @param dargs: Dictionary with args for subtest.
        @param iterations: Number of subtest iterations.
        @param profile_only: If true execute one profiled run.
        """
        dargs["profile_only"] = dargs.get("profile_only", False)
        test_basepath = self.outputdir[len(self.job.resultdir + "/"):]
        return self.job.run_test(url, master_testpath=test_basepath,
                                 *args, **dargs)

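    # Example (illustrative sketch): launching a nested test from run_once().
    # The subtest name and tag below are hypothetical.
    #
    #   def run_once(self):
    #       self.runsubtest('kernbench', tag='nested', iterations=1)
    #
    # The subtest's results land under this test's output directory because
    # master_testpath is derived from self.outputdir.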


def _get_nonstar_args(func):
    """Extract all the (normal) function parameter names.

    Given a function, returns a tuple of parameter names, specifically
    excluding the * and ** parameters, if the function accepts them.

    @param func: A callable that we want to choose arguments for.

    @return: A tuple of parameters accepted by the function.
    """
    return func.func_code.co_varnames[:func.func_code.co_argcount]


def _cherry_pick_args(func, args, dargs):
    """Sanitize positional and keyword arguments before calling a function.

    Given a callable (func), an argument tuple and a dictionary of keyword
    arguments, pick only those arguments which the function is prepared to
    accept and return a new argument tuple and keyword argument dictionary.

    Args:
      func: A callable that we want to choose arguments for.
      args: A tuple of positional arguments to consider passing to func.
      dargs: A dictionary of keyword arguments to consider passing to func.
    Returns:
      A tuple of: (args tuple, keyword arguments dictionary)
    """
    # Cherry pick args:
    if func.func_code.co_flags & 0x04:
        # func accepts *args, so return the entire args.
        p_args = args
    else:
        p_args = ()

    # Cherry pick dargs:
    if func.func_code.co_flags & 0x08:
        # func accepts **dargs, so return the entire dargs.
        p_dargs = dargs
    else:
        # Only return the keyword arguments that func accepts.
        p_dargs = {}
        for param in _get_nonstar_args(func):
            if param in dargs:
                p_dargs[param] = dargs[param]

    return p_args, p_dargs

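# Example (illustrative sketch): given a function that accepts only some of
# the proposed keyword arguments, the extras are silently dropped. The
# function below is hypothetical.
#
#   def run_once(self, blocksize, tag='4k'):
#       ...
#
#   _cherry_pick_args(run_once, (), {'blocksize': 4096, 'iterations': 3})
#   # -> ((), {'blocksize': 4096})  'iterations' is not accepted, so dropped.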

def _cherry_pick_call(func, *args, **dargs):
    """Cherry picks arguments from args/dargs based on what "func" accepts
    and calls the function with the picked arguments."""
    p_args, p_dargs = _cherry_pick_args(func, args, dargs)
    return func(*p_args, **p_dargs)

def _validate_args(args, dargs, *funcs):
    """Verify that arguments are appropriate for at least one callable.

    Given a list of callables as additional parameters, verify that
    the proposed keyword arguments in dargs will each be accepted by at least
    one of the callables.

    NOTE: args is currently not supported and must be empty.

    Args:
      args: A tuple of proposed positional arguments.
      dargs: A dictionary of proposed keyword arguments.
      *funcs: Callables to be searched for acceptance of args and dargs.
    Raises:
      error.AutotestError: if an arg won't be accepted by any of *funcs.
    """
    all_co_flags = 0
    all_varnames = ()
    for func in funcs:
        all_co_flags |= func.func_code.co_flags
        all_varnames += func.func_code.co_varnames[:func.func_code.co_argcount]

    # Check if the given args belong to at least one of the functions.
    if len(args) > 0:
        # The current implementation doesn't allow the use of args.
        raise error.TestError('Unnamed arguments not accepted. Please '
                              'call job.run_test with named args only')

    # Check if the given dargs belong to at least one of the functions.
    if len(dargs) > 0:
        if not all_co_flags & 0x08:
            # no func accepts **dargs, so:
            for param in dargs:
                if not param in all_varnames:
                    raise error.AutotestError('Unknown parameter: %s' % param)

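# Example (illustrative sketch): _validate_args accepts a keyword argument as
# long as at least one of the supplied callables names it, and rejects
# positional arguments outright. The functions below are hypothetical and have
# no *args/**dargs.
#
#   def initialize(self): ...
#   def run_once(self, blocksize): ...
#
#   _validate_args((), {'blocksize': 4096}, initialize, run_once)  # OK
#   _validate_args((), {'bogus': 1}, initialize, run_once)  # AutotestError
#   _validate_args((4096,), {}, initialize, run_once)       # TestError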

def _installtest(job, url):
    (group, name) = job.pkgmgr.get_package_name(url, 'test')

    # Bail if the test is already installed
    group_dir = os.path.join(job.testdir, "download", group)
    if os.path.exists(os.path.join(group_dir, name)):
        return (group, name)

    # If the group directory is missing create it and add
    # an empty __init__.py so that sub-directories are
    # considered for import.
    if not os.path.exists(group_dir):
        os.makedirs(group_dir)
        f = file(os.path.join(group_dir, '__init__.py'), 'w+')
        f.close()

    logging.debug("%s: installing test url=%s", name, url)
    tarball = os.path.basename(url)
    tarball_path = os.path.join(group_dir, tarball)
    test_dir = os.path.join(group_dir, name)
    job.pkgmgr.fetch_pkg(tarball, tarball_path,
                         repo_url = os.path.dirname(url))

    # Create the directory for the test
    if not os.path.exists(test_dir):
        os.mkdir(os.path.join(group_dir, name))

    job.pkgmgr.untar_pkg(tarball_path, test_dir)

    os.remove(tarball_path)

    # For this 'sub-object' to be importable via the name
    # 'group.name' we need to provide an __init__.py,
    # so link the main entry point to this.
    os.symlink(name + '.py', os.path.join(group_dir, name,
                            '__init__.py'))

    # The test is now installed.
    return (group, name)


def _call_test_function(func, *args, **dargs):
    """Calls a test function and translates exceptions so that errors
    inside test code are considered test failures."""
    try:
        return func(*args, **dargs)
    except error.AutotestError:
        raise
    except Exception, e:
        # Other exceptions must be treated as a FAIL when
        # raised during the test functions
        raise error.UnhandledTestFail(e)


def runtest(job, url, tag, args, dargs,
            local_namespace={}, global_namespace={},
            before_test_hook=None, after_test_hook=None,
            before_iteration_hook=None, after_iteration_hook=None):
    local_namespace = local_namespace.copy()
    global_namespace = global_namespace.copy()
    # if this is not a plain test name then download and install the
    # specified test
    if url.endswith('.tar.bz2'):
        (testgroup, testname) = _installtest(job, url)
        bindir = os.path.join(job.testdir, 'download', testgroup, testname)
        importdir = os.path.join(job.testdir, 'download')
        modulename = '%s.%s' % (re.sub('/', '.', testgroup), testname)
        classname = '%s.%s' % (modulename, testname)
        path = testname
    else:
        # If the test is local, it may be under either testdir or site_testdir.
        # Tests in site_testdir override tests defined in testdir.
        testname = path = url
        testgroup = ''
        path = re.sub(':', '/', testname)
        modulename = os.path.basename(path)
        classname = '%s.%s' % (modulename, modulename)

        # Try installing the test package.
        # The job object may be either a server side job or a client side job.
        # The 'install_pkg' method will be present only if it's a client side
        # job.
        if hasattr(job, 'install_pkg'):
            try:
                bindir = os.path.join(job.testdir, testname)
                job.install_pkg(testname, 'test', bindir)
            except error.PackageInstallError:
                # Continue as a fallback mechanism and see if the test code
                # already exists on the machine.
                pass

        bindir = None
        for dir in [job.testdir, getattr(job, 'site_testdir', None)]:
            if dir is not None and os.path.exists(os.path.join(dir, path)):
                importdir = bindir = os.path.join(dir, path)
        if not bindir:
            raise error.TestError(testname + ': test does not exist')

    subdir = os.path.join(dargs.pop('master_testpath', ""), testname)
    outputdir = os.path.join(job.resultdir, subdir)
    if tag:
        outputdir += '.' + tag

    local_namespace['job'] = job
    local_namespace['bindir'] = bindir
    local_namespace['outputdir'] = outputdir

    sys.path.insert(0, importdir)
    try:
        exec ('import %s' % modulename, local_namespace, global_namespace)
        exec ("mytest = %s(job, bindir, outputdir)" % classname,
              local_namespace, global_namespace)
    finally:
        sys.path.pop(0)

    pwd = os.getcwd()
    os.chdir(outputdir)

    try:
        mytest = global_namespace['mytest']
        mytest.success = False
        if not job.fast and before_test_hook:
            logging.info('Starting before_hook for %s', mytest.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/before_hook_duration'):
                before_test_hook(mytest)
            logging.info('before_hook completed')

        # Use the register_*_iteration_hook methods to register the passed-in
        # hooks.
        if before_iteration_hook:
            mytest.register_before_iteration_hook(before_iteration_hook)
        if after_iteration_hook:
            mytest.register_after_iteration_hook(after_iteration_hook)
        mytest._exec(args, dargs)
        mytest.success = True
    finally:
        os.chdir(pwd)
        if after_test_hook and (not mytest.success or not job.fast):
            logging.info('Starting after_hook for %s', mytest.tagged_testname)
            with metrics.SecondsTimer(
                    'chromeos/autotest/job/after_hook_duration'):
                after_test_hook(mytest)
            logging.info('after_hook completed')

        shutil.rmtree(mytest.tmpdir, ignore_errors=True)
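
# Example (illustrative sketch): how the job harness typically drives this
# module. The test name is hypothetical; the hooks are optional.
#
#   runtest(job, url='platform_Example', tag='smoke', args=(), dargs={},
#           before_test_hook=lambda t: logging.info('starting %s',
#                                                   t.tagged_testname))
#
# This resolves the test under job.testdir (or site_testdir), imports its
# module, instantiates the test class in results/<job>/platform_Example.smoke,
# and runs it via base_test._exec().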
927