test.py revision 2ca9777726b79bae8c4d14ed16a39338a6890992
# Shell class for a test, inherited by all individual tests
#
# Methods:
#       __init__        initialise
#       initialize      run once for each job
#       setup           run once for each new version of the test installed
#       run             run the test (wrapped by job.run_test())
#
# Data:
#       job             backreference to the job this test instance is part of
#       outputdir       eg. results/<job>/<testname.tag>
#       resultsdir      eg. results/<job>/<testname.tag>/results
#       profdir         eg. results/<job>/<testname.tag>/profiling
#       debugdir        eg. results/<job>/<testname.tag>/debug
#       bindir          eg. tests/<test>
#       src             eg. tests/<test>/src
#       tmpdir          eg. tmp/<tempname>_<testname.tag>

#pylint: disable-msg=C0111

import fcntl, json, os, re, sys, shutil, tempfile, time, traceback
import logging

from autotest_lib.client.common_lib import error
from autotest_lib.client.bin import utils


class base_test(object):
    preserve_srcdir = False
    network_destabilizing = False

    def __init__(self, job, bindir, outputdir):
        self.job = job
        self.pkgmgr = job.pkgmgr
        self.autodir = job.autodir
        self.outputdir = outputdir
        self.tagged_testname = os.path.basename(self.outputdir)
        self.resultsdir = os.path.join(self.outputdir, 'results')
        os.mkdir(self.resultsdir)
        self.profdir = os.path.join(self.outputdir, 'profiling')
        os.mkdir(self.profdir)
        self.debugdir = os.path.join(self.outputdir, 'debug')
        os.mkdir(self.debugdir)
        # TODO(ericli): figure out how the autotest crash handler works with
        # the cros crash handler; we should restore it in the near term.
        # Once this is re-enabled, import getpass. crosbug.com/31232
        # if getpass.getuser() == 'root':
        #     self.configure_crash_handler()
        # else:
        self.crash_handling_enabled = False
        self.bindir = bindir
        self.srcdir = os.path.join(self.bindir, 'src')
        self.tmpdir = tempfile.mkdtemp("_" + self.tagged_testname,
                                       dir=job.tmpdir)
        self._keyvals = []
        self._new_keyval = False
        self.failed_constraints = []
        self.iteration = 0
        self.before_iteration_hooks = []
        self.after_iteration_hooks = []

        # Flag to indicate if the test has succeeded or failed.
        self.success = False


    def configure_crash_handler(self):
        pass


    def crash_handler_report(self):
        pass


    def assert_(self, expr, msg='Assertion failed.'):
        if not expr:
            raise error.TestError(msg)


    def write_test_keyval(self, attr_dict):
        utils.write_keyval(self.outputdir, attr_dict,
                           tap_report=self.job._tap)

    @staticmethod
    def _append_type_to_keys(dictionary, typename):
        new_dict = {}
        for key, value in dictionary.iteritems():
            new_key = "%s{%s}" % (key, typename)
            new_dict[new_key] = value
        return new_dict


    def output_perf_value(self, description, value, units=None,
                          higher_is_better=True, graph=None):
        """
        Records a measured performance value in an output file.

        The output file will subsequently be parsed by the TKO parser to have
        the information inserted into the results database.

        @param description: A string describing the measured perf value. Must
                be maximum length 256, and may only contain letters, numbers,
                periods, dashes, and underscores.  For example:
                "page_load_time", "scrolling-frame-rate".
        @param value: A number representing the measured perf value, or a list
                of measured values if a test takes multiple measurements.
                Measured perf values can be either ints or floats.
        @param units: A string describing the units associated with the
                measured perf value. Must be maximum length 32, and may only
                contain letters, numbers, periods, dashes, and underscores.
                For example: "msec", "fps", "score", "runs_per_second".
        @param higher_is_better: A boolean indicating whether or not a "higher"
                measured perf value is considered to be better. If False, it is
                assumed that a "lower" measured value is considered to be
                better.
        @param graph: A string indicating the name of the graph on which
                      the perf value will subsequently be displayed in
                      the chrome perf dashboard.
                      This allows multiple metrics to be grouped together
                      on the same graph. Defaults to None, indicating
                      that the perf value should be displayed individually
                      on a separate graph.

        """
        if len(description) > 256:
            raise ValueError('The description must be at most 256 characters.')
        if units and len(units) > 32:
            raise ValueError('The units must be at most 32 characters.')
        string_regex = re.compile(r'^[-\.\w]+$')
        if (not string_regex.search(description) or
            (units and not string_regex.search(units))):
            raise ValueError('Invalid description or units string. May only '
                             'contain letters, numbers, periods, dashes, and '
                             'underscores. description: %s, units: %s' %
                             (description, units))

        entry = {
            'description': description,
            'value': value,
            'units': units,
            'higher_is_better': higher_is_better,
            'graph': graph
        }

        output_path = os.path.join(self.resultsdir, 'perf_measurements')
        with open(output_path, 'a') as fp:
            fp.write(json.dumps(entry, sort_keys=True) + '\n')

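    # Illustrative example (hypothetical metric names): a test's run_once()
    # might record perf values as follows; each call appends one JSON line to
    # <resultsdir>/perf_measurements:
    #
    #     self.output_perf_value('page_load_time', 1.27, units='sec',
    #                            higher_is_better=False)
    #     self.output_perf_value('scrolling-frame-rate', [58.9, 60.0],
    #                            units='fps', graph='smoothness')
    #
    # yielding lines such as:
    #     {"description": "page_load_time", "graph": null,
    #      "higher_is_better": false, "units": "sec", "value": 1.27}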

    def write_perf_keyval(self, perf_dict):
        self.write_iteration_keyval({}, perf_dict,
                                    tap_report=self.job._tap)


    def write_attr_keyval(self, attr_dict):
        self.write_iteration_keyval(attr_dict, {},
                                    tap_report=self.job._tap)


    def write_iteration_keyval(self, attr_dict, perf_dict, tap_report=None):
        # append the dictionaries before they have the {perf} and {attr} added
        self._keyvals.append({'attr':attr_dict, 'perf':perf_dict})
        self._new_keyval = True

        if attr_dict:
            attr_dict = self._append_type_to_keys(attr_dict, "attr")
            utils.write_keyval(self.resultsdir, attr_dict, type_tag="attr",
                               tap_report=tap_report)

        if perf_dict:
            perf_dict = self._append_type_to_keys(perf_dict, "perf")
            utils.write_keyval(self.resultsdir, perf_dict, type_tag="perf",
                               tap_report=tap_report)

        keyval_path = os.path.join(self.resultsdir, "keyval")
        print >> open(keyval_path, "a"), ""

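    # Illustrative example (hypothetical keyval name): calling
    #
    #     self.write_perf_keyval({'throughput': 123.4})
    #
    # stores the iteration's perf keyvals and, via _append_type_to_keys(),
    # writes them to <resultsdir>/keyval roughly as
    #
    #     throughput{perf}=123.4
    #
    # followed by a blank line separating iterations; attribute keyvals are
    # written the same way with an {attr} suffix.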

    def analyze_perf_constraints(self, constraints):
        if not self._new_keyval:
            return

        # create a dict from the keyvals suitable as an environment for eval
        keyval_env = self._keyvals[-1]['perf'].copy()
        keyval_env['__builtins__'] = None
        self._new_keyval = False
        failures = []

        # evaluate each constraint using the current keyvals
        for constraint in constraints:
            logging.info('___________________ constraint = %s', constraint)
            logging.info('___________________ keyvals = %s', keyval_env)

            try:
                if not eval(constraint, keyval_env):
                    failures.append('%s: constraint was not met' % constraint)
            except:
                failures.append('could not evaluate constraint: %s'
                                % constraint)

        # keep track of the errors for each iteration
        self.failed_constraints.append(failures)

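    # Illustrative example: constraints are plain Python expressions evaluated
    # against the latest iteration's perf keyvals. Continuing the hypothetical
    # keyval above, a caller could pass
    #
    #     constraints = ['throughput > 100', 'throughput < 500']
    #
    # to execute(); each expression is evaluated with 'throughput' bound to
    # 123.4, and any expression that is false (or fails to evaluate) is later
    # reported by process_failed_constraints() as a TestFail.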

    def process_failed_constraints(self):
        msg = ''
        for i, failures in enumerate(self.failed_constraints):
            if failures:
                msg += 'iteration %d:%s  ' % (i, ','.join(failures))

        if msg:
            raise error.TestFail(msg)


    def register_before_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register a before_iteration_hook.
        This adds the method to the list of hooks which are executed
        before each iteration.

        @param iteration_hook: Method to run before each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
        """
        self.before_iteration_hooks.append(iteration_hook)


    def register_after_iteration_hook(self, iteration_hook):
        """
        This is how we expect test writers to register an after_iteration_hook.
        This adds the method to the list of hooks which are executed
        after each iteration.

        @param iteration_hook: Method to run after each iteration. A valid
                               hook accepts a single argument which is the
                               test object.
        """
        self.after_iteration_hooks.append(iteration_hook)

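    # Illustrative example (hypothetical hook): a valid iteration hook is any
    # callable taking the test object as its only argument, e.g.
    #
    #     def _log_iteration(test):
    #         logging.info('starting iteration %d of %s',
    #                      test.iteration, test.tagged_testname)
    #
    #     self.register_before_iteration_hook(_log_iteration)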

    def initialize(self):
        pass


    def setup(self):
        pass


    def warmup(self, *args, **dargs):
        pass


    def drop_caches_between_iterations(self):
        if self.job.drop_caches_between_iterations:
            utils.drop_caches()


    def _call_run_once_with_retry(self, constraints, profile_only,
                                  postprocess_profiled_run, args, dargs):
        """Thin wrapper around _call_run_once that retries unsuccessful tests.

        If the job object's test_retry attribute is greater than 0, a test
        that fails with TestFailRetry is re-run up to that many times.
        Note that this does not completely re-initialize the test; it only
        re-executes code once all the initial job setup (packages,
        sysinfo, etc) is complete.
        """
        if self.job.test_retry != 0:
            logging.info('Test will be retried a maximum of %d times',
                         self.job.test_retry)

        max_runs = self.job.test_retry
        for retry_run in xrange(0, max_runs+1):
            try:
                self._call_run_once(constraints, profile_only,
                                    postprocess_profiled_run, args, dargs)
                break
            except error.TestFailRetry as err:
                if retry_run == max_runs:
                    raise
                self.job.record('INFO', None, None, 'Run %s failed with %s' % (
                        retry_run, err))
        if retry_run > 0:
            self.write_test_keyval({'test_retries_before_success': retry_run})


    def _call_run_once(self, constraints, profile_only,
                       postprocess_profiled_run, args, dargs):
        self.drop_caches_between_iterations()
        # execute iteration hooks
        for hook in self.before_iteration_hooks:
            hook(self)

        try:
            if profile_only:
                if not self.job.profilers.present():
                    self.job.record('WARN', None, None,
                                    'No profilers have been added but '
                                    'profile_only is set - nothing '
                                    'will be run')
                self.run_once_profiling(postprocess_profiled_run,
                                        *args, **dargs)
            else:
                self.before_run_once()
                self.run_once(*args, **dargs)
                self.after_run_once()

            self.postprocess_iteration()
            self.analyze_perf_constraints(constraints)
        finally:
            for hook in self.after_iteration_hooks:
                hook(self)


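    # Illustrative example (hypothetical test name): execute() is normally
    # reached through job.run_test(), which may request either a fixed number
    # of iterations or a minimum running time:
    #
    #     job.run_test('mybenchmark', iterations=5)
    #     job.run_test('mybenchmark', test_length=60)   # seconds
    #
    # If both are given, test_length takes precedence and the iterations value
    # is ignored (a debug message is logged).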
    def execute(self, iterations=None, test_length=None, profile_only=None,
                _get_time=time.time, postprocess_profiled_run=None,
                constraints=(), *args, **dargs):
        """
        This is the basic execute method for tests inherited from base_test.
        If you want to implement a benchmark test, it's better to implement
        the run_once function, so it works with the profiling infrastructure.
        For other tests, you can just override the default implementation.

        @param test_length: The minimum test length in seconds. run_once is
            called as many times as needed to cover the minimum test length.

        @param iterations: The number of times to call the run_once function.
            This parameter is incompatible with test_length and is ignored
            (with a debug message) if you specify both.

        @param profile_only: If True, run every iteration with profilers
            enabled. If False, run the iterations normally and add one extra
            profiled run at the end if profilers are present. If None, default
            to the value of job.default_profile_only.

        @param _get_time: [time.time] Used for unit test time injection.

        @param postprocess_profiled_run: Run the postprocessing for the
            profiled run.
        """

        # For our special class of tests, the benchmarks, we don't want
        # profilers to run during the test iterations. Let's reserve only
        # the last iteration for profiling, if needed. So let's stop
        # all profilers if they are present and active.
        profilers = self.job.profilers
        if profilers.active():
            profilers.stop(self)
        if profile_only is None:
            profile_only = self.job.default_profile_only
        # If the user called this test in an odd way (specified both iterations
        # and test_length), let's warn them.
        if iterations and test_length:
            logging.debug('Iterations parameter ignored (timed execution)')
        if test_length:
            test_start = _get_time()
            time_elapsed = 0
            timed_counter = 0
            logging.debug('Test started. Specified %d s as the minimum test '
                          'length', test_length)
            while time_elapsed < test_length:
                timed_counter = timed_counter + 1
                if time_elapsed == 0:
                    logging.debug('Executing iteration %d', timed_counter)
                elif time_elapsed > 0:
                    logging.debug('Executing iteration %d, time_elapsed %d s',
                                  timed_counter, time_elapsed)
                self._call_run_once_with_retry(constraints, profile_only,
                                               postprocess_profiled_run, args,
                                               dargs)
                test_iteration_finish = _get_time()
                time_elapsed = test_iteration_finish - test_start
            logging.debug('Test finished after %d iterations, '
                          'time elapsed: %d s', timed_counter, time_elapsed)
        else:
            if iterations is None:
                iterations = 1
            if iterations > 1:
                logging.debug('Test started. Specified %d iterations',
                              iterations)
            for self.iteration in xrange(1, iterations + 1):
                if iterations > 1:
                    logging.debug('Executing iteration %d of %d',
                                  self.iteration, iterations)
                self._call_run_once_with_retry(constraints, profile_only,
                                               postprocess_profiled_run, args,
                                               dargs)

        if not profile_only:
            self.iteration += 1
            self.run_once_profiling(postprocess_profiled_run, *args, **dargs)

        # Do any postprocessing, normally extracting performance keyvals, etc
        self.postprocess()
        self.process_failed_constraints()


    def run_once_profiling(self, postprocess_profiled_run, *args, **dargs):
        profilers = self.job.profilers
        # Do a profiling run if necessary
        if profilers.present():
            self.drop_caches_between_iterations()
            profilers.before_start(self)

            self.before_run_once()
            profilers.start(self)
            logging.debug('Profilers present. Profiling run started')

            try:
                self.run_once(*args, **dargs)

                # The postprocess_profiled_run argument takes priority over
                # the attribute of the same name.
                postprocess_attribute = getattr(self,
                                                'postprocess_profiled_run',
                                                False)

                if (postprocess_profiled_run or
                    (postprocess_profiled_run is None and
                     postprocess_attribute)):
                    self.postprocess_iteration()

            finally:
                profilers.stop(self)
                profilers.report(self)

            self.after_run_once()

    def postprocess(self):
        pass


    def postprocess_iteration(self):
        pass


    def cleanup(self):
        pass


    def before_run_once(self):
        """
        Override in tests that need it; this is called before every run_once()
        call, including the profiling run (where it is called before the
        profilers are started).
        """
        pass


    def after_run_once(self):
        """
        Called after every run_once(), including the profiling run (where it
        is called after the profilers are stopped).
        """
        pass


    def _exec(self, args, dargs):
        self.job.logging.tee_redirect_debug_dir(self.debugdir,
                                                log_name=self.tagged_testname)
        try:
            if self.network_destabilizing:
                self.job.disable_warnings("NETWORK")

            # write out the test attributes into a keyval
            dargs   = dargs.copy()
            run_cleanup = dargs.pop('run_cleanup', self.job.run_test_cleanup)
            keyvals = dargs.pop('test_attributes', {}).copy()
            keyvals['version'] = self.version
            for i, arg in enumerate(args):
                keyvals['param-%d' % i] = repr(arg)
            for name, arg in dargs.iteritems():
                keyvals['param-%s' % name] = repr(arg)
            self.write_test_keyval(keyvals)

            _validate_args(args, dargs, self.initialize, self.setup,
                           self.execute, self.cleanup)

            try:
                # Initialize:
                _cherry_pick_call(self.initialize, *args, **dargs)

                lockfile = open(os.path.join(self.job.tmpdir, '.testlock'), 'w')
                try:
                    fcntl.flock(lockfile, fcntl.LOCK_EX)
                    # Setup: (compile and install the test, if needed)
                    p_args, p_dargs = _cherry_pick_args(self.setup, args, dargs)
                    utils.update_version(self.srcdir, self.preserve_srcdir,
                                         self.version, self.setup,
                                         *p_args, **p_dargs)
                finally:
                    fcntl.flock(lockfile, fcntl.LOCK_UN)
                    lockfile.close()

                # Execute:
                os.chdir(self.outputdir)

                # call self.warmup cherry picking the arguments it accepts and
                # translate exceptions if needed
                _call_test_function(_cherry_pick_call, self.warmup,
                                    *args, **dargs)

                if hasattr(self, 'run_once'):
                    p_args, p_dargs = _cherry_pick_args(self.run_once,
                                                        args, dargs)
                    # pull in any non-* and non-** args from self.execute
                    for param in _get_nonstar_args(self.execute):
                        if param in dargs:
                            p_dargs[param] = dargs[param]
                else:
                    p_args, p_dargs = _cherry_pick_args(self.execute,
                                                        args, dargs)

                _call_test_function(self.execute, *p_args, **p_dargs)
            except Exception:
                # Save the exception while we run our cleanup() before
                # reraising it.
                exc_info = sys.exc_info()
                try:
                    try:
                        if run_cleanup:
                            _cherry_pick_call(self.cleanup, *args, **dargs)
                    except Exception:
                        logging.error('Ignoring exception during cleanup() phase:')
                        traceback.print_exc()
                        logging.error('Now raising the earlier %s error',
                                      exc_info[0])
                    self.crash_handler_report()
                finally:
                    self.job.logging.restore()
                    try:
                        raise exc_info[0], exc_info[1], exc_info[2]
                    finally:
                        # http://docs.python.org/library/sys.html#sys.exc_info
                        # Be nice and prevent a circular reference.
                        del exc_info
            else:
                try:
                    if run_cleanup:
                        _cherry_pick_call(self.cleanup, *args, **dargs)
                    self.crash_handler_report()
                finally:
                    self.job.logging.restore()
        except error.AutotestError:
            if self.network_destabilizing:
                self.job.enable_warnings("NETWORK")
            # Pass already-categorized errors on up.
            raise
        except Exception, e:
            if self.network_destabilizing:
                self.job.enable_warnings("NETWORK")
            # Anything else is an ERROR in our own code, not execute().
            raise error.UnhandledTestError(e)
        else:
            if self.network_destabilizing:
                self.job.enable_warnings("NETWORK")


    def runsubtest(self, url, *args, **dargs):
        """
        Execute another autotest test from inside the current test's scope.

        @param url: Url of new test.
        @param tag: Tag added to test name.
        @param args: Args for subtest.
        @param dargs: Dictionary with args for subtest.
        @param iterations: Number of subtest iterations.
        @param profile_only: If true execute one profiled run.
        """
        dargs["profile_only"] = dargs.get("profile_only", False)
        test_basepath = self.outputdir[len(self.job.resultdir + "/"):]
        return self.job.run_test(url, master_testpath=test_basepath,
                                 *args, **dargs)

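    # Illustrative example (hypothetical test names): a parent test's
    # run_once() can chain another test, with the child's results nested under
    # the parent's output directory:
    #
    #     def run_once(self):
    #         self.runsubtest('child_test', tag='smoke', iterations=2)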

def _get_nonstar_args(func):
    """Extract all the (normal) function parameter names.

    Given a function, returns a tuple of parameter names, specifically
    excluding the * and ** parameters, if the function accepts them.

    @param func: A callable that we want to choose arguments for.

    @return: A tuple of parameters accepted by the function.
    """
    return func.func_code.co_varnames[:func.func_code.co_argcount]

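# Illustrative example (hypothetical function): for
#
#     def run_once(self, host, timeout=60, *args, **dargs):
#         pass
#
# _get_nonstar_args(run_once) returns ('self', 'host', 'timeout'), i.e. only
# the explicitly named parameters, never *args or **dargs.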

def _cherry_pick_args(func, args, dargs):
    """Sanitize positional and keyword arguments before calling a function.

    Given a callable (func), an argument tuple and a dictionary of keyword
    arguments, pick only those arguments which the function is prepared to
    accept and return a new argument tuple and keyword argument dictionary.

    Args:
      func: A callable that we want to choose arguments for.
      args: A tuple of positional arguments to consider passing to func.
      dargs: A dictionary of keyword arguments to consider passing to func.
    Returns:
      A tuple of: (args tuple, keyword arguments dictionary)
    """
    # Cherry pick args:
    if func.func_code.co_flags & 0x04:
        # func accepts *args, so return the entire args.
        p_args = args
    else:
        p_args = ()

    # Cherry pick dargs:
    if func.func_code.co_flags & 0x08:
        # func accepts **dargs, so return the entire dargs.
        p_dargs = dargs
    else:
        # Only return the keyword arguments that func accepts.
        p_dargs = {}
        for param in _get_nonstar_args(func):
            if param in dargs:
                p_dargs[param] = dargs[param]

    return p_args, p_dargs

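# Illustrative example (hypothetical function): given
#
#     def initialize(self, host, timeout=60):
#         pass
#
# _cherry_pick_args(initialize, (), {'host': 'dut1', 'timeout': 30, 'x': 1})
# returns ((), {'host': 'dut1', 'timeout': 30}); the unrecognized keyword is
# dropped because initialize accepts neither *args nor **dargs.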

def _cherry_pick_call(func, *args, **dargs):
    """Cherry picks arguments from args/dargs based on what "func" accepts
    and calls the function with the picked arguments."""
    p_args, p_dargs = _cherry_pick_args(func, args, dargs)
    return func(*p_args, **p_dargs)


def _validate_args(args, dargs, *funcs):
    """Verify that arguments are appropriate for at least one callable.

    Given a list of callables as additional parameters, verify that
    the proposed keyword arguments in dargs will each be accepted by at least
    one of the callables.

    NOTE: args is currently not supported and must be empty.

    Args:
      args: A tuple of proposed positional arguments.
      dargs: A dictionary of proposed keyword arguments.
      *funcs: Callables to be searched for acceptance of args and dargs.
    Raises:
      error.AutotestError: if an arg won't be accepted by any of *funcs.
    """
    all_co_flags = 0
    all_varnames = ()
    for func in funcs:
        all_co_flags |= func.func_code.co_flags
        all_varnames += func.func_code.co_varnames[:func.func_code.co_argcount]

    # Positional args are not supported by the current implementation.
    if len(args) > 0:
        raise error.TestError('Unnamed arguments not accepted. Please '
                              'call job.run_test with named args only')

    # Check that each given keyword argument is accepted by at least one of
    # the given functions.
    if len(dargs) > 0:
        if not all_co_flags & 0x08:
            # No func accepts **dargs, so check each keyword explicitly.
            for param in dargs:
                if param not in all_varnames:
                    raise error.AutotestError('Unknown parameter: %s' % param)

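# Illustrative example (hypothetical function and values): for a callable that
# accepts neither *args nor **dargs, such as
#
#     def initialize(host):
#         pass
#
#     _validate_args((), {'host': 'dut1'}, initialize)    # passes
#     _validate_args((), {'hosst': 'dut1'}, initialize)   # AutotestError
#     _validate_args(('dut1',), {}, initialize)           # TestError
#
# Note that base_test.execute accepts **dargs, so when it is among the given
# funcs the keyword check is effectively a no-op.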

def _installtest(job, url):
    (group, name) = job.pkgmgr.get_package_name(url, 'test')

    # Bail if the test is already installed
    group_dir = os.path.join(job.testdir, "download", group)
    if os.path.exists(os.path.join(group_dir, name)):
        return (group, name)

    # If the group directory is missing create it and add
    # an empty __init__.py so that sub-directories are
    # considered for import.
    if not os.path.exists(group_dir):
        os.makedirs(group_dir)
        f = file(os.path.join(group_dir, '__init__.py'), 'w+')
        f.close()

    logging.debug("%s: installing test url=%s", name, url)
    tarball = os.path.basename(url)
    tarball_path = os.path.join(group_dir, tarball)
    test_dir = os.path.join(group_dir, name)
    job.pkgmgr.fetch_pkg(tarball, tarball_path,
                         repo_url=os.path.dirname(url))

    # Create the directory for the test
    if not os.path.exists(test_dir):
        os.mkdir(os.path.join(group_dir, name))

    job.pkgmgr.untar_pkg(tarball_path, test_dir)

    os.remove(tarball_path)

    # For this 'sub-object' to be importable via the name
    # 'group.name' we need to provide an __init__.py,
    # so link the main entry point to this.
    os.symlink(name + '.py', os.path.join(group_dir, name,
                            '__init__.py'))

    # The test is now installed.
    return (group, name)


def _call_test_function(func, *args, **dargs):
    """Calls a test function and translates exceptions so that errors
    inside test code are considered test failures."""
    try:
        return func(*args, **dargs)
    except error.AutotestError:
        raise
    except Exception, e:
        # Other exceptions must be treated as a FAIL when
        # raised during the test functions
        raise error.UnhandledTestFail(e)


def runtest(job, url, tag, args, dargs,
            local_namespace={}, global_namespace={},
            before_test_hook=None, after_test_hook=None,
            before_iteration_hook=None, after_iteration_hook=None):
    local_namespace = local_namespace.copy()
    global_namespace = global_namespace.copy()
    # if this is not a plain test name then download and install the
    # specified test
    if url.endswith('.tar.bz2'):
        (testgroup, testname) = _installtest(job, url)
        bindir = os.path.join(job.testdir, 'download', testgroup, testname)
        importdir = os.path.join(job.testdir, 'download')
        site_bindir = None
        modulename = '%s.%s' % (re.sub('/', '.', testgroup), testname)
        classname = '%s.%s' % (modulename, testname)
        path = testname
    else:
        # If the test is local, it may be under either testdir or site_testdir.
        # Tests in site_testdir override tests defined in testdir
        testname = path = url
        testgroup = ''
        path = re.sub(':', '/', testname)
        modulename = os.path.basename(path)
        classname = '%s.%s' % (modulename, modulename)

        # Try installing the test package
        # The job object may be either a server side job or a client side job.
        # 'install_pkg' method will be present only if it's a client side job.
        if hasattr(job, 'install_pkg'):
            try:
                bindir = os.path.join(job.testdir, testname)
                job.install_pkg(testname, 'test', bindir)
            except error.PackageInstallError, e:
                # continue as a fall back mechanism and see if the test code
                # already exists on the machine
                pass

        bindir = testdir = None
        for dir in [job.testdir, getattr(job, 'site_testdir', None)]:
            if dir is not None and os.path.exists(os.path.join(dir, path)):
                testdir = dir
                importdir = bindir = os.path.join(dir, path)
        if not bindir:
            raise error.TestError(testname + ': test does not exist')

    subdir = os.path.join(dargs.pop('master_testpath', ""), testname)
    outputdir = os.path.join(job.resultdir, subdir)
    if tag:
        outputdir += '.' + tag

    local_namespace['job'] = job
    local_namespace['bindir'] = bindir
    local_namespace['outputdir'] = outputdir

    sys.path.insert(0, importdir)
    try:
        exec ('import %s' % modulename, local_namespace, global_namespace)
        exec ("mytest = %s(job, bindir, outputdir)" % classname,
              local_namespace, global_namespace)
    finally:
        sys.path.pop(0)

    pwd = os.getcwd()
    os.chdir(outputdir)

    try:
        mytest = global_namespace['mytest']
        mytest.success = False
        if before_test_hook:
            before_test_hook(mytest)

        # use the register_*_iteration_hook methods to register the passed-in
        # hooks
        if before_iteration_hook:
            mytest.register_before_iteration_hook(before_iteration_hook)
        if after_iteration_hook:
            mytest.register_after_iteration_hook(after_iteration_hook)
        mytest._exec(args, dargs)
        mytest.success = True
    finally:
        os.chdir(pwd)
        if after_test_hook:
            after_test_hook(mytest)
        shutil.rmtree(mytest.tmpdir, ignore_errors=True)
806