test.py revision 5c1bb2564e01e94f1fccb9272dbd613666db3e63
# Shell class for a test, inherited by all individual tests
#
# Methods:
#       __init__        initialise
#       initialize      run once for each job
#       setup           run once for each new version of the test installed
#       run             run the test (wrapped by job.run_test())
#
# Data:
#       job             backreference to the job this test instance is part of
#       outputdir       eg. results/<job>/<testname.tag>
#       resultsdir      eg. results/<job>/<testname.tag>/results
#       profdir         eg. results/<job>/<testname.tag>/profiling
#       debugdir        eg. results/<job>/<testname.tag>/debug
#       bindir          eg. tests/<test>
#       src             eg. tests/<test>/src
#       tmpdir          eg. tmp/<tempname>_<testname.tag>

import fcntl, os, re, sys, shutil, tarfile, tempfile, time, traceback
import warnings, logging

from autotest_lib.client.common_lib import error, packages
from autotest_lib.client.bin import utils


class base_test:
    preserve_srcdir = False

    def __init__(self, job, bindir, outputdir):
        self.job = job
        self.pkgmgr = job.pkgmgr
        self.autodir = job.autodir

        self.outputdir = outputdir
        self.tagged_testname = os.path.basename(self.outputdir)
        self.resultsdir = os.path.join(self.outputdir, 'results')
        os.mkdir(self.resultsdir)
        self.profdir = os.path.join(self.outputdir, 'profiling')
        os.mkdir(self.profdir)
        self.debugdir = os.path.join(self.outputdir, 'debug')
        os.mkdir(self.debugdir)
        self.bindir = bindir
        if hasattr(job, 'libdir'):
            self.libdir = job.libdir
        self.srcdir = os.path.join(self.bindir, 'src')
        self.tmpdir = tempfile.mkdtemp("_" + self.tagged_testname,
                                       dir=job.tmpdir)


    def assert_(self, expr, msg='Assertion failed.'):
        if not expr:
            raise error.TestError(msg)


    def write_test_keyval(self, attr_dict):
        utils.write_keyval(self.outputdir, attr_dict)


    @staticmethod
    def _append_type_to_keys(dictionary, typename):
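        """Return a copy of dictionary with typename appended to each key.

        A minimal illustration (the example values are hypothetical):

        >>> base_test._append_type_to_keys({'throughput': 100}, 'perf')
        {'throughput{perf}': 100}
        """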
        new_dict = {}
        for key, value in dictionary.iteritems():
            new_key = "%s{%s}" % (key, typename)
            new_dict[new_key] = value
        return new_dict


    def write_perf_keyval(self, perf_dict):
        self.write_iteration_keyval({}, perf_dict)


    def write_attr_keyval(self, attr_dict):
        self.write_iteration_keyval(attr_dict, {})


    def write_iteration_keyval(self, attr_dict, perf_dict):
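        """Write the attribute and performance keyvals for one iteration.

        Keys are tagged with their type before being written: for example
        (illustrative values, and assuming the usual key=value keyval
        format), an entry {'throughput': 100} in perf_dict is recorded in
        the results keyval file as "throughput{perf}=100". A trailing
        blank line marks the end of the iteration.
        """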
        if attr_dict:
            attr_dict = self._append_type_to_keys(attr_dict, "attr")
            utils.write_keyval(self.resultsdir, attr_dict, type_tag="attr")

        if perf_dict:
            perf_dict = self._append_type_to_keys(perf_dict, "perf")
            utils.write_keyval(self.resultsdir, perf_dict, type_tag="perf")

        keyval_path = os.path.join(self.resultsdir, "keyval")
        # Append a blank line to separate this iteration from the next;
        # close the file explicitly rather than relying on refcounting.
        keyval_file = open(keyval_path, "a")
        try:
            print >> keyval_file, ""
        finally:
            keyval_file.close()


    def initialize(self):
        pass


    def setup(self):
        pass


    def warmup(self, *args, **dargs):
        pass


    def drop_caches_between_iterations(self):
        if self.job.drop_caches_between_iterations:
            print "Dropping caches between iterations"
            utils.drop_caches()


    def _call_run_once(self, before_iteration_hook, after_iteration_hook,
                       args, dargs):
        self.drop_caches_between_iterations()

        # execute iteration hooks
        if before_iteration_hook:
            before_iteration_hook(self)
        self.run_once(*args, **dargs)
        if after_iteration_hook:
            after_iteration_hook(self)

        self.postprocess_iteration()


    def execute(self, iterations=None, test_length=None, profile_only=False,
                _get_time=time.time, postprocess_profiled_run=None,
                before_iteration_hook=None, after_iteration_hook=None,
                *args, **dargs):
        """
        This is the basic execute method for the tests inherited from
        base_test. If you want to implement a benchmark test, implement
        the run_once function so that it cooperates with the profiling
        infrastructure. For other tests, you can simply override this
        default implementation.

        @param test_length: The minimum test length in seconds. The
            run_once function will be called as many times as necessary
            to cover the minimum test length.

        @param iterations: The number of times to run the run_once
            function. This parameter is incompatible with test_length
            and will be ignored (with a logged message) if you specify
            both.

        @param profile_only: Do not run any test iterations before running
            the test under the profiler.  This is equivalent to specifying
            iterations=0 but is much easier to remember/read/comprehend when
            making control files with job.run_test(profile_only=True) in
            them rather than job.run_test(iterations=0).

        @param _get_time: [time.time] Used for unit test time injection.

        @param postprocess_profiled_run: Run the postprocessing for the
            profiled run.
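
        Illustrative example (test and argument names are hypothetical):
        a control file line such as job.run_test('mytest', iterations=3)
        eventually reaches this method with iterations=3, so run_once()
        is called three times before the optional profiling run.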
        """

        # For our special class of tests, the benchmarks, we don't want
        # profilers to run during the test iterations. Let's reserve only
        # the last iteration for profiling, if needed. So let's stop
        # all profilers if they are present and active.
        profilers = self.job.profilers
        if profilers.active():
            profilers.stop(self)
        # If the user called this test in an odd way (specified both iterations
        # and test_length), let's warn them.
        if iterations and test_length:
            logging.info(
                    'Iterations parameter ignored (timed execution).')
        if test_length:
            test_start = _get_time()
            time_elapsed = 0
            timed_counter = 0
            logging.info('Test started. Minimum test length: %d s',
                               test_length)
            while time_elapsed < test_length:
                timed_counter = timed_counter + 1
                if time_elapsed == 0:
                    logging.info('Executing iteration %d', timed_counter)
                elif time_elapsed > 0:
                    logging.info(
                            'Executing iteration %d, time_elapsed %d s',
                            timed_counter, time_elapsed)
                self._call_run_once(before_iteration_hook, after_iteration_hook,
                                    args, dargs)
                test_iteration_finish = _get_time()
                time_elapsed = test_iteration_finish - test_start
            logging.info('Test finished after %d iterations',
                               timed_counter)
            logging.info('Time elapsed: %d s', time_elapsed)
        else:
            orig_iterations = iterations
            if profile_only:
                if iterations:
                    logging.info('Iterations parameter ignored '
                                       '(profile_only=True).')
                iterations = 0
            elif iterations is None:
                iterations = 1
            if iterations:
                logging.info('Test started. '
                                   'Number of iterations: %d', iterations)
                for self.iteration in xrange(1, iterations+1):
                    logging.info('Executing iteration %d of %d',
                                 self.iteration, iterations)
                    self._call_run_once(before_iteration_hook,
                                        after_iteration_hook, args, dargs)
                logging.info('Test finished after %d iterations.',
                             iterations)

        self.run_once_profiling(postprocess_profiled_run, *args, **dargs)

        # Do any postprocessing, normally extracting performance keyvals, etc
        self.postprocess()


    def run_once_profiling(self, postprocess_profiled_run, *args, **dargs):
        profilers = self.job.profilers
        # Do a profiling run if necessary
        if profilers.present():
            self.drop_caches_between_iterations()
            profilers.start(self)
            print 'Profilers present. Profiling run started'
            try:
                self.iteration = 0       # indicator this is a profiling run
                self.run_once(*args, **dargs)

                # The postprocess_profiled_run argument takes priority
                # over the instance attribute of the same name.
                postprocess_attribute = getattr(self,
                                                'postprocess_profiled_run',
                                                False)

                if (postprocess_profiled_run or
                    (postprocess_profiled_run is None and
                     postprocess_attribute)):
                    self.postprocess_iteration()

            finally:
                profilers.stop(self)
                profilers.report(self)


    def postprocess(self):
        pass


    def postprocess_iteration(self):
        pass


    def cleanup(self):
        pass


    def _setup_test_logging_handler(self):
        """
        Add a file handler during test execution, giving test writers a
        way to collect output in the test results dir simply by making
        logging calls.
        """
        result_filename = os.path.join(self.resultsdir,
                                       '%s.log' % self.tagged_testname)
        self.test_handler = logging.FileHandler(filename=result_filename,
                                                mode='w')
        fmt_str = '[%(asctime)s - %(module)-15s - %(levelname)-8s] %(message)s'
        self.test_formatter = logging.Formatter(fmt_str)
        self.test_handler.setFormatter(self.test_formatter)
        self.logger = logging.getLogger()
        self.logger.addHandler(self.test_handler)


    def _exec(self, before_iteration_hook, after_iteration_hook, args, dargs):

        self.job.stdout.tee_redirect(os.path.join(self.debugdir, 'stdout'))
        self.job.stderr.tee_redirect(os.path.join(self.debugdir, 'stderr'))
        self._setup_test_logging_handler()
        try:
            # write out the test attributes into a keyval
            dargs = dargs.copy()
            run_cleanup = dargs.pop('run_cleanup', self.job.run_test_cleanup)
            keyvals = dargs.pop('test_attributes', {}).copy()
            keyvals['version'] = self.version
            for i, arg in enumerate(args):
                keyvals['param-%d' % i] = repr(arg)
            for name, arg in dargs.iteritems():
                keyvals['param-%s' % name] = repr(arg)
            self.write_test_keyval(keyvals)

            _validate_args(args, dargs, self.initialize, self.setup,
                           self.execute, self.cleanup)

            try:
                # Initialize:
                _cherry_pick_call(self.initialize, *args, **dargs)

                lockfile = open(os.path.join(self.job.tmpdir, '.testlock'), 'w')
                try:
                    fcntl.flock(lockfile, fcntl.LOCK_EX)
                    # Setup: (compile and install the test, if needed)
                    p_args, p_dargs = _cherry_pick_args(self.setup,
                                                        args, dargs)
                    utils.update_version(self.srcdir, self.preserve_srcdir,
                                         self.version, self.setup,
                                         *p_args, **p_dargs)
                finally:
                    fcntl.flock(lockfile, fcntl.LOCK_UN)
                    lockfile.close()

                # Execute:
                os.chdir(self.outputdir)

                # call self.warmup cherry picking the arguments it accepts and
                # translate exceptions if needed
                _call_test_function(_cherry_pick_call, self.warmup,
                                    *args, **dargs)

                # insert the iteration hooks arguments to be potentially
                # passed over to self.execute() if it accepts them
                dargs['before_iteration_hook'] = before_iteration_hook
                dargs['after_iteration_hook'] = after_iteration_hook

                if hasattr(self, 'run_once'):
                    p_args, p_dargs = _cherry_pick_args(self.run_once,
                                                        args, dargs)
                    # pull in any non-* and non-** args from self.execute
                    for param in _get_nonstar_args(self.execute):
                        if param in dargs:
                            p_dargs[param] = dargs[param]
                else:
                    p_args, p_dargs = _cherry_pick_args(self.execute,
                                                        args, dargs)

                _call_test_function(self.execute, *p_args, **p_dargs)
            except Exception:
                # Save the exception while we run our cleanup() before
                # reraising it.
                exc_info = sys.exc_info()
                try:
                    try:
                        if run_cleanup:
                            _cherry_pick_call(self.cleanup, *args, **dargs)
                    except Exception:
                        print 'Ignoring exception during cleanup() phase:'
                        traceback.print_exc()
                        print 'Now raising the earlier %s error' % exc_info[0]
                finally:
                    # Remove the handler here too, mirroring the else
                    # branch, so it does not leak on the error path.
                    self.logger.removeHandler(self.test_handler)
                    self.job.stderr.restore()
                    self.job.stdout.restore()
                    try:
                        raise exc_info[0], exc_info[1], exc_info[2]
                    finally:
                        # http://docs.python.org/library/sys.html#sys.exc_info
                        # Be nice and prevent a circular reference.
                        del exc_info
            else:
                try:
                    if run_cleanup:
                        _cherry_pick_call(self.cleanup, *args, **dargs)
                finally:
                    self.logger.removeHandler(self.test_handler)
                    self.job.stderr.restore()
                    self.job.stdout.restore()
        except error.AutotestError:
            # Pass already-categorized errors on up.
            raise
        except Exception, e:
            # Anything else is an ERROR in our own code, not execute().
            raise error.UnhandledTestError(e)


def _get_nonstar_args(func):
    """Extract all the (normal) function parameter names.

    Given a function, returns a tuple of parameter names, specifically
    excluding the * and ** parameters, if the function accepts them.

    @param func: A callable that we want to choose arguments for.

    @return: A tuple of parameters accepted by the function.
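
    Illustrative example (hypothetical function, not part of this module):

    >>> def f(a, b, *args, **dargs): pass
    >>> _get_nonstar_args(f)
    ('a', 'b')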
    """
    return func.func_code.co_varnames[:func.func_code.co_argcount]


def _cherry_pick_args(func, args, dargs):
    """Sanitize positional and keyword arguments before calling a function.

    Given a callable (func), an argument tuple and a dictionary of keyword
    arguments, pick only those arguments which the function is prepared to
    accept and return a new argument tuple and keyword argument dictionary.

    Args:
      func: A callable that we want to choose arguments for.
      args: A tuple of positional arguments to consider passing to func.
      dargs: A dictionary of keyword arguments to consider passing to func.
    Returns:
      A tuple of: (args tuple, keyword arguments dictionary)
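
    Illustrative example (hypothetical function, not part of this module):

    >>> def f(a, b): pass
    >>> _cherry_pick_args(f, (1, 2), {'b': 3, 'c': 4})
    ((), {'b': 3})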
    """
    # Cherry pick args:
    if func.func_code.co_flags & 0x04:
        # func accepts *args (the CO_VARARGS flag is set), so pass on
        # the entire args tuple.
        p_args = args
    else:
        p_args = ()

    # Cherry pick dargs:
    if func.func_code.co_flags & 0x08:
        # func accepts **dargs (the CO_VARKEYWORDS flag is set), so pass
        # on the entire dargs dictionary.
        p_dargs = dargs
    else:
        # Only return the keyword arguments that func accepts.
        p_dargs = {}
        for param in _get_nonstar_args(func):
            if param in dargs:
                p_dargs[param] = dargs[param]

    return p_args, p_dargs


def _cherry_pick_call(func, *args, **dargs):
    """Cherry picks arguments from args/dargs based on what "func" accepts
    and calls the function with the picked arguments."""
    p_args, p_dargs = _cherry_pick_args(func, args, dargs)
    return func(*p_args, **p_dargs)


def _validate_args(args, dargs, *funcs):
    """Verify that arguments are appropriate for at least one callable.

    Given a list of callables as additional parameters, verify that
    the proposed keyword arguments in dargs will each be accepted by at least
    one of the callables.

    NOTE: args is currently not supported and must be empty.

    Args:
      args: A tuple of proposed positional arguments.
      dargs: A dictionary of proposed keyword arguments.
      *funcs: Callables to be searched for acceptance of args and dargs.
    Raises:
      error.AutotestError: if an arg won't be accepted by any of *funcs.
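
    Illustrative example (hypothetical functions, not part of this module):

    >>> def f(a): pass
    >>> def g(b): pass
    >>> _validate_args((), {'a': 1, 'b': 2}, f, g)  # fine: each key accepted
    >>> _validate_args((), {'c': 3}, f, g)  # raises error.AutotestError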
433    """
434    all_co_flags = 0
435    all_varnames = ()
436    for func in funcs:
437        all_co_flags |= func.func_code.co_flags
438        all_varnames += func.func_code.co_varnames[:func.func_code.co_argcount]
439
440    # Check if given args belongs to at least one of the methods below.
441    if len(args) > 0:
442        # Current implementation doesn't allow the use of args.
443        raise error.TestError('Unnamed arguments not accepted. Please '
444                              'call job.run_test with named args only')
445
446    # Check if given dargs belongs to at least one of the methods below.
447    if len(dargs) > 0:
448        if not all_co_flags & 0x08:
449            # no func accepts *dargs, so:
450            for param in dargs:
451                if not param in all_varnames:
452                    raise error.AutotestError('Unknown parameter: %s' % param)
453
454
def _installtest(job, url):
    (group, name) = job.pkgmgr.get_package_name(url, 'test')

    # Bail if the test is already installed
    group_dir = os.path.join(job.testdir, "download", group)
    if os.path.exists(os.path.join(group_dir, name)):
        return (group, name)

    # If the group directory is missing, create it and add an empty
    # __init__.py so that its sub-directories are considered for import.
    if not os.path.exists(group_dir):
        os.mkdir(group_dir)
        f = open(os.path.join(group_dir, '__init__.py'), 'w+')
        f.close()

    print name + ": installing test url=" + url
    tarball = os.path.basename(url)
    tarball_path = os.path.join(group_dir, tarball)
    test_dir = os.path.join(group_dir, name)
    job.pkgmgr.fetch_pkg(tarball, tarball_path,
                         repo_url=os.path.dirname(url))

    # Create the directory for the test
    if not os.path.exists(test_dir):
        os.mkdir(test_dir)

    job.pkgmgr.untar_pkg(tarball_path, test_dir)

    os.remove(tarball_path)

    # For this 'sub-object' to be importable via the name
    # 'group.name' we need to provide an __init__.py,
    # so link the main entry point to this.
    os.symlink(name + '.py', os.path.join(group_dir, name,
                            '__init__.py'))
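
    # The resulting layout (illustrative) is:
    #   <testdir>/download/<group>/__init__.py
    #   <testdir>/download/<group>/<name>/<name>.py   (from the tarball)
    #   <testdir>/download/<group>/<name>/__init__.py -> <name>.py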

    # The test is now installed.
    return (group, name)


def _call_test_function(func, *args, **dargs):
    """Calls a test function and translates exceptions so that errors
    inside test code are considered test failures."""
    try:
        return func(*args, **dargs)
    except error.AutotestError:
        # Pass already-categorized errors on up as is.
        raise
    except Exception, e:
        # Other exceptions must be treated as a FAIL when
        # raised during the test functions
        raise error.UnhandledTestFail(e)


def runtest(job, url, tag, args, dargs,
            local_namespace={}, global_namespace={},
            before_test_hook=None, after_test_hook=None,
            before_iteration_hook=None, after_iteration_hook=None):
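    """Locate, install if needed, and run the test named by url.

    A sketch of the behaviour (derived from this function's body, not from
    original documentation): if url ends in '.tar.bz2' the test is first
    downloaded and installed via _installtest(); otherwise it is looked up
    in job.testdir (or job.site_testdir, which takes precedence). The test
    class is then imported, instantiated and run through its _exec()
    method, with the given hooks invoked before/after the whole test and
    before/after each iteration.
    """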
    local_namespace = local_namespace.copy()
    global_namespace = global_namespace.copy()

    # if this is not a plain test name then download and install the
    # specified test
    if url.endswith('.tar.bz2'):
        (group, testname) = _installtest(job, url)
        bindir = os.path.join(job.testdir, 'download', group, testname)
        site_bindir = None
    else:
        # if the test is local, it can be found in either testdir
        # or site_testdir. tests in site_testdir override tests
        # defined in testdir
        (group, testname) = ('', url)
        bindir = os.path.join(job.testdir, group, testname)
        if hasattr(job, 'site_testdir'):
            site_bindir = os.path.join(job.site_testdir,
                                       group, testname)
        else:
            site_bindir = None

        # The job object here can be that of a server side job or a client
        # side job. The 'install_pkg' method won't be present for server
        # side jobs, so only fetch the test if that method is present on
        # the job object.
        if hasattr(job, 'install_pkg'):
            try:
                job.install_pkg(testname, 'test', bindir)
            except packages.PackageInstallError:
                # Continue as a fall-back mechanism and see if the test
                # code already exists on the machine.
                pass

    outputdir = os.path.join(job.resultdir, testname)
    if tag:
        outputdir += '.' + tag

    # if we can find the test in site_bindir, use this version
    if site_bindir and os.path.exists(site_bindir):
        bindir = site_bindir
        testdir = job.site_testdir
    elif os.path.exists(bindir):
        testdir = job.testdir
    else:
        raise error.TestError(testname + ': test does not exist')

    local_namespace['job'] = job
    local_namespace['bindir'] = bindir
    local_namespace['outputdir'] = outputdir

    if group:
        sys.path.insert(0, os.path.join(testdir, 'download'))
        group += '.'
    else:
        sys.path.insert(0, os.path.join(testdir, testname))

    try:
        exec ("import %s%s" % (group, testname),
              local_namespace, global_namespace)
        exec ("mytest = %s%s.%s(job, bindir, outputdir)" %
              (group, testname, testname),
              local_namespace, global_namespace)
    finally:
        sys.path.pop(0)

    pwd = os.getcwd()
    os.chdir(outputdir)

    try:
        mytest = global_namespace['mytest']
        if before_test_hook:
            before_test_hook(mytest)
        # args/dargs are the test arguments as given by the control file so
        # we cannot put the iteration hooks inside the dictionary
        mytest._exec(before_iteration_hook, after_iteration_hook, args, dargs)
    finally:
        os.chdir(pwd)
        if after_test_hook:
            after_test_hook(mytest)
        shutil.rmtree(mytest.tmpdir, ignore_errors=True)
594