autoserv.py revision ff78f11ff85372a1e2f8ddf8eefbc8b772686148
1#!/usr/bin/python -u
2# Copyright 2007-2008 Martin J. Bligh <mbligh@google.com>, Google Inc.
3# Released under the GPL v2
4
5"""
6Run a control file through the server side engine
7"""
8
9import ast
10import datetime
11import getpass
12import logging
13import os
14import re
15import signal
16import socket
17import sys
18import traceback
19import time
20import urllib2
21
22import common
23
24from autotest_lib.client.common_lib import control_data
25from autotest_lib.client.common_lib import global_config
26from autotest_lib.client.common_lib import utils
27from autotest_lib.client.common_lib.cros.graphite import autotest_es
28from autotest_lib.client.common_lib.cros.graphite import autotest_stats
# results_mocker only exists in the puppylab environment; fall back to
# None so main() can detect its absence and skip result mocking.
try:
    from autotest_lib.puppylab import results_mocker
except ImportError:
    results_mocker = None

# Whether the atfork module must be importable for this run.
# NOTE(review): this value is never read below -- the atfork ImportError
# handler re-queries the same config key with default=False instead of
# using this variable (default=True). Confirm which default is intended.
require_atfork = global_config.global_config.get_config_value(
        'AUTOSERV', 'require_atfork_module', type=bool, default=True)
36
37
38# Number of seconds to wait before returning if testing mode is enabled
39TESTING_MODE_SLEEP_SECS = 1
40
# Monkeypatch os.fork for threading+fork safety; tolerate a missing
# atfork module unless configuration says it is required.
try:
    import atfork
    atfork.monkeypatch_os_fork_functions()
    import atfork.stdlib_fixer
    # Fix the Python standard library for threading+fork safety with its
    # internal locks.  http://code.google.com/p/python-atfork/
    import warnings
    warnings.filterwarnings('ignore', 'logging module already imported')
    atfork.stdlib_fixer.fix_logging_module()
except ImportError, e:
    from autotest_lib.client.common_lib import global_config
    # NOTE(review): this re-reads the 'require_atfork_module' key already
    # loaded into the module-level require_atfork above, but with
    # default=False rather than True -- confirm which default is intended.
    if global_config.global_config.get_config_value(
            'AUTOSERV', 'require_atfork_module', type=bool, default=False):
        print >>sys.stderr, 'Please run utils/build_externals.py'
        print e
        sys.exit(1)
57
58from autotest_lib.server import frontend
59from autotest_lib.server import server_logging_config
60from autotest_lib.server import server_job, utils, autoserv_parser, autotest
61from autotest_lib.server import utils as server_utils
62from autotest_lib.site_utils import job_directories
63from autotest_lib.site_utils import job_overhead
64from autotest_lib.site_utils import lxc
65from autotest_lib.site_utils import lxc_utils
66from autotest_lib.client.common_lib import pidfile, logging_manager
67from autotest_lib.client.common_lib.cros.graphite import autotest_stats
68
# Control segment to stage server-side package.
# _stage_ssp() executes this control file with execfile() and reads the
# 'ssp_url' name it defines from the resulting local namespace.
STAGE_SERVER_SIDE_PACKAGE_CONTROL_FILE = server_job._control_segment_path(
        'stage_server_side_package')
72
def log_alarm(signum, frame):
    """Handle SIGALRM by logging the event and exiting with status 1.

    The previous log text claimed the signal was ignored, but the
    handler has always terminated the process via sys.exit(1); the
    message is corrected to match the actual behavior.

    @param signum: Number of the signal received (signal.SIGALRM).
    @param frame: Current stack frame at signal delivery (unused).
    """
    logging.error("Received SIGALRM. Exiting with status 1.")
    sys.exit(1)
76
77
78def _get_machines(parser):
79    """Get a list of machine names from command line arg -m or a file.
80
81    @param parser: Parser for the command line arguments.
82
83    @return: A list of machine names from command line arg -m or the
84             machines file specified in the command line arg -M.
85    """
86    if parser.options.machines:
87        machines = parser.options.machines.replace(',', ' ').strip().split()
88    else:
89        machines = []
90    machines_file = parser.options.machines_file
91    if machines_file:
92        machines = []
93        for m in open(machines_file, 'r').readlines():
94            # remove comments, spaces
95            m = re.sub('#.*', '', m).strip()
96            if m:
97                machines.append(m)
98        logging.debug('Read list of machines from file: %s', machines_file)
99        logging.debug('Machines: %s', ','.join(machines))
100
101    if machines:
102        for machine in machines:
103            if not machine or re.search('\s', machine):
104                parser.parser.error("Invalid machine: %s" % str(machine))
105        machines = list(set(machines))
106        machines.sort()
107    return machines
108
109
def _stage_ssp(parser):
    """Stage server-side package.

    Runs a control segment that stages the server-side package for the
    job described by the autoserv command line. The actual staging logic
    is host-type specific; currently only CrosHost defines
    stage_server_side_package. Returns None when no server-side package
    is available, but may raise for failures other than a missing
    artifact.

    @param parser: Command line arguments parser passed in the autoserv process.

    @return: url of the staged server-side package, or None if no
             server-side package is found for the build.
    """
    # Server-side test code defaults to the build given by --image unless
    # --test_source_build overrides it.
    source_image = parser.options.test_source_build or parser.options.image
    control_namespace = {
            'machines': _get_machines(parser),
            'image': source_image,
    }
    control_locals = {}
    execfile(STAGE_SERVER_SIDE_PACKAGE_CONTROL_FILE, control_namespace,
             control_locals)
    return control_locals['ssp_url']
134
135
def _run_with_ssp(container_name, job_id, results, parser, ssp_url):
    """Run the server job with server-side packaging.

    Sets up an lxc test container, rewrites the current autoserv command
    line so its paths point inside the container, re-runs it there, then
    reports success/failure stats and metadata and destroys the
    container.

    @param container_name: Name of the container to run the test.
    @param job_id: ID of the test job.
    @param results: Folder to store results. This could be different from
                    parser.options.results:
                    parser.options.results  can be set to None for results to be
                    stored in a temp folder.
                    results can be None for autoserv run requires no logging.
    @param parser: Command line parser that contains the options.
    @param ssp_url: url of the staged server-side package.
    """
    bucket = lxc.ContainerBucket()
    control = (parser.args[0] if len(parser.args) > 0 and parser.args[0] != ''
               else None)
    test_container = bucket.setup_test(container_name, job_id, ssp_url, results,
                                       control=control)
    args = sys.argv[:]
    args.remove('--require-ssp')

    # A dictionary of paths to replace in the command line. Key is the path to
    # be replaced with the one in value.
    paths_to_replace = {}
    # Replace the control file path with the one in container.
    if control:
        container_control_filename = os.path.join(
                lxc.CONTROL_TEMP_PATH, os.path.basename(control))
        paths_to_replace[control] = container_control_filename
    # Update result directory with the one in container.
    if parser.options.results:
        container_result_dir = os.path.join(lxc.RESULT_DIR_FMT % job_id)
        paths_to_replace[parser.options.results] = container_result_dir
    # Update parse_job directory with the one in container. The assumption is
    # that the result folder to be parsed is always the same as the results_dir.
    if parser.options.parse_job:
        container_parse_dir = os.path.join(lxc.RESULT_DIR_FMT % job_id)
        # Bug fix: map to container_parse_dir. The original referenced
        # container_result_dir here, which is unbound (NameError) when
        # --parse_job is set but --results is not.
        paths_to_replace[parser.options.parse_job] = container_parse_dir

    args = [paths_to_replace.get(arg, arg) for arg in args]

    # Apply --use-existing-results, results directory is aready created and
    # mounted in container. Apply this arg to avoid exception being raised.
    if not '--use-existing-results' in args:
        args.append('--use-existing-results')

    # Make sure autoserv running in container using a different pid file.
    if not '--pidfile-label' in args:
        args.extend(['--pidfile-label', 'container_autoserv'])

    cmd_line = ' '.join(["'%s'" % arg if ' ' in arg else arg for arg in args])
    logging.info('Run command in container: %s', cmd_line)
    success = False
    try:
        test_container.attach_run(cmd_line)
        success = True
    finally:
        counter_key = '%s.%s' % (lxc.STATS_KEY,
                                 'success' if success else 'fail')
        autotest_stats.Counter(counter_key).increment()
        # metadata is uploaded separately so it can use http to upload.
        metadata = {'drone': socket.gethostname(),
                    'job_id': job_id,
                    'success': success}
        autotest_es.post(use_http=True,
                         type_str=lxc.CONTAINER_RUN_TEST_METADB_TYPE,
                         metadata=metadata)
        test_container.destroy()
204
205
def correct_results_folder_permission(results):
    """Make sure the results folder has the right permission settings.

    For tests running with server-side packaging, the results folder has the
    owner of root. This must be changed to the user running the autoserv
    process, so parsing job can access the results folder.
    TODO(dshi): crbug.com/459344 Remove this function when test container can be
    unprivileged container.

    @param results: Path to the results folder.

    """
    # Nothing to fix when no results folder was given.
    if not results:
        return

    owner_uid = os.getuid()
    owner_gid = os.getgid()
    utils.run('sudo chown -R %s "%s"' % (owner_uid, results))
    utils.run('sudo chgrp -R %s "%s"' % (owner_gid, results))
223
224
def run_autoserv(pid_file_manager, results, parser, ssp_url, use_ssp):
    """Run server job with given options.

    Prepares the process environment (stdin redirected to /dev/null, own
    process group, SIGTERM/SIGTTOU/SIGALRM handlers), builds a
    server_job from the parsed command line, runs the requested special
    task or the control file (optionally inside an lxc container when
    use_ssp is set) and finally exits the process via sys.exit() with
    the job's exit code.

    @param pid_file_manager: PidFileManager used to monitor the autoserv process
    @param results: Folder to store results.
    @param parser: Parser for the command line arguments.
    @param ssp_url: Url to server-side package.
    @param use_ssp: Set to True to run with server-side packaging.
    """
    if parser.options.warn_no_ssp:
        # Post a warning in the log.
        logging.warn('Autoserv is required to run with server-side packaging. '
                     'However, no drone is found to support server-side '
                     'packaging. The test will be executed in a drone without '
                     'server-side packaging supported.')

    # send stdin to /dev/null
    dev_null = os.open(os.devnull, os.O_RDONLY)
    os.dup2(dev_null, sys.stdin.fileno())
    os.close(dev_null)

    # Create separate process group
    os.setpgrp()

    # Container name is predefined so the container can be destroyed in
    # handle_sigterm.
    job_or_task_id = job_directories.get_job_id_or_task_id(
            parser.options.results)
    container_name = (lxc.TEST_CONTAINER_NAME_FMT %
                      (job_or_task_id, time.time(), os.getpid()))

    # Implement SIGTERM handler
    def handle_sigterm(signum, frame):
        """Record the abort, fix result permissions, destroy any test
        container, then SIGKILL the whole process group."""
        logging.debug('Received SIGTERM')
        if pid_file_manager:
            pid_file_manager.close_file(1, signal.SIGTERM)
        logging.debug('Finished writing to pid_file. Killing process.')

        # Update results folder's file permission. This needs to be done ASAP
        # before the parsing process tries to access the log.
        if use_ssp and results:
            correct_results_folder_permission(results)

        # TODO (sbasi) - remove the time.sleep when crbug.com/302815 is solved.
        # This sleep allows the pending output to be logged before the kill
        # signal is sent.
        time.sleep(.1)
        if use_ssp:
            logging.debug('Destroy container %s before aborting the autoserv '
                          'process.', container_name)
            # Metadata record of the abort, posted to ES below whether or
            # not the container teardown succeeds.
            metadata = {'drone': socket.gethostname(),
                        'job_id': job_or_task_id,
                        'container_name': container_name,
                        'action': 'abort',
                        'success': True}
            try:
                bucket = lxc.ContainerBucket()
                container = bucket.get(container_name)
                if container:
                    container.destroy()
                else:
                    metadata['success'] = False
                    metadata['error'] = 'container not found'
                    logging.debug('Container %s is not found.', container_name)
            except:
                metadata['success'] = False
                metadata['error'] = 'Exception: %s' % sys.exc_info()
                # Handle any exception so the autoserv process can be aborted.
                logging.error('Failed to destroy container %s. Error: %s',
                              container_name, sys.exc_info())
            autotest_es.post(use_http=True,
                             type_str=lxc.CONTAINER_RUN_TEST_METADB_TYPE,
                             metadata=metadata)

        os.killpg(os.getpgrp(), signal.SIGKILL)

    # Set signal handler
    signal.signal(signal.SIGTERM, handle_sigterm)

    # faulthandler is only needed to debug in the Lab and is not avaliable to
    # be imported in the chroot as part of VMTest, so Try-Except it.
    try:
        import faulthandler
        faulthandler.register(signal.SIGTERM, all_threads=True, chain=True)
        logging.debug('faulthandler registered on SIGTERM.')
    except ImportError:
        pass

    # Ignore SIGTTOU's generated by output from forked children.
    signal.signal(signal.SIGTTOU, signal.SIG_IGN)

    # If we received a SIGALARM, let's be loud about it.
    signal.signal(signal.SIGALRM, log_alarm)

    # Server side tests that call shell scripts often depend on $USER being set
    # but depending on how you launch your autotest scheduler it may not be set.
    os.environ['USER'] = getpass.getuser()

    # Cache the command line options used below in locals.
    label = parser.options.label
    group_name = parser.options.group_name
    user = parser.options.user
    client = parser.options.client
    server = parser.options.server
    install_before = parser.options.install_before
    install_after = parser.options.install_after
    verify = parser.options.verify
    repair = parser.options.repair
    cleanup = parser.options.cleanup
    provision = parser.options.provision
    reset = parser.options.reset
    job_labels = parser.options.job_labels
    no_tee = parser.options.no_tee
    parse_job = parser.options.parse_job
    execution_tag = parser.options.execution_tag
    if not execution_tag:
        execution_tag = parse_job
    host_protection = parser.options.host_protection
    ssh_user = parser.options.ssh_user
    ssh_port = parser.options.ssh_port
    ssh_pass = parser.options.ssh_pass
    collect_crashinfo = parser.options.collect_crashinfo
    control_filename = parser.options.control_filename
    test_retry = parser.options.test_retry
    verify_job_repo_url = parser.options.verify_job_repo_url
    skip_crash_collection = parser.options.skip_crash_collection
    ssh_verbosity = int(parser.options.ssh_verbosity)
    ssh_options = parser.options.ssh_options
    no_use_packaging = parser.options.no_use_packaging

    # can't be both a client and a server side test
    if client and server:
        parser.parser.error("Can not specify a test as both server and client!")

    if provision and client:
        parser.parser.error("Cannot specify provisioning and client!")

    # Special tasks (verify/repair/etc.) do not need a control file.
    is_special_task = (verify or repair or cleanup or collect_crashinfo or
                       provision or reset)
    if len(parser.args) < 1 and not is_special_task:
        parser.parser.error("Missing argument: control file")

    if ssh_verbosity > 0:
        # ssh_verbosity is an integer between 0 and 3, inclusive
        ssh_verbosity_flag = '-' + 'v' * ssh_verbosity
    else:
        ssh_verbosity_flag = ''

    # We have a control file unless it's just a verify/repair/cleanup job
    if len(parser.args) > 0:
        control = parser.args[0]
    else:
        control = None

    machines = _get_machines(parser)
    if group_name and len(machines) < 2:
        parser.parser.error('-G %r may only be supplied with more than one '
                            'machine.' % group_name)

    # Assemble the server_job that executes the control file / task.
    kwargs = {'group_name': group_name, 'tag': execution_tag,
              'disable_sysinfo': parser.options.disable_sysinfo}
    if control_filename:
        kwargs['control_filename'] = control_filename
    job = server_job.server_job(control, parser.args[1:], results, label,
                                user, machines, client, parse_job,
                                ssh_user, ssh_port, ssh_pass,
                                ssh_verbosity_flag, ssh_options,
                                test_retry, **kwargs)

    job.logging.start_logging()
    job.init_parser()

    # perform checks
    job.precheck()

    # run the job
    exit_code = 0
    try:
        try:
            if repair:
                job.repair(host_protection, job_labels)
            elif verify:
                job.verify(job_labels)
            elif provision:
                job.provision(job_labels)
            elif reset:
                job.reset(job_labels)
            elif cleanup:
                job.cleanup(job_labels)
            else:
                if use_ssp:
                    try:
                        _run_with_ssp(container_name, job_or_task_id, results,
                                      parser, ssp_url)
                    finally:
                        # Update the ownership of files in result folder.
                        correct_results_folder_permission(results)
                else:
                    job.run(install_before, install_after,
                            verify_job_repo_url=verify_job_repo_url,
                            only_collect_crashinfo=collect_crashinfo,
                            skip_crash_collection=skip_crash_collection,
                            job_labels=job_labels,
                            use_packaging=(not no_use_packaging))
        finally:
            # Always close out the hosts the job touched.
            while job.hosts:
                host = job.hosts.pop()
                host.close()
    except:
        # Any failure while running the job is reported as exit code 1.
        exit_code = 1
        traceback.print_exc()

    if pid_file_manager:
        pid_file_manager.num_tests_failed = job.num_tests_failed
        pid_file_manager.close_file(exit_code)
    job.cleanup_parser()

    sys.exit(exit_code)
442
443
def record_autoserv(options, duration_secs):
    """Record autoserv end-to-end time in metadata db.

    Determines which special task (if any) this autoserv invocation ran
    and records its duration against the single machine involved. Runs
    against more than one machine (atomic groups) are skipped; hostless
    runs are recorded under the pseudo-host 'hostless'.

    @param options: parser options.
    @param duration_secs: How long autoserv has taken, in secs.
    """
    # Get machine hostname
    machines = options.machines.replace(
            ',', ' ').strip().split() if options.machines else []
    num_machines = len(machines)
    if num_machines > 1:
        # Skip the case where atomic group is used.
        return
    elif num_machines == 0:
        machines.append('hostless')

    # Determine the status that will be reported.
    s = job_overhead.STATUS
    task_mapping = {
            'reset': s.RESETTING, 'verify': s.VERIFYING,
            'provision': s.PROVISIONING, 'repair': s.REPAIRING,
            'cleanup': s.CLEANING, 'collect_crashinfo': s.GATHERING}
    # option_dict will be like {'reset': True, 'repair': False, ...}
    option_dict = ast.literal_eval(str(options))
    # Use a list comprehension instead of filter(...)[0]: subscripting a
    # filter result is Python-2-only (filter is lazy on Python 3) and the
    # comprehension reads the same either way.
    matched_tasks = [task for task in task_mapping
                     if option_dict.get(task) == True]
    status = task_mapping[matched_tasks[0]] if matched_tasks else s.RUNNING
    is_special_task = status not in [s.RUNNING, s.GATHERING]
    job_or_task_id = job_directories.get_job_id_or_task_id(options.results)
    job_overhead.record_state_duration(
            job_or_task_id, machines[0], status, duration_secs,
            is_special_task=is_special_task)
475
476
def main():
    """Entry point: parse arguments, set up results dir, logging and the
    pid file, then run the server job (or mock results in test mode) and
    exit with the job's exit code, recording overall autoserv duration."""
    start_time = datetime.datetime.now()
    # White list of tests with run time measurement enabled.
    measure_run_time_tests_names = global_config.global_config.get_config_value(
                        'AUTOSERV', 'measure_run_time_tests', type=str)
    if measure_run_time_tests_names:
        measure_run_time_tests = [t.strip() for t in
                                  measure_run_time_tests_names.split(',')]
    else:
        measure_run_time_tests = []
    # grab the parser
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    # If the job requires to run with server-side package, try to stage server-
    # side package first. If that fails with error that autotest server package
    # does not exist, fall back to run the job without using server-side
    # packaging. If option warn_no_ssp is specified, that means autoserv is
    # running in a drone does not support SSP, thus no need to stage server-side
    # package.
    ssp_url = None
    ssp_url_warning = False
    if (not parser.options.warn_no_ssp and parser.options.require_ssp):
        ssp_url = _stage_ssp(parser)
        # The build does not have autotest server package. Fall back to not
        # to use server-side package. Logging is postponed until logging being
        # set up.
        ssp_url_warning = not ssp_url

    if parser.options.no_logging:
        results = None
    else:
        results = parser.options.results
        if not results:
            # Default results dir name includes a timestamp.
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
        results  = os.path.abspath(results)
        resultdir_exists = False
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            error = "Error: results directory already exists: %s\n" % results
            sys.stderr.write(error)
            sys.exit(1)

        # Now that we certified that there's no leftover results dir from
        # previous jobs, lets create the result dir since the logging system
        # needs to create the log file in there.
        if not os.path.isdir(results):
            os.makedirs(results)

    # Server-side packaging will only be used if it's required and the package
    # is available. If warn_no_ssp is specified, it means that autoserv is
    # running in a drone does not have SSP supported and a warning will be logs.
    # Therefore, it should not run with SSP.
    use_ssp = (not parser.options.warn_no_ssp and parser.options.require_ssp
               and ssp_url)
    if use_ssp:
        # SSP runs keep their logs in a dedicated subfolder.
        log_dir = os.path.join(results, 'ssp_logs') if results else None
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir)
    else:
        log_dir = results

    logging_manager.configure_logging(
            server_logging_config.ServerLoggingConfig(),
            results_dir=log_dir,
            use_console=not parser.options.no_tee,
            verbose=parser.options.verbose,
            no_console_prefix=parser.options.no_console_prefix)

    # Deferred warning from the SSP staging attempt above, now that
    # logging is configured.
    if ssp_url_warning:
        logging.warn(
                'Autoserv is required to run with server-side packaging. '
                'However, no server-side package can be found based on '
                '`--image`, host attribute job_repo_url or host label of '
                'cros-version. The test will be executed without '
                'server-side packaging supported.')

    if results:
        logging.info("Results placed in %s" % results)

        # wait until now to perform this check, so it get properly logged
        if (parser.options.use_existing_results and not resultdir_exists and
            not utils.is_in_container()):
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)

    logging.debug('autoserv is running in drone %s.', socket.gethostname())
    logging.debug('autoserv command was: %s', ' '.join(sys.argv))

    if parser.options.write_pidfile and results:
        pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
                                                  results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest.BaseAutotest.set_install_in_tmpdir(
        parser.options.install_in_tmpdir)

    timer = None
    try:
        # Take the first argument as control file name, get the test name from
        # the control file. If the test name exists in the list of tests with
        # run time measurement enabled, start a timer to begin measurement.
        if (len(parser.args) > 0 and parser.args[0] != '' and
            parser.options.machines):
            try:
                test_name = control_data.parse_control(parser.args[0],
                                                       raise_warnings=True).name
            except control_data.ControlVariableException:
                logging.debug('Failed to retrieve test name from control file.')
                test_name = None
            if test_name in measure_run_time_tests:
                machines = parser.options.machines.replace(',', ' '
                                                           ).strip().split()
                try:
                    afe = frontend.AFE()
                    board = server_utils.get_board_from_afe(machines[0], afe)
                    timer = autotest_stats.Timer('autoserv_run_time.%s.%s' %
                                                 (board, test_name))
                    timer.start()
                except (urllib2.HTTPError, urllib2.URLError):
                    # Ignore error if RPC failed to get board
                    pass
    except control_data.ControlVariableException as e:
        logging.error(str(e))
    exit_code = 0
    # TODO(beeps): Extend this to cover different failure modes.
    # Testing exceptions are matched against labels sent to autoserv. Eg,
    # to allow only the hostless job to run, specify
    # testing_exceptions: test_suite in the shadow_config. To allow both
    # the hostless job and dummy_Pass to run, specify
    # testing_exceptions: test_suite,dummy_Pass. You can figure out
    # what label autoserv is invoked with by looking through the logs of a test
    # for the autoserv command's -l option.
    testing_exceptions = global_config.global_config.get_config_value(
            'AUTOSERV', 'testing_exceptions', type=list, default=[])
    test_mode = global_config.global_config.get_config_value(
            'AUTOSERV', 'testing_mode', type=bool, default=False)
    test_mode = (results_mocker and test_mode and not
                 any([ex in parser.options.label
                      for ex in testing_exceptions]))
    is_task = (parser.options.verify or parser.options.repair or
               parser.options.provision or parser.options.reset or
               parser.options.cleanup or parser.options.collect_crashinfo)
    try:
        try:
            if test_mode:
                # The parser doesn't run on tasks anyway, so we can just return
                # happy signals without faking results.
                if not is_task:
                    machine = parser.options.results.split('/')[-1]

                    # TODO(beeps): The proper way to do this would be to
                    # refactor job creation so we can invoke job.record
                    # directly. To do that one needs to pipe the test_name
                    # through run_autoserv and bail just before invoking
                    # the server job. See the comment in
                    # puppylab/results_mocker for more context.
                    # NOTE(review): test_name is only assigned in the timer
                    # setup block above, when both a control file and -m
                    # machines were supplied; otherwise this reference
                    # raises NameError. TODO: confirm and pre-initialize.
                    results_mocker.ResultsMocker(
                            test_name if test_name else 'unknown-test',
                            parser.options.results, machine
                            ).mock_results()
                return
            else:
                run_autoserv(pid_file_manager, results, parser, ssp_url,
                             use_ssp)
        except SystemExit as e:
            exit_code = e.code
            if exit_code:
                logging.exception(e)
        except Exception as e:
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            logging.exception(e)
            exit_code = 1
    finally:
        if pid_file_manager:
            pid_file_manager.close_file(exit_code)
        if timer:
            timer.stop()
        # Record the autoserv duration time. Must be called
        # just before the system exits to ensure accuracy.
        duration_secs = (datetime.datetime.now() - start_time).total_seconds()
        record_autoserv(parser.options, duration_secs)
    sys.exit(exit_code)
669
670
# Script entry point.
if __name__ == '__main__':
    main()
673