autoserv.py revision a06f3e2ec1a3b0f015533ef7e50da36f1376b8b6
1#!/usr/bin/python -u
2# Copyright 2007-2008 Martin J. Bligh <mbligh@google.com>, Google Inc.
3# Released under the GPL v2
4
5"""
6Run a control file through the server side engine
7"""
8
9import ast
10import datetime
11import getpass
12import logging
13import os
14import re
15import signal
16import socket
17import sys
18import traceback
19import time
20import urllib2
21
22import common
23
24from autotest_lib.client.common_lib import control_data
25from autotest_lib.client.common_lib import error
26from autotest_lib.client.common_lib import global_config
27from autotest_lib.client.common_lib import utils
28from autotest_lib.client.common_lib.cros.graphite import autotest_es
29from autotest_lib.client.common_lib.cros.graphite import autotest_stats
30try:
31    from autotest_lib.puppylab import results_mocker
32except ImportError:
33    results_mocker = None
34
# Shared handle to the global configuration singleton.
_CONFIG = global_config.global_config

# Whether the python-atfork module is required (see the import guard below).
# NOTE(review): this value is computed but never read in this file, and the
# import fallback below re-reads the same key with default=False instead of
# default=True -- confirm which default is intended.
require_atfork = _CONFIG.get_config_value(
        'AUTOSERV', 'require_atfork_module', type=bool, default=True)


# Number of seconds to wait before returning if testing mode is enabled
TESTING_MODE_SLEEP_SECS = 1
43
# Best-effort import of python-atfork, which patches os.fork and the standard
# library's internal locks (notably logging's) so forked children do not
# inherit held locks.
try:
    import atfork
    atfork.monkeypatch_os_fork_functions()
    import atfork.stdlib_fixer
    # Fix the Python standard library for threading+fork safety with its
    # internal locks.  http://code.google.com/p/python-atfork/
    import warnings
    warnings.filterwarnings('ignore', 'logging module already imported')
    atfork.stdlib_fixer.fix_logging_module()
except ImportError, e:
    from autotest_lib.client.common_lib import global_config
    # Only abort when the config insists atfork must be present; otherwise
    # continue without the fork-safety fixes.
    if _CONFIG.get_config_value(
            'AUTOSERV', 'require_atfork_module', type=bool, default=False):
        print >>sys.stderr, 'Please run utils/build_externals.py'
        print e
        sys.exit(1)
60
61from autotest_lib.server import frontend
62from autotest_lib.server import server_logging_config
63from autotest_lib.server import server_job, utils, autoserv_parser, autotest
64from autotest_lib.server import utils as server_utils
65from autotest_lib.site_utils import job_directories
66from autotest_lib.site_utils import job_overhead
67from autotest_lib.site_utils import lxc
68from autotest_lib.site_utils import lxc_utils
69from autotest_lib.client.common_lib import pidfile, logging_manager
70from autotest_lib.client.common_lib.cros.graphite import autotest_stats
71
# Control segment to stage server-side package.
STAGE_SERVER_SIDE_PACKAGE_CONTROL_FILE = server_job._control_segment_path(
        'stage_server_side_package')

# Command line to start servod in a moblab. %-formatted with (board, port).
START_SERVOD_CMD = 'sudo start servod BOARD=%s PORT=%s'
# Command line to stop a running servod instance.
STOP_SERVOD_CMD = 'sudo stop servod'
79
def log_alarm(signum, frame):
    """Signal handler for SIGALRM: log the alarm and exit with status 1.

    @param signum: The signal number received (signal.SIGALRM).
    @param frame: Current stack frame when the signal fired (unused).
    """
    # Bug fix: the message used to claim the alarm was "ignored", but this
    # handler has always exited the process -- make the log match reality
    # (and use the correct signal name, SIGALRM not SIGALARM).
    logging.error("Received SIGALRM. Exiting.")
    sys.exit(1)
83
84
85def _get_machines(parser):
86    """Get a list of machine names from command line arg -m or a file.
87
88    @param parser: Parser for the command line arguments.
89
90    @return: A list of machine names from command line arg -m or the
91             machines file specified in the command line arg -M.
92    """
93    if parser.options.machines:
94        machines = parser.options.machines.replace(',', ' ').strip().split()
95    else:
96        machines = []
97    machines_file = parser.options.machines_file
98    if machines_file:
99        machines = []
100        for m in open(machines_file, 'r').readlines():
101            # remove comments, spaces
102            m = re.sub('#.*', '', m).strip()
103            if m:
104                machines.append(m)
105        logging.debug('Read list of machines from file: %s', machines_file)
106        logging.debug('Machines: %s', ','.join(machines))
107
108    if machines:
109        for machine in machines:
110            if not machine or re.search('\s', machine):
111                parser.parser.error("Invalid machine: %s" % str(machine))
112        machines = list(set(machines))
113        machines.sort()
114    return machines
115
116
def _stage_ssp(parser):
    """Stage server-side package.

    This calls a control segment to stage the server-side package based on the
    job and the autoserv command line options. The detailed implementation can
    differ per host type; currently only CrosHost defines
    stage_server_side_package.
    Returns None when no server-side package is available, but may raise if
    staging fails for any reason other than the artifact (the server-side
    package) not being found.

    @param parser: Command line arguments parser passed in the autoserv
            process.

    @return: url of the staged server-side package, or None if no server-side
             package is found for the build.
    """
    # Default to the server-side test code from the build given in --image
    # when test_source_build is not specified.
    source_image = (parser.options.test_source_build or
                    parser.options.image)
    control_globals = {'machines': _get_machines(parser),
                       'image': source_image}
    control_locals = {}
    execfile(STAGE_SERVER_SIDE_PACKAGE_CONTROL_FILE, control_globals,
             control_locals)
    return control_locals['ssp_url']
141
142
def _run_with_ssp(container_name, job_id, results, parser, ssp_url):
    """Run the server job with server-side packaging.

    Sets up an lxc test container, rewrites the current command line so that
    host paths point at their in-container equivalents, then re-invokes
    autoserv inside the container. The container is always destroyed
    afterwards, and a success/fail counter plus a metadata record are emitted
    either way.

    @param container_name: Name of the container to run the test.
    @param job_id: ID of the test job.
    @param results: Folder to store results. This could be different from
                    parser.options.results:
                    parser.options.results can be set to None for results to
                    be stored in a temp folder.
                    results can be None when the autoserv run requires no
                    logging.
    @param parser: Command line parser that contains the options.
    @param ssp_url: url of the staged server-side package.
    """
    bucket = lxc.ContainerBucket()
    control = (parser.args[0] if len(parser.args) > 0 and parser.args[0] != ''
               else None)
    test_container = bucket.setup_test(container_name, job_id, ssp_url, results,
                                       control=control)
    args = sys.argv[:]
    args.remove('--require-ssp')
    # --parent_job_id is only useful in autoserv running in host, not in
    # container. Including this argument will cause tests to fail for builds
    # before CL 286265 was merged.
    if '--parent_job_id' in args:
        index = args.index('--parent_job_id')
        args.remove('--parent_job_id')
        # Remove the actual parent job id value that followed the flag.
        del args[index]

    # A dictionary of paths to replace in the command line. Key is the path to
    # be replaced with the one in value.
    paths_to_replace = {}
    # Replace the control file path with the one in container.
    if control:
        container_control_filename = os.path.join(
                lxc.CONTROL_TEMP_PATH, os.path.basename(control))
        paths_to_replace[control] = container_control_filename
    # Update result directory with the one in container.
    if parser.options.results:
        container_result_dir = os.path.join(lxc.RESULT_DIR_FMT % job_id)
        paths_to_replace[parser.options.results] = container_result_dir
    # Update parse_job directory with the one in container. The assumption is
    # that the result folder to be parsed is always the same as the results_dir.
    if parser.options.parse_job:
        container_parse_dir = os.path.join(lxc.RESULT_DIR_FMT % job_id)
        # Bug fix: this previously mapped parse_job to container_result_dir,
        # which is undefined (NameError) when --results is not set, and was
        # the wrong variable in any case.
        paths_to_replace[parser.options.parse_job] = container_parse_dir

    args = [paths_to_replace.get(arg, arg) for arg in args]

    # Apply --use-existing-results, results directory is already created and
    # mounted in container. Apply this arg to avoid exception being raised.
    if not '--use-existing-results' in args:
        args.append('--use-existing-results')

    # Make sure autoserv running in container uses a different pid file.
    if not '--pidfile-label' in args:
        args.extend(['--pidfile-label', 'container_autoserv'])

    # Quote arguments containing spaces so the joined command line parses back
    # into the same argv inside the container.
    cmd_line = ' '.join(["'%s'" % arg if ' ' in arg else arg for arg in args])
    logging.info('Run command in container: %s', cmd_line)
    success = False
    try:
        test_container.attach_run(cmd_line)
        success = True
    finally:
        counter_key = '%s.%s' % (lxc.STATS_KEY,
                                 'success' if success else 'fail')
        autotest_stats.Counter(counter_key).increment()
        # metadata is uploaded separately so it can use http to upload.
        metadata = {'drone': socket.gethostname(),
                    'job_id': job_id,
                    'success': success}
        autotest_es.post(use_http=True,
                         type_str=lxc.CONTAINER_RUN_TEST_METADB_TYPE,
                         metadata=metadata)
        test_container.destroy()
219
220
def correct_results_folder_permission(results):
    """Make sure the results folder has the right permission settings.

    For tests running with server-side packaging, the results folder has the
    owner of root. This must be changed to the user running the autoserv
    process, so parsing job can access the results folder.
    TODO(dshi): crbug.com/459344 Remove this function when test container can be
    unprivileged container.

    @param results: Path to the results folder.

    """
    if not results:
        return

    # Reclaim ownership of everything under the folder: user first, then
    # group.
    chown_cmd = 'sudo -n chown -R %s "%s"' % (os.getuid(), results)
    chgrp_cmd = 'sudo -n chgrp -R %s "%s"' % (os.getgid(), results)
    try:
        utils.run(chown_cmd)
        utils.run(chgrp_cmd)
    except error.CmdError as e:
        # Record the failure in the metadata DB before re-raising.
        failure_metadata = {'error': str(e),
                            'result_folder': results,
                            'drone': socket.gethostname()}
        autotest_es.post(use_http=True,
                         type_str='correct_results_folder_failure',
                         metadata=failure_metadata)
        raise
246
247
def _start_servod(machine):
    """Try to start servod in moblab if it's not already running or running
    with a different board or port.

    This is a no-op outside moblab. Failures to query the AFE or to (re)start
    servod are logged and swallowed so the test run can proceed.

    @param machine: Name of the dut used for test.
    """
    if not utils.is_moblab():
        return

    try:
        afe = frontend.AFE()
        board = server_utils.get_board_from_afe(machine, afe)
        hosts = afe.get_hosts(hostname=machine)
        servo_host = hosts[0].attributes.get('servo_host', None)
        servo_port = hosts[0].attributes.get('servo_port', 9999)
        # Only manage servod for a locally attached servo.
        if not servo_host in ['localhost', '127.0.0.1']:
            return
    except (urllib2.HTTPError, urllib2.URLError):
        # Ignore error if RPC failed to get board
        logging.error('Failed to get board name from AFE. Start servod is '
                      'aborted')
        return

    try:
        pid = utils.run('pgrep servod').stdout
        cmd_line = utils.run('ps -fp %s' % pid).stdout
        if ('--board %s' % board in cmd_line and
            '--port %s' % servo_port in cmd_line):
            logging.debug('Servod is already running with given board and port.'
                          ' There is no need to restart servod.')
            return
        logging.debug('Servod is running with different board or port. '
                      'Stopping existing servod.')
        # Consistency fix: use the module-level STOP_SERVOD_CMD constant
        # (same literal command as before) instead of a duplicated string.
        utils.run(STOP_SERVOD_CMD)
    except error.CmdError:
        # servod is not running.
        pass

    try:
        utils.run(START_SERVOD_CMD % (board, servo_port))
        logging.debug('Servod is started')
    except error.CmdError as e:
        logging.error('Servod failed to be started, error: %s', e)
291
292
def run_autoserv(pid_file_manager, results, parser, ssp_url, use_ssp):
    """Run server job with given options.

    This function does not return on the normal path: it calls sys.exit()
    with the job's exit code once the run completes.

    @param pid_file_manager: PidFileManager used to monitor the autoserv
            process. May be None when no pidfile is being written.
    @param results: Folder to store results.
    @param parser: Parser for the command line arguments.
    @param ssp_url: Url to server-side package.
    @param use_ssp: Set to True to run with server-side packaging.
    """
    if parser.options.warn_no_ssp:
        # Post a warning in the log.
        logging.warn('Autoserv is required to run with server-side packaging. '
                     'However, no drone is found to support server-side '
                     'packaging. The test will be executed in a drone without '
                     'server-side packaging supported.')

    # send stdin to /dev/null
    dev_null = os.open(os.devnull, os.O_RDONLY)
    os.dup2(dev_null, sys.stdin.fileno())
    os.close(dev_null)

    # Create separate process group so the whole job tree can be killed at
    # once by handle_sigterm below.
    os.setpgrp()

    # Container name is predefined so the container can be destroyed in
    # handle_sigterm.
    job_or_task_id = job_directories.get_job_id_or_task_id(
            parser.options.results)
    container_name = (lxc.TEST_CONTAINER_NAME_FMT %
                      (job_or_task_id, time.time(), os.getpid()))

    # Implement SIGTERM handler
    def handle_sigterm(signum, frame):
        """Abort the run: record the signal in the pidfile, fix results
        ownership, destroy the test container (if any), then SIGKILL the
        whole process group.

        @param signum: Signal number received (signal.SIGTERM).
        @param frame: Current stack frame (unused).
        """
        logging.debug('Received SIGTERM')
        if pid_file_manager:
            pid_file_manager.close_file(1, signal.SIGTERM)
        logging.debug('Finished writing to pid_file. Killing process.')

        # Update results folder's file permission. This needs to be done ASAP
        # before the parsing process tries to access the log.
        if use_ssp and results:
            correct_results_folder_permission(results)

        # TODO (sbasi) - remove the time.sleep when crbug.com/302815 is solved.
        # This sleep allows the pending output to be logged before the kill
        # signal is sent.
        time.sleep(.1)
        if use_ssp:
            logging.debug('Destroy container %s before aborting the autoserv '
                          'process.', container_name)
            metadata = {'drone': socket.gethostname(),
                        'job_id': job_or_task_id,
                        'container_name': container_name,
                        'action': 'abort',
                        'success': True}
            try:
                bucket = lxc.ContainerBucket()
                container = bucket.get(container_name)
                if container:
                    container.destroy()
                else:
                    metadata['success'] = False
                    metadata['error'] = 'container not found'
                    logging.debug('Container %s is not found.', container_name)
            except:
                metadata['success'] = False
                metadata['error'] = 'Exception: %s' % sys.exc_info()
                # Handle any exception so the autoserv process can be aborted.
                logging.error('Failed to destroy container %s. Error: %s',
                              container_name, sys.exc_info())
            autotest_es.post(use_http=True,
                             type_str=lxc.CONTAINER_RUN_TEST_METADB_TYPE,
                             metadata=metadata)
            # Try to correct the result file permission again after the
            # container is destroyed, as the container might have created some
            # new files in the result folder.
            if results:
                correct_results_folder_permission(results)

        # SIGKILL the whole process group, including this process.
        os.killpg(os.getpgrp(), signal.SIGKILL)

    # Set signal handler
    signal.signal(signal.SIGTERM, handle_sigterm)

    # faulthandler is only needed to debug in the Lab and is not avaliable to
    # be imported in the chroot as part of VMTest, so Try-Except it.
    try:
        import faulthandler
        faulthandler.register(signal.SIGTERM, all_threads=True, chain=True)
        logging.debug('faulthandler registered on SIGTERM.')
    except ImportError:
        sys.exc_clear()

    # Ignore SIGTTOU's generated by output from forked children.
    signal.signal(signal.SIGTTOU, signal.SIG_IGN)

    # If we received a SIGALARM, let's be loud about it.
    signal.signal(signal.SIGALRM, log_alarm)

    # Server side tests that call shell scripts often depend on $USER being set
    # but depending on how you launch your autotest scheduler it may not be set.
    os.environ['USER'] = getpass.getuser()

    # Unpack the command line options used below.
    label = parser.options.label
    group_name = parser.options.group_name
    user = parser.options.user
    client = parser.options.client
    server = parser.options.server
    install_before = parser.options.install_before
    install_after = parser.options.install_after
    verify = parser.options.verify
    repair = parser.options.repair
    cleanup = parser.options.cleanup
    provision = parser.options.provision
    reset = parser.options.reset
    job_labels = parser.options.job_labels
    # NOTE(review): no_tee is unpacked but never used in this function.
    no_tee = parser.options.no_tee
    parse_job = parser.options.parse_job
    execution_tag = parser.options.execution_tag
    if not execution_tag:
        execution_tag = parse_job
    host_protection = parser.options.host_protection
    ssh_user = parser.options.ssh_user
    ssh_port = parser.options.ssh_port
    ssh_pass = parser.options.ssh_pass
    collect_crashinfo = parser.options.collect_crashinfo
    control_filename = parser.options.control_filename
    test_retry = parser.options.test_retry
    verify_job_repo_url = parser.options.verify_job_repo_url
    skip_crash_collection = parser.options.skip_crash_collection
    ssh_verbosity = int(parser.options.ssh_verbosity)
    ssh_options = parser.options.ssh_options
    no_use_packaging = parser.options.no_use_packaging

    # can't be both a client and a server side test
    if client and server:
        parser.parser.error("Can not specify a test as both server and client!")

    if provision and client:
        parser.parser.error("Cannot specify provisioning and client!")

    # Special tasks (verify/repair/...) do not take a control file.
    is_special_task = (verify or repair or cleanup or collect_crashinfo or
                       provision or reset)
    if len(parser.args) < 1 and not is_special_task:
        parser.parser.error("Missing argument: control file")

    if ssh_verbosity > 0:
        # ssh_verbosity is an integer between 0 and 3, inclusive
        ssh_verbosity_flag = '-' + 'v' * ssh_verbosity
    else:
        ssh_verbosity_flag = ''

    # We have a control file unless it's just a verify/repair/cleanup job
    if len(parser.args) > 0:
        control = parser.args[0]
    else:
        control = None

    machines = _get_machines(parser)
    if group_name and len(machines) < 2:
        parser.parser.error('-G %r may only be supplied with more than one '
                            'machine.' % group_name)

    kwargs = {'group_name': group_name, 'tag': execution_tag,
              'disable_sysinfo': parser.options.disable_sysinfo}
    if parser.options.parent_job_id:
        kwargs['parent_job_id'] = int(parser.options.parent_job_id)
    if control_filename:
        kwargs['control_filename'] = control_filename
    job = server_job.server_job(control, parser.args[1:], results, label,
                                user, machines, client, parse_job,
                                ssh_user, ssh_port, ssh_pass,
                                ssh_verbosity_flag, ssh_options,
                                test_retry, **kwargs)

    job.logging.start_logging()
    job.init_parser()

    # perform checks
    job.precheck()

    # run the job
    exit_code = 0
    try:
        try:
            if repair:
                job.repair(host_protection, job_labels)
            elif verify:
                job.verify(job_labels)
            elif provision:
                job.provision(job_labels)
            elif reset:
                job.reset(job_labels)
            elif cleanup:
                job.cleanup(job_labels)
            else:
                # Regular test job; optionally bring up servod first when the
                # config enables it and exactly one machine is targeted.
                auto_start_servod = _CONFIG.get_config_value(
                        'AUTOSERV', 'auto_start_servod', type=bool,
                        default=False)
                if auto_start_servod and len(machines) == 1:
                    _start_servod(machines[0])
                if use_ssp:
                    try:
                        _run_with_ssp(container_name, job_or_task_id, results,
                                      parser, ssp_url)
                    finally:
                        # Update the ownership of files in result folder.
                        correct_results_folder_permission(results)
                else:
                    job.run(install_before, install_after,
                            verify_job_repo_url=verify_job_repo_url,
                            only_collect_crashinfo=collect_crashinfo,
                            skip_crash_collection=skip_crash_collection,
                            job_labels=job_labels,
                            use_packaging=(not no_use_packaging))
        finally:
            # Always close host connections, even when the job raised.
            while job.hosts:
                host = job.hosts.pop()
                host.close()
    except:
        exit_code = 1
        traceback.print_exc()

    if pid_file_manager:
        pid_file_manager.num_tests_failed = job.num_tests_failed
        pid_file_manager.close_file(exit_code)
    job.cleanup_parser()

    sys.exit(exit_code)
522
523
def record_autoserv(options, duration_secs):
    """Record autoserv end-to-end time in metadata db.

    @param options: parser options.
    @param duration_secs: How long autoserv has taken, in secs.
    """
    # Figure out which machine (if any) this run targeted.
    if options.machines:
        machines = options.machines.replace(',', ' ').strip().split()
    else:
        machines = []
    num_machines = len(machines)
    if num_machines > 1:
        # Skip the case where atomic group is used.
        return
    if num_machines == 0:
        machines.append('hostless')

    # Map whichever special-task option was set (if any) to its status.
    s = job_overhead.STATUS
    task_mapping = {
            'reset': s.RESETTING, 'verify': s.VERIFYING,
            'provision': s.PROVISIONING, 'repair': s.REPAIRING,
            'cleanup': s.CLEANING, 'collect_crashinfo': s.GATHERING}
    matched = [task for task in task_mapping
               if getattr(options, task, False) == True]
    if matched:
        status = task_mapping[matched[0]]
    else:
        status = s.RUNNING
    is_special_task = status not in [s.RUNNING, s.GATHERING]
    job_or_task_id = job_directories.get_job_id_or_task_id(options.results)
    job_overhead.record_state_duration(
            job_or_task_id, machines[0], status, duration_secs,
            is_special_task=is_special_task)
554
555
def main():
    """Autoserv entry point: set up results/logging, then run the server job.

    Exits the process with the run's exit code.
    """
    start_time = datetime.datetime.now()
    # White list of tests with run time measurement enabled.
    measure_run_time_tests_names = _CONFIG.get_config_value(
            'AUTOSERV', 'measure_run_time_tests', type=str)
    if measure_run_time_tests_names:
        measure_run_time_tests = [t.strip() for t in
                                  measure_run_time_tests_names.split(',')]
    else:
        measure_run_time_tests = []
    # grab the parser
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    # If the job requires to run with server-side package, try to stage server-
    # side package first. If that fails with error that autotest server package
    # does not exist, fall back to run the job without using server-side
    # packaging. If option warn_no_ssp is specified, that means autoserv is
    # running in a drone does not support SSP, thus no need to stage server-side
    # package.
    ssp_url = None
    ssp_url_warning = False
    if (not parser.options.warn_no_ssp and parser.options.require_ssp):
        ssp_url = _stage_ssp(parser)
        # The build does not have autotest server package. Fall back to not
        # to use server-side package. Logging is postponed until logging being
        # set up.
        ssp_url_warning = not ssp_url

    if parser.options.no_logging:
        results = None
    else:
        results = parser.options.results
        if not results:
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
        results  = os.path.abspath(results)
        resultdir_exists = False
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            # NOTE(review): this local name shadows the imported `error`
            # module for the rest of main(); nothing below appears to use the
            # module, but renaming the local would be safer.
            error = "Error: results directory already exists: %s\n" % results
            sys.stderr.write(error)
            sys.exit(1)

        # Now that we certified that there's no leftover results dir from
        # previous jobs, lets create the result dir since the logging system
        # needs to create the log file in there.
        if not os.path.isdir(results):
            os.makedirs(results)

    # Server-side packaging will only be used if it's required and the package
    # is available. If warn_no_ssp is specified, it means that autoserv is
    # running in a drone does not have SSP supported and a warning will be logs.
    # Therefore, it should not run with SSP.
    use_ssp = (not parser.options.warn_no_ssp and parser.options.require_ssp
               and ssp_url)
    if use_ssp:
        # Keep SSP logs in a subdirectory so they don't mix with job results.
        log_dir = os.path.join(results, 'ssp_logs') if results else None
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir)
    else:
        log_dir = results

    logging_manager.configure_logging(
            server_logging_config.ServerLoggingConfig(),
            results_dir=log_dir,
            use_console=not parser.options.no_tee,
            verbose=parser.options.verbose,
            no_console_prefix=parser.options.no_console_prefix)

    # Logging is up now; emit the warning postponed from the staging step.
    if ssp_url_warning:
        logging.warn(
                'Autoserv is required to run with server-side packaging. '
                'However, no server-side package can be found based on '
                '`--image`, host attribute job_repo_url or host label of '
                'cros-version. The test will be executed without '
                'server-side packaging supported.')

    if results:
        logging.info("Results placed in %s" % results)

        # wait until now to perform this check, so it get properly logged
        if (parser.options.use_existing_results and not resultdir_exists and
            not utils.is_in_container()):
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)

    logging.debug('autoserv is running in drone %s.', socket.gethostname())
    logging.debug('autoserv command was: %s', ' '.join(sys.argv))

    if parser.options.write_pidfile and results:
        pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
                                                  results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest.BaseAutotest.set_install_in_tmpdir(
        parser.options.install_in_tmpdir)

    timer = None
    try:
        # Take the first argument as control file name, get the test name from
        # the control file. If the test name exists in the list of tests with
        # run time measurement enabled, start a timer to begin measurement.
        if (len(parser.args) > 0 and parser.args[0] != '' and
            parser.options.machines):
            try:
                test_name = control_data.parse_control(parser.args[0],
                                                       raise_warnings=True).name
            except control_data.ControlVariableException:
                logging.debug('Failed to retrieve test name from control file.')
                test_name = None
            if test_name in measure_run_time_tests:
                machines = parser.options.machines.replace(',', ' '
                                                           ).strip().split()
                try:
                    afe = frontend.AFE()
                    board = server_utils.get_board_from_afe(machines[0], afe)
                    timer = autotest_stats.Timer('autoserv_run_time.%s.%s' %
                                                 (board, test_name))
                    timer.start()
                except (urllib2.HTTPError, urllib2.URLError):
                    # Ignore error if RPC failed to get board
                    pass
    # NOTE(review): the inner try above already catches
    # ControlVariableException at the parse site, so this outer handler looks
    # unreachable -- confirm before relying on it.
    except control_data.ControlVariableException as e:
        logging.error(str(e))
    exit_code = 0
    # TODO(beeps): Extend this to cover different failure modes.
    # Testing exceptions are matched against labels sent to autoserv. Eg,
    # to allow only the hostless job to run, specify
    # testing_exceptions: test_suite in the shadow_config. To allow both
    # the hostless job and dummy_Pass to run, specify
    # testing_exceptions: test_suite,dummy_Pass. You can figure out
    # what label autoserv is invoked with by looking through the logs of a test
    # for the autoserv command's -l option.
    testing_exceptions = _CONFIG.get_config_value(
            'AUTOSERV', 'testing_exceptions', type=list, default=[])
    test_mode = _CONFIG.get_config_value(
            'AUTOSERV', 'testing_mode', type=bool, default=False)
    test_mode = (results_mocker and test_mode and not
                 any([ex in parser.options.label
                      for ex in testing_exceptions]))
    is_task = (parser.options.verify or parser.options.repair or
               parser.options.provision or parser.options.reset or
               parser.options.cleanup or parser.options.collect_crashinfo)
    try:
        try:
            if test_mode:
                # The parser doesn't run on tasks anyway, so we can just return
                # happy signals without faking results.
                if not is_task:
                    machine = parser.options.results.split('/')[-1]

                    # TODO(beeps): The proper way to do this would be to
                    # refactor job creation so we can invoke job.record
                    # directly. To do that one needs to pipe the test_name
                    # through run_autoserv and bail just before invoking
                    # the server job. See the comment in
                    # puppylab/results_mocker for more context.
                    # NOTE(review): test_name is only bound when the timer
                    # block above ran (control file and machines supplied);
                    # otherwise this raises NameError -- confirm test_mode
                    # always runs with both.
                    results_mocker.ResultsMocker(
                            test_name if test_name else 'unknown-test',
                            parser.options.results, machine
                            ).mock_results()
                return
            else:
                run_autoserv(pid_file_manager, results, parser, ssp_url,
                             use_ssp)
        except SystemExit as e:
            exit_code = e.code
            if exit_code:
                logging.exception(e)
        except Exception as e:
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            logging.exception(e)
            exit_code = 1
    finally:
        if pid_file_manager:
            pid_file_manager.close_file(exit_code)
        if timer:
            timer.stop()
        # Record the autoserv duration time. Must be called
        # just before the system exits to ensure accuracy.
        duration_secs = (datetime.datetime.now() - start_time).total_seconds()
        record_autoserv(parser.options, duration_secs)
    sys.exit(exit_code)
748
749
# Script entry point.
if __name__ == '__main__':
    main()
752