#!/usr/bin/python -u
# Copyright 2007-2008 Martin J. Bligh <mbligh@google.com>, Google Inc.
# Released under the GPL v2

"""
Run a control file through the server-side engine.
"""

import datetime
import getpass
import logging
import os
import re
import signal
import socket
import sys
import time
import traceback
import urllib2


import common
from autotest_lib.client.common_lib import control_data
from autotest_lib.client.common_lib import error
from autotest_lib.client.common_lib import global_config
from autotest_lib.client.common_lib import utils
from autotest_lib.client.common_lib.cros.graphite import autotest_es

try:
    from chromite.lib import metrics
except ImportError:
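    # Without chromite (e.g. when running outside the chroot), fall back to
    # the mock object so metrics calls become no-ops.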
    metrics = utils.metrics_mock

try:
    from autotest_lib.puppylab import results_mocker
except ImportError:
    results_mocker = None

_CONFIG = global_config.global_config


# Number of seconds to wait before returning if testing mode is enabled
TESTING_MODE_SLEEP_SECS = 1


from autotest_lib.server import frontend
from autotest_lib.server import server_logging_config
from autotest_lib.server import server_job, utils, autoserv_parser, autotest
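# NOTE: the 'utils' import above rebinds the name imported earlier from
# autotest_lib.client.common_lib; references below resolve to server utils.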
from autotest_lib.server import utils as server_utils
from autotest_lib.server import site_utils
from autotest_lib.server.cros.dynamic_suite import frontend_wrappers
from autotest_lib.site_utils import job_directories
from autotest_lib.site_utils import job_overhead
from autotest_lib.site_utils import lxc
from autotest_lib.site_utils import lxc_utils
from autotest_lib.client.common_lib import pidfile, logging_manager


# Control segment to stage server-side package.
STAGE_SERVER_SIDE_PACKAGE_CONTROL_FILE = server_job._control_segment_path(
        'stage_server_side_package')

# Command line to start servod in a moblab.
START_SERVOD_CMD = 'sudo start servod BOARD=%s PORT=%s'
STOP_SERVOD_CMD = 'sudo stop servod'

def log_alarm(signum, frame):
    logging.error("Received SIGALRM. Exiting with status 1.")
    sys.exit(1)


def _get_machines(parser):
    """Get a list of machine names from the command line arg -m or a file.

    @param parser: Parser for the command line arguments.

    @return: A list of machine names from the command line arg -m or the
             machines file specified in the command line arg -M.
    """
    if parser.options.machines:
        machines = parser.options.machines.replace(',', ' ').strip().split()
    else:
        machines = []
    machines_file = parser.options.machines_file
    if machines_file:
        machines = []
        with open(machines_file, 'r') as f:
            for m in f:
                # Remove comments and surrounding whitespace.
                m = re.sub('#.*', '', m).strip()
                if m:
                    machines.append(m)
        logging.debug('Read list of machines from file: %s', machines_file)
        logging.debug('Machines: %s', ','.join(machines))

    if machines:
        for machine in machines:
            if not machine or re.search(r'\s', machine):
                parser.parser.error("Invalid machine: %s" % str(machine))
        machines = list(set(machines))
        machines.sort()
    return machines
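# Illustrative behavior of _get_machines (hypothetical values):
#   -m 'host1,host2 host3'  -> ['host1', 'host2', 'host3']
#   -M machines.txt         -> one machine per line, '#' comments stripped
# In both cases duplicates are removed and the list is sorted.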


def _stage_ssp(parser):
    """Stage the server-side package.

    This function calls a control segment to stage the server-side package
    based on the job and the autoserv command line options. The detailed
    implementation may differ for each host type. Currently, only CrosHost has
    a stage_server_side_package function defined.
    The script returns None if no server-side package is available. However,
    it may raise an exception if staging failed for a reason other than the
    artifact (the server-side package) not being found.

    @param parser: Command line arguments parser passed in the autoserv
                   process.

    @return: (ssp_url, error_msg), where
              ssp_url is a url to the autotest server-side package. None if
              the server-side package is not supported.
              error_msg is a string describing the failure. None if the
              server-side package is staged successfully.
    """
    machines_list = _get_machines(parser)
    machines_list = server_job.get_machine_dicts(
            machines_list, parser.options.lab, parser.options.host_attributes)

    # If test_source_build is not specified, default to using server-side test
    # code from the build specified in --image.
    namespace = {'machines': machines_list,
                 'image': (parser.options.test_source_build or
                           parser.options.image)}
    script_locals = {}
    execfile(STAGE_SERVER_SIDE_PACKAGE_CONTROL_FILE, namespace, script_locals)
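    # The staged control segment is expected to populate both 'ssp_url' and
    # 'error_msg' in script_locals; a missing key here raises KeyError.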
    return script_locals['ssp_url'], script_locals['error_msg']


def _run_with_ssp(job, container_name, job_id, results, parser, ssp_url,
                  job_folder, machines):
    """Run the server job with server-side packaging.

    @param job: The server job object.
    @param container_name: Name of the container to run the test.
    @param job_id: ID of the test job.
    @param results: Folder to store results. This could be different from
                    parser.options.results:
                    parser.options.results can be set to None for results to
                    be stored in a temp folder.
                    results can be None when the autoserv run requires no
                    logging.
    @param parser: Command line parser that contains the options.
    @param ssp_url: Url of the staged server-side package.
    @param job_folder: Name of the job result folder.
    @param machines: A list of machines to run the test on.
    """
    bucket = lxc.ContainerBucket()
    control = (parser.args[0] if len(parser.args) > 0 and parser.args[0] != ''
               else None)
    try:
        dut_name = machines[0] if len(machines) >= 1 else None
        test_container = bucket.setup_test(container_name, job_id, ssp_url,
                                           results, control=control,
                                           job_folder=job_folder,
                                           dut_name=dut_name)
    except Exception as e:
        job.record('FAIL', None, None,
                   'Failed to setup container for test: %s. Check logs in '
                   'ssp_logs folder for more details.' % e)
        raise

    args = sys.argv[:]
    args.remove('--require-ssp')
    # --parent_job_id is only useful to autoserv running on the host, not in
    # the container. Including this argument will cause the test to fail for
    # builds before CL 286265 was merged.
    if '--parent_job_id' in args:
        index = args.index('--parent_job_id')
        args.remove('--parent_job_id')
        # Remove the actual parent job id from the command line args.
        del args[index]

    # A dictionary of paths to replace in the command line. Each key is a path
    # to be replaced with the corresponding value.
    paths_to_replace = {}
    # Replace the control file path with the one in the container.
    if control:
        container_control_filename = os.path.join(
                lxc.CONTROL_TEMP_PATH, os.path.basename(control))
        paths_to_replace[control] = container_control_filename
    # Update the result directory with the one in the container.
    container_result_dir = lxc.RESULT_DIR_FMT % job_folder
    if parser.options.results:
        paths_to_replace[parser.options.results] = container_result_dir
    # Update the parse_job directory with the one in the container. The
    # assumption is that the result folder to be parsed is always the same as
    # the results dir.
    if parser.options.parse_job:
        paths_to_replace[parser.options.parse_job] = container_result_dir

    args = [paths_to_replace.get(arg, arg) for arg in args]
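    # Note: only arguments that exactly match a key in paths_to_replace are
    # rewritten; a path embedded inside a larger argument string is left
    # untouched.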

    # Apply --use-existing-results: the results directory is already created
    # and mounted in the container. Passing this flag avoids an exception
    # being raised.
    if '--use-existing-results' not in args:
        args.append('--use-existing-results')

    # Make sure autoserv running in the container uses a different pid file.
    if '--pidfile-label' not in args:
        args.extend(['--pidfile-label', 'container_autoserv'])

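    # Quote arguments that contain spaces. Other shell metacharacters are not
    # escaped here; autoserv arguments are assumed to be simple tokens.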
    cmd_line = ' '.join(["'%s'" % arg if ' ' in arg else arg for arg in args])
    logging.info('Run command in container: %s', cmd_line)
    success = False
    try:
        test_container.attach_run(cmd_line)
        success = True
    except Exception as e:
        # If the test run inside the container fails without generating any
        # log, write a message to status.log to help troubleshooting.
        debug_files = os.listdir(os.path.join(results, 'debug'))
        if not debug_files:
            job.record('FAIL', None, None,
                       'Failed to run test inside the container: %s. Check '
                       'logs in ssp_logs folder for more details.' % e)
        raise
    finally:
        metrics.Counter(
            'chromeos/autotest/experimental/execute_job_in_ssp').increment(
                fields={'success': success})
        # Metadata is posted separately so that it can be uploaded over HTTP.
        metadata = {'drone': socket.gethostname(),
                    'job_id': job_id,
                    'success': success}
        autotest_es.post(use_http=True,
                         type_str=lxc.CONTAINER_RUN_TEST_METADB_TYPE,
                         metadata=metadata)
        test_container.destroy()


def correct_results_folder_permission(results):
    """Make sure the results folder has the right permission settings.

    For tests running with server-side packaging, the results folder is owned
    by root. This must be changed to the user running the autoserv process, so
    the parsing job can access the results folder.
    TODO(dshi): crbug.com/459344 Remove this function when the test container
    can be an unprivileged container.

    @param results: Path to the results folder.

    """
    if not results:
        return

    try:
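        # 'sudo -n' runs non-interactively: if a password would be required,
        # the command fails immediately and surfaces as a CmdError below
        # instead of hanging on a prompt.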
        utils.run('sudo -n chown -R %s "%s"' % (os.getuid(), results))
        utils.run('sudo -n chgrp -R %s "%s"' % (os.getgid(), results))
    except error.CmdError as e:
        metadata = {'error': str(e),
                    'result_folder': results,
                    'drone': socket.gethostname()}
        autotest_es.post(use_http=True,
                         type_str='correct_results_folder_failure',
                         metadata=metadata)
        raise


def _start_servod(machine):
    """Try to start servod in moblab if it's not already running, or restart
    it if it is running with a different board or port.

    @param machine: Name of the DUT used for the test.
    """
    if not utils.is_moblab():
        return

    logging.debug('Trying to start servod.')
    try:
        afe = frontend.AFE()
        board = server_utils.get_board_from_afe(machine, afe)
        hosts = afe.get_hosts(hostname=machine)
        servo_host = hosts[0].attributes.get('servo_host', None)
        servo_port = hosts[0].attributes.get('servo_port', 9999)
        if servo_host not in ['localhost', '127.0.0.1']:
            logging.warn('Starting servod is aborted. The DUT\'s servo_host '
                         'attribute is not set to localhost.')
            return
    except (urllib2.HTTPError, urllib2.URLError):
        # Ignore the error if the RPC failed to get the board.
        logging.error('Failed to get board name from AFE. Starting servod is '
                      'aborted.')
        return

    try:
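        # pgrep exits non-zero when no process matches; utils.run raises that
        # as a CmdError, which is caught below to mean servod isn't running.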
        pid = utils.run('pgrep servod').stdout
        cmd_line = utils.run('ps -fp %s' % pid).stdout
        if ('--board %s' % board in cmd_line and
            '--port %s' % servo_port in cmd_line):
            logging.debug('Servod is already running with the given board and '
                          'port. There is no need to restart servod.')
            return
        logging.debug('Servod is running with a different board or port. '
                      'Stopping existing servod.')
        utils.run(STOP_SERVOD_CMD)
    except error.CmdError:
        # servod is not running.
        pass

    try:
        utils.run(START_SERVOD_CMD % (board, servo_port))
        logging.debug('Servod is started.')
    except error.CmdError as e:
        logging.error('Servod failed to start, error: %s', e)


def run_autoserv(pid_file_manager, results, parser, ssp_url, use_ssp):
    """Run the server job with the given options.

    @param pid_file_manager: PidFileManager used to monitor the autoserv
                             process.
    @param results: Folder to store results.
    @param parser: Parser for the command line arguments.
    @param ssp_url: Url of the server-side package.
    @param use_ssp: Set to True to run with server-side packaging.
    """
    if parser.options.warn_no_ssp:
        # Post a warning in the log.
        logging.warn('Autoserv is required to run with server-side packaging, '
                     'but no drone supporting server-side packaging was '
                     'found. The test will be executed on a drone without '
                     'server-side packaging support.')

    # Send stdin to /dev/null.
    dev_null = os.open(os.devnull, os.O_RDONLY)
    os.dup2(dev_null, sys.stdin.fileno())
    os.close(dev_null)

    # Create a separate process group if the process is not a process group
    # leader. This allows the autoserv process to keep running after the
    # caller process (drone manager call) exits.
    if os.getpid() != os.getpgid(0):
        os.setsid()

    # The container name is predefined so the container can be destroyed in
    # handle_sigterm.
    job_or_task_id = job_directories.get_job_id_or_task_id(
            parser.options.results)
    container_name = (lxc.TEST_CONTAINER_NAME_FMT %
                      (job_or_task_id, time.time(), os.getpid()))
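    # Embedding the timestamp and the pid keeps container names unique even
    # when the same job id is run more than once on a drone.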
    job_folder = job_directories.get_job_folder_name(parser.options.results)

    # Implement SIGTERM handler
    def handle_sigterm(signum, frame):
        logging.debug('Received SIGTERM')
        if pid_file_manager:
            pid_file_manager.close_file(1, signal.SIGTERM)
        logging.debug('Finished writing to pid_file. Killing process.')

        # Update results folder's file permission. This needs to be done ASAP
        # before the parsing process tries to access the log.
        if use_ssp and results:
            correct_results_folder_permission(results)

        # TODO (sbasi) - remove the time.sleep when crbug.com/302815 is solved.
        # This sleep allows the pending output to be logged before the kill
        # signal is sent.
        time.sleep(.1)
        if use_ssp:
            logging.debug('Destroy container %s before aborting the autoserv '
                          'process.', container_name)
            metadata = {'drone': socket.gethostname(),
                        'job_id': job_or_task_id,
                        'container_name': container_name,
                        'action': 'abort',
                        'success': True}
            try:
                bucket = lxc.ContainerBucket()
                container = bucket.get(container_name)
                if container:
                    container.destroy()
                else:
                    metadata['success'] = False
                    metadata['error'] = 'container not found'
                    logging.debug('Container %s is not found.', container_name)
            except:
                metadata['success'] = False
                metadata['error'] = 'Exception: %s' % str(sys.exc_info())
                # Handle any exception so the autoserv process can be aborted.
                logging.exception('Failed to destroy container %s.',
                                  container_name)
            autotest_es.post(use_http=True,
                             type_str=lxc.CONTAINER_RUN_TEST_METADB_TYPE,
                             metadata=metadata)
            # Try to correct the result file permission again after the
            # container is destroyed, as the container might have created some
            # new files in the result folder.
            if results:
                correct_results_folder_permission(results)

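        # SIGKILL the whole process group so any children forked by autoserv
        # (e.g. ssh sessions) are torn down together with this process.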
        os.killpg(os.getpgrp(), signal.SIGKILL)

    # Set signal handler
    signal.signal(signal.SIGTERM, handle_sigterm)

    # faulthandler is only needed to debug in the Lab and is not available to
    # be imported in the chroot as part of VMTest, so try/except it.
    try:
        import faulthandler
        faulthandler.register(signal.SIGTERM, all_threads=True, chain=True)
        logging.debug('faulthandler registered on SIGTERM.')
    except ImportError:
        sys.exc_clear()

    # Ignore SIGTTOUs generated by output from forked children.
    signal.signal(signal.SIGTTOU, signal.SIG_IGN)

    # If we receive a SIGALRM, let's be loud about it.
    signal.signal(signal.SIGALRM, log_alarm)

    # Server-side tests that call shell scripts often depend on $USER being
    # set, but depending on how you launch your autotest scheduler it may not
    # be set.
    os.environ['USER'] = getpass.getuser()

    label = parser.options.label
    group_name = parser.options.group_name
    user = parser.options.user
    client = parser.options.client
    server = parser.options.server
    install_before = parser.options.install_before
    install_after = parser.options.install_after
    verify = parser.options.verify
    repair = parser.options.repair
    cleanup = parser.options.cleanup
    provision = parser.options.provision
    reset = parser.options.reset
    job_labels = parser.options.job_labels
    no_tee = parser.options.no_tee
    parse_job = parser.options.parse_job
    execution_tag = parser.options.execution_tag
    if not execution_tag:
        execution_tag = parse_job
    ssh_user = parser.options.ssh_user
    ssh_port = parser.options.ssh_port
    ssh_pass = parser.options.ssh_pass
    collect_crashinfo = parser.options.collect_crashinfo
    control_filename = parser.options.control_filename
    test_retry = parser.options.test_retry
    verify_job_repo_url = parser.options.verify_job_repo_url
    skip_crash_collection = parser.options.skip_crash_collection
    ssh_verbosity = int(parser.options.ssh_verbosity)
    ssh_options = parser.options.ssh_options
    no_use_packaging = parser.options.no_use_packaging
    host_attributes = parser.options.host_attributes
    in_lab = bool(parser.options.lab)

    # A test can't be both a client- and a server-side test.
    if client and server:
        parser.parser.error("Cannot specify a test as both server and client!")

    if provision and client:
        parser.parser.error("Cannot specify provisioning and client!")

    is_special_task = (verify or repair or cleanup or collect_crashinfo or
                       provision or reset)
    if len(parser.args) < 1 and not is_special_task:
        parser.parser.error("Missing argument: control file")

    if ssh_verbosity > 0:
        # ssh_verbosity is an integer between 0 and 3, inclusive
        ssh_verbosity_flag = '-' + 'v' * ssh_verbosity
    else:
        ssh_verbosity_flag = ''

    # We have a control file unless it's just a verify/repair/cleanup job
    if len(parser.args) > 0:
        control = parser.args[0]
    else:
        control = None

    machines = _get_machines(parser)
    if group_name and len(machines) < 2:
        parser.parser.error('-G %r may only be supplied with more than one '
                            'machine.' % group_name)

    kwargs = {'group_name': group_name, 'tag': execution_tag,
              'disable_sysinfo': parser.options.disable_sysinfo}
    if parser.options.parent_job_id:
        kwargs['parent_job_id'] = int(parser.options.parent_job_id)
    if control_filename:
        kwargs['control_filename'] = control_filename
    if host_attributes:
        kwargs['host_attributes'] = host_attributes
    kwargs['in_lab'] = in_lab
    job = server_job.server_job(control, parser.args[1:], results, label,
                                user, machines, client, parse_job,
                                ssh_user, ssh_port, ssh_pass,
                                ssh_verbosity_flag, ssh_options,
                                test_retry, **kwargs)

    job.logging.start_logging()
    job.init_parser()

    # perform checks
    job.precheck()

    # run the job
    exit_code = 0
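    # auto_start_servod comes from the [AUTOSERV] section of the global
    # config; an illustrative shadow_config.ini entry would be:
    #   [AUTOSERV]
    #   auto_start_servod: True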
    auto_start_servod = _CONFIG.get_config_value(
            'AUTOSERV', 'auto_start_servod', type=bool, default=False)

    site_utils.SetupTsMonGlobalState('autoserv', indirect=False,
                                     short_lived=True)
    try:
        try:
            if repair:
                if auto_start_servod and len(machines) == 1:
                    _start_servod(machines[0])
                job.repair(job_labels)
            elif verify:
                job.verify(job_labels)
            elif provision:
                job.provision(job_labels)
            elif reset:
                job.reset(job_labels)
            elif cleanup:
                job.cleanup(job_labels)
            else:
                if auto_start_servod and len(machines) == 1:
                    _start_servod(machines[0])
                if use_ssp:
                    try:
                        _run_with_ssp(job, container_name, job_or_task_id,
                                      results, parser, ssp_url, job_folder,
                                      machines)
                    finally:
                        # Update the ownership of files in the result folder.
                        correct_results_folder_permission(results)
                else:
                    if collect_crashinfo:
                        # Update the ownership of files in the result folder.
                        # If the job to collect crashinfo was running inside a
                        # container (SSP) and crashed before correcting the
                        # folder permission, the result folder might have the
                        # wrong permission setting.
                        try:
                            correct_results_folder_permission(results)
                        except:
                            # Ignore any error as the user may not have root
                            # permission to run the sudo command.
                            pass
                    metric_name = ('chromeos/autotest/experimental/'
                                   'autoserv_job_run_duration')
                    f = {'in_container': utils.is_in_container(),
                         'success': False}
                    with metrics.SecondsTimer(metric_name, fields=f) as c:
                        job.run(install_before, install_after,
                                verify_job_repo_url=verify_job_repo_url,
                                only_collect_crashinfo=collect_crashinfo,
                                skip_crash_collection=skip_crash_collection,
                                job_labels=job_labels,
                                use_packaging=(not no_use_packaging))
                        c['success'] = True

        finally:
            while job.hosts:
                host = job.hosts.pop()
                host.close()
    except:
        exit_code = 1
        traceback.print_exc()
    finally:
        metrics.Flush()

    if pid_file_manager:
        pid_file_manager.num_tests_failed = job.num_tests_failed
        pid_file_manager.close_file(exit_code)
    job.cleanup_parser()

    sys.exit(exit_code)


def record_autoserv(options, duration_secs):
    """Record autoserv end-to-end time in the metadata db.

    @param options: parser options.
    @param duration_secs: How long autoserv has taken, in seconds.
    """
    # Get machine hostname
    machines = options.machines.replace(
            ',', ' ').strip().split() if options.machines else []
    num_machines = len(machines)
    if num_machines > 1:
        # Skip the case where an atomic group is used.
        return
    elif num_machines == 0:
        machines.append('hostless')

    # Determine the status that will be reported.
    s = job_overhead.STATUS
    task_mapping = {
            'reset': s.RESETTING, 'verify': s.VERIFYING,
            'provision': s.PROVISIONING, 'repair': s.REPAIRING,
            'cleanup': s.CLEANING, 'collect_crashinfo': s.GATHERING}
    match = [task for task in task_mapping if getattr(options, task, False)]
    status = task_mapping[match[0]] if match else s.RUNNING
    is_special_task = status not in [s.RUNNING, s.GATHERING]
    job_or_task_id = job_directories.get_job_id_or_task_id(options.results)
    job_overhead.record_state_duration(
            job_or_task_id, machines[0], status, duration_secs,
            is_special_task=is_special_task)


def main():
    start_time = datetime.datetime.now()
    # Grab the parser.
    parser = autoserv_parser.autoserv_parser
    parser.parse_args()

    if len(sys.argv) == 1:
        parser.parser.print_help()
        sys.exit(1)

    # If the job is required to run with a server-side package, try to stage
    # the server-side package first. If staging fails because the autotest
    # server package does not exist, fall back to running the job without
    # server-side packaging. If the warn_no_ssp option is specified, autoserv
    # is running on a drone that does not support SSP, so there is no need to
    # stage the server-side package.
    ssp_url = None
    ssp_url_warning = False
    if not parser.options.warn_no_ssp and parser.options.require_ssp:
        ssp_url, ssp_error_msg = _stage_ssp(parser)
        # If the build does not have an autotest server package, fall back to
        # not using the server-side package. Logging of the warning is
        # postponed until logging is set up.
        ssp_url_warning = not ssp_url

    if parser.options.no_logging:
        results = None
    else:
        results = parser.options.results
        if not results:
            results = 'results.' + time.strftime('%Y-%m-%d-%H.%M.%S')
        results = os.path.abspath(results)
        resultdir_exists = False
        for filename in ('control.srv', 'status.log', '.autoserv_execute'):
            if os.path.exists(os.path.join(results, filename)):
                resultdir_exists = True
        if not parser.options.use_existing_results and resultdir_exists:
            msg = "Error: results directory already exists: %s\n" % results
            sys.stderr.write(msg)
            sys.exit(1)

        # Now that we've verified that there's no leftover results dir from
        # previous jobs, create the result dir, since the logging system needs
        # to create the log file there.
        if not os.path.isdir(results):
            os.makedirs(results)

    # Server-side packaging will only be used if it's required and the package
    # is available. If warn_no_ssp is specified, it means that autoserv is
    # running on a drone that does not support SSP, and a warning will be
    # logged. Therefore, it should not run with SSP.
    use_ssp = (not parser.options.warn_no_ssp and parser.options.require_ssp
               and ssp_url)
    if use_ssp:
        log_dir = os.path.join(results, 'ssp_logs') if results else None
        if log_dir and not os.path.exists(log_dir):
            os.makedirs(log_dir)
    else:
        log_dir = results

    logging_manager.configure_logging(
            server_logging_config.ServerLoggingConfig(),
            results_dir=log_dir,
            use_console=not parser.options.no_tee,
            verbose=parser.options.verbose,
            no_console_prefix=parser.options.no_console_prefix)

    if ssp_url_warning:
        logging.warn(
                'Autoserv is required to run with server-side packaging. '
                'However, no server-side package can be found based on '
                '`--image`, the host attribute job_repo_url or the host OS '
                'version label. It could be that the build to test is older '
                'than the minimum version that supports server-side '
                'packaging. The test will be executed without using '
                'server-side packaging. Following is the detailed error:\n%s',
                ssp_error_msg)

    if results:
        logging.info("Results placed in %s", results)

        # Wait until now to perform this check, so it gets properly logged.
        if (parser.options.use_existing_results and not resultdir_exists and
            not utils.is_in_container()):
            logging.error("No existing results directory found: %s", results)
            sys.exit(1)

    logging.debug('autoserv is running in drone %s.', socket.gethostname())
    logging.debug('autoserv command was: %s', ' '.join(sys.argv))

    if parser.options.write_pidfile and results:
        pid_file_manager = pidfile.PidFileManager(parser.options.pidfile_label,
                                                  results)
        pid_file_manager.open_file()
    else:
        pid_file_manager = None

    autotest.BaseAutotest.set_install_in_tmpdir(
        parser.options.install_in_tmpdir)

    # Take the first argument as the control file name and get the test name
    # from the control file. Initialize test_name first so that it is defined
    # even when no control file or machines are given (it is referenced later
    # in testing mode).
    test_name = None
    try:
        if (len(parser.args) > 0 and parser.args[0] != '' and
            parser.options.machines):
            test_name = control_data.parse_control(parser.args[0],
                                                   raise_warnings=True).name
    except control_data.ControlVariableException as e:
        logging.debug('Failed to retrieve test name from control file: %s', e)
    exit_code = 0
    # TODO(beeps): Extend this to cover different failure modes.
    # Testing exceptions are matched against labels sent to autoserv. E.g.,
    # to allow only the hostless job to run, specify
    # testing_exceptions: test_suite in the shadow_config. To allow both
    # the hostless job and dummy_Pass to run, specify
    # testing_exceptions: test_suite,dummy_Pass. You can figure out
    # what label autoserv is invoked with by looking through the logs of a test
    # for the autoserv command's -l option.
    testing_exceptions = _CONFIG.get_config_value(
            'AUTOSERV', 'testing_exceptions', type=list, default=[])
    test_mode = _CONFIG.get_config_value(
            'AUTOSERV', 'testing_mode', type=bool, default=False)
    test_mode = (results_mocker and test_mode and not
                 any(ex in parser.options.label
                     for ex in testing_exceptions))
    is_task = (parser.options.verify or parser.options.repair or
               parser.options.provision or parser.options.reset or
               parser.options.cleanup or parser.options.collect_crashinfo)
    try:
        try:
            if test_mode:
                # The parser doesn't run on tasks anyway, so we can just return
                # happy signals without faking results.
                if not is_task:
                    machine = parser.options.results.split('/')[-1]

                    # TODO(beeps): The proper way to do this would be to
                    # refactor job creation so we can invoke job.record
                    # directly. To do that one needs to pipe the test_name
                    # through run_autoserv and bail just before invoking
                    # the server job. See the comment in
                    # puppylab/results_mocker for more context.
                    results_mocker.ResultsMocker(
                            test_name if test_name else 'unknown-test',
                            parser.options.results, machine
                            ).mock_results()
                return
            else:
                run_autoserv(pid_file_manager, results, parser, ssp_url,
                             use_ssp)
        except SystemExit as e:
            exit_code = e.code
            if exit_code:
                logging.exception(e)
        except Exception as e:
            # If we don't know what happened, we'll classify it as
            # an 'abort' and return 1.
            logging.exception(e)
            exit_code = 1
    finally:
        if pid_file_manager:
            pid_file_manager.close_file(exit_code)
        # Record the autoserv duration time. Must be called
        # just before the system exits to ensure accuracy.
        duration_secs = (datetime.datetime.now() - start_time).total_seconds()
        record_autoserv(parser.options, duration_secs)
    sys.exit(exit_code)


if __name__ == '__main__':
    main()