1# Copyright (c) 2017 The Chromium Authors. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""
6Convenience functions for use by tests or whomever.
7
8There's no really good way to do this, as this isn't a class we can do
9inheritance with, just a collection of static methods.
10"""
11
12# pylint: disable=missing-docstring
13
14import StringIO
15import errno
16import inspect
17import itertools
18import logging
19import os
20import pickle
21import random
22import re
23import resource
24import select
25import shutil
26import signal
27import socket
28import string
29import struct
30import subprocess
31import textwrap
32import time
33import urllib2
34import urlparse
35import uuid
36import warnings
37
38try:
39    import hashlib
40except ImportError:
41    import md5
42    import sha
43
44import common
45
46from autotest_lib.client.common_lib import env
47from autotest_lib.client.common_lib import error
48from autotest_lib.client.common_lib import global_config
49from autotest_lib.client.common_lib import logging_manager
50from autotest_lib.client.common_lib import metrics_mock_class
51from autotest_lib.client.cros import constants
52
53from autotest_lib.client.common_lib.lsbrelease_utils import *
54
55
def deprecated(func):
    """Decorator which marks a function as deprecated.

    Calling the wrapped function emits a DeprecationWarning before
    delegating to |func| unchanged.

    @param func: The function to mark as deprecated.
    @return: A wrapper with |func|'s metadata that warns on every call.
    """
    # Local import keeps the module's import block untouched.
    import functools

    # functools.wraps copies __name__, __doc__, __dict__ like the old
    # hand-rolled version did, and additionally preserves __module__.
    @functools.wraps(func)
    def new_func(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    return new_func
67
68
69class _NullStream(object):
70    def write(self, data):
71        pass
72
73
74    def flush(self):
75        pass
76
77
# Sentinel: tee a stream into the logging system (see get_stream_tee_file).
TEE_TO_LOGS = object()
# Shared no-op stream handed out when a caller passes stream=None.
_the_null_stream = _NullStream()

# Sentinel: discard a stream entirely (like redirecting to /dev/null).
DEVNULL = object()

DEFAULT_STDOUT_LEVEL = logging.DEBUG
DEFAULT_STDERR_LEVEL = logging.ERROR

# prefixes for logging stdout/stderr of commands
STDOUT_PREFIX = '[stdout] '
STDERR_PREFIX = '[stderr] '

# safe characters for the shell (do not need quoting)
SHELL_QUOTING_WHITELIST = frozenset(string.ascii_letters +
                                    string.digits +
                                    '_-+=')
94
def custom_warning_handler(message, category, filename, lineno, file=None,
                           line=None):
    """Custom handler to log at the WARNING error level. Ignores |file|."""
    formatted = warnings.formatwarning(message, category, filename, lineno,
                                       line)
    logging.warning(formatted)
100
# Route all warnings.warn() output through the logging module.
warnings.showwarning = custom_warning_handler
102
def get_stream_tee_file(stream, level, prefix=''):
    """Translate a caller-supplied stream spec into a writable object.

    @param stream: None, DEVNULL, TEE_TO_LOGS, or a file-like object.
    @param level: Logging level used when stream is TEE_TO_LOGS.
    @param prefix: Log-line prefix used when stream is TEE_TO_LOGS.
    @return: None for DEVNULL, the shared null stream for None,
             a LoggingFile for TEE_TO_LOGS, otherwise |stream| itself.
    """
    if stream is DEVNULL:
        return None
    if stream is TEE_TO_LOGS:
        return logging_manager.LoggingFile(level=level, prefix=prefix)
    if stream is None:
        return _the_null_stream
    return stream
111
112
113def _join_with_nickname(base_string, nickname):
114    if nickname:
115        return '%s BgJob "%s" ' % (base_string, nickname)
116    return base_string
117
118
119# TODO: Cleanup and possibly eliminate |unjoinable|, which is only used in our
120# master-ssh connection process, while fixing underlying
121# semantics problem in BgJob. See crbug.com/279312
class BgJob(object):
    """A subprocess started in the background whose output can be teed.

    Wraps subprocess.Popen: the constructor spawns the process without
    blocking; process_output() incrementally drains the stdout/stderr
    pipes into per-stream buffers and tees; cleanup() flushes/closes
    everything and stores the captured text on self.result (a CmdResult).
    join_bg_jobs() below is the usual driver for these methods.
    """

    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None, stdout_level=DEFAULT_STDOUT_LEVEL,
                 stderr_level=DEFAULT_STDERR_LEVEL, nickname=None,
                 unjoinable=False, env=None, extra_paths=None):
        """Create and start a new BgJob.

        This constructor creates a new BgJob, and uses Popen to start a new
        subprocess with given command. It returns without blocking on execution
        of the subprocess.

        After starting a new BgJob, use output_prepare to connect the process's
        stdout and stderr pipes to the stream of your choice.

        When the job is running, the jobs's output streams are only read from
        when process_output is called.

        @param command: command to be executed in new subprocess. May be either
                        a list, or a string (in which case Popen will be called
                        with shell=True)
        @param stdout_tee: (Optional) a file like object, TEE_TO_LOGS or
                           DEVNULL.
                           If not given, after finishing the process, the
                           stdout data from subprocess is available in
                           result.stdout.
                           If a file like object is given, in process_output(),
                           the stdout data from the subprocess will be handled
                           by the given file like object.
                           If TEE_TO_LOGS is given, in process_output(), the
                           stdout data from the subprocess will be handled by
                           the standard logging_manager.
                           If DEVNULL is given, the stdout of the subprocess
                           will be just discarded. In addition, even after
                           cleanup(), result.stdout will be just an empty
                           string (unlike the case where stdout_tee is not
                           given).
        @param stderr_tee: Same as stdout_tee, but for stderr.
        @param verbose: Boolean, make BgJob logging more verbose.
        @param stdin: Stream object, will be passed to Popen as the new
                      process's stdin.
        @param stdout_level: A logging level value. If stdout_tee was set to
                             TEE_TO_LOGS, sets the level that tee'd
                             stdout output will be logged at. Ignored
                             otherwise.
        @param stderr_level: Same as stdout_level, but for stderr.
        @param nickname: Optional string, to be included in logging messages
        @param unjoinable: Optional bool, default False.
                           This should be True for BgJobs running in background
                           and will never be joined with join_bg_jobs(), such
                           as the master-ssh connection. Instead, it is
                           caller's responsibility to terminate the subprocess
                           correctly, e.g. by calling nuke_subprocess().
                           This will lead that, calling join_bg_jobs(),
                           process_output() or cleanup() will result in an
                           InvalidBgJobCall exception.
                           Also, |stdout_tee| and |stderr_tee| must be set to
                           DEVNULL, otherwise InvalidBgJobCall is raised.
        @param env: Dict containing environment variables used in subprocess.
        @param extra_paths: Optional string list, to be prepended to the PATH
                            env variable in env (or os.environ dict if env is
                            not specified).
        """
        self.command = command
        self.unjoinable = unjoinable
        # Unjoinable jobs are never drained by join_bg_jobs(), so their
        # output must be discarded to avoid filling the pipe buffers.
        if (unjoinable and (stdout_tee != DEVNULL or stderr_tee != DEVNULL)):
            raise error.InvalidBgJobCall(
                'stdout_tee and stderr_tee must be DEVNULL for '
                'unjoinable BgJob')
        self._stdout_tee = get_stream_tee_file(
                stdout_tee, stdout_level,
                prefix=_join_with_nickname(STDOUT_PREFIX, nickname))
        self._stderr_tee = get_stream_tee_file(
                stderr_tee, stderr_level,
                prefix=_join_with_nickname(STDERR_PREFIX, nickname))
        self.result = CmdResult(command)

        # allow for easy stdin input by string, we'll let subprocess create
        # a pipe for stdin input and we'll write to it in the wait loop
        if isinstance(stdin, basestring):
            self.string_stdin = stdin
            stdin = subprocess.PIPE
        else:
            self.string_stdin = None

        # Prepend extra_paths to env['PATH'] if necessary.
        if extra_paths:
            env = (os.environ if env is None else env).copy()
            oldpath = env.get('PATH')
            env['PATH'] = os.pathsep.join(
                    extra_paths + ([oldpath] if oldpath else []))

        if verbose:
            logging.debug("Running '%s'", command)

        # A list is executed directly; a string goes through a bash shell.
        if type(command) == list:
            shell = False
            executable = None
        else:
            shell = True
            executable = '/bin/bash'

        # The child inherits its own copy of the /dev/null descriptor
        # during Popen, so closing ours on 'with'-exit is safe.
        with open('/dev/null', 'w') as devnull:
            self.sp = subprocess.Popen(
                command,
                stdin=stdin,
                stdout=devnull if stdout_tee == DEVNULL else subprocess.PIPE,
                stderr=devnull if stderr_tee == DEVNULL else subprocess.PIPE,
                preexec_fn=self._reset_sigpipe,
                shell=shell, executable=executable,
                env=env, close_fds=True)

        # Guards against double-close in cleanup().
        self._cleanup_called = False
        # In-memory accumulation buffers; None when output is discarded.
        self._stdout_file = (
            None if stdout_tee == DEVNULL else StringIO.StringIO())
        self._stderr_file = (
            None if stderr_tee == DEVNULL else StringIO.StringIO())

    def process_output(self, stdout=True, final_read=False):
        """Read from process's output stream, and write data to destinations.

        This function reads up to 1024 bytes from the background job's
        stdout or stderr stream, and writes the resulting data to the BgJob's
        output tee and to the stream set up in output_prepare.

        Warning: Calls to process_output will block on reads from the
        subprocess stream, and will block on writes to the configured
        destination stream.

        @param stdout: True = read and process data from job's stdout.
                       False = from stderr.
                       Default: True
        @param final_read: Do not read only 1024 bytes from stream. Instead,
                           read and process all data until end of the stream.

        """
        if self.unjoinable:
            raise error.InvalidBgJobCall('Cannot call process_output on '
                                         'a job with unjoinable BgJob')
        if stdout:
            pipe, buf, tee = (
                self.sp.stdout, self._stdout_file, self._stdout_tee)
        else:
            pipe, buf, tee = (
                self.sp.stderr, self._stderr_file, self._stderr_tee)

        # pipe is None when the stream was redirected to /dev/null.
        if not pipe:
            return

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            while select.select([pipe], [], [], 0)[0]:
                data.append(os.read(pipe.fileno(), 1024))
                # A zero-length read means EOF: the process closed the pipe.
                if len(data[-1]) == 0:
                    break
            data = "".join(data)
        else:
            # perform a single read
            data = os.read(pipe.fileno(), 1024)
        buf.write(data)
        tee.write(data)

    def cleanup(self):
        """Clean up after BgJob.

        Flush the stdout_tee and stderr_tee buffers, close the
        subprocess stdout and stderr buffers, and saves data from
        the configured stdout and stderr destination streams to
        self.result. Duplicate calls ignored with a warning.
        """
        if self.unjoinable:
            raise error.InvalidBgJobCall('Cannot call cleanup on '
                                         'a job with a unjoinable BgJob')
        if self._cleanup_called:
            logging.warning('BgJob [%s] received a duplicate call to '
                            'cleanup. Ignoring.', self.command)
            return
        try:
            if self.sp.stdout:
                self._stdout_tee.flush()
                self.sp.stdout.close()
                self.result.stdout = self._stdout_file.getvalue()

            if self.sp.stderr:
                self._stderr_tee.flush()
                self.sp.stderr.close()
                self.result.stderr = self._stderr_file.getvalue()
        finally:
            # Mark done even if a close failed, so retries don't re-close.
            self._cleanup_called = True

    def _reset_sigpipe(self):
        # Restore default SIGPIPE handling in the child so it terminates
        # normally when writing to a closed pipe. Skipped under mod_wsgi,
        # presumably because signal handling is restricted there — confirm.
        if not env.IN_MOD_WSGI:
            signal.signal(signal.SIGPIPE, signal.SIG_DFL)
315
316
def ip_to_long(ip):
    """Convert a dotted-quad IPv4 string to its 32-bit integer value."""
    packed = socket.inet_aton(ip)
    # '!L' = unsigned 32-bit value in network (big-endian) byte order.
    (value,) = struct.unpack('!L', packed)
    return value
320
321
def long_to_ip(number):
    """Convert a 32-bit integer to its dotted-quad IPv4 string form."""
    packed = struct.pack('!L', number)
    return socket.inet_ntoa(packed)
325
326
def create_subnet_mask(bits):
    """Return the 32-bit netmask integer with the top |bits| bits set."""
    host_bits = 32 - bits
    # All 32 bits set, minus the low host_bits bits.
    return ((1 << 32) - 1) ^ ((1 << host_bits) - 1)
329
330
def format_ip_with_mask(ip, mask_bits):
    """Return 'a.b.c.d/bits' with the host portion of |ip| zeroed out."""
    network = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(network), mask_bits)
334
335
def normalize_hostname(alias):
    """Resolve |alias| to an address and return its canonical hostname."""
    address = socket.gethostbyname(alias)
    hostname, _aliases, _addresses = socket.gethostbyaddr(address)
    return hostname
339
340
def get_ip_local_port_range():
    """Return the (lower, upper) ephemeral port range from /proc."""
    contents = read_one_line('/proc/sys/net/ipv4/ip_local_port_range')
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$', contents)
    lower, upper = match.groups()
    return (int(lower), int(upper))
345
346
def set_ip_local_port_range(lower, upper):
    """Write the ephemeral port range (lower, upper) to /proc."""
    value = '%d %d\n' % (lower, upper)
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range', value)
350
351
def read_one_line(filename):
    """Return the first line of |filename| without its trailing newline.

    The previous implementation never closed the file handle; a context
    manager guarantees the file is closed even if the read raises.
    """
    with open(filename, 'r') as f:
        return f.readline().rstrip('\n')
354
355
def read_file(filename):
    """Return the entire contents of |filename| as a string."""
    # 'with' is equivalent to the old try/finally close.
    with open(filename) as f:
        return f.read()
362
363
def get_field(data, param, linestart="", sep=" "):
    """
    Parse data from string.
    @param data: Data to parse.
        example:
          data:
             cpu   324 345 34  5 345
             cpu0  34  11  34 34  33
             ^^^^
             start of line
             params 0   1   2  3   4
    @param param: Position of parameter after linestart marker.
    @param linestart: String with which the parameter line starts.
    @param sep: Separator between parameters regular expression.
    @return: The requested field as a string, or None if no line starts
             with |linestart|.
    """
    search = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
    find = search.search(data)
    if find is None:
        # Parenthesized so this works as a statement on python 2 and a
        # function call on python 3 (the old bare print statement did not).
        print("There is no line which starts with %s in data." % linestart)
        return None
    return re.split("%s" % sep, find.group(1))[param]
386
387
def write_one_line(filename, line):
    """Overwrite |filename| with |line|, ensuring one trailing newline."""
    data = str(line).rstrip('\n') + '\n'
    open_write_close(filename, data)
390
391
def open_write_close(filename, data):
    """Write |data| to |filename|, closing the file even if the write fails."""
    with open(filename, 'w') as f:
        f.write(data)
398
399
def locate_file(path, base_dir=None):
    """Locates a file.

    @param path: The path of the file being located. Could be absolute or
        relative path. For relative path, it tries to locate the file from
        base_dir.
    @param base_dir (optional): Base directory of the relative path.

    @returns Path of the file if found. None if path is None.
    @raises error.TestFail if the file is not found.
    """
    if path is None:
        return None

    if base_dir is not None and not os.path.isabs(path):
        # Assume the relative path is based in autotest directory.
        path = os.path.join(base_dir, path)
    if os.path.isfile(path):
        return path
    raise error.TestFail('ERROR: Unable to find %s' % path)
419
420
def matrix_to_string(matrix, header=None):
    """
    Return a pretty, aligned string representation of a nxm matrix.

    Scans every element (and the optional header) for its printed width,
    builds a left-aligned format string from those widths, then renders
    the header (if any) followed by each row.

    @param matrix: Matrix representation (list with n rows of m elements).
    @param header: Optional tuple or list with header elements to be displayed.
    """
    if type(header) is list:
        header = tuple(header)

    widths = [len(column) for column in header] if header else []
    for row in matrix:
        for i, column in enumerate(row):
            column = unicode(column).encode("utf-8")
            width = len(column)
            if i < len(widths):
                if width > widths[i]:
                    widths[i] = width
            else:
                widths.append(width)

    format_string = ''.join("%-" + str(w) + "s " for w in widths) + "\n"

    rendered = []
    if header:
        rendered.append(format_string % header)
    for row in matrix:
        rendered.append(format_string % tuple(row))
    return "".join(rendered)
462
463
def read_keyval(path, type_tag=None):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.

    Values that look like integers or floats are converted to int/float;
    everything else stays a string. '#' starts a comment; blank lines are
    skipped.

    @param path: Full path of the file to read from.
    @param type_tag: If not None, only keyvals with key ending
                     in a suffix {type_tag} will be collected.
    @return: Dict of parsed key/value pairs ({} if the file is missing).
    @raises ValueError: If a non-blank line does not match key=value form.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    if not os.path.exists(path):
        return {}

    if type_tag:
        pattern = r'^([-\.\w]+)\{%s\}=(.*)$' % type_tag
    else:
        pattern = r'^([-\.\w]+)=(.*)$'

    keyval = {}
    # 'with' closes the file even when ValueError is raised mid-parse;
    # the previous version leaked the handle in that case.
    with open(path) as f:
        for line in f:
            line = re.sub('#.*', '', line).rstrip()
            if not line:
                continue
            match = re.match(pattern, line)
            if not match:
                raise ValueError('Invalid format line: %s' % line)
            key = match.group(1)
            value = match.group(2)
            if re.search(r'^\d+$', value):
                value = int(value)
            elif re.search(r'^(\d+\.)?\d+$', value):
                value = float(value)
            keyval[key] = value
    return keyval
503
504
def write_keyval(path, dictionary, type_tag=None):
    """
    Write a key-value pair format file out to a file. This uses append
    mode to open the file, so existing text will not be overwritten or
    reparsed.

    If type_tag is None, then the key must be composed of alphanumeric
    characters (or dashes+underscores). However, if type-tag is not
    null then the keys must also have "{type_tag}" as a suffix. At
    the moment the only valid values of type_tag are "attr" and "perf".

    @param path: full path of the file to be written
    @param dictionary: the items to write
    @param type_tag: see text above
    @raises ValueError: for an invalid type_tag or a malformed key.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')

    # Validate type_tag BEFORE touching the filesystem: the old code
    # opened (and thus created) the file first and leaked the handle
    # when an invalid tag was rejected.
    if type_tag is None:
        key_regex = re.compile(r'^[-\.\w]+$')
    elif type_tag in ('attr', 'perf'):
        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % re.escape(type_tag))
    else:
        raise ValueError('Invalid type tag: %s' % type_tag)

    # 'with' guarantees the file is closed even if a key is invalid.
    with open(path, 'a') as keyval:
        for key in sorted(dictionary.keys()):
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, dictionary[key]))
538
539
def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    scheme = urlparse.urlparse(path)[0]
    return scheme in ('http', 'ftp')
545
546
def urlopen(url, data=None, timeout=5):
    """Wrapper to urllib2.urlopen with timeout addition."""
    # The timeout is applied by temporarily swapping the process-wide
    # default socket timeout; it is always restored afterwards.
    saved_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib2.urlopen(url, data=data)
    finally:
        socket.setdefaulttimeout(saved_timeout)
557
558
def urlretrieve(url, filename, data=None, timeout=300):
    """Retrieve a file from given url."""
    logging.debug('Fetching %s -> %s', url, filename)

    src_file = urlopen(url, data=data, timeout=timeout)
    try:
        # 'with' closes the destination even if the copy fails.
        with open(filename, 'wb') as dest_file:
            shutil.copyfileobj(src_file, dest_file)
    finally:
        src_file.close()
572
573
def hash(type, input=None):
    """
    Returns an hash object of type md5 or sha1. This function is implemented in
    order to encapsulate hash objects in a way that is compatible with python
    2.4 and python 2.6 without warnings.

    Note that even though python 2.6 hashlib supports hash types other than
    md5 and sha1, we are artificially limiting the input values in order to
    make the function to behave exactly the same among both python
    implementations.

    @param input: Optional input string that will be used to update the hash.
    """
    if type not in ['md5', 'sha1']:
        raise ValueError("Unsupported hash type: %s" % type)

    try:
        new_hash = hashlib.new(type)
    except NameError:
        # Python 2.4: hashlib is unavailable, use the legacy modules.
        new_hash = md5.new() if type == 'md5' else sha.new()

    if input:
        new_hash.update(input)

    return new_hash
602
603
def get_file(src, dest, permissions=None):
    """Get a file from src, which can be local or a remote URL.

    @param src: Local path or http/ftp URL of the source.
    @param dest: Local path the file is copied to.
    @param permissions: Optional mode bits applied to dest via os.chmod.

    @return: dest. (The old code returned None when src == dest, unlike
             every other path; it now returns dest consistently.)
    """
    if src == dest:
        # Nothing to copy, but keep the return value consistent.
        return dest

    if is_url(src):
        urlretrieve(src, dest)
    else:
        shutil.copyfile(src, dest)

    if permissions:
        os.chmod(dest, permissions)
    return dest
617
618
def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    returns either the path to the local file, or the fetched URL

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                            = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                            = '/tmp/file'
                            (after retrieving it)
    """
    if not is_url(src):
        return os.path.join(srcdir, src)
    # Fetch the URL into destdir, keeping the URL's basename.
    filename = os.path.basename(urlparse.urlparse(src)[2])
    return get_file(src, os.path.join(destdir, filename))
637
638
def update_version(srcdir, preserve_srcdir, new_version, install,
                   *args, **dargs):
    """
    Make sure srcdir is version new_version

    If not, delete it and install() the new version.

    In the preserve_srcdir case, we just check it's up to date,
    and if not, we rerun install, without removing srcdir

    @param srcdir: Directory whose version is tracked in srcdir/.version.
    @param preserve_srcdir: If True, never delete srcdir before reinstall.
    @param new_version: Version value to compare and record (picklable).
    @param install: Callable invoked with *args/**dargs to (re)install.
    """
    versionfile = os.path.join(srcdir, '.version')
    install_needed = True

    if os.path.exists(versionfile):
        # Binary mode is required by pickle on python 3 and harmless on
        # python 2; the old code also leaked both file handles.
        with open(versionfile, 'rb') as f:
            old_version = pickle.load(f)
        if old_version == new_version:
            install_needed = False

    if install_needed:
        if not preserve_srcdir and os.path.exists(srcdir):
            shutil.rmtree(srcdir)
        install(*args, **dargs)
        if os.path.exists(srcdir):
            with open(versionfile, 'wb') as f:
                pickle.dump(new_version, f)
663
664
def get_stderr_level(stderr_is_expected, stdout_level=DEFAULT_STDOUT_LEVEL):
    """Return the logging level stderr output should be recorded at."""
    return stdout_level if stderr_is_expected else DEFAULT_STDERR_LEVEL
669
670
def run(command, timeout=None, ignore_status=False, stdout_tee=None,
        stderr_tee=None, verbose=True, stdin=None, stderr_is_expected=None,
        stdout_level=None, stderr_level=None, args=(), nickname=None,
        ignore_timeout=False, env=None, extra_paths=None):
    """
    Run a command on the host.

    @param command: the command line string.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The run() function will take a few seconds
            longer than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param stdout_tee: optional file-like object to which stdout data
            will be written as it is generated (data will still be stored
            in result.stdout).
    @param stderr_tee: likewise for stderr.
    @param verbose: if True, log the command being run.
    @param stdin: stdin to pass to the executed process (can be a file
            descriptor, a file object of a real file or a string).
    @param stderr_is_expected: if True, stderr will be logged at the same level
            as stdout
    @param stdout_level: logging level used if stdout_tee is TEE_TO_LOGS;
            if None, a default is used.
    @param stderr_level: like stdout_level but for stderr.
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument
    @param nickname: Short string that will appear in logging messages
                     associated with this command.
    @param ignore_timeout: If True, timeouts are ignored otherwise if a
            timeout occurs it will raise CmdTimeoutError.
    @param env: Dict containing environment variables used in a subprocess.
    @param extra_paths: Optional string list, to be prepended to the PATH
                        env variable in env (or os.environ dict if env is
                        not specified).

    @return a CmdResult object or None if the command timed out and
            ignore_timeout is True

    @raise CmdError: the exit code of the command execution was not 0
    @raise CmdTimeoutError: the command timed out and ignore_timeout is False.
    """
    if isinstance(args, basestring):
        raise TypeError('Got a string for the "args" keyword argument, '
                        'need a sequence.')

    # |command| may arrive as a sequence (see get_user_hash in
    # client/cros/cryptohome.py); flatten it into a quoted shell string
    # before the escaped |args| are appended.
    if not isinstance(command, basestring):
        command = ' '.join(sh_quote_word(part) for part in command)
    quoted_args = [sh_quote_word(arg) for arg in args]
    command = ' '.join([command] + quoted_args)

    if stderr_is_expected is None:
        stderr_is_expected = ignore_status
    if stdout_level is None:
        stdout_level = DEFAULT_STDOUT_LEVEL
    if stderr_level is None:
        stderr_level = get_stderr_level(stderr_is_expected, stdout_level)

    try:
        bg_job = join_bg_jobs(
            (BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
                   stdout_level=stdout_level, stderr_level=stderr_level,
                   nickname=nickname, env=env, extra_paths=extra_paths),),
            timeout)[0]
    except error.CmdTimeoutError:
        if ignore_timeout:
            return None
        raise

    if not ignore_status and bg_job.result.exit_status:
        raise error.CmdError(command, bg_job.result,
                             "Command returned non-zero exit status")

    return bg_job.result
751
752
def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None,
                 nicknames=None):
    """
    Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    @param commands: List of command strings to execute concurrently.
    @param nicknames: Optional list of nicknames, paired with commands by
            position. (Previously a mutable default argument, [].)

    @return: a list of CmdResult objects
    @raise error.CmdError: if any command exits non-zero and ignore_status
            is False.
    """
    if nicknames is None:
        nicknames = []
    bg_jobs = []
    for (command, nickname) in itertools.izip_longest(commands, nicknames):
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
                             stderr_level=get_stderr_level(ignore_status),
                             nickname=nickname))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            # Report the failing job's own command; the old code raised
            # with the stale loop variable from the creation loop above,
            # which always named the *last* command.
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]
780
781
@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead."""
    job = BgJob(command)
    return job.sp, job.result
787
788
def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    Returns the same list of bg_jobs objects that was passed in.
    """
    for bg_job in bg_jobs:
        if bg_job.unjoinable:
            raise error.InvalidBgJobCall(
                    'join_bg_jobs cannot be called for unjoinable bg_job')

    timed_out = False
    try:
        # We hold ends of the stdin/stdout pipes, so they must be closed
        # no matter what happens while waiting (see the finally below).
        timed_out = _wait_for_commands(bg_jobs, time.time(), timeout)

        # Drain any remaining stdout/stderr data from each job.
        for bg_job in bg_jobs:
            bg_job.process_output(stdout=True, final_read=True)
            bg_job.process_output(stdout=False, final_read=True)
    finally:
        # close our ends of the pipes to the sp no matter what
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timed_out:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatible, so it
        # will do for the time being.
        raise error.CmdTimeoutError(
                bg_jobs[0].command, bg_jobs[0].result,
                "Command(s) did not complete within %d seconds" % timeout)

    return bg_jobs
824
825
def _wait_for_commands(bg_jobs, start_time, timeout):
    """Waits for background jobs by select polling their stdout/stderr.

    Repeatedly select()s on the jobs' stdout/stderr pipes, feeding queued
    string_stdin data whenever a stdin pipe becomes writable, until every
    job has exited or |timeout| seconds have elapsed since |start_time|.
    Jobs still running at timeout are killed via nuke_subprocess().

    @param bg_jobs: A list of background jobs to wait on.
    @param start_time: Time used to calculate the timeout lifetime of a job.
    @param timeout: The timeout of the list of bg_jobs.

    @return: True if the return was due to a timeout, False otherwise.
    """

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    # Pipes we still need to read from / write to, plus a map from each
    # pipe back to its owning job (read pipes also record whether they are
    # stdout (True) or stderr (False)).
    read_list = []
    write_list = []
    reverse_dict = {}

    for bg_job in bg_jobs:
        if bg_job.sp.stdout:
            read_list.append(bg_job.sp.stdout)
            reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        if bg_job.sp.stderr:
            read_list.append(bg_job.sp.stderr)
            reverse_dict[bg_job.sp.stderr] = (bg_job, False)
        if bg_job.string_stdin is not None:
            write_list.append(bg_job.sp.stdin)
            reverse_dict[bg_job.sp.stdin] = bg_job

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None # so that select never times out

    while not timeout or time_left > 0:
        # select will return when we may write to stdin, when there is
        # stdout/stderr output we can read (including when it is
        # EOF, that is the process has terminated) or when a non-fatal
        # signal was sent to the process. In the last case the select returns
        # EINTR, and we continue waiting for the job if the signal handler for
        # the signal that interrupted the call allows us to.
        try:
            read_ready, write_ready, _ = select.select(read_list, write_list,
                                                       [], SELECT_TIMEOUT)
        except select.error as v:
            if v[0] == errno.EINTR:
                logging.warning(v)
                continue
            else:
                raise
        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for file_obj in read_ready:
            bg_job, is_stdout = reverse_dict[file_obj]
            bg_job.process_output(is_stdout)

        for file_obj in write_ready:
            # we can write PIPE_BUF bytes without blocking
            # POSIX requires PIPE_BUF is >= 512
            bg_job = reverse_dict[file_obj]
            file_obj.write(bg_job.string_stdin[:512])
            bg_job.string_stdin = bg_job.string_stdin[512:]
            # no more input data, close stdin, remove it from the select set
            if not bg_job.string_stdin:
                file_obj.close()
                write_list.remove(file_obj)
                del reverse_dict[file_obj]

        # Poll every job that has not yet been seen to exit; stop waiting
        # once all of them have an exit status recorded.
        all_jobs_finished = True
        for bg_job in bg_jobs:
            if bg_job.result.exit_status is not None:
                continue

            bg_job.result.exit_status = bg_job.sp.poll()
            if bg_job.result.exit_status is not None:
                # process exited, remove its stdout/stdin from the select set
                bg_job.result.duration = time.time() - start_time
                if bg_job.sp.stdout:
                    read_list.remove(bg_job.sp.stdout)
                    del reverse_dict[bg_job.sp.stdout]
                if bg_job.sp.stderr:
                    read_list.remove(bg_job.sp.stderr)
                    del reverse_dict[bg_job.sp.stderr]
            else:
                all_jobs_finished = False

        if all_jobs_finished:
            return False

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in bg_jobs:
        if bg_job.result.exit_status is not None:
            continue

        logging.warning('run process timeout (%s) fired on: %s', timeout,
                        bg_job.command)
        if nuke_subprocess(bg_job.sp) is None:
            # If process could not be SIGKILL'd, log kernel stack.
            logging.warning(read_file('/proc/%d/stack' % bg_job.sp.pid))
        bg_job.result.exit_status = bg_job.sp.poll()
        bg_job.result.duration = time.time() - start_time

    return True
933
934
def pid_is_alive(pid):
    """
    True if process pid exists and is not yet stuck in Zombie state.
    Zombies are impossible to move between cgroups, etc.
    pid can be integer, or text of integer.
    """
    stat_path = '/proc/%s/stat' % pid

    try:
        stat_line = read_one_line(stat_path)
    except IOError:
        if os.path.exists(stat_path):
            raise
        # The stat file vanished: the process is gone.
        return False

    # The third field of /proc/<pid>/stat is the process state; 'Z' marks
    # a zombie.
    return stat_line.split()[2] != 'Z'
952
953
def signal_pid(pid, sig):
    """
    Sends a signal to a process id. Returns True if the process terminated
    successfully, False otherwise.
    """
    try:
        os.kill(pid, sig)
    except OSError:
        # The process may have died before we could kill it.
        pass

    # Poll for up to five seconds for the process to disappear.
    for _ in range(5):
        if not pid_is_alive(pid):
            return True
        time.sleep(1)

    # The process is still alive.
    return False
972
973
def nuke_subprocess(subproc):
    """Terminate |subproc|, escalating signals; return its exit status.

    Returns None if the process survived even SIGKILL.
    """
    # If the subprocess already exited there is nothing to do.
    status = subproc.poll()
    if status is not None:
        return status

    # Escalate: polite SIGTERM first, then SIGKILL.
    for sig in (signal.SIGTERM, signal.SIGKILL):
        signal_pid(subproc.pid, sig)
        status = subproc.poll()
        if status is not None:
            return status
986
987
def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
    """Kill the process |pid| via an escalating series of signals.

    @param pid: pid of the process to kill.
    @param signal_queue: signals to try, in order, until one terminates it.

    @raises AutoservPidAlreadyDeadError: if pid has no /proc entry.
    @raises AutoservRunError: if no signal in signal_queue killed it.
    """
    pid_path = '/proc/%d/'
    if not os.path.exists(pid_path % pid):
        # Assume that if the pid does not exist in proc it is already dead.
        logging.error('No listing in /proc for pid:%d.', pid)
        # BUG FIX: the message used to be passed unformatted, with pid as a
        # separate positional exception argument, so '%s' never got filled.
        raise error.AutoservPidAlreadyDeadError('Could not kill nonexistent '
                                                'pid: %s.' % pid)
    for sig in signal_queue:
        if signal_pid(pid, sig):
            return

    # No signal successfully terminated the process.
    raise error.AutoservRunError('Could not kill %d for process name: %s' % (
            pid, get_process_name(pid)), None)
1004
1005
def system(command, timeout=None, ignore_status=False):
    """
    Run a command, teeing its stdout/stderr to the logs.

    @param timeout: timeout in seconds
    @param ignore_status: if ignore_status=False, throw an exception if the
            command's exit code is non-zero
            if ignore_status=True, return the exit code.

    @return exit status of command
            (note, this will always be zero unless ignore_status=True)
    """
    result = run(command, timeout=timeout, ignore_status=ignore_status,
                 stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return result.exit_status
1020
1021
def system_parallel(commands, timeout=None, ignore_status=False):
    """This function returns a list of exit statuses for the respective
    list of commands."""
    jobs = run_parallel(commands, timeout=timeout, ignore_status=ignore_status,
                        stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return [job.exit_status for job in jobs]
1028
1029
def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False, args=()):
    """
    Run a command and return the stdout output.

    @param command: command string to execute.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The function will take a few seconds longer
            than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param retain_output: set to True to make stdout/stderr of the command
            output to be also sent to the logging system
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a string with the stdout output of the command, with one
            trailing newline (if any) stripped.
    """
    if retain_output:
        tee_kwargs = dict(stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    else:
        tee_kwargs = {}
    out = run(command, timeout=timeout, ignore_status=ignore_status,
              args=args, **tee_kwargs).stdout
    # Strip exactly one trailing newline, mirroring shell backticks.
    if out.endswith('\n'):
        out = out[:-1]
    return out
1060
1061
def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    """Run commands in parallel and return a list of their stdout outputs.

    @param commands: list of command strings to run in parallel.
    @param timeout: time limit in seconds for the whole set of commands.
    @param ignore_status: do not raise on non-zero exit codes.
    @param retain_output: also tee each command's stdout/stderr to the logs.

    @return list of stdout strings, in the same order as |commands|, each
            with one trailing newline (if any) stripped.
    """
    if retain_output:
        jobs = run_parallel(commands, timeout=timeout,
                            ignore_status=ignore_status,
                            stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    else:
        jobs = run_parallel(commands, timeout=timeout,
                            ignore_status=ignore_status)
    # BUG FIX: the old loop compared a *list* slice (out[-1:]) against '\n',
    # which is never true, so trailing newlines were never stripped. Strip
    # one per output string, matching system_output().
    return [out[:-1] if out.endswith('\n') else out
            for out in (job.stdout for job in jobs)]
1075
1076
def strip_unicode(input):
    """Recursively convert unicode strings inside lists/dicts to str.

    Dict keys and unicode values become plain str; other values pass
    through unchanged. Exact-type checks are intentional (subclasses are
    left alone, matching the original behavior).
    """
    if type(input) == list:
        return [strip_unicode(item) for item in input]
    if type(input) == dict:
        return dict((str(key), strip_unicode(value))
                    for key, value in input.items())
    if type(input) == unicode:
        return str(input)
    return input
1089
1090
def get_cpu_percentage(function, *args, **dargs):
    """Returns a tuple containing the CPU% and return value from function call.

    This function calculates the usage time by taking the difference of
    the user and system times both before and after the function call.
    """
    child_before = resource.getrusage(resource.RUSAGE_CHILDREN)
    self_before = resource.getrusage(resource.RUSAGE_SELF)
    start = time.time()
    result = function(*args, **dargs)
    elapsed = time.time() - start
    self_after = resource.getrusage(resource.RUSAGE_SELF)
    child_after = resource.getrusage(resource.RUSAGE_CHILDREN)

    # rusage fields 0 and 1 are user time and system time respectively.
    self_user = self_after[0] - self_before[0]
    self_system = self_after[1] - self_before[1]
    child_user = child_after[0] - child_before[0]
    child_system = child_after[1] - child_before[1]
    cpu_percent = (self_user + child_user + self_system + child_system) \
                  / elapsed

    return cpu_percent, result
1111
1112
def get_arch(run_function=run):
    """
    Get the hardware architecture of the machine.
    If specified, run_function should return a CmdResult object and throw a
    CmdError exception.
    If run_function is anything other than utils.run(), it is used to
    execute the commands. By default (when set to utils.run()) this will
    just examine os.uname()[4].
    """
    # Short circuit for the common, local case.
    if run_function == run:
        return re.sub(r'i\d86$', 'i386', os.uname()[4])

    # Otherwise use run_function, which may target a remote machine.
    machine = run_function('/bin/uname -m').stdout.rstrip()
    return 'i386' if re.match(r'i\d86$', machine) else machine
1132
def get_arch_userspace(run_function=run):
    """
    Get the architecture by userspace (possibly different from kernel).
    """
    # Map architecture name -> `file` output signature for /bin/sh.
    arch_signatures = (
        ('arm', 'ELF 32-bit.*, ARM,'),
        ('i386', 'ELF 32-bit.*, Intel 80386,'),
        ('x86_64', 'ELF 64-bit.*, x86-64,'),
    )

    filestr = run_function(
            'file --brief --dereference /bin/sh').stdout.rstrip()
    for arch, signature in arch_signatures:
        if re.match(signature, filestr):
            return arch

    # NOTE(review): falls back to get_arch() without forwarding
    # run_function -- confirm this is intended for remote targets.
    return get_arch()
1150
1151
def get_num_logical_cpus_per_socket(run_function=run):
    """
    Get the number of cores (including hyperthreading) per cpu.
    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    output = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
    sibling_counts = [int(count) for count in
                      re.findall(r'^siblings\s*:\s*(\d+)\s*$', output, re.M)]
    if not sibling_counts:
        raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
    # All sockets are expected to report the same sibling count.
    if min(sibling_counts) != max(sibling_counts):
        raise error.TestError('Number of siblings differ %r' %
                              sibling_counts)
    return sibling_counts[0]
1170
1171
def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees than the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).
    """
    if not os.path.exists(src):
        # Exists only in dest; nothing to merge.
        return
    if not os.path.exists(dest):
        # Exists only in src; copy it over wholesale.
        if os.path.isfile(src):
            shutil.copy2(src, dest)
        else:
            shutil.copytree(src, dest, symlinks=True)
    elif os.path.isfile(src) and os.path.isfile(dest):
        # A file in both trees: append src's contents to dest's.
        with open(src) as srcfile:
            contents = srcfile.read()
        with open(dest, "a") as destfile:
            destfile.write(contents)
    elif os.path.isdir(src) and os.path.isdir(dest):
        # A directory in both trees: recursively merge the children.
        for name in os.listdir(src):
            merge_trees(os.path.join(src, name), os.path.join(dest, name))
    # Otherwise src & dest both exist but are incompatible; skip them.
1207
1208
class CmdResult(object):
    """
    Command execution result.

    command:     String containing the command line itself
    exit_status: Integer exit code of the process
    stdout:      String containing stdout of the process
    stderr:      String containing stderr of the process
    duration:    Elapsed wall clock time running the process
    """


    def __init__(self, command="", stdout="", stderr="",
                 exit_status=None, duration=0):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration


    def __eq__(self, other):
        if type(self) == type(other):
            return (self.command == other.command
                    and self.exit_status == other.exit_status
                    and self.stdout == other.stdout
                    and self.stderr == other.stderr
                    and self.duration == other.duration)
        else:
            return NotImplemented


    def __ne__(self, other):
        # BUG FIX: Python 2 does not derive __ne__ from __eq__, so without
        # this `a != b` fell back to identity and could disagree with `==`.
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result


    def __repr__(self):
        wrapper = textwrap.TextWrapper(width = 78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        stdout = self.stdout.rstrip()
        if stdout:
            stdout = "\nstdout:\n%s" % stdout

        stderr = self.stderr.rstrip()
        if stderr:
            stderr = "\nstderr:\n%s" % stderr

        return ("* Command: %s\n"
                "Exit status: %s\n"
                "Duration: %s\n"
                "%s"
                "%s"
                % (wrapper.fill(str(self.command)), self.exit_status,
                self.duration, stdout, stderr))
1261
1262
class run_randomly:
    """Collects (args, kwargs) entries and runs them in random order.

    With run_sequentially=True (handy when debugging control files) the
    entries run in the order they were added instead.
    """

    def __init__(self, run_sequentially=False):
        self.test_list = []
        self.run_sequentially = run_sequentially


    def add(self, *args, **dargs):
        self.test_list.append((args, dargs))


    def run(self, fn):
        while self.test_list:
            # Always draw from the RNG so the random stream is consumed
            # identically regardless of run_sequentially.
            index = random.randint(0, len(self.test_list) - 1)
            if self.run_sequentially:
                index = 0
            args, dargs = self.test_list.pop(index)
            fn(*args, **dargs)
1282
1283
def import_site_module(path, module, dummy=None, modulefile=None):
    """
    Try to import the site specific module if it exists.

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific module or dummy

    @raises ImportError if the site file exists but imports fails
    """
    short_module = module[module.rfind(".") + 1:]
    if not modulefile:
        modulefile = short_module + ".py"

    site_path = os.path.join(os.path.dirname(path), modulefile)
    if not os.path.exists(site_path):
        return dummy
    return __import__(module, {}, {}, [short_module])
1305
1306
def import_site_symbol(path, module, name, dummy=None, modulefile=None):
    """
    Try to import site specific symbol from site specific file if it exists

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param name symbol name to be imported from the site file
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific symbol or dummy

    @raises ImportError if the site file exists but imports fails
    """
    site_module = import_site_module(path, module, modulefile=modulefile)
    if not site_module:
        return dummy

    # Unique sentinel so we can tell "symbol missing" from "symbol is None".
    sentinel = object()
    symbol = getattr(site_module, name, sentinel)
    return dummy if symbol is sentinel else symbol
1333
1334
def import_site_class(path, module, classname, baseclass, modulefile=None):
    """
    Try to import site specific class from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        classname: class name to be loaded from site file
        baseclass: base class object to return when no site file present or
            to mixin when site class exists but is not inherited from baseclass
        modulefile: module filename

    Returns: baseclass if site specific class does not exist, the site specific
        class if it exists and is inherited from baseclass or a mixin of the
        site specific class and baseclass when the site specific class exists
        and is not inherited from baseclass

    Raises: ImportError if the site file exists but imports fails
    """

    site_class = import_site_symbol(path, module, classname, None, modulefile)
    if not site_class:
        return baseclass
    if issubclass(site_class, baseclass):
        return site_class
    # The site class is not derived from baseclass: mix baseclass in so the
    # returned type still satisfies the expected interface.
    return type(classname, (site_class, baseclass), {})
1365
1366
def import_site_function(path, module, funcname, dummy, modulefile=None):
    """
    Try to import site specific function from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        funcname: function name to be imported from site file
        dummy: dummy function to return in case there is no function to import
        modulefile: module filename

    Returns: site specific function object or dummy

    Raises: ImportError if the site file exists but imports fails
    """
    # Thin wrapper: a function is just another module-level symbol.
    return import_site_symbol(path, module, funcname, dummy,
                              modulefile=modulefile)
1384
1385
1386def _get_pid_path(program_name):
1387    my_path = os.path.dirname(__file__)
1388    return os.path.abspath(os.path.join(my_path, "..", "..",
1389                                        "%s.pid" % program_name))
1390
1391
def write_pid(program_name):
    """
    Try to drop <program_name>.pid in the main autotest directory.

    Args:
      program_name: prefix for file name
    """
    with open(_get_pid_path(program_name), "w") as pidfile:
        pidfile.write("%s\n" % os.getpid())
1404
1405
def delete_pid_file_if_exists(program_name):
    """
    Tries to remove <program_name>.pid from the main autotest directory.
    """
    pid_path = _get_pid_path(program_name)
    try:
        os.remove(pid_path)
    except OSError:
        # Ignore the failure only when the file is already gone.
        if os.path.exists(pid_path):
            raise
1418
1419
def get_pid_from_file(program_name):
    """
    Reads the pid from <program_name>.pid in the autotest directory.

    @param program_name the name of the program
    @return the pid if the file exists, None otherwise.
    """
    pidfile_path = _get_pid_path(program_name)
    if not os.path.exists(pidfile_path):
        return None

    with open(pidfile_path, 'r') as pidfile:
        try:
            return int(pidfile.readline())
        except IOError:
            # The file may have vanished between the check and the read.
            if not os.path.exists(pidfile_path):
                return None
            raise
1444
1445
def get_process_name(pid):
    """
    Get process name from PID.
    @param pid: PID of process.
    @return: Process name if PID stat file exists or 'Dead PID' if it does not.
    """
    stat_path = "/proc/%d/stat" % pid
    if not os.path.exists(stat_path):
        return "Dead Pid"
    # Field 1 of the stat file is "(comm)"; strip the parentheses.
    return get_field(read_file(stat_path), 1)[1:-1]
1456
1457
def program_is_alive(program_name):
    """
    Checks if the process is alive and not in Zombie state.

    @param program_name the name of the program
    @return True if still alive, False otherwise
    """
    pid = get_pid_from_file(program_name)
    return pid is not None and pid_is_alive(pid)
1469
1470
def signal_program(program_name, sig=signal.SIGTERM):
    """
    Sends a signal to the process listed in <program_name>.pid

    @param program_name the name of the program
    @param sig signal to send
    """
    pid = get_pid_from_file(program_name)
    if not pid:
        # No pid file (or pid 0): nothing to signal.
        return
    signal_pid(pid, sig)
1481
1482
def get_relative_path(path, reference):
    """Given 2 absolute paths "path" and "reference", compute the path of
    "path" as relative to the directory "reference".

    @param path the absolute path to convert to a relative path
    @param reference an absolute directory path to which the relative
        path will be computed
    """
    assert(os.path.isabs(path))
    assert(os.path.isabs(reference))

    # Normalize the paths (remove double slashes, etc).
    path = os.path.normpath(path)
    reference = os.path.normpath(reference)

    # os.path.split() splits from the end, so split on the separator and
    # drop the empty leading component of the absolute paths.
    path_parts = path.split(os.path.sep)[1:]
    ref_parts = reference.split(os.path.sep)[1:]

    # Count the longest run of common leading components.
    common = 0
    for path_part, ref_part in zip(path_parts, ref_parts):
        if path_part != ref_part:
            break
        common += 1

    # Climb out of the remaining reference dirs, then descend into what is
    # left of path after the shared prefix.
    relative = ['..'] * (len(ref_parts) - common) + path_parts[common:]
    return os.path.join(*relative)
1517
1518
def sh_escape(command):
    """
    Escape special characters from a command so that it can be passed
    as a double quoted (" ") string in a (ba)sh command.

    Args:
            command: the command string to escape.

    Returns:
            The escaped command string. The required englobing double
            quotes are NOT added and so should be added at some point by
            the caller.

    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
    """
    # Backslash must be escaped first so later escapes are not doubled up.
    for char in ('\\', '$', '"', '`'):
        command = command.replace(char, '\\' + char)
    return command
1539
1540
def sh_quote_word(text, whitelist=SHELL_QUOTING_WHITELIST):
    r"""Quote a string to make it safe as a single word in a shell command.

    POSIX shell syntax recognizes no escape characters inside a single-quoted
    string, so single quotes can safely wrap any characters except a single
    quote itself, which is emitted as the sequence '\'' :
        '  -> close current quote
        \' -> insert a literal single quote
        '  -> reopen quoting again.

    This is safe for all combinations of characters, including embedded and
    trailing backslashes in odd or even numbers.

    This is also safe for nesting, e.g. the following is a valid use:

        adb_command = 'adb shell %s' % (
                sh_quote_word('echo %s' % sh_quote_word('hello world')))

    @param text: The string to be quoted into a single word for the shell.
    @param whitelist: Optional list of characters that do not need quoting.
                      Defaults to a known good list of characters.

    @return A string, possibly quoted, safe as a single word for a shell.
    """
    needs_quoting = any(char not in whitelist for char in text)
    if not needs_quoting:
        return text
    return "'%s'" % text.replace("'", r"'\''")
1569
1570
def configure(extra=None, configure='./configure'):
    """
    Run configure passing in the correct host, build, and target options.

    @param extra: extra command line arguments to pass to configure
    @param configure: which configure script to use
    """
    args = []
    # Forward the toolchain environment, in the conventional order.
    for flag, var in (('--host', 'CHOST'),
                      ('--build', 'CBUILD'),
                      ('--target', 'CTARGET')):
        if var in os.environ:
            args.append('%s=%s' % (flag, os.environ[var]))
    if extra:
        args.append(extra)

    system('%s %s' % (configure, ' '.join(args)))
1589
1590
def make(extra='', make='make', timeout=None, ignore_status=False):
    """
    Run make, adding MAKEOPTS to the list of options.

    @param extra: extra command line arguments to pass to make.
    @param make: the make binary to invoke.
    @param timeout: timeout in seconds forwarded to system().
    @param ignore_status: if True, return the exit code instead of raising.

    @return the exit status of the make command.
    """
    makeopts = os.environ.get('MAKEOPTS', '')
    return system('%s %s %s' % (make, makeopts, extra),
                  timeout=timeout, ignore_status=ignore_status)
1599
1600
def compare_versions(ver1, ver2):
    """Version number comparison between ver1 and ver2 strings.

    >>> compare_versions("1", "2")
    -1
    >>> compare_versions("foo-1.1", "foo-1.2")
    -1
    >>> compare_versions("1.2", "1.2a")
    -1
    >>> compare_versions("1.2b", "1.2a")
    1
    >>> compare_versions("1.3.5.3a", "1.3.5.3b")
    -1

    Args:
        ver1: version string
        ver2: version string

    Returns:
        int:  1 if ver1 >  ver2
              0 if ver1 == ver2
             -1 if ver1 <  ver2
    """
    def _three_way(a, b):
        # Equivalent to the classic cmp(a, b).
        return (a > b) - (a < b)

    parts1 = re.split('[.-]', ver1)
    parts2 = re.split('[.-]', ver2)
    for chunk1, chunk2 in zip(parts1, parts2):
        # Zero-pad to equal width so e.g. "2" sorts before "10".
        width = max(len(chunk1), len(chunk2))
        result = _three_way(chunk1.zfill(width), chunk2.zfill(width))
        if result != 0:
            return result
    # All shared components matched; the longer version wins.
    return _three_way(len(parts1), len(parts2))
1634
1635
def args_to_dict(args):
    """Convert autoserv extra arguments in the form of key=val or key:val to a
    dictionary.  Each argument key is converted to lowercase dictionary key.

    Args:
        args - list of autoserv extra arguments.

    Returns:
        dictionary
    """
    arg_re = re.compile(r'(\w+)[:=](.*)$')
    parsed = {}
    for arg in args:
        match = arg_re.match(arg)
        if not match:
            logging.warning("args_to_dict: argument '%s' doesn't match "
                            "'%s' pattern. Ignored.", arg, arg_re.pattern)
            continue
        parsed[match.group(1).lower()] = match.group(2)
    return parsed
1656
1657
def get_unused_port():
    """
    Finds a semi-random available port. A race condition is still
    possible after the port number is returned, if another process
    happens to bind it.

    Returns:
        A port number that is unused on both TCP and UDP.
    """

    def try_bind(port, socket_type, socket_proto):
        # Returns the bound port number on success, None on failure.
        s = socket.socket(socket.AF_INET, socket_type, socket_proto)
        try:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            s.bind(('', port))
            return s.getsockname()[1]
        except socket.error:
            return None
        finally:
            s.close()

    # On the 2.6 kernel, calling try_bind() on a UDP socket returns the
    # same port over and over, so always probe TCP first.
    while True:
        # Ask the OS for an unused TCP port...
        tcp_port = try_bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        # ...and keep it only if it is also free on UDP.
        if tcp_port and try_bind(tcp_port, socket.SOCK_DGRAM,
                                 socket.IPPROTO_UDP):
            return tcp_port
1688
1689
def ask(question, auto=False):
    """
    Raw input with a prompt that emulates logging.

    @param question: Question to be asked
    @param auto: Whether to return "y" instead of asking the question
    """
    if auto:
        logging.info("%s (y/n) y", question)
        return "y"
    timestamp = time.strftime("%H:%M:%S", time.localtime())
    return raw_input("%s INFO | %s (y/n) " % (timestamp, question))
1702
1703
def rdmsr(address, cpu=0):
    """
    Reads an x86 MSR from the specified CPU, returns as long integer.
    """
    # The msr kernel driver exposes registers as a sparse file; each MSR
    # lives at its own offset and is 8 bytes wide (native-endian u64).
    msr_path = '/dev/cpu/%s/msr' % cpu
    with open(msr_path, 'r', 0) as msr_file:
        msr_file.seek(address)
        return struct.unpack('=Q', msr_file.read(8))[0]
1711
1712
def wait_for_value(func,
                   expected_value=None,
                   min_threshold=None,
                   max_threshold=None,
                   timeout_sec=10):
    """
    Returns the value of func().  If |expected_value|, |min_threshold|, and
    |max_threshold| are not set, returns immediately.

    If |expected_value| is set, polls the return value until |expected_value| is
    reached, and returns that value.

    If either |max_threshold| or |min_threshold| is set, this function will
    will repeatedly call func() until the return value reaches or exceeds one of
    these thresholds.

    Polling will stop after |timeout_sec| regardless of these thresholds.

    @param func: function whose return value is to be waited on.
    @param expected_value: wait for func to return this value.
    @param min_threshold: wait for func value to reach or fall below this value.
    @param max_threshold: wait for func value to reach or rise above this value.
    @param timeout_sec: Number of seconds to wait before giving up and
                        returning whatever value func() last returned.

    Return value:
        The most recent return value of func().
    """
    def _goal_reached(value):
        # No goal configured at all: a single sample is sufficient.
        if (expected_value is None and min_threshold is None
                and max_threshold is None):
            return True
        if expected_value is not None and value == expected_value:
            return True
        if min_threshold is not None and value <= min_threshold:
            return True
        return max_threshold is not None and value >= max_threshold

    deadline = time.time() + timeout_sec
    while True:
        value = func()
        if _goal_reached(value) or time.time() >= deadline:
            return value
        time.sleep(0.1)
1758
1759
def wait_for_value_changed(func,
                           old_value=None,
                           timeout_sec=10):
    """
    Returns the value of func().

    The function polls the return value until it is different from |old_value|,
    and returns that value.

    Polling will stop after |timeout_sec|.

    @param func: function whose return value is to be waited on.
    @param old_value: wait for func to return a value different from this.
    @param timeout_sec: Number of seconds to wait before giving up and
                        returning whatever value func() last returned.

    @returns The most recent return value of func().
    """
    deadline = time.time() + timeout_sec
    while True:
        value = func()
        # Stop as soon as the value changes, or once the deadline passes.
        if value != old_value or time.time() >= deadline:
            return value
        time.sleep(0.1)
1790
1791
# Shared handle to the parsed autotest global/shadow configuration.
CONFIG = global_config.global_config

# Keep checking if the pid is alive every second until the timeout (in seconds)
CHECK_PID_IS_ALIVE_TIMEOUT = 6

# Host names/addresses that refer to the local machine; used by
# externalize_host() and is_localhost() below.
_LOCAL_HOST_LIST = ('localhost', '127.0.0.1')

# The default address of a vm gateway.
DEFAULT_VM_GATEWAY = '10.0.2.2'

# Google Storage bucket URI to store results in.
DEFAULT_OFFLOAD_GSURI = CONFIG.get_config_value(
        'CROS', 'results_storage_server', default=None)

# Default Moblab Ethernet Interface.
_MOBLAB_ETH_0 = 'eth0'
_MOBLAB_ETH_1 = 'eth1'

# A list of subnets that requires dedicated devserver and drone in the same
# subnet. Each item is a tuple of (subnet_ip, mask_bits), e.g.,
# ('192.168.0.0', 24))
RESTRICTED_SUBNETS = []
1814
def _setup_restricted_subnets():
    """Populate RESTRICTED_SUBNETS from the CROS.restricted_subnets config.

    Each config entry is an `ip/mask_bits` (or legacy `ip:mask_bits`) pair.
    """
    subnet_entries = CONFIG.get_config_value(
            'CROS', 'restricted_subnets', type=list, default=[])
    # TODO(dshi): Remove the code to split subnet with `:` after R51 is
    # off stable channel, and update shadow config to use `/` as
    # delimiter for consistency.
    for entry in subnet_entries:
        separator = '/' if '/' in entry else ':'
        ip, mask_bits = entry.split(separator)
        RESTRICTED_SUBNETS.append((ip, int(mask_bits)))

_setup_restricted_subnets()
1827
# regex pattern for CLIENT/wireless_ssid_ config. For example, global config
# can have following config in CLIENT section to indicate that hosts in subnet
# 192.168.0.1/24 should use wireless ssid of `ssid_1`
# wireless_ssid_192.168.0.1/24: ssid_1
# Raw string so `\d` is a regex digit class, not a (deprecated) string escape.
WIRELESS_SSID_PATTERN = r'wireless_ssid_(.*)/(\d+)'
1833
1834
def get_moblab_serial_number():
    """Gets the moblab device serial number from VPD.

    Runs `vpd -g serial_number` and returns its output. Falls back to a
    placeholder when the command fails or produces no output.

    @return: The serial number string, or 'NoSerialNumber' if it could not
             be read.
    """
    try:
        cmd_result = run('sudo vpd -g serial_number')
        if cmd_result.stdout:
            return cmd_result.stdout
    except error.CmdError as e:
        # Not fatal; fall through to the placeholder below.
        logging.error(str(e))
        logging.info('Unable to read serial number from vpd.')
    return 'NoSerialNumber'
1850
1851
def ping(host, deadline=None, tries=None, timeout=60, user=None):
    """Attempt to ping |host|.

    Shell out to 'ping' if host is an IPv4 addres or 'ping6' if host is an
    IPv6 address to try to reach |host| for |timeout| seconds.
    Returns exit code of ping.

    Per 'man ping', if you specify BOTH |deadline| and |tries|, ping only
    returns 0 if we get responses to |tries| pings within |deadline| seconds.

    Specifying |deadline| or |count| alone should return 0 as long as
    some packets receive responses.

    Note that while this works with literal IPv6 addresses it will not work
    with hostnames that resolve to IPv6 only.

    @param host: the host to ping.
    @param deadline: seconds within which |tries| pings must succeed.
    @param tries: number of pings to send.
    @param timeout: number of seconds after which to kill 'ping' command.
    @param user: if given, run the ping command as this user via `su`.
    @return exit code of ping command.
    """
    args = [host]
    # A host containing two ':'-separated groups is treated as a literal
    # IPv6 address.
    cmd = 'ping6' if re.search(r':.*:', host) else 'ping'

    if deadline:
        args.append('-w%d' % deadline)
    if tries:
        args.append('-c%d' % tries)

    if user is not None:
        args = [user, '-c', ' '.join([cmd] + args)]
        cmd = 'su'

    return run(cmd, args=args, verbose=True,
                          ignore_status=True, timeout=timeout,
                          stdout_tee=TEE_TO_LOGS,
                          stderr_tee=TEE_TO_LOGS).exit_status
1890
1891
def host_is_in_lab_zone(hostname):
    """Check if the host is in the CLIENT.dns_zone.

    @param hostname: The hostname to check.
    @returns True if hostname.dns_zone resolves, otherwise False.
    """
    shortname = hostname.split('.')[0]
    dns_zone = CONFIG.get_config_value('CLIENT', 'dns_zone', default=None)
    fqdn = '%s.%s' % (shortname, dns_zone)
    try:
        socket.gethostbyname(fqdn)
    except socket.gaierror:
        return False
    return True
1906
1907
def host_could_be_in_afe(hostname):
    """Check if the host could be in Autotest Front End.

    Report whether or not a host could be in AFE, without actually
    consulting AFE. This method exists because some systems are in the
    lab zone, but not actually managed by AFE.

    @param hostname: The hostname to check.
    @returns True if hostname is in lab zone, and does not match *-dev-*
    """
    # Check for '-dev-' first so a matching hostname skips the DNS lookup
    # entirely; this gives us greater resilience to lab failures.
    if '-dev-' in hostname:
        return False
    return host_is_in_lab_zone(hostname)
1922
1923
def get_chrome_version(job_views):
    """
    Retrieves the version of the chrome binary associated with a job.

    When a test runs we query the chrome binary for it's version and drop
    that value into a client keyval. To retrieve the chrome version we get all
    the views associated with a test from the db, including those of the
    server and client jobs, and parse the version out of the first test view
    that has it. If we never ran a single test in the suite the job_views
    dictionary will not contain a chrome version.

    This method cannot retrieve the chrome version from a dictionary that
    does not conform to the structure of an autotest tko view.

    @param job_views: a list of a job's result views, as returned by
                      the get_detailed_test_views method in rpc_interface.
    @return: The chrome version string, or None if one can't be found.
    """
    # Aborted jobs have no views.
    if not job_views:
        return None

    for view in job_views:
        attributes = view.get('attributes')
        if attributes and constants.CHROME_VERSION in attributes:
            return attributes.get(constants.CHROME_VERSION)

    logging.warning('Could not find chrome version for failure.')
    return None
1955
1956
def get_moblab_id():
    """Gets the moblab random id.

    The random id file is cached on disk. If it does not exist, a new file is
    created the first time.

    @returns the moblab random id.
    """
    moblab_id_filepath = '/home/moblab/.moblab_id'
    try:
        if not os.path.exists(moblab_id_filepath):
            # First call on this device: generate and persist a fresh id.
            random_id = uuid.uuid1().hex
            with open(moblab_id_filepath, 'w') as moblab_id_file:
                moblab_id_file.write('%s' % random_id)
            return random_id
        with open(moblab_id_filepath, 'r') as moblab_id_file:
            return moblab_id_file.read()
    except IOError as e:
        # Possible race condition, another process has created the file.
        # Sleep a second to make sure the file gets closed.
        logging.info(e)
        time.sleep(1)
        with open(moblab_id_filepath, 'r') as moblab_id_file:
            return moblab_id_file.read()
1982
1983
def get_offload_gsuri():
    """Return the GSURI to offload test results to.

    For the normal use case this is the results_storage_server in the
    global_config.

    However partners using Moblab will be offloading their results to a
    subdirectory of their image storage buckets. The subdirectory is
    determined by the MAC Address of the Moblab device.

    @returns gsuri to offload test results to.
    """
    if not is_moblab():
        # Non-moblab: the configured (or default) bucket is used as-is.
        return DEFAULT_OFFLOAD_GSURI

    # Moblab offloads into a per-device path under either the results
    # bucket or, if unset, a results/ subdirectory of the image bucket.
    bucket = DEFAULT_OFFLOAD_GSURI
    if not bucket:
        bucket = "%sresults/" % CONFIG.get_config_value(
                'CROS', 'image_storage_server')
    return '%s%s/%s/' % (bucket, get_moblab_serial_number(), get_moblab_id())
2007
2008
2009# TODO(petermayo): crosbug.com/31826 Share this with _GsUpload in
2010# //chromite.git/buildbot/prebuilt.py somewhere/somehow
def gs_upload(local_file, remote_file, acl, result_dir=None,
              transfer_timeout=300, acl_timeout=300):
    """Upload to GS bucket.

    @param local_file: Local file to upload
    @param remote_file: Remote location to upload the local_file to.
    @param acl: name or file used for controlling access to the uploaded
                file.
    @param result_dir: Result directory if you want to add tracing to the
                       upload.
    @param transfer_timeout: Timeout for this upload call.
    @param acl_timeout: Timeout for the acl call needed to confirm that
                        the uploader has permissions to execute the upload.

    @raise CmdError: the exit code of the gsutil call was not 0.

    @returns True/False - depending on if the upload succeeded or failed.
    """
    # https://developers.google.com/storage/docs/accesscontrol#extension
    CANNED_ACLS = ['project-private', 'private', 'public-read',
                   'public-read-write', 'authenticated-read',
                   'bucket-owner-read', 'bucket-owner-full-control']
    _GSUTIL_BIN = 'gsutil'
    acl_cmd = None
    if acl in CANNED_ACLS:
        cmd = '%s cp -a %s %s %s' % (_GSUTIL_BIN, acl, local_file, remote_file)
    else:
        # For private uploads we assume that the overlay board is set up
        # properly and a googlestore_acl.xml is present, if not this script
        # errors
        if not os.path.exists(acl):
            logging.error('Unable to find ACL File %s.', acl)
            return False
        cmd = '%s cp -a private %s %s' % (_GSUTIL_BIN, local_file, remote_file)
        acl_cmd = '%s setacl %s %s' % (_GSUTIL_BIN, acl, remote_file)

    if not result_dir:
        run(cmd, timeout=transfer_timeout, verbose=True)
        if acl_cmd:
            run(acl_cmd, timeout=acl_timeout, verbose=True)
        return True

    # With a result_dir, tee both commands' output into a tracing file.
    with open(os.path.join(result_dir, 'tracing'), 'w') as ftrace:
        ftrace.write('Preamble\n')
        run(cmd, timeout=transfer_timeout, verbose=True,
            stdout_tee=ftrace, stderr_tee=ftrace)
        if acl_cmd:
            ftrace.write('\nACL setting\n')
            # Apply the passed in ACL xml file to the uploaded object.
            run(acl_cmd, timeout=acl_timeout, verbose=True,
                stdout_tee=ftrace, stderr_tee=ftrace)
        ftrace.write('Postamble\n')
        return True
2062
2063
def gs_ls(uri_pattern):
    """Returns a list of URIs that match a given pattern.

    @param uri_pattern: a GS URI pattern, may contain wildcards

    @return A list of URIs matching the given pattern.

    @raise CmdError: the gsutil command failed.

    """
    listing = system_output('gsutil ls %s' % uri_pattern)
    return [line.rstrip() for line in listing.splitlines() if line]
2077
2078
def nuke_pids(pid_list, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
    """
    Given a list of pid's, kill them via an esclating series of signals.

    @param pid_list: List of PID's to kill.
    @param signal_queue: Iterable of signals to send the PID's to terminate
            them; defaults to SIGTERM followed by SIGKILL. (An immutable
            tuple default avoids the shared-mutable-default pitfall.)

    @return: A mapping of the signal name to the number of processes it
        was sent to.

    @raise error.AutoservRunError: if any process is still alive after the
        whole signal queue has been exhausted.
    """
    sig_count = {}
    # Build a value -> name map so logs and the returned dict use signal
    # names without hardcoding them. (.items() works on python 2 and 3,
    # unlike the previous .iteritems().)
    sig_names = dict((k, v) for v, k in signal.__dict__.items()
                     if v.startswith('SIG'))
    for sig in signal_queue:
        logging.debug('Sending signal %s to the following pids:', sig)
        sig_count[sig_names.get(sig, 'unknown_signal')] = len(pid_list)
        for pid in pid_list:
            logging.debug('Pid %d', pid)
            try:
                os.kill(pid, sig)
            except OSError:
                # The process may have died from a previous signal before we
                # could kill it.
                pass
        if sig == signal.SIGKILL:
            # Nothing survives SIGKILL; no point polling afterwards.
            return sig_count
        pid_list = [pid for pid in pid_list if pid_is_alive(pid)]
        if not pid_list:
            break
        time.sleep(CHECK_PID_IS_ALIVE_TIMEOUT)
    failed_list = []
    for pid in pid_list:
        if pid_is_alive(pid):
            # Bug fix: the format args were previously passed as a second
            # argument to append(), which raised TypeError at runtime.
            failed_list.append('Could not kill %d for process name: %s.' %
                               (pid, get_process_name(pid)))
    if failed_list:
        raise error.AutoservRunError('Following errors occured: %s' %
                                     failed_list, None)
    return sig_count
2119
2120
def externalize_host(host):
    """Returns an externally accessible host name.

    @param host: a host name or address (string)

    @return An externally visible host name or address

    """
    if host in _LOCAL_HOST_LIST:
        # Loopback names are meaningless off-box; use the real hostname.
        return socket.gethostname()
    return host
2130
2131
def urlopen_socket_timeout(url, data=None, timeout=5):
    """
    Wrapper to urllib2.urlopen with a socket timeout.

    This method will convert all socket timeouts to
    TimeoutExceptions, so we can use it in conjunction
    with the rpc retry decorator and continue to handle
    other URLErrors as we see fit.

    @param url: The url to open.
    @param data: The data to send to the url (eg: the urlencoded dictionary
                 used with a POST call).
    @param timeout: The timeout for this urlopen call.

    @return: The response of the urlopen call.

    @raises: error.TimeoutException when a socket timeout occurs.
             urllib2.URLError for errors that not caused by timeout.
             urllib2.HTTPError for errors like 404 url not found.
    """
    # urlopen takes no per-call timeout in this interface, so temporarily
    # swap the process-wide default socket timeout around the call.
    saved_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        try:
            return urllib2.urlopen(url, data=data)
        except urllib2.URLError as e:
            if type(e.reason) is socket.timeout:
                raise error.TimeoutException(str(e))
            raise
    finally:
        socket.setdefaulttimeout(saved_timeout)
2162
2163
def parse_chrome_version(version_string):
    """
    Parse a chrome version string and return version and milestone.

    Given a chrome version of the form "W.X.Y.Z", return "W.X.Y.Z" as
    the version and "W" as the milestone.

    @param version_string: Chrome version string.
    @return: a tuple (chrome_version, milestone). If the incoming version
             string is not of the form "W.X.Y.Z", chrome_version will
             be set to the incoming "version_string" argument and the
             milestone will be set to the empty string.
    """
    # Raw string so `\d` stays a regex digit class rather than a
    # (deprecated) string escape sequence.
    match = re.search(r'(\d+)\.\d+\.\d+\.\d+', version_string)
    ver = match.group(0) if match else version_string
    milestone = match.group(1) if match else ''
    return ver, milestone
2181
2182
def is_localhost(server):
    """Check if server is equivalent to localhost.

    @param server: Name of the server to check.

    @return: True if given server is equivalent to localhost; False if it
             is not, or if either host name failed to be resolved.
    """
    if server in _LOCAL_HOST_LIST:
        return True
    try:
        local_ip = socket.gethostbyname(socket.gethostname())
        return local_ip == socket.gethostbyname(server)
    except socket.gaierror:
        logging.error('Failed to resolve server name %s.', server)
        return False
2200
2201
def get_function_arg_value(func, arg_name, args, kwargs):
    """Get the value of the given argument for the function.

    @param func: Function being called with given arguments.
    @param arg_name: Name of the argument to look for value.
    @param args: arguments for function to be called.
    @param kwargs: keyword arguments for function to be called.

    @return: The value of the given argument for the function.

    @raise ValueError: If the argument is not listed function arguemnts.
    @raise KeyError: If no value is found for the given argument.
    """
    if arg_name in kwargs:
        return kwargs[arg_name]

    # inspect.getargspec was removed in python 3.11; prefer
    # getfullargspec when available (its .args/.defaults fields are
    # compatible), falling back for python 2.
    get_argspec = getattr(inspect, 'getfullargspec', None) or \
                  inspect.getargspec
    argspec = get_argspec(func)
    index = argspec.args.index(arg_name)
    try:
        return args[index]
    except IndexError:
        try:
            # The argument can use a default value. Reverse the default value
            # so argument with default value can be counted from the last to
            # the first.
            return argspec.defaults[::-1][len(argspec.args) - index - 1]
        except IndexError:
            raise KeyError('Argument %s is not given a value. argspec: %s, '
                           'args:%s, kwargs:%s' %
                           (arg_name, argspec, args, kwargs))
2232
2233
def has_systemd():
    """Check if the host is running systemd.

    @return: True if the host uses systemd, otherwise returns False.
    """
    # On systemd-managed hosts PID 1's executable is systemd.
    init_exe = os.readlink('/proc/1/exe')
    return os.path.basename(init_exe) == 'systemd'
2240
2241
def version_match(build_version, release_version, update_url=''):
    """Compare release version from lsb-release with cros-version label.

    build_version is a string based on build name. It is prefixed with builder
    info and branch ID, e.g., lumpy-release/R43-6809.0.0. It may not include
    builder info, e.g., lumpy-release, in which case, update_url shall be passed
    in to determine if the build is a trybot or pgo-generate build.
    release_version is retrieved from lsb-release.
    These two values might not match exactly.

    The method is designed to compare version for following scenarios, with
    samples of build version and expected release version:
    1. trybot non-release build (paladin, pre-cq or test-ap build):
       trybot-lumpy-paladin/R27-3837.0.0-b123 vs 3837.0.2013_03_21_1340
    2. trybot release build:
       trybot-lumpy-release/R27-3837.0.0-b456 vs 3837.0.0
    3. buildbot official release build:
       lumpy-release/R27-3837.0.0 vs 3837.0.0
    4. non-official paladin rc build:
       lumpy-paladin/R27-3878.0.0-rc7 vs 3837.0.0-rc7
    5. chrome-perf build:
       lumpy-chrome-perf/R28-3837.0.0-b2996 vs 3837.0.0
    6. pgo-generate build:
       lumpy-release-pgo-generate/R28-3837.0.0-b2996 vs
       3837.0.0-pgo-generate
    7. build version with --cheetsth suffix:
       lumpy-release/R28-3837.0.0-cheetsth vs 3837.0.0

    TODO: This logic has a bug if a trybot paladin build failed to be
    installed in a DUT running an older trybot paladin build with same
    platform number, but different build number (-b###). So to conclusively
    determine if a tryjob paladin build is imaged successfully, we may need
    to find out the date string from update url.

    @param build_version: Build name for cros version, e.g.
                          peppy-release/R43-6809.0.0 or R43-6809.0.0
    @param release_version: Release version retrieved from lsb-release,
                            e.g., 6809.0.0
    @param update_url: Update url which include the full builder information.
                       Default is set to empty string.

    @return: True if the values match, otherwise returns False.
    """
    # Release/CQ/PFQ builders: the label simply ends with the release
    # version (possibly carrying a -cheetsth suffix).
    if build_version.endswith(release_version):
        return True
    if build_version.endswith(release_version + '-cheetsth'):
        return True

    if build_version.endswith('-cheetsth'):
        build_version = re.sub('-cheetsth' + '$', '', build_version)

    # Drop the milestone prefix (R##-) and build-number suffix (-b##),
    # then trim any builder path, e.g. trybot-lumpy-paladin/.
    stripped_version = re.sub(r'(R\d+-|-b\d+)', '', build_version)
    stripped_version = stripped_version.split('/')[-1]

    trybot_pattern = r'.*trybot-.+-(paladin|pre-cq|test-ap|toolchain)'
    is_trybot_non_release_build = (
            re.match(trybot_pattern, build_version) or
            re.match(trybot_pattern, update_url))

    # Trybot paladin builds carry a date stamp in place of the final
    # version component; normalize it to 0 for comparison.
    release_version_no_date = re.sub(r'\d{4}_\d{2}_\d{2}_\d+', '0',
                                    release_version)
    has_date_string = release_version != release_version_no_date

    pgo_pattern = r'.+-pgo-generate'
    is_pgo_generate_build = (
            re.match(pgo_pattern, build_version) or
            re.match(pgo_pattern, update_url))

    # Strip any |-pgo-generate| marker from the release version.
    release_version_no_pgo = release_version.replace('-pgo-generate', '')
    has_pgo_generate = release_version != release_version_no_pgo

    if is_trybot_non_release_build:
        if not has_date_string:
            logging.error('A trybot paladin or pre-cq build is expected. '
                          'Version "%s" is not a paladin or pre-cq  build.',
                          release_version)
            return False
        return stripped_version == release_version_no_date

    if is_pgo_generate_build:
        if not has_pgo_generate:
            logging.error('A pgo-generate build is expected. Version '
                          '"%s" is not a pgo-generate build.',
                          release_version)
            return False
        return stripped_version == release_version_no_pgo

    if has_date_string:
        logging.error('Unexpected date found in a non trybot paladin or '
                      'pre-cq build.')
        return False
    # Versioned build, i.e., rc or release build.
    return stripped_version == release_version
2351
2352
def get_real_user():
    """Get the real user that runs the script.

    When run under sudo, SUDO_USER holds the invoking user; otherwise we
    fall back to the USER environment variable.

    @return: The user name that runs the script.

    """
    # `or` preserves the original falsy-fallback semantics (an empty
    # SUDO_USER also falls through to USER).
    return os.environ.get('SUDO_USER') or os.environ.get('USER')
2367
2368
def get_service_pid(service_name):
    """Return pid of service.

    @param service_name: string name of service.

    @return: pid or 0 if service is not running.
    """
    if has_systemd():
        # systemctl show prints 'MainPID=0' if the service is not running.
        output = run('systemctl show -p MainPID %s' %
                                    service_name, ignore_status=True).stdout
        return int(output.split('=')[1])
    # Upstart: parse the pid out of the `status` output.
    output = run('status %s' % service_name,
                                    ignore_status=True).stdout
    if 'start/running' not in output:
        return 0
    return int(output.split()[3])
2387
2388
def control_service(service_name, action='start', ignore_status=True):
    """Controls a service. It can be used to start, stop or restart
    a service.

    @param service_name: string service to be restarted.

    @param action: string choice of action to control command.

    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    if action not in ('start', 'stop', 'restart'):
        raise ValueError('Unknown action supplied as parameter.')

    if has_systemd():
        control_cmd = 'systemctl ' + action + ' ' + service_name
    else:
        control_cmd = action + ' ' + service_name
    return system(control_cmd, ignore_status=ignore_status)
2408
2409
def restart_service(service_name, ignore_status=True):
    """Restarts a service.

    Thin wrapper over control_service() with action='restart'.

    @param service_name: string service to be restarted.
    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(service_name, action='restart',
                           ignore_status=ignore_status)
2420
2421
def start_service(service_name, ignore_status=True):
    """Starts a service.

    Thin wrapper over control_service() with action='start'.

    @param service_name: string service to be started.
    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(service_name, action='start',
                           ignore_status=ignore_status)
2432
2433
def stop_service(service_name, ignore_status=True):
    """Stops a service.

    Thin wrapper over control_service() with action='stop'.

    @param service_name: string service to be stopped.
    @param ignore_status: boolean ignore if system command fails.

    @return: status code of the executed command.
    """
    return control_service(service_name, action='stop',
                           ignore_status=ignore_status)
2444
2445
def sudo_require_password():
    """Test if the process can run sudo command without using password.

    @return: True if the process needs password to run sudo command.

    """
    try:
        # `sudo -n` fails instead of prompting when a password is required.
        run('sudo -n true')
        return False
    except error.CmdError:
        # logging.warn is a deprecated alias of logging.warning.
        logging.warning('sudo command requires password.')
        return True
2458
2459
def is_in_container():
    """Check if the process is running inside a container.

    @return: True if the process is running inside a container, otherwise False.
    """
    # Classic lxc containers show /lxc/ in init's cgroup path.
    grep_result = run('grep -q "/lxc/" /proc/1/cgroup',
                            verbose=False, ignore_status=True)
    if grep_result.exit_status == 0:
        return True

    # lxd/lxc containers export the "container" environment variable.
    return os.environ.get('container') == 'lxc'
2475
2476
def is_flash_installed():
    """
    The Adobe Flash binary is only distributed with internal builds.
    """
    pepper_dir = '/opt/google/chrome/pepper'
    return (os.path.exists(os.path.join(pepper_dir, 'libpepflashplayer.so'))
        and os.path.exists(os.path.join(pepper_dir, 'pepper-flash.info')))
2483
2484
def verify_flash_installed():
    """
    The Adobe Flash binary is only distributed with internal builds.
    Warn users of public builds of the extra dependency.
    """
    if is_flash_installed():
        return
    raise error.TestNAError('No Adobe Flash binary installed.')
2492
2493
def is_in_same_subnet(ip_1, ip_2, mask_bits=24):
    """Check if two IP addresses are in the same subnet with given mask bits.

    The two IP addresses are string of IPv4, e.g., '192.168.0.3'.

    @param ip_1: First IP address to compare.
    @param ip_2: Second IP address to compare.
    @param mask_bits: Number of mask bits for subnet comparison. Default to 24.

    @return: True if the two IP addresses are in the same subnet.

    """
    # Netmask with the top |mask_bits| bits set. `1 << mask_bits` equals
    # the old `2L << mask_bits - 1` but avoids the python-2-only long
    # literal, keeping the code portable.
    mask = ((1 << mask_bits) - 1) << (32 - mask_bits)
    ip_1_num = struct.unpack('!I', socket.inet_aton(ip_1))[0]
    ip_2_num = struct.unpack('!I', socket.inet_aton(ip_2))[0]
    return ip_1_num & mask == ip_2_num & mask
2510
2511
def get_ip_address(hostname):
    """Resolve the given hostname to an IPv4 address.

    @param hostname: Hostname of a DUT.

    @return: The IP address of given hostname. None if hostname is empty
             or resolution fails.
    """
    if not hostname:
        return None
    try:
        return socket.gethostbyname(hostname)
    except socket.gaierror as e:
        logging.error('Failed to get IP address of %s, error: %s.',
                      hostname, e)
        return None
2525
2526
def get_servers_in_same_subnet(host_ip, mask_bits, servers=None,
                               server_ip_map=None):
    """Get the servers in the same subnet of the given host ip.

    @param host_ip: The IP address of a dut to look for devserver.
    @param mask_bits: Number of mask bits.
    @param servers: A list of servers to be filtered by subnet specified by
                    host_ip and mask_bits.
    @param server_ip_map: A map between the server name and its IP address.
            The map can be pre-built for better performance, e.g., when
            allocating a drone for an agent task.

    @return: A list of servers in the same subnet of the given host ip.

    """
    matched_servers = []
    if not servers and not server_ip_map:
        raise ValueError('Either `servers` or `server_ip_map` must be given.')
    if not servers:
        servers = server_ip_map.keys()
    # Make sure server_ip_map is an empty dict if it's not set.
    if not server_ip_map:
        server_ip_map = {}
    for server in servers:
        # Only fall back to a (potentially slow) DNS lookup when the
        # pre-built map has no address for this server. The old form
        # `server_ip_map.get(server, get_ip_address(server))` evaluated
        # the lookup eagerly for every server, defeating the purpose of
        # the pre-built map.
        server_ip = server_ip_map.get(server)
        if server_ip is None:
            server_ip = get_ip_address(server)
        if server_ip and is_in_same_subnet(server_ip, host_ip, mask_bits):
            matched_servers.append(server)
    return matched_servers
2555
2556
def get_restricted_subnet(hostname, restricted_subnets=RESTRICTED_SUBNETS):
    """Get the restricted subnet of given hostname.

    @param hostname: Name of the host to look for matched restricted subnet.
    @param restricted_subnets: A list of restricted subnets, default is set to
            RESTRICTED_SUBNETS.

    @return: A tuple of (subnet_ip, mask_bits), which defines a restricted
             subnet, or None when the hostname cannot be resolved or no
             subnet matches.
    """
    host_ip = get_ip_address(hostname)
    if not host_ip:
        return None
    for net_ip, net_mask_bits in restricted_subnets:
        if is_in_same_subnet(net_ip, host_ip, net_mask_bits):
            return net_ip, net_mask_bits
    return None
2573
2574
def get_wireless_ssid(hostname):
    """Get the wireless ssid based on given hostname.

    The method tries to locate the wireless ssid in the same subnet of given
    hostname first. If none is found, it returns the default setting in
    CLIENT/wireless_ssid.

    @param hostname: Hostname of the test device.

    @return: wireless ssid for the test device.
    """
    default_ssid = CONFIG.get_config_value('CLIENT', 'wireless_ssid',
                                           default=None)
    host_ip = get_ip_address(hostname)
    if not host_ip:
        return default_ssid

    # All wireless ssid entries in the global config.
    ssids = CONFIG.get_config_value_regex('CLIENT', WIRELESS_SSID_PATTERN)

    # Multiple subnets may match; keep the most specific one, i.e., the
    # entry with the highest mask bit count.
    best_ssid = default_ssid
    best_maskbit = -1
    for key, ssid in ssids.items():
        # Keys filtered by WIRELESS_SSID_PATTERN are formatted as
        # wireless_ssid_[subnet_ip]/[maskbit], for example:
        # wireless_ssid_192.168.0.1/24
        # Extract the subnet ip and mask bit from the key name.
        subnet_ip, maskbit = re.match(WIRELESS_SSID_PATTERN, key).groups()
        maskbit = int(maskbit)
        if (maskbit > best_maskbit and
            is_in_same_subnet(subnet_ip, host_ip, maskbit)):
            best_ssid = ssid
            best_maskbit = maskbit
    return best_ssid
2612
2613
def parse_launch_control_build(build_name):
    """Get branch, target, build_id from the given Launch Control build_name.

    @param build_name: Name of a Launch Control build, should be formated as
                       branch/target/build_id

    @return: Tuple of branch, target, build_id
    @raise ValueError: If the build_name is not correctly formated.
    """
    segments = build_name.split('/')
    # Unpacking enforces the exact branch/target/build_id format; any other
    # segment count raises ValueError.
    branch, target, build_id = segments
    return branch, target, build_id
2625
2626
def parse_android_target(target):
    """Get board and build type from the given target.

    @param target: Name of an Android build target, e.g., shamu-eng.

    @return: Tuple of board, build_type
    @raise ValueError: If the target is not correctly formated.
    """
    pieces = target.split('-')
    # Unpacking enforces the exact board-build_type format; any other
    # dash count raises ValueError.
    board, build_type = pieces
    return board, build_type
2637
2638
def parse_launch_control_target(target):
    """Parse the build target and type from a Launch Control target.

    The Launch Control target has the format of build_target-build_type, e.g.,
    shamu-eng or dragonboard-userdebug. This method extracts the build target
    and type from the target name.

    @param target: Name of a Launch Control target, e.g., shamu-eng.

    @return: (build_target, build_type), e.g., ('shamu', 'userdebug'), or
             (None, None) when the target does not match the pattern.
    """
    match = re.match('(?P<build_target>.+)-(?P<build_type>[^-]+)', target)
    if not match:
        return None, None
    return match.group('build_target'), match.group('build_type')
2655
2656
def is_launch_control_build(build):
    """Check if a given build is a Launch Control build.

    @param build: Name of a build, e.g.,
                  ChromeOS build: daisy-release/R50-1234.0.0
                  Launch Control build: git_mnc_release/shamu-eng

    @return: True if the build name matches the pattern of a Launch Control
             build, False otherwise.
    """
    try:
        target = parse_launch_control_build(build)[1]
    except ValueError:
        # Not formatted as branch/target/build_id.
        return False
    build_target = parse_launch_control_target(target)[0]
    return bool(build_target)
2676
2677
def which(exec_file):
    """Finds an executable file.

    If the file name contains a path component, it is checked as-is.
    Otherwise, we check with each of the path components found in the system
    PATH prepended. This behavior is similar to the 'which' command-line tool.

    @param exec_file: Name or path to desired executable.

    @return: An actual path to the executable, or None if not found.
    """
    if os.path.dirname(exec_file):
        # Explicit path component given: check that path directly.
        return exec_file if os.access(exec_file, os.X_OK) else None
    search_path = os.environ.get('PATH')
    directories = search_path.split(os.pathsep) if search_path else []
    for directory in directories:
        candidate = os.path.join(directory, exec_file)
        if os.access(candidate, os.X_OK):
            return candidate
    return None
2697
2698
class TimeoutError(error.TestError):
    """Error raised when we time out when waiting on a condition.

    Raised by poll_for_condition() when no explicit `exception` argument
    is supplied.
    """
    pass
2702
2703
def poll_for_condition(condition,
                       exception=None,
                       timeout=10,
                       sleep_interval=0.1,
                       desc=None):
    """Polls until a condition becomes true.

    @param condition: function taking no args and returning bool
    @param exception: exception to throw if condition doesn't become true
    @param timeout: maximum number of seconds to wait
    @param sleep_interval: time to sleep between polls
    @param desc: description of default TimeoutError used if 'exception' is
                 None

    @return The true value that caused the poll loop to terminate.

    @raise 'exception' arg if supplied; TimeoutError otherwise
    """
    deadline = time.time() + timeout
    while True:
        result = condition()
        if result:
            return result
        # Give up when the next wake-up would already be past the deadline.
        if time.time() + sleep_interval > deadline:
            if exception:
                logging.error('Will raise error %r due to unexpected return: '
                              '%r', exception, result)
                raise exception

            if desc:
                message = 'Timed out waiting for condition: ' + desc
            else:
                message = 'Timed out waiting for unnamed condition'
            logging.error(message)
            raise TimeoutError(message)

        time.sleep(sleep_interval)
2741
2742
class metrics_mock(metrics_mock_class.mock_class_base):
    """Mock class standing in for metrics when chromite is not installed."""
2746