# base_utils.py revision 593def56a4eba6339d13237975f33f3f4b24e7cc
1#
2# Copyright 2008 Google Inc. Released under the GPL v2
3
4import os, pickle, random, re, resource, select, shutil, signal, StringIO
5import socket, struct, subprocess, sys, time, textwrap, urlparse
6import warnings, smtplib, logging, urllib2
7from threading import Thread, Event
8try:
9    import hashlib
10except ImportError:
11    import md5, sha
12from autotest_lib.client.common_lib import error, logging_manager
13
def deprecated(func):
    """Decorator that marks *func* as deprecated.

    Every call emits a DeprecationWarning before delegating to the
    wrapped function. The wrapper copies the original name, docstring
    and attribute dict so introspection keeps working.
    """
    def wrapper(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__
    wrapper.__dict__.update(func.__dict__)
    return wrapper
25
26
27class _NullStream(object):
28    def write(self, data):
29        pass
30
31
32    def flush(self):
33        pass
34
35
# Sentinel passed as a *_tee argument to request that the stream be
# forwarded to the logging subsystem instead of a real file object.
TEE_TO_LOGS = object()
# Shared do-nothing stream returned whenever a tee target is None.
_the_null_stream = _NullStream()

# Default logging levels used when teeing command output to the logs.
DEFAULT_STDOUT_LEVEL = logging.DEBUG
DEFAULT_STDERR_LEVEL = logging.ERROR

# prefixes for logging stdout/stderr of commands
STDOUT_PREFIX = '[stdout] '
STDERR_PREFIX = '[stderr] '
45
46def custom_warning_handler(message, category, filename, lineno, file=None,
47                           line=None):
48    """Custom handler to log at the WARNING error level. Ignores |file|."""
49    logging.warning(warnings.formatwarning(message, category, filename, lineno,
50                                           line))
51
52warnings.showwarning = custom_warning_handler
53
def get_stream_tee_file(stream, level, prefix=''):
    """Map a tee argument onto a concrete stream object.

    TEE_TO_LOGS -> a LoggingFile emitting at *level* with *prefix*;
    None -> the shared null sink; anything else -> returned unchanged.
    """
    if stream is TEE_TO_LOGS:
        return logging_manager.LoggingFile(level=level, prefix=prefix)
    if stream is None:
        return _the_null_stream
    return stream
60
61
class BgJob(object):
    """A single shell command running in the background via subprocess.

    stdout/stderr are always captured through pipes; the data is pulled
    out of the pipes by process_output() (driven externally, e.g. by
    join_bg_jobs/_wait_for_commands) and teed to the configured streams.
    """
    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None, stderr_level=DEFAULT_STDERR_LEVEL):
        self.command = command
        self.stdout_tee = get_stream_tee_file(stdout_tee, DEFAULT_STDOUT_LEVEL,
                                              prefix=STDOUT_PREFIX)
        self.stderr_tee = get_stream_tee_file(stderr_tee, stderr_level,
                                              prefix=STDERR_PREFIX)
        self.result = CmdResult(command)

        # allow for easy stdin input by string, we'll let subprocess create
        # a pipe for stdin input and we'll write to it in the wait loop
        if isinstance(stdin, basestring):
            self.string_stdin = stdin
            stdin = subprocess.PIPE
        else:
            self.string_stdin = None

        if verbose:
            logging.debug("Running '%s'" % command)
        # shell=True with an explicit /bin/bash so the command line is
        # interpreted consistently regardless of the caller's shell.
        self.sp = subprocess.Popen(command, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   preexec_fn=self._reset_sigpipe, shell=True,
                                   executable="/bin/bash",
                                   stdin=stdin)


    def output_prepare(self, stdout_file=None, stderr_file=None):
        # Buffers that accumulate the full stdout/stderr; must be set
        # before process_output() is called.
        self.stdout_file = stdout_file
        self.stderr_file = stderr_file


    def process_output(self, stdout=True, final_read=False):
        """output_prepare must be called prior to calling this"""
        if stdout:
            pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee
        else:
            pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            while select.select([pipe], [], [], 0)[0]:
                data.append(os.read(pipe.fileno(), 1024))
                if len(data[-1]) == 0:
                    # EOF: the process closed its end of the pipe
                    break
            data = "".join(data)
        else:
            # perform a single read
            data = os.read(pipe.fileno(), 1024)
        buf.write(data)
        tee.write(data)


    def cleanup(self):
        # Flush the tees and close our ends of the output pipes, then
        # snapshot the accumulated output into the CmdResult.
        self.stdout_tee.flush()
        self.stderr_tee.flush()
        self.sp.stdout.close()
        self.sp.stderr.close()
        self.result.stdout = self.stdout_file.getvalue()
        self.result.stderr = self.stderr_file.getvalue()


    def _reset_sigpipe(self):
        # Run in the child before exec: restore default SIGPIPE handling,
        # since Python ignores SIGPIPE and the child would inherit that.
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
127
128
def ip_to_long(ip):
    """Convert a dotted-quad IPv4 string to its 32-bit integer value."""
    # inet_aton yields 4 raw bytes; '!L' decodes them as an unsigned
    # long in network (big-endian) byte order.
    packed = socket.inet_aton(ip)
    return struct.unpack('!L', packed)[0]
132
133
def long_to_ip(number):
    """Convert a 32-bit integer back into a dotted-quad IPv4 string."""
    # Inverse of ip_to_long: pack big-endian, then format the 4 bytes.
    packed = struct.pack('!L', number)
    return socket.inet_ntoa(packed)
137
138
def create_subnet_mask(bits):
    """Return the 32-bit netmask integer with the top *bits* bits set."""
    # All 32 ones, with the low (32 - bits) host bits cleared.
    return ((1 << 32) - 1) ^ ((1 << (32 - bits)) - 1)
141
142
def format_ip_with_mask(ip, mask_bits):
    """Return 'a.b.c.d/bits' with the host bits of *ip* zeroed out."""
    network = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(network), mask_bits)
146
147
def normalize_hostname(alias):
    """Resolve *alias* to an address, then return its canonical hostname."""
    address = socket.gethostbyname(alias)
    hostname, _aliases, _addresses = socket.gethostbyaddr(address)
    return hostname
151
152
def get_ip_local_port_range():
    """Return the (lower, upper) ephemeral port bounds from /proc."""
    line = read_one_line('/proc/sys/net/ipv4/ip_local_port_range')
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$', line)
    lower, upper = match.groups()
    return (int(lower), int(upper))
157
158
def set_ip_local_port_range(lower, upper):
    """Write the ephemeral port range [lower, upper] to /proc."""
    new_range = '%d %d\n' % (lower, upper)
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range', new_range)
162
163
164
165def send_email(mail_from, mail_to, subject, body):
166    """
167    Sends an email via smtp
168
169    mail_from: string with email address of sender
170    mail_to: string or list with email address(es) of recipients
171    subject: string with subject of email
172    body: (multi-line) string with body of email
173    """
174    if isinstance(mail_to, str):
175        mail_to = [mail_to]
176    msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (mail_from, ','.join(mail_to),
177                                                   subject, body)
178    try:
179        mailer = smtplib.SMTP('localhost')
180        try:
181            mailer.sendmail(mail_from, mail_to, msg)
182        finally:
183            mailer.quit()
184    except Exception, e:
185        # Emails are non-critical, not errors, but don't raise them
186        print "Sending email failed. Reason: %s" % repr(e)
187
188
def read_one_line(filename):
    """Return the first line of *filename* without its trailing newline.

    Closes the file handle explicitly instead of relying on garbage
    collection, matching read_file/open_write_close in this module.
    """
    f = open(filename, 'r')
    try:
        return f.readline().rstrip('\n')
    finally:
        f.close()
191
192
def read_file(filename):
    """Return the entire contents of *filename* as one string."""
    src = open(filename)
    try:
        contents = src.read()
    finally:
        src.close()
    return contents
199
200
def get_field(data, param, linestart="", sep=" "):
    """
    Parse data from string.
    @param data: Data to parse.
        example:
          data:
             cpu   324 345 34  5 345
             cpu0  34  11  34 34  33
             ^^^^
             start of line
             params 0   1   2  3   4
    @param param: Position of parameter after linestart marker.
    @param linestart: String to which start line with parameters.
    @param sep: Separator between parameters regular expression.
    @return: The selected field as a string, or None when no line
        starts with |linestart|.
    """
    # NOTE(review): |linestart| is interpolated into a lookbehind, which
    # must be fixed-width — it should be a plain literal with no
    # variable-width regex metacharacters.
    search = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
    find = search.search(data)
    if find != None:
        # Split the rest of the matched line on the |sep| regex and
        # return the requested field.
        return re.split("%s" % sep, find.group(1))[param]
    else:
        print "There is no line which starts with %s in data." % linestart
        return None
223
224
def write_one_line(filename, line):
    """Replace *filename*'s contents with *line*, newline-terminated."""
    normalized = line.rstrip('\n') + '\n'
    open_write_close(filename, normalized)
227
228
def open_write_close(filename, data):
    """Overwrite *filename* with *data*, always closing the handle."""
    out = open(filename, 'w')
    try:
        out.write(data)
    finally:
        out.close()
235
236
def matrix_to_string(matrix, header=None):
    """
    Return a pretty, aligned string representation of a nxm matrix.

    This representation can be used to print any tabular data, such as
    database results. It works by scanning the lengths of each element
    in each column, and determining the format string dynamically.

    @param matrix: Matrix representation (list with n rows of m elements).
    @param header: Optional tuple or list with header elements to be displayed.
    """
    if type(header) is list:
        header = tuple(header)
    lengths = []
    if header:
        # Start each column at least as wide as its header.
        for column in header:
            lengths.append(len(column))
    # Widen each column to fit its longest cell, measured after utf-8
    # encoding so multi-byte characters count their encoded length.
    for row in matrix:
        for i, column in enumerate(row):
            column = unicode(column).encode("utf-8")
            cl = len(column)
            try:
                ml = lengths[i]
                if cl > ml:
                    lengths[i] = cl
            except IndexError:
                # Row has more columns than seen so far: grow the list.
                lengths.append(cl)

    lengths = tuple(lengths)
    # Build one left-aligned %-style field per column.
    format_string = ""
    for length in lengths:
        format_string += "%-" + str(length) + "s "
    format_string += "\n"

    matrix_str = ""
    if header:
        matrix_str += format_string % header
    for row in matrix:
        matrix_str += format_string % tuple(row)

    return matrix_str
278
279
def read_keyval(path):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.

    Values that look like integers or floats are converted; everything
    else stays a string. A missing file yields an empty dict.

    @raise ValueError: if a non-empty line is not of the form key=value.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = {}
    if os.path.exists(path):
        keyval_file = open(path)
        try:
            for line in keyval_file:
                # strip comments and trailing whitespace
                line = re.sub('#.*', '', line).rstrip()
                if not line:
                    # skip blank and comment-only lines rather than
                    # rejecting them as malformed
                    continue
                if not re.search(r'^[-\.\w]+=', line):
                    raise ValueError('Invalid format line: %s' % line)
                key, value = line.split('=', 1)
                if re.search('^\d+$', value):
                    value = int(value)
                elif re.search('^(\d+\.)?\d+$', value):
                    value = float(value)
                keyval[key] = value
        finally:
            # close explicitly instead of leaking the handle
            keyval_file.close()
    return keyval
301
302
def write_keyval(path, dictionary, type_tag=None, tap_report=None):
    """
    Write a key-value pair format file out to a file. This uses append
    mode to open the file, so existing text will not be overwritten or
    reparsed.

    If type_tag is None, then the key must be composed of alphanumeric
    characters (or dashes+underscores). However, if type-tag is not
    null then the keys must also have "{type_tag}" as a suffix. At
    the moment the only valid values of type_tag are "attr" and "perf".

    @param path: full path of the file to be written
    @param dictionary: the items to write
    @param type_tag: see text above
    @param tap_report: optional TAP report object; if set and enabled,
            the keyvals are recorded there as well
    @raise ValueError: on an unknown type_tag or an invalid key
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')

    # Validate the tag and build the key pattern *before* opening the
    # file, so an invalid tag does not leak an open handle.
    if type_tag is None:
        key_regex = re.compile(r'^[-\.\w]+$')
    else:
        if type_tag not in ('attr', 'perf'):
            raise ValueError('Invalid type tag: %s' % type_tag)
        escaped_tag = re.escape(type_tag)
        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % escaped_tag)

    keyval = open(path, 'a')
    try:
        # Sorted keys give a deterministic, diffable file.
        for key in sorted(dictionary.keys()):
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, dictionary[key]))
    finally:
        keyval.close()

    # same for tap
    if tap_report is not None and tap_report.do_tap_report:
        tap_report.record_keyval(path, dictionary, type_tag=type_tag)
340
class FileFieldMonitor(object):
    """
    Monitors the information from the file and reports it's values.

    It gather the information at start and stop of the measurement or
    continuously during the measurement.
    """
    class Monitor(Thread):
        """
        Internal monitor class to ensure continuous monitor of monitored file.
        """
        def __init__(self, master):
            """
            @param master: Master class which control Monitor
            """
            Thread.__init__(self)
            self.master = master

        def run(self):
            """
            Start monitor in thread mode
            """
            # Sample repeatedly until the master signals the end of the
            # measurement through end_event.
            while not self.master.end_event.isSet():
                self.master._get_value(self.master.logging)
                time.sleep(self.master.time_step)


    def __init__(self, status_file, data_to_read, mode_diff, continuously=False,
                 contlogging=False, separator=" +", time_step=0.1):
        """
        Initialize variables.
        @param status_file: File contain status.
        @param data_to_read: List of tuples with data position.
            format: [(start_of_line,position in params)]
            example:
              data:
                 cpu   324 345 34  5 345
                 cpu0  34  11  34 34  33
                 ^^^^
                 start of line
                 params 0   1   2  3   4
        @param mode_diff: True to subtract old value from new value,
            False make average of the values.
        @param continuously: Start the monitoring thread using the time_step
            as the measurement period.
        @param contlogging: Log data in continuous run.
        @param separator: Regular expression of separator.
        @param time_step: Time period of the monitoring value.
        """
        self.end_event = Event()  # set when the continuous monitor must stop
        self.start_time = 0
        self.end_time = 0
        self.test_time = 0

        self.status_file = status_file
        self.separator = separator
        self.data_to_read = data_to_read
        self.num_of_params = len(self.data_to_read)
        self.mode_diff = mode_diff
        self.continuously = continuously
        self.time_step = time_step

        self.value = [0 for i in range(self.num_of_params)]
        self.old_value = [0 for i in range(self.num_of_params)]
        self.log = []  # history of sampled values, one list per sample
        self.logging = contlogging

        self.started = False
        self.num_of_get_value = 0  # number of samples taken so far
        self.monitor = None  # background Monitor thread when continuous


    def _get_value(self, logging=True):
        """
        Return current values.
        @param logging: If true log value in memory. There can be problem
          with long run.
        """
        data = read_file(self.status_file)
        value = []
        for i in range(self.num_of_params):
            value.append(int(get_field(data,
                             self.data_to_read[i][1],
                             self.data_to_read[i][0],
                             self.separator)))

        if logging:
            self.log.append(value)
        if not self.mode_diff:
            # Average mode: accumulate a running sum; the mean is taken
            # later in stop() by dividing by num_of_get_value.
            value = map(lambda x, y: x + y, value, self.old_value)

        self.old_value = value
        self.num_of_get_value += 1
        return value


    def start(self):
        """
        Start value monitor.
        """
        if self.started:
            self.stop()
        self.old_value = [0 for i in range(self.num_of_params)]
        self.num_of_get_value = 0
        self.log = []
        self.end_event.clear()
        self.start_time = time.time()
        self._get_value()
        self.started = True
        if (self.continuously):
            self.monitor = FileFieldMonitor.Monitor(self)
            self.monitor.start()


    def stop(self):
        """
        Stop value monitor.
        """
        if self.started:
            self.started = False
            self.end_time = time.time()
            self.test_time = self.end_time - self.start_time
            self.value = self._get_value()
            if (self.continuously):
                # Signal the background thread and wait for it to exit.
                self.end_event.set()
                self.monitor.join()
            if (self.mode_diff):
                # Difference between the last and the first sample.
                self.value = map(lambda x, y: x - y, self.log[-1], self.log[0])
            else:
                # Average of the accumulated sums.
                self.value = map(lambda x: x / self.num_of_get_value,
                                 self.value)


    def get_status(self):
        """
        @return: Status of monitored process average value,
            time of test and array of monitored values and time step of
            continuous run.
        """
        if self.started:
            self.stop()
        if self.mode_diff:
            # Convert the raw sample log into per-interval differences.
            for i in range(len(self.log) - 1):
                self.log[i] = (map(lambda x, y: x - y,
                                   self.log[i + 1], self.log[i]))
            self.log.pop()
        return (self.value, self.test_time, self.log, self.time_step)
489
490
def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    scheme = urlparse.urlparse(path)[0]
    return scheme in ('http', 'ftp')
496
497
def urlopen(url, data=None, timeout=5):
    """Wrapper to urllib2.urlopen with timeout addition."""
    # The global socket default timeout is the only knob urllib2 gives
    # us here; restore the caller's value no matter what happens.
    saved_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib2.urlopen(url, data=data)
    finally:
        socket.setdefaulttimeout(saved_timeout)
508
509
def urlretrieve(url, filename, data=None, timeout=300):
    """Download *url* into the local file *filename*."""
    logging.debug('Fetching %s -> %s', url, filename)

    remote = urlopen(url, data=data, timeout=timeout)
    try:
        local = open(filename, 'wb')
        try:
            # Stream the body straight to disk in chunks.
            shutil.copyfileobj(remote, local)
        finally:
            local.close()
    finally:
        remote.close()
523
524
def hash(type, input=None):
    """
    Returns an hash object of type md5 or sha1. This function is implemented in
    order to encapsulate hash objects in a way that is compatible with python
    2.4 and python 2.6 without warnings.

    Note that even though python 2.6 hashlib supports hash types other than
    md5 and sha1, we are artificially limiting the input values in order to
    make the function to behave exactly the same among both python
    implementations.

    @param input: Optional input string that will be used to update the hash.
    """
    if type not in ['md5', 'sha1']:
        raise ValueError("Unsupported hash type: %s" % type)

    try:
        digest = hashlib.new(type)
    except NameError:
        # hashlib failed to import (python 2.4): fall back to the
        # deprecated md5/sha modules.
        if type == 'md5':
            digest = md5.new()
        else:
            digest = sha.new()

    if input:
        digest.update(input)

    return digest
553
554
def get_file(src, dest, permissions=None):
    """Fetch *src* (local path or http/ftp URL) into *dest*.

    Optionally chmods the result. Returns *dest*, or None when src and
    dest are already the same path.
    """
    if src == dest:
        # Nothing to copy onto itself.
        return

    if not is_url(src):
        shutil.copyfile(src, dest)
    else:
        urlretrieve(src, dest)

    if permissions:
        os.chmod(dest, permissions)
    return dest
568
569
def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    returns either the path to the local file, or the fetched URL

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                            = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                            = '/tmp/file'
                            (after retrieving it)
    """
    if not is_url(src):
        return os.path.join(srcdir, src)
    # URL case: download into destdir under the URL's basename.
    filename = os.path.basename(urlparse.urlparse(src)[2])
    return get_file(src, os.path.join(destdir, filename))
588
589
def update_version(srcdir, preserve_srcdir, new_version, install,
                   *args, **dargs):
    """
    Make sure srcdir is version new_version

    If not, delete it and install() the new version.

    In the preserve_srcdir case, we just check it's up to date,
    and if not, we rerun install, without removing srcdir

    @param srcdir: directory whose installed version is tracked by a
            pickled '.version' file inside it
    @param preserve_srcdir: if True, never delete srcdir before reinstall
    @param new_version: version identifier to compare against / record
    @param install: callable invoked (with *args/**dargs) when the
            installed version is missing or out of date
    """
    versionfile = os.path.join(srcdir, '.version')
    install_needed = True

    if os.path.exists(versionfile):
        # Binary mode (pickle streams are bytes) and an explicit close
        # instead of leaking the handle to the garbage collector.
        f = open(versionfile, 'rb')
        try:
            old_version = pickle.load(f)
        finally:
            f.close()
        if old_version == new_version:
            install_needed = False

    if install_needed:
        if not preserve_srcdir and os.path.exists(srcdir):
            shutil.rmtree(srcdir)
        install(*args, **dargs)
        if os.path.exists(srcdir):
            f = open(versionfile, 'wb')
            try:
                pickle.dump(new_version, f)
            finally:
                f.close()
614
615
def get_stderr_level(stderr_is_expected):
    """Pick the stderr log level: DEBUG when expected, ERROR otherwise."""
    level = DEFAULT_STDERR_LEVEL
    if stderr_is_expected:
        level = DEFAULT_STDOUT_LEVEL
    return level
620
621
def run(command, timeout=None, ignore_status=False,
        stdout_tee=None, stderr_tee=None, verbose=True, stdin=None,
        stderr_is_expected=None, args=()):
    """
    Run a command on the host.

    @param command: the command line string.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The run() function will take a few seconds
            longer than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param stdout_tee: optional file-like object to which stdout data
            will be written as it is generated (data will still be stored
            in result.stdout).
    @param stderr_tee: likewise for stderr.
    @param verbose: if True, log the command being run.
    @param stdin: stdin to pass to the executed process (can be a file
            descriptor, a file object of a real file or a string).
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a CmdResult object

    @raise CmdError: the exit code of the command execution was not 0
    """
    if isinstance(args, basestring):
        raise TypeError('Got a string for the "args" keyword argument, '
                        'need a sequence.')

    # Append each extra argument, quoted and shell-escaped.
    for arg in args:
        command += ' "%s"' % sh_escape(arg)

    if stderr_is_expected is None:
        stderr_is_expected = ignore_status
    stderr_level = get_stderr_level(stderr_is_expected)

    job = BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
                stderr_level=stderr_level)
    job = join_bg_jobs((job,), timeout)[0]

    if job.result.exit_status and not ignore_status:
        raise error.CmdError(command, job.result,
                             "Command returned non-zero exit status")

    return job.result
668
669
def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None):
    """
    Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    @return: a list of CmdResult objects
    """
    bg_jobs = []
    for command in commands:
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
                             stderr_level=get_stderr_level(ignore_status)))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            # Report the command that actually failed; the old code
            # reused the stale loop variable from the launch loop, which
            # always named the *last* command started.
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]
695
696
@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead."""
    job = BgJob(command)
    return job.sp, job.result
702
703
def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    Waits (up to *timeout* seconds) for all jobs, drains their
    stdout/stderr into in-memory buffers, and finalizes each job's
    CmdResult via cleanup().

    Returns the same list of bg_jobs objects that was passed in.

    @raise error.CmdError: if the jobs did not finish within *timeout*.
    """
    # (the unused 'ret' accumulator from the original was removed)
    timeout_error = False
    for bg_job in bg_jobs:
        bg_job.output_prepare(StringIO.StringIO(), StringIO.StringIO())

    try:
        # We are holding ends to stdin, stdout pipes
        # hence we need to be sure to close those fds no mater what
        start_time = time.time()
        timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)

        for bg_job in bg_jobs:
            # Process stdout and stderr
            bg_job.process_output(stdout=True, final_read=True)
            bg_job.process_output(stdout=False, final_read=True)
    finally:
        # close our ends of the pipes to the sp no matter what
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timeout_error:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatable, so it will
        # do for the time being.
        raise error.CmdError(bg_jobs[0].command, bg_jobs[0].result,
                             "Command(s) did not complete within %d seconds"
                             % timeout)

    return bg_jobs
738
739
def _wait_for_commands(bg_jobs, start_time, timeout):
    """Multiplex stdin/stdout/stderr of all jobs until they finish.

    Feeds any pending string stdin, drains output as it arrives, and
    polls each job for exit. Jobs still running once *timeout* seconds
    have elapsed since *start_time* are killed.
    """
    # This returns True if it must return due to a timeout, otherwise False.

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    read_list = []
    write_list = []
    # Maps a pipe back to its job: (bg_job, is_stdout) for output pipes,
    # plain bg_job for stdin pipes.
    reverse_dict = {}

    for bg_job in bg_jobs:
        read_list.append(bg_job.sp.stdout)
        read_list.append(bg_job.sp.stderr)
        reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        reverse_dict[bg_job.sp.stderr] = (bg_job, False)
        if bg_job.string_stdin is not None:
            write_list.append(bg_job.sp.stdin)
            reverse_dict[bg_job.sp.stdin] = bg_job

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None # so that select never times out

    while not timeout or time_left > 0:
        # select will return when we may write to stdin or when there is
        # stdout/stderr output we can read (including when it is
        # EOF, that is the process has terminated).
        read_ready, write_ready, _ = select.select(read_list, write_list, [],
                                                   SELECT_TIMEOUT)

        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for file_obj in read_ready:
            bg_job, is_stdout = reverse_dict[file_obj]
            bg_job.process_output(is_stdout)

        for file_obj in write_ready:
            # we can write PIPE_BUF bytes without blocking
            # POSIX requires PIPE_BUF is >= 512
            bg_job = reverse_dict[file_obj]
            file_obj.write(bg_job.string_stdin[:512])
            bg_job.string_stdin = bg_job.string_stdin[512:]
            # no more input data, close stdin, remove it from the select set
            if not bg_job.string_stdin:
                file_obj.close()
                write_list.remove(file_obj)
                del reverse_dict[file_obj]

        all_jobs_finished = True
        for bg_job in bg_jobs:
            if bg_job.result.exit_status is not None:
                # already reaped on a previous iteration
                continue

            bg_job.result.exit_status = bg_job.sp.poll()
            if bg_job.result.exit_status is not None:
                # process exited, remove its stdout/stdin from the select set
                bg_job.result.duration = time.time() - start_time
                read_list.remove(bg_job.sp.stdout)
                read_list.remove(bg_job.sp.stderr)
                del reverse_dict[bg_job.sp.stdout]
                del reverse_dict[bg_job.sp.stderr]
            else:
                all_jobs_finished = False

        if all_jobs_finished:
            return False

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in bg_jobs:
        if bg_job.result.exit_status is not None:
            continue

        logging.warn('run process timeout (%s) fired on: %s', timeout,
                     bg_job.command)
        nuke_subprocess(bg_job.sp)
        bg_job.result.exit_status = bg_job.sp.poll()
        bg_job.result.duration = time.time() - start_time

    return True
825
826
def pid_is_alive(pid):
    """
    True if process pid exists and is not yet stuck in Zombie state.
    Zombies are impossible to move between cgroups, etc.
    pid can be integer, or text of integer.
    """
    stat_path = '/proc/%s/stat' % pid

    try:
        stat_line = read_one_line(stat_path)
    except IOError:
        if os.path.exists(stat_path):
            # unexpected failure reading an existing stat file
            raise
        # file went away: the process is gone
        return False

    # Field 2 of /proc/<pid>/stat is the single-character state code.
    return stat_line.split()[2] != 'Z'
844
845
def signal_pid(pid, sig):
    """
    Sends a signal to a process id. Returns True if the process terminated
    successfully, False otherwise.
    """
    try:
        os.kill(pid, sig)
    except OSError:
        # The process may have died before we could kill it.
        pass

    # Give the process up to five seconds to go away.
    for _ in range(5):
        if not pid_is_alive(pid):
            return True
        time.sleep(1)

    # The process is still alive
    return False
864
865
def nuke_subprocess(subproc):
    """Forcefully terminate *subproc*, escalating SIGTERM -> SIGKILL.

    Returns the exit status if the process ends (or had already ended),
    or None if it survives both signals.
    """
    # check if the subprocess is still alive, first
    status = subproc.poll()
    if status is not None:
        return status

    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    for sig in (signal.SIGTERM, signal.SIGKILL):
        signal_pid(subproc.pid, sig)
        status = subproc.poll()
        if status is not None:
            return status
878
879
def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
    """Kill *pid* with each signal in turn until it dies.

    @raise error.AutoservRunError: if the process survives every signal.
    """
    # the process has not terminated within timeout,
    # kill it via an escalating series of signals.
    for sig in signal_queue:
        killed = signal_pid(pid, sig)
        if killed:
            return

    # no signal successfully terminated the process
    raise error.AutoservRunError('Could not kill %d' % pid, None)
889
890
def system(command, timeout=None, ignore_status=False):
    """
    Run a command

    @param timeout: timeout in seconds
    @param ignore_status: if ignore_status=False, throw an exception if the
            command's exit code is non-zero
            if ignore_stauts=True, return the exit code.

    @return exit status of command
            (note, this will always be zero unless ignore_status=True)
    """
    result = run(command, timeout=timeout, ignore_status=ignore_status,
                 stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return result.exit_status
905
906
def system_parallel(commands, timeout=None, ignore_status=False):
    """
    Run several commands in parallel.

    @param commands: list of command strings to execute.
    @param timeout: timeout in seconds applied to each command.
    @param ignore_status: if False, raise on non-zero exit codes.
    @return: list of exit statuses, in the same order as the commands.
    """
    bg_jobs = run_parallel(commands, timeout=timeout,
                           ignore_status=ignore_status,
                           stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return [bg_job.exit_status for bg_job in bg_jobs]
913
914
def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False, args=()):
    """
    Run a command and return its stdout output, minus a trailing newline.

    @param command: command string to execute.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The function will take a few seconds longer
            than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param retain_output: set to True to make stdout/stderr of the command
            output to be also sent to the logging system
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a string with the stdout output of the command.
    """
    run_kwargs = {'timeout': timeout, 'ignore_status': ignore_status,
                  'args': args}
    if retain_output:
        run_kwargs['stdout_tee'] = TEE_TO_LOGS
        run_kwargs['stderr_tee'] = TEE_TO_LOGS
    out = run(command, **run_kwargs).stdout
    # Strip at most one trailing newline.
    if out.endswith('\n'):
        out = out[:-1]
    return out
945
946
def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    """
    Run commands in parallel and return a list with each command's stdout,
    with at most one trailing newline stripped from each.

    @param commands: list of command strings to execute.
    @param timeout: time limit in seconds applied to each command.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of a command is.
    @param retain_output: set to True to also send stdout/stderr of the
            commands to the logging system.

    @return list of stdout strings, in the same order as the commands.
    """
    if retain_output:
        bg_jobs = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status,
                               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    else:
        bg_jobs = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status)
    out = []
    for job_output in [bg_job.stdout for bg_job in bg_jobs]:
        # Strip one trailing newline from each command's output.  The old
        # code mistakenly tested and sliced the *list* (out[-1:] == '\n'),
        # so the newlines were never removed.
        if job_output[-1:] == '\n':
            job_output = job_output[:-1]
        out.append(job_output)
    return out
960
961
def strip_unicode(input):
    """
    Recursively convert unicode strings inside lists and dicts to plain
    byte strings; other values are returned unchanged.
    """
    input_type = type(input)
    if input_type == list:
        return [strip_unicode(item) for item in input]
    if input_type == dict:
        return dict((str(key), strip_unicode(value))
                    for key, value in input.items())
    if input_type == unicode:
        return str(input)
    return input
974
975
def get_cpu_percentage(function, *args, **dargs):
    """Returns a tuple containing the CPU% and return value from function call.

    This function calculates the usage time by taking the difference of
    the user and system times both before and after the function call.
    """
    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
    self_pre = resource.getrusage(resource.RUSAGE_SELF)
    start = time.time()
    to_return = function(*args, **dargs)
    elapsed = time.time() - start
    self_post = resource.getrusage(resource.RUSAGE_SELF)
    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)

    # Calculate CPU Percentage.  The first two getrusage fields are user
    # time and system time; wrap zip() in list() so the slice also works
    # on Python 3 (identical behavior on Python 2).
    s_user, s_system = [a - b for a, b in list(zip(self_post, self_pre))[:2]]
    c_user, c_system = [a - b for a, b in list(zip(child_post, child_pre))[:2]]
    cpu_percent = (s_user + c_user + s_system + c_system) / elapsed

    return cpu_percent, to_return
996
997
class SystemLoad(object):
    """
    Monitor system- and/or process-level CPU and memory load via /proc and
    report averaged values, optionally with continuous sampling.

    Internally, self.stats maps each monitored pid (0 = whole system) to a
    list [display_name, cpu_monitor, mem_monitor] of FileFieldMonitor
    instances over the relevant /proc files.
    """
    def __init__(self, pids, advanced=False, time_step=0.1, cpu_cont=False,
                 use_log=False):
        """
        @param pids: List of pids to be monitored. pid == 0 means the whole
          system. An entry may also be a (pid, name) tuple to supply a
          display name explicitly.
        @param advanced: additionally monitor system irq count and softirq
          (for the system) or minor and major page faults (for processes).
        @param time_step: Time step for continuous monitoring.
        @param cpu_cont: If True monitor CPU load continuously.
        @param use_log: If true every monitoring is logged for dump.
        """
        self.pids = []
        self.stats = {}
        for pid in pids:
            if pid == 0:
                # Whole-system monitoring: sample aggregate CPU counters
                # from /proc/stat and memory counters from /proc/meminfo.
                cpu = FileFieldMonitor("/proc/stat",
                                       [("cpu", 0), # User Time
                                        ("cpu", 2), # System Time
                                        ("intr", 0), # IRQ Count
                                        ("softirq", 0)], # Soft IRQ Count
                                       True,
                                       cpu_cont,
                                       use_log,
                                       " +",
                                       time_step)
                mem = FileFieldMonitor("/proc/meminfo",
                                       [("MemTotal:", 0), # Mem Total
                                        ("MemFree:", 0), # Mem Free
                                        ("Buffers:", 0), # Buffers
                                        ("Cached:", 0)], # Cached
                                       False,
                                       True,
                                       use_log,
                                       " +",
                                       time_step)
                self.stats[pid] = ["TOTAL", cpu, mem]
                self.pids.append(pid)
            else:
                # Per-process monitoring: accept either a bare pid (name is
                # looked up from /proc) or a (pid, name) tuple.
                name = ""
                if (type(pid) is int):
                    self.pids.append(pid)
                    name = get_process_name(pid)
                else:
                    self.pids.append(pid[0])
                    name = pid[1]

                cpu = FileFieldMonitor("/proc/%d/stat" %
                                       self.pids[-1],
                                       [("", 13), # User Time
                                        ("", 14), # System Time
                                        ("", 9), # Minority Page Fault
                                        ("", 11)], # Majority Page Fault
                                       True,
                                       cpu_cont,
                                       use_log,
                                       " +",
                                       time_step)
                mem = FileFieldMonitor("/proc/%d/status" %
                                       self.pids[-1],
                                       [("VmSize:", 0), # Virtual Memory Size
                                        ("VmRSS:", 0), # Resident Set Size
                                        ("VmPeak:", 0), # Peak VM Size
                                        ("VmSwap:", 0)], # VM in Swap
                                       False,
                                       True,
                                       use_log,
                                       " +",
                                       time_step)
                self.stats[self.pids[-1]] = [name, cpu, mem]

        self.advanced = advanced


    def __str__(self):
        """
        Concatenate the raw status of every monitor, one per line.
        """
        out = ""
        for pid in self.pids:
            for stat in self.stats[pid][1:]:
                out += str(stat.get_status()) + "\n"
        return out


    def start(self, pids=[]):
        """
        Start monitoring of the process system usage.
        @param pids: List of PIDs you intend to control. Use pids=[] to control
            all defined PIDs.
        """
        if pids == []:
            pids = self.pids

        for pid in pids:
            for stat in self.stats[pid][1:]:
                stat.start()


    def stop(self, pids=[]):
        """
        Stop monitoring of the process system usage.
        @param pids: List of PIDs you intend to control. Use pids=[] to control
            all defined PIDs.
        """
        if pids == []:
            pids = self.pids

        for pid in pids:
            for stat in self.stats[pid][1:]:
                stat.stop()


    def dump(self, pids=[]):
        """
        Get the status of monitoring.
        @param pids: List of PIDs you intend to control. Use pids=[] to control
            all defined PIDs.
        @return:
            tuple([cpu load], [memory load]):
                ([(PID1, (PID1_cpu_meas)), (PID2, (PID2_cpu_meas)), ...],
                 [(PID1, (PID1_mem_meas)), (PID2, (PID2_mem_meas)), ...])

            PID1_cpu_meas:
                average_values[], test_time, cont_meas_values[[]], time_step
            PID1_mem_meas:
                average_values[], test_time, cont_meas_values[[]], time_step
            where average_values[] are the measured values (mem_free,swap,...)
            which are described in SystemLoad.__init__()-FileFieldMonitor.
            cont_meas_values[[]] is a list of average_values in the sampling
            times.
        """
        if pids == []:
            pids = self.pids

        cpus = []
        memory = []
        for pid in pids:
            stat = (pid, self.stats[pid][1].get_status())
            cpus.append(stat)
        for pid in pids:
            stat = (pid, self.stats[pid][2].get_status())
            memory.append(stat)

        return (cpus, memory)


    def get_cpu_status_string(self, pids=[]):
        """
        Convert CPU measurements into a human-readable table.
        @param pids: List of PIDs you intend to control. Use pids=[] to control
            all defined PIDs.
        @return: String format to table.
        """
        if pids == []:
            pids = self.pids

        headers = ["NAME",
                   ("%7s") % "PID",
                   ("%5s") % "USER",
                   ("%5s") % "SYS",
                   ("%5s") % "SUM"]
        if self.advanced:
            headers.extend(["MINFLT/IRQC",
                            "MAJFLT/SOFTIRQ"])
        headers.append(("%11s") % "TIME")
        textstatus = []
        for pid in pids:
            stat = self.stats[pid][1].get_status()
            # NOTE: this local deliberately shadows the 'time' module within
            # this method; it holds the measurement duration in seconds.
            time = stat[1]
            stat = stat[0]
            textstatus.append(["%s" % self.stats[pid][0],
                               "%7s" % pid,
                               "%4.0f%%" % (stat[0] / time),
                               "%4.0f%%" % (stat[1] / time),
                               "%4.0f%%" % ((stat[0] + stat[1]) / time),
                               "%10.3fs" % time])
            if self.advanced:
                # Advanced columns are inserted before the trailing TIME cell.
                textstatus[-1].insert(-1, "%11d" % stat[2])
                textstatus[-1].insert(-1, "%14d" % stat[3])

        return matrix_to_string(textstatus, tuple(headers))


    def get_mem_status_string(self, pids=[]):
        """
        Convert memory measurements into a human-readable table.
        @param pids: List of PIDs you intend to control. Use pids=[] to control
            all defined PIDs.
        @return: String format to table.
        """
        if pids == []:
            pids = self.pids

        headers = ["NAME",
                   ("%7s") % "PID",
                   ("%8s") % "TOTAL/VMSIZE",
                   ("%8s") % "FREE/VMRSS",
                   ("%8s") % "BUFFERS/VMPEAK",
                   ("%8s") % "CACHED/VMSWAP",
                   ("%11s") % "TIME"]
        textstatus = []
        for pid in pids:
            stat = self.stats[pid][2].get_status()
            # NOTE: shadows the 'time' module locally; measurement duration.
            time = stat[1]
            stat = stat[0]
            textstatus.append(["%s" % self.stats[pid][0],
                               "%7s" % pid,
                               "%10dMB" % (stat[0] / 1024),
                               "%8dMB" % (stat[1] / 1024),
                               "%12dMB" % (stat[2] / 1024),
                               "%11dMB" % (stat[3] / 1024),
                               "%10.3fs" % time])

        return matrix_to_string(textstatus, tuple(headers))
1216
1217
def get_arch(run_function=run):
    """
    Get the hardware architecture of the machine.
    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    machine = run_function('/bin/uname -m').stdout.rstrip()
    # Normalize all ix86 variants (i386, i486, i586, i686) to 'i386'.
    if re.match(r'i\d86$', machine):
        return 'i386'
    return machine
1230
1231
def get_num_logical_cpus_per_socket(run_function=run):
    """
    Get the number of cores (including hyperthreading) per cpu.
    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    siblings_out = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
    counts = [int(field) for field in
              re.findall(r'^siblings\s*:\s*(\d+)\s*$', siblings_out, re.M)]
    if not counts:
        raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
    # Every physical socket should report the same sibling count.
    if min(counts) != max(counts):
        raise error.TestError('Number of siblings differ %r' %
                              counts)
    return counts[0]
1250
1251
def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees than the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).
    """
    if not os.path.exists(src):
        # Nothing to merge; the path exists only in dest.
        return
    if not os.path.exists(dest):
        # The path exists only in src: copy it over wholesale.
        if os.path.isfile(src):
            shutil.copy2(src, dest)
        else:
            shutil.copytree(src, dest, symlinks=True)
        return
    if os.path.isfile(src) and os.path.isfile(dest):
        # Files in both trees: append src's contents to dest.
        srcfile = open(src)
        try:
            contents = srcfile.read()
        finally:
            srcfile.close()
        destfile = open(dest, "a")
        try:
            destfile.write(contents)
        finally:
            destfile.close()
        return
    if os.path.isdir(src) and os.path.isdir(dest):
        # Directories in both trees: merge recursively.
        for entry in os.listdir(src):
            merge_trees(os.path.join(src, entry), os.path.join(dest, entry))
    # Otherwise src & dest both exist but are incompatible types: skip.
1287
1288
class CmdResult(object):
    """
    Container for the result of running a command.

    command:     String containing the command line itself
    exit_status: Integer exit code of the process
    stdout:      String containing stdout of the process
    stderr:      String containing stderr of the process
    duration:    Elapsed wall clock time running the process
    """


    def __init__(self, command="", stdout="", stderr="",
                 exit_status=None, duration=0):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration


    def __repr__(self):
        # Wrap the command onto indented continuation lines for readability.
        wrapper = textwrap.TextWrapper(width=78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        stdout_section = self.stdout.rstrip()
        if stdout_section:
            stdout_section = "\nstdout:\n%s" % stdout_section

        stderr_section = self.stderr.rstrip()
        if stderr_section:
            stderr_section = "\nstderr:\n%s" % stderr_section

        return ("* Command: %s\n"
                "Exit status: %s\n"
                "Duration: %s\n"
                "%s"
                "%s"
                % (wrapper.fill(self.command), self.exit_status,
                self.duration, stdout_section, stderr_section))
1330
1331
class run_randomly:
    """Collects queued test invocations and runs them in random order."""

    def __init__(self, run_sequentially=False):
        # run_sequentially preserves insertion order; handy when debugging
        # control files.
        self.test_list = []
        self.run_sequentially = run_sequentially


    def add(self, *args, **dargs):
        """Queue one invocation (its positional and keyword arguments)."""
        self.test_list.append((args, dargs))


    def run(self, fn):
        """Pop queued invocations one at a time and apply fn to each."""
        while self.test_list:
            # Always draw from the RNG (even in sequential mode) so the
            # global random state advances identically either way.
            index = random.randint(0, len(self.test_list) - 1)
            if self.run_sequentially:
                index = 0
            args, dargs = self.test_list.pop(index)
            fn(*args, **dargs)
1351
1352
def import_site_module(path, module, dummy=None, modulefile=None):
    """
    Try to import the site specific module if it exists.

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific module or dummy

    @raises ImportError if the site file exists but imports fails
    """
    short_module = module[module.rfind(".") + 1:]

    if not modulefile:
        modulefile = short_module + ".py"

    site_path = os.path.join(os.path.dirname(path), modulefile)
    if not os.path.exists(site_path):
        return dummy
    return __import__(module, {}, {}, [short_module])
1374
1375
def import_site_symbol(path, module, name, dummy=None, modulefile=None):
    """
    Try to import site specific symbol from site specific file if it exists

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param name symbol name to be imported from the site file
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific symbol or dummy

    @raises ImportError if the site file exists but imports fails
    """
    site_module = import_site_module(path, module, modulefile=modulefile)
    if not site_module:
        return dummy

    # Unique sentinel so we can distinguish "missing" from any real value.
    cant_import = object()

    obj = getattr(site_module, name, cant_import)
    if obj is cant_import:
        logging.debug("unable to import site symbol '%s', using non-site "
                      "implementation", name)
        return dummy
    return obj
1404
1405
def import_site_class(path, module, classname, baseclass, modulefile=None):
    """
    Try to import site specific class from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        classname: class name to be loaded from site file
        baseclass: base class object to return when no site file present or
            to mixin when site class exists but is not inherited from baseclass
        modulefile: module filename

    Returns: baseclass if site specific class does not exist, the site specific
        class if it exists and is inherited from baseclass or a mixin of the
        site specific class and baseclass when the site specific class exists
        and is not inherited from baseclass

    Raises: ImportError if the site file exists but imports fails
    """
    site_class = import_site_symbol(path, module, classname, None, modulefile)
    if not site_class:
        return baseclass
    if issubclass(site_class, baseclass):
        return site_class
    # The site class isn't derived from baseclass: mix the two together.
    return type(classname, (site_class, baseclass), {})
1436
1437
def import_site_function(path, module, funcname, dummy, modulefile=None):
    """
    Try to import site specific function from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        funcname: function name to be imported from site file
        dummy: dummy function to return in case there is no function to import
        modulefile: module filename

    Returns: site specific function object or dummy

    Raises: ImportError if the site file exists but imports fails
    """
    # Thin convenience wrapper: a function is just a module symbol.
    return import_site_symbol(path, module, funcname, dummy, modulefile)
1455
1456
1457def _get_pid_path(program_name):
1458    my_path = os.path.dirname(__file__)
1459    return os.path.abspath(os.path.join(my_path, "..", "..",
1460                                        "%s.pid" % program_name))
1461
1462
def write_pid(program_name):
    """
    Try to drop <program_name>.pid in the main autotest directory.

    Args:
      program_name: prefix for file name
    """
    pid_path = _get_pid_path(program_name)
    pidfile = open(pid_path, "w")
    try:
        pidfile.write("%s\n" % os.getpid())
    finally:
        pidfile.close()
1475
1476
def delete_pid_file_if_exists(program_name):
    """
    Tries to remove <program_name>.pid from the main autotest directory.
    """
    pidfile_path = _get_pid_path(program_name)

    try:
        os.remove(pidfile_path)
    except OSError:
        # Only propagate the error if the file is actually still there;
        # a missing file (never created, or already removed) is fine.
        if os.path.exists(pidfile_path):
            raise
1489
1490
def get_pid_from_file(program_name):
    """
    Reads the pid from <program_name>.pid in the autotest directory.

    @param program_name the name of the program
    @return the pid if the file exists, None otherwise.
    """
    pidfile_path = _get_pid_path(program_name)
    if not os.path.exists(pidfile_path):
        return None

    pidfile = open(pidfile_path, 'r')
    try:
        try:
            return int(pidfile.readline())
        except IOError:
            # Tolerate the file disappearing between the existence check
            # and the read; anything else is a real error.
            if os.path.exists(pidfile_path):
                raise
            return None
    finally:
        pidfile.close()
1515
1516
def get_process_name(pid):
    """
    Get process name from PID.
    @param pid: PID of process.
    """
    # Field 1 of /proc/<pid>/stat is the command name wrapped in
    # parentheses; strip the first and last characters to remove them.
    stat_contents = read_file("/proc/%d/stat" % pid)
    return get_field(stat_contents, 1)[1:-1]
1523
1524
def program_is_alive(program_name):
    """
    Checks if the process is alive and not in Zombie state.

    @param program_name the name of the program
    @return True if still alive, False otherwise
    """
    pid = get_pid_from_file(program_name)
    return pid is not None and pid_is_alive(pid)
1536
1537
def signal_program(program_name, sig=signal.SIGTERM):
    """
    Sends a signal to the process listed in <program_name>.pid

    @param program_name the name of the program
    @param sig signal to send
    """
    pid = get_pid_from_file(program_name)
    if not pid:
        return
    signal_pid(pid, sig)
1548
1549
def get_relative_path(path, reference):
    """Given 2 absolute paths "path" and "reference", compute the path of
    "path" as relative to the directory "reference".

    @param path the absolute path to convert to a relative path
    @param reference an absolute directory path to which the relative
        path will be computed
    @return the relative path; os.curdir ('.') when the two normalized
        paths are identical
    """
    # normalize the paths (remove double slashes, etc)
    assert(os.path.isabs(path))
    assert(os.path.isabs(reference))

    path = os.path.normpath(path)
    reference = os.path.normpath(reference)

    # we could use os.path.split() but it splits from the end
    path_list = path.split(os.path.sep)[1:]
    ref_list = reference.split(os.path.sep)[1:]

    # find the length of the longest leading common path.  (The old
    # index-juggling loop raised NameError on empty lists and TypeError on
    # identical paths via os.path.join(*[]).)
    common = 0
    for path_part, ref_part in zip(path_list, ref_list):
        if path_part != ref_part:
            break
        common += 1

    # for each uncommon component in the reference prepend a "..", then
    # append the uncommon tail of path
    rel_list = ['..'] * (len(ref_list) - common) + path_list[common:]
    if not rel_list:
        # path and reference are the same directory
        return os.curdir
    return os.path.join(*rel_list)
1584
1585
def sh_escape(command):
    """
    Escape special characters from a command so that it can be passed
    as a double quoted (" ") string in a (ba)sh command.

    Args:
            command: the command string to escape.

    Returns:
            The escaped command string. The required englobing double
            quotes are NOT added and so should be added at some point by
            the caller.

    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
    """
    # Backslash must be escaped first so the backslashes we introduce for
    # the other characters are not doubled again.
    for char, escaped in (("\\", "\\\\"),
                          ("$", r'\$'),
                          ('"', r'\"'),
                          ('`', r'\`')):
        command = command.replace(char, escaped)
    return command
1606
1607
def configure(extra=None, configure='./configure'):
    """
    Run configure passing in the correct host, build, and target options.

    @param extra: extra command line arguments to pass to configure
    @param configure: which configure script to use
    """
    args = []
    # Map cross-compile environment variables to their configure flags.
    for env_var, option in (('CHOST', '--host='),
                            ('CBUILD', '--build='),
                            ('CTARGET', '--target=')):
        if env_var in os.environ:
            args.append(option + os.environ[env_var])
    if extra:
        args.append(extra)

    system('%s %s' % (configure, ' '.join(args)))
1626
1627
def make(extra='', make='make', timeout=None, ignore_status=False):
    """
    Run make, adding MAKEOPTS to the list of options.

    @param extra: extra command line arguments to pass to make.
    """
    makeopts = os.environ.get('MAKEOPTS', '')
    cmd = '%s %s %s' % (make, makeopts, extra)
    return system(cmd, timeout=timeout, ignore_status=ignore_status)
1636
1637
def compare_versions(ver1, ver2):
    """Version number comparison between ver1 and ver2 strings.

    >>> compare_versions("1", "2")
    -1
    >>> compare_versions("foo-1.1", "foo-1.2")
    -1
    >>> compare_versions("1.2", "1.2a")
    -1
    >>> compare_versions("1.2b", "1.2a")
    1
    >>> compare_versions("1.3.5.3a", "1.3.5.3b")
    -1

    Args:
        ver1: version string
        ver2: version string

    Returns:
        int:  1 if ver1 >  ver2
              0 if ver1 == ver2
             -1 if ver1 <  ver2
    """
    def _cmp(x, y):
        # Equivalent to Python 2's cmp(); the builtin was removed in
        # Python 3, and bool arithmetic gives -1/0/1 on both versions.
        return (x > y) - (x < y)

    ax = re.split('[.-]', ver1)
    ay = re.split('[.-]', ver2)
    while ax and ay:
        cx = ax.pop(0)
        cy = ay.pop(0)
        # Zero-pad the shorter component so "2" < "10" compares correctly.
        maxlen = max(len(cx), len(cy))
        c = _cmp(cx.zfill(maxlen), cy.zfill(maxlen))
        if c != 0:
            return c
    # All shared components equal: the version with more components wins.
    return _cmp(len(ax), len(ay))
1671
1672
def args_to_dict(args):
    """Convert autoserv extra arguments in the form of key=val or key:val to a
    dictionary.  Each argument key is converted to lowercase dictionary key.

    Args:
        args - list of autoserv extra arguments.

    Returns:
        dictionary
    """
    arg_re = re.compile(r'(\w+)[:=](.*)$')
    # Renamed from 'dict' to avoid shadowing the builtin.
    args_dict = {}
    for arg in args:
        match = arg_re.match(arg)
        if match:
            args_dict[match.group(1).lower()] = match.group(2)
        else:
            # Lazy %-style args so formatting only happens if emitted.
            logging.warning("args_to_dict: argument '%s' doesn't match "
                            "'%s' pattern. Ignored.", arg, arg_re.pattern)
    return args_dict
1693
1694
def get_unused_port():
    """
    Finds a semi-random available port. A race condition is still
    possible after the port number is returned, if another process
    happens to bind it.

    Returns:
        A port number that is unused on both TCP and UDP.
    """

    def try_bind(port, socket_type, socket_proto):
        # Returns the bound port number on success, None if the bind failed.
        s = socket.socket(socket.AF_INET, socket_type, socket_proto)
        try:
            try:
                s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
                s.bind(('', port))
                return s.getsockname()[1]
            except socket.error:
                return None
        finally:
            s.close()

    # On the 2.6 kernel, calling try_bind() on UDP socket returns the
    # same port over and over. So always try TCP first.
    port = None
    while port is None:
        # Ask the OS for an unused TCP port, then verify it on UDP too.
        candidate = try_bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        if candidate and try_bind(candidate, socket.SOCK_DGRAM,
                                  socket.IPPROTO_UDP):
            port = candidate
    return port
1725
1726
def ask(question, auto=False):
    """
    Raw input with a prompt that emulates logging.

    @param question: Question to be asked
    @param auto: Whether to return "y" instead of asking the question
    """
    if auto:
        logging.info("%s (y/n) y" % question)
        return "y"
    timestamp = time.strftime("%H:%M:%S", time.localtime())
    return raw_input("%s INFO | %s (y/n) " % (timestamp, question))
1739