utils.py revision e0493a4af57c1a73376a7bafaed542c01f588196
#
# Copyright 2008 Google Inc. Released under the GPL v2

import os, pickle, random, re, resource, select, shutil, signal, StringIO
import socket, struct, subprocess, sys, time, textwrap, urlparse
import warnings, smtplib, logging, urllib2
from threading import Thread, Event
try:
    import hashlib
except ImportError:
    import md5, sha
from autotest_lib.client.common_lib import error, logging_manager
133cbfba02fef9dae07a041fdbf2e89611d72d6f90David Wei
def deprecated(func):
    """Decorator that marks a function as deprecated.

    The wrapped function emits a DeprecationWarning on every call and
    then delegates to the original implementation unchanged.
    """
    def wrapper(*args, **dargs):
        warnings.warn("Call to deprecated function %s." % func.__name__,
                      category=DeprecationWarning)
        return func(*args, **dargs)
    # Preserve the wrapped function's identity for introspection tools.
    wrapper.__name__ = func.__name__
    wrapper.__doc__ = func.__doc__
    wrapper.__dict__.update(func.__dict__)
    return wrapper
253cbfba02fef9dae07a041fdbf2e89611d72d6f90David Wei
263cbfba02fef9dae07a041fdbf2e89611d72d6f90David Wei
273cbfba02fef9dae07a041fdbf2e89611d72d6f90David Weiclass _NullStream(object):
283cbfba02fef9dae07a041fdbf2e89611d72d6f90David Wei    def write(self, data):
293cbfba02fef9dae07a041fdbf2e89611d72d6f90David Wei        pass
303cbfba02fef9dae07a041fdbf2e89611d72d6f90David Wei
313cbfba02fef9dae07a041fdbf2e89611d72d6f90David Wei
323cbfba02fef9dae07a041fdbf2e89611d72d6f90David Wei    def flush(self):
333cbfba02fef9dae07a041fdbf2e89611d72d6f90David Wei        pass
343cbfba02fef9dae07a041fdbf2e89611d72d6f90David Wei
353cbfba02fef9dae07a041fdbf2e89611d72d6f90David Wei
# Sentinel: pass as a stream argument to request that output be teed to
# the logging subsystem instead of a real file object.
TEE_TO_LOGS = object()
# Shared do-nothing stream returned whenever a caller passes None.
_the_null_stream = _NullStream()

# Logging levels used when command output is teed to the logs.
DEFAULT_STDOUT_LEVEL = logging.DEBUG
DEFAULT_STDERR_LEVEL = logging.ERROR

# prefixes for logging stdout/stderr of commands
STDOUT_PREFIX = '[stdout] '
STDERR_PREFIX = '[stderr] '
453cbfba02fef9dae07a041fdbf2e89611d72d6f90David Wei
463cbfba02fef9dae07a041fdbf2e89611d72d6f90David Wei
def get_stream_tee_file(stream, level, prefix=''):
    """Map a caller-supplied stream spec to a real file-like object.

    The TEE_TO_LOGS sentinel becomes a LoggingFile at the given
    level/prefix, None becomes the shared null sink, and anything else
    is assumed to already be a usable stream and is returned unchanged.
    """
    if stream is TEE_TO_LOGS:
        return logging_manager.LoggingFile(level=level, prefix=prefix)
    if stream is None:
        return _the_null_stream
    return stream
533cbfba02fef9dae07a041fdbf2e89611d72d6f90David Wei
543cbfba02fef9dae07a041fdbf2e89611d72d6f90David Wei
class BgJob(object):
    """A command running in the background under subprocess.

    The command is always executed through /bin/bash with shell=True.
    Output may be teed to caller-supplied streams (or to the logging
    subsystem via the TEE_TO_LOGS sentinel) while it is accumulated into
    the buffers installed by output_prepare().
    """
    def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True,
                 stdin=None, stderr_level=DEFAULT_STDERR_LEVEL):
        # command: shell command line string, run via bash.
        # stdout_tee/stderr_tee: None, TEE_TO_LOGS, or a file-like object
        #     that receives output as it is produced.
        # verbose: log the command line before starting it.
        # stdin: string (fed through a pipe by the wait loop), or a file
        #     object/descriptor handed straight to subprocess.
        # stderr_level: logging level used when stderr is teed to logs.
        self.command = command
        self.stdout_tee = get_stream_tee_file(stdout_tee, DEFAULT_STDOUT_LEVEL,
                                              prefix=STDOUT_PREFIX)
        self.stderr_tee = get_stream_tee_file(stderr_tee, stderr_level,
                                              prefix=STDERR_PREFIX)
        self.result = CmdResult(command)

        # allow for easy stdin input by string, we'll let subprocess create
        # a pipe for stdin input and we'll write to it in the wait loop
        if isinstance(stdin, basestring):
            self.string_stdin = stdin
            stdin = subprocess.PIPE
        else:
            self.string_stdin = None

        if verbose:
            logging.debug("Running '%s'" % command)
        self.sp = subprocess.Popen(command, stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   preexec_fn=self._reset_sigpipe, shell=True,
                                   executable="/bin/bash",
                                   stdin=stdin)


    def output_prepare(self, stdout_file=None, stderr_file=None):
        # Install the buffers (typically StringIO objects) that
        # process_output() appends to and cleanup() reads back.
        self.stdout_file = stdout_file
        self.stderr_file = stderr_file


    def process_output(self, stdout=True, final_read=False):
        """output_prepare must be called prior to calling this

        Drains available data from the child's stdout (stdout=True) or
        stderr (stdout=False) pipe into both the buffer and the tee.
        With final_read=True, keeps reading until the pipe is exhausted.
        """
        # Select the pipe/buffer/tee triple for the requested stream.
        if stdout:
            pipe, buf, tee = self.sp.stdout, self.stdout_file, self.stdout_tee
        else:
            pipe, buf, tee = self.sp.stderr, self.stderr_file, self.stderr_tee

        if final_read:
            # read in all the data we can from pipe and then stop
            data = []
            while select.select([pipe], [], [], 0)[0]:
                data.append(os.read(pipe.fileno(), 1024))
                if len(data[-1]) == 0:
                    break
            data = "".join(data)
        else:
            # perform a single read
            # os.read() is used rather than pipe.read() so the call
            # returns whatever is available instead of blocking for EOF.
            data = os.read(pipe.fileno(), 1024)
        buf.write(data)
        tee.write(data)


    def cleanup(self):
        # Flush the tees, close our ends of the child's output pipes,
        # and snapshot the accumulated output onto the CmdResult.
        self.stdout_tee.flush()
        self.stderr_tee.flush()
        self.sp.stdout.close()
        self.sp.stderr.close()
        self.result.stdout = self.stdout_file.getvalue()
        self.result.stderr = self.stderr_file.getvalue()


    def _reset_sigpipe(self):
        # Runs in the child between fork and exec — restores default
        # SIGPIPE handling for the spawned shell (NOTE(review): presumably
        # undoing the interpreter's SIG_IGN setting; confirm).
        signal.signal(signal.SIGPIPE, signal.SIG_DFL)
120
121
def ip_to_long(ip):
    """Convert a dotted-quad IPv4 string to its 32-bit integer value."""
    # '!L' decodes an unsigned 32-bit value in network byte order.
    packed = socket.inet_aton(ip)
    return struct.unpack('!L', packed)[0]
125
126
def long_to_ip(number):
    """Convert a 32-bit integer back into a dotted-quad IPv4 string."""
    # '!L' encodes an unsigned 32-bit value in network byte order.
    packed = struct.pack('!L', number)
    return socket.inet_ntoa(packed)
130
131
def create_subnet_mask(bits):
    """Return the 32-bit netmask integer with the top `bits` bits set."""
    all_ones = (1 << 32) - 1
    host_bits = (1 << (32 - bits)) - 1
    return all_ones ^ host_bits
134
135
def format_ip_with_mask(ip, mask_bits):
    """Return `ip` masked down to its network address in CIDR notation."""
    network = ip_to_long(ip) & create_subnet_mask(mask_bits)
    return "%s/%s" % (long_to_ip(network), mask_bits)
139
140
def normalize_hostname(alias):
    """Resolve `alias` to its canonical hostname via forward + reverse DNS."""
    resolved_ip = socket.gethostbyname(alias)
    canonical, _, _ = socket.gethostbyaddr(resolved_ip)
    return canonical
144
145
def get_ip_local_port_range():
    """Return the kernel's IPv4 local port range as an (int low, int high) pair."""
    contents = read_one_line('/proc/sys/net/ipv4/ip_local_port_range')
    match = re.match(r'\s*(\d+)\s*(\d+)\s*$', contents)
    low, high = match.groups()
    return (int(low), int(high))
150
151
def set_ip_local_port_range(lower, upper):
    """Write a new (lower, upper) IPv4 local port range into /proc."""
    contents = '%d %d\n' % (lower, upper)
    write_one_line('/proc/sys/net/ipv4/ip_local_port_range', contents)
155
156
157
158def send_email(mail_from, mail_to, subject, body):
159    """
160    Sends an email via smtp
161
162    mail_from: string with email address of sender
163    mail_to: string or list with email address(es) of recipients
164    subject: string with subject of email
165    body: (multi-line) string with body of email
166    """
167    if isinstance(mail_to, str):
168        mail_to = [mail_to]
169    msg = "From: %s\nTo: %s\nSubject: %s\n\n%s" % (mail_from, ','.join(mail_to),
170                                                   subject, body)
171    try:
172        mailer = smtplib.SMTP('localhost')
173        try:
174            mailer.sendmail(mail_from, mail_to, msg)
175        finally:
176            mailer.quit()
177    except Exception, e:
178        # Emails are non-critical, not errors, but don't raise them
179        print "Sending email failed. Reason: %s" % repr(e)
180
181
def read_one_line(filename):
    """Return the first line of `filename`, without its trailing newline."""
    f = open(filename, 'r')
    try:
        # Close the descriptor explicitly instead of leaking it until
        # garbage collection, matching read_file() below.
        return f.readline().rstrip('\n')
    finally:
        f.close()
184
185
def read_file(filename):
    """Return the entire contents of `filename` as a single string."""
    handle = open(filename)
    try:
        contents = handle.read()
    finally:
        handle.close()
    return contents
192
193
194def get_field(data, param, linestart="", sep=" "):
195    """
196    Parse data from string.
197    @param data: Data to parse.
198        example:
199          data:
200             cpu   324 345 34  5 345
201             cpu0  34  11  34 34  33
202             ^^^^
203             start of line
204             params 0   1   2  3   4
205    @param param: Position of parameter after linestart marker.
206    @param linestart: String to which start line with parameters.
207    @param sep: Separator between parameters regular expression.
208    """
209    search = re.compile(r"(?<=^%s)\s*(.*)" % linestart, re.MULTILINE)
210    find = search.search(data)
211    if find != None:
212        return re.split("%s" % sep, find.group(1))[param]
213    else:
214        print "There is no line which starts with %s in data." % linestart
215        return None
216
217
def write_one_line(filename, line):
    """Overwrite `filename` with `line`, normalised to one trailing newline."""
    contents = line.rstrip('\n') + '\n'
    open_write_close(filename, contents)
220
221
def open_write_close(filename, data):
    """Write `data` to `filename`, always closing the file afterwards."""
    out = open(filename, 'w')
    try:
        out.write(data)
    finally:
        out.close()
228
229
230def matrix_to_string(matrix, header=None):
231    """
232    Return a pretty, aligned string representation of a nxm matrix.
233
234    This representation can be used to print any tabular data, such as
235    database results. It works by scanning the lengths of each element
236    in each column, and determining the format string dynamically.
237
238    @param matrix: Matrix representation (list with n rows of m elements).
239    @param header: Optional tuple or list with header elements to be displayed.
240    """
241    if type(header) is list:
242        header = tuple(header)
243    lengths = []
244    if header:
245        for column in header:
246            lengths.append(len(column))
247    for row in matrix:
248        for column in row:
249            i = row.index(column)
250            cl = len(column)
251            try:
252                ml = lengths[i]
253                if cl > ml:
254                    lengths[i] = cl
255            except IndexError:
256                lengths.append(cl)
257
258    lengths = tuple(lengths)
259    format_string = ""
260    for length in lengths:
261        format_string += "%-" + str(length) + "s "
262    format_string += "\n"
263
264    matrix_str = ""
265    if header:
266        matrix_str += format_string % header
267    for row in matrix:
268        matrix_str += format_string % tuple(row)
269
270    return matrix_str
271
272
def read_keyval(path):
    """
    Read a key-value pair format file into a dictionary, and return it.
    Takes either a filename or directory name as input. If it's a
    directory name, we assume you want the file to be called keyval.

    Integer-looking values are converted to int, float-looking values
    to float; everything else is kept as a string.

    @raise ValueError: on any line not of the form key=value.
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = {}
    if os.path.exists(path):
        # Close the file explicitly rather than leaking the handle
        # until garbage collection.
        keyval_file = open(path)
        try:
            for line in keyval_file:
                line = re.sub('#.*', '', line).rstrip()
                if not re.search(r'^[-\.\w]+=', line):
                    raise ValueError('Invalid format line: %s' % line)
                key, value = line.split('=', 1)
                if re.search(r'^\d+$', value):
                    value = int(value)
                elif re.search(r'^(\d+\.)?\d+$', value):
                    value = float(value)
                keyval[key] = value
        finally:
            keyval_file.close()
    return keyval
294
295
def write_keyval(path, dictionary, type_tag=None):
    """
    Append `dictionary` to a keyval file as 'key=value' lines.

    The file is opened in append mode, so existing text is neither
    overwritten nor reparsed. If `path` is a directory, the file inside
    it is named 'keyval'.

    Without a type_tag, keys may contain only alphanumerics, dots,
    dashes and underscores. With a type_tag (only 'attr' and 'perf' are
    accepted) every key must additionally end in "{type_tag}".
    """
    if os.path.isdir(path):
        path = os.path.join(path, 'keyval')
    keyval = open(path, 'a')

    if type_tag is None:
        key_regex = re.compile(r'^[-\.\w]+$')
    elif type_tag in ('attr', 'perf'):
        key_regex = re.compile(r'^[-\.\w]+\{%s\}$' % re.escape(type_tag))
    else:
        raise ValueError('Invalid type tag: %s' % type_tag)

    try:
        # Keys are emitted in sorted order so output is deterministic.
        for key in sorted(dictionary.keys()):
            if not key_regex.search(key):
                raise ValueError('Invalid key: %s' % key)
            keyval.write('%s=%s\n' % (key, dictionary[key]))
    finally:
        keyval.close()
325
326
class FileFieldMonitor(object):
    """
    Monitors the information from the file and reports it's values.

    It gather the information at start and stop of the measurement or
    continuously during the measurement.
    """
    class Monitor(Thread):
        """
        Internal monitor class to ensure continuous monitor of monitored file.
        """
        def __init__(self, master):
            """
            @param master: Master class which control Monitor
            """
            Thread.__init__(self)
            self.master = master

        def run(self):
            """
            Start monitor in thread mode
            """
            # Sample repeatedly until the master signals the end of the
            # measurement via its end_event.
            while not self.master.end_event.isSet():
                self.master._get_value(self.master.logging)
                time.sleep(self.master.time_step)


    def __init__(self, status_file, data_to_read, mode_diff, continuously=False,
                 contlogging=False, separator=" +", time_step=0.1):
        """
        Initialize variables.
        @param status_file: File contain status.
        @param data_to_read: List of tuples with data position.
            format: [(start_of_line,position in params)]
            example:
              data:
                 cpu   324 345 34  5 345
                 cpu0  34  11  34 34  33
                 ^^^^
                 start of line
                 params 0   1   2  3   4
        @param mode_diff: True to subtract old value from new value,
            False make average of the values.
        @param continuously: Start the monitoring thread using the time_step
            as the measurement period.
        @param contlogging: Log data in continuous run.
        @param separator: Regular expression of separator.
        @param time_step: Time period of the monitoring value.
        """
        # Event used to tell the continuous Monitor thread to stop.
        self.end_event = Event()
        self.start_time = 0
        self.end_time = 0
        self.test_time = 0

        self.status_file = status_file
        self.separator = separator
        self.data_to_read = data_to_read
        self.num_of_params = len(self.data_to_read)
        self.mode_diff = mode_diff
        self.continuously = continuously
        self.time_step = time_step

        # Most recent sample and the previous one (used for summing in
        # averaging mode).
        self.value = [0 for i in range(self.num_of_params)]
        self.old_value = [0 for i in range(self.num_of_params)]
        # History of raw samples; `logging` is the contlogging flag
        # (a boolean), not the logging module.
        self.log = []
        self.logging = contlogging

        self.started = False
        self.num_of_get_value = 0
        self.monitor = None


    def _get_value(self, logging=True):
        """
        Return current values.
        @param logging: If true log value in memory. There can be problem
          with long run.
        """
        # NOTE: the `logging` parameter shadows the logging module inside
        # this method; only the boolean flag is meant here.
        data = read_file(self.status_file)
        value = []
        for i in range(self.num_of_params):
            value.append(int(get_field(data,
                             self.data_to_read[i][1],
                             self.data_to_read[i][0],
                             self.separator)))

        if logging:
            self.log.append(value)
        if not self.mode_diff:
            # Averaging mode: keep a running sum; stop() divides the sum
            # by the number of samples taken.
            value = map(lambda x, y: x + y, value, self.old_value)

        self.old_value = value
        self.num_of_get_value += 1
        return value


    def start(self):
        """
        Start value monitor.
        """
        # Restarting resets all accumulated state from a previous run.
        if self.started:
            self.stop()
        self.old_value = [0 for i in range(self.num_of_params)]
        self.num_of_get_value = 0
        self.log = []
        self.end_event.clear()
        self.start_time = time.time()
        self._get_value()
        self.started = True
        if (self.continuously):
            self.monitor = FileFieldMonitor.Monitor(self)
            self.monitor.start()


    def stop(self):
        """
        Stop value monitor.
        """
        if self.started:
            self.started = False
            self.end_time = time.time()
            self.test_time = self.end_time - self.start_time
            self.value = self._get_value()
            if (self.continuously):
                # Signal the Monitor thread and wait for it to finish.
                self.end_event.set()
                self.monitor.join()
            if (self.mode_diff):
                # Difference mode: report last sample minus first sample.
                self.value = map(lambda x, y: x - y, self.log[-1], self.log[0])
            else:
                # Averaging mode: divide the accumulated sums by the
                # number of samples taken.
                self.value = map(lambda x: x / self.num_of_get_value,
                                 self.value)


    def get_status(self):
        """
        @return: Status of monitored process average value,
            time of test and array of monitored values and time step of
            continuous run.
        """
        if self.started:
            self.stop()
        if self.mode_diff:
            # Convert the raw sample log into per-interval differences
            # (this mutates self.log, so it is only valid once).
            for i in range(len(self.log) - 1):
                self.log[i] = (map(lambda x, y: x - y,
                                   self.log[i + 1], self.log[i]))
            self.log.pop()
        return (self.value, self.test_time, self.log, self.time_step)
475
476
def is_url(path):
    """Return true if path looks like a URL"""
    # for now, just handle http and ftp
    scheme = urlparse.urlparse(path)[0]
    return scheme in ('http', 'ftp')
482
483
def urlopen(url, data=None, timeout=5):
    """Wrapper to urllib2.urlopen with timeout addition.

    This urllib2.urlopen takes no per-call timeout, so the process-wide
    socket default timeout is swapped in for the duration of the call.
    """

    # Save old timeout
    old_timeout = socket.getdefaulttimeout()
    socket.setdefaulttimeout(timeout)
    try:
        return urllib2.urlopen(url, data=data)
    finally:
        # Restore the previous global timeout no matter what happened.
        socket.setdefaulttimeout(old_timeout)
494
495
def urlretrieve(url, filename, data=None, timeout=300):
    """Download the contents of `url` into the local file `filename`."""
    logging.debug('Fetching %s -> %s', url, filename)

    remote = urlopen(url, data=data, timeout=timeout)
    try:
        local = open(filename, 'wb')
        try:
            shutil.copyfileobj(remote, local)
        finally:
            local.close()
    finally:
        remote.close()
509
510
def hash(type, input=None):
    """
    Return a new hash object of type md5 or sha1, optionally pre-fed
    with `input`.

    Encapsulates hash-object creation so it behaves identically, and
    without warnings, on python 2.4 (legacy md5/sha modules) and
    python 2.6 (hashlib). Only md5 and sha1 are accepted so that the
    supported set matches across both implementations.

    @param input: Optional input string that will be used to update the hash.
    """
    if type not in ('md5', 'sha1'):
        raise ValueError("Unsupported hash type: %s" % type)

    try:
        # hashlib is only imported on newer interpreters; NameError means
        # we must fall back to the legacy per-algorithm modules.
        hash_obj = hashlib.new(type)
    except NameError:
        hash_obj = {'md5': md5, 'sha1': sha}[type].new()

    if input:
        hash_obj.update(input)

    return hash_obj
539
540
def get_file(src, dest, permissions=None):
    """Get a file from src, which can be local or a remote URL

    @param src: Local path or http/ftp URL of the source.
    @param dest: Local destination path.
    @param permissions: Optional mode bits to chmod onto dest.
    @return: The destination path.
    """
    if src == dest:
        # Nothing to copy, but still return the destination so callers
        # (e.g. unmap_url) always get a usable path back instead of the
        # None this branch used to produce.
        return dest

    if is_url(src):
        urlretrieve(src, dest)
    else:
        shutil.copyfile(src, dest)

    if permissions:
        os.chmod(dest, permissions)
    return dest
554
555
def unmap_url(srcdir, src, destdir='.'):
    """
    Receives either a path to a local file or a URL.
    returns either the path to the local file, or the fetched URL

    unmap_url('/usr/src', 'foo.tar', '/tmp')
                            = '/usr/src/foo.tar'
    unmap_url('/usr/src', 'http://site/file', '/tmp')
                            = '/tmp/file'
                            (after retrieving it)
    """
    if not is_url(src):
        return os.path.join(srcdir, src)
    # Remote source: download it into destdir, keeping the URL's basename.
    filename = os.path.basename(urlparse.urlparse(src)[2])
    return get_file(src, os.path.join(destdir, filename))
574
575
def update_version(srcdir, preserve_srcdir, new_version, install,
                   *args, **dargs):
    """
    Make sure srcdir is version new_version

    If not, delete it and install() the new version.

    In the preserve_srcdir case, we just check it's up to date,
    and if not, we rerun install, without removing srcdir

    @param install: callable invoked with *args/**dargs to (re)install.
    """
    versionfile = os.path.join(srcdir, '.version')
    install_needed = True

    if os.path.exists(versionfile):
        # Close the version file explicitly instead of leaking the
        # handle until garbage collection.
        f = open(versionfile)
        try:
            old_version = pickle.load(f)
        finally:
            f.close()
        if old_version == new_version:
            install_needed = False

    if install_needed:
        if not preserve_srcdir and os.path.exists(srcdir):
            shutil.rmtree(srcdir)
        install(*args, **dargs)
        # Only record the version if install() actually produced srcdir.
        if os.path.exists(srcdir):
            f = open(versionfile, 'w')
            try:
                pickle.dump(new_version, f)
            finally:
                f.close()
600
601
def get_stderr_level(stderr_is_expected):
    """Pick the logging level for a command's stderr stream.

    Expected stderr is logged at the (lower) stdout level so it does not
    show up as an error in the logs.
    """
    if not stderr_is_expected:
        return DEFAULT_STDERR_LEVEL
    return DEFAULT_STDOUT_LEVEL
606
607
def run(command, timeout=None, ignore_status=False,
        stdout_tee=None, stderr_tee=None, verbose=True, stdin=None,
        stderr_is_expected=None, args=()):
    """
    Run a command on the host.

    @param command: the command line string.
    @param timeout: seconds to wait before attempting to kill the running
            process; the call itself can take a few seconds longer than
            this when a kill is needed.
    @param ignore_status: do not raise an exception, no matter what the
            exit code of the command is.
    @param stdout_tee: optional file-like object receiving stdout data as
            it is generated (data is still stored in result.stdout).
    @param stderr_tee: likewise for stderr.
    @param verbose: if True, log the command being run.
    @param stdin: stdin to pass to the executed process (can be a file
            descriptor, a file object of a real file or a string).
    @param stderr_is_expected: if set, log stderr at the stdout level;
            defaults to the value of ignore_status.
    @param args: sequence of strings of arguments to be given to the
            command inside " quotes after they have been escaped for
            that; each element in the sequence is a separate argument.

    @return a CmdResult object

    @raise CmdError: the exit code of the command execution was not 0
    """
    if isinstance(args, basestring):
        raise TypeError('Got a string for the "args" keyword argument, '
                        'need a sequence.')

    # Append each extra argument, shell-escaped and quoted.
    for arg in args:
        command += ' "%s"' % sh_escape(arg)

    if stderr_is_expected is None:
        stderr_is_expected = ignore_status

    job = BgJob(command, stdout_tee, stderr_tee, verbose, stdin=stdin,
                stderr_level=get_stderr_level(stderr_is_expected))
    bg_job = join_bg_jobs((job,), timeout)[0]

    if bg_job.result.exit_status and not ignore_status:
        raise error.CmdError(command, bg_job.result,
                             "Command returned non-zero exit status")

    return bg_job.result
654
655
def run_parallel(commands, timeout=None, ignore_status=False,
                 stdout_tee=None, stderr_tee=None):
    """
    Behaves the same as run() with the following exceptions:

    - commands is a list of commands to run in parallel.
    - ignore_status toggles whether or not an exception should be raised
      on any error.

    @return: a list of CmdResult objects
    """
    bg_jobs = []
    for command in commands:
        bg_jobs.append(BgJob(command, stdout_tee, stderr_tee,
                             stderr_level=get_stderr_level(ignore_status)))

    # Updates objects in bg_jobs list with their process information
    join_bg_jobs(bg_jobs, timeout)

    for bg_job in bg_jobs:
        if not ignore_status and bg_job.result.exit_status:
            # Report the command that actually failed; the old code used
            # the leftover loop variable `command`, which always named
            # the *last* command submitted.
            raise error.CmdError(bg_job.command, bg_job.result,
                                 "Command returned non-zero exit status")

    return [bg_job.result for bg_job in bg_jobs]
681
682
@deprecated
def run_bg(command):
    """Function deprecated. Please use BgJob class instead."""
    job = BgJob(command)
    return (job.sp, job.result)
688
689
def join_bg_jobs(bg_jobs, timeout=None):
    """Joins the bg_jobs with the current thread.

    Waits (up to `timeout` seconds) for every job's process to finish,
    draining stdout/stderr into StringIO buffers as it goes, then stores
    the collected output on each job's result.

    Returns the same list of bg_jobs objects that was passed in.

    @raise CmdError: if the jobs did not complete within `timeout`.
    """
    # (removed an unused `ret` local that was never read)
    timeout_error = False
    for bg_job in bg_jobs:
        bg_job.output_prepare(StringIO.StringIO(), StringIO.StringIO())

    try:
        # We are holding ends to stdin, stdout pipes
        # hence we need to be sure to close those fds no mater what
        start_time = time.time()
        timeout_error = _wait_for_commands(bg_jobs, start_time, timeout)

        for bg_job in bg_jobs:
            # Process stdout and stderr
            bg_job.process_output(stdout=True, final_read=True)
            bg_job.process_output(stdout=False, final_read=True)
    finally:
        # close our ends of the pipes to the sp no matter what
        for bg_job in bg_jobs:
            bg_job.cleanup()

    if timeout_error:
        # TODO: This needs to be fixed to better represent what happens when
        # running in parallel. However this is backwards compatable, so it will
        # do for the time being.
        raise error.CmdError(bg_jobs[0].command, bg_jobs[0].result,
                             "Command(s) did not complete within %d seconds"
                             % timeout)

    return bg_jobs
724
725
def _wait_for_commands(bg_jobs, start_time, timeout):
    """Multiplex I/O for a set of BgJobs until they finish or time out.

    Feeds string stdin to each job, drains stdout/stderr as output
    appears, and polls each process for exit. Any process still running
    when the timeout expires is killed via nuke_subprocess().
    """
    # This returns True if it must return due to a timeout, otherwise False.

    # To check for processes which terminate without producing any output
    # a 1 second timeout is used in select.
    SELECT_TIMEOUT = 1

    read_list = []
    write_list = []
    # Maps each selectable fd back to its job: output fds map to
    # (bg_job, is_stdout) pairs, stdin fds map to the bare bg_job.
    reverse_dict = {}

    for bg_job in bg_jobs:
        read_list.append(bg_job.sp.stdout)
        read_list.append(bg_job.sp.stderr)
        reverse_dict[bg_job.sp.stdout] = (bg_job, True)
        reverse_dict[bg_job.sp.stderr] = (bg_job, False)
        if bg_job.string_stdin is not None:
            write_list.append(bg_job.sp.stdin)
            reverse_dict[bg_job.sp.stdin] = bg_job

    if timeout:
        stop_time = start_time + timeout
        time_left = stop_time - time.time()
    else:
        time_left = None # so that select never times out

    while not timeout or time_left > 0:
        # select will return when we may write to stdin or when there is
        # stdout/stderr output we can read (including when it is
        # EOF, that is the process has terminated).
        read_ready, write_ready, _ = select.select(read_list, write_list, [],
                                                   SELECT_TIMEOUT)

        # os.read() has to be used instead of
        # subproc.stdout.read() which will otherwise block
        for file_obj in read_ready:
            bg_job, is_stdout = reverse_dict[file_obj]
            bg_job.process_output(is_stdout)

        for file_obj in write_ready:
            # we can write PIPE_BUF bytes without blocking
            # POSIX requires PIPE_BUF is >= 512
            bg_job = reverse_dict[file_obj]
            file_obj.write(bg_job.string_stdin[:512])
            bg_job.string_stdin = bg_job.string_stdin[512:]
            # no more input data, close stdin, remove it from the select set
            if not bg_job.string_stdin:
                file_obj.close()
                write_list.remove(file_obj)
                del reverse_dict[file_obj]

        all_jobs_finished = True
        for bg_job in bg_jobs:
            if bg_job.result.exit_status is not None:
                continue

            bg_job.result.exit_status = bg_job.sp.poll()
            if bg_job.result.exit_status is not None:
                # process exited, remove its stdout/stdin from the select set
                bg_job.result.duration = time.time() - start_time
                read_list.remove(bg_job.sp.stdout)
                read_list.remove(bg_job.sp.stderr)
                del reverse_dict[bg_job.sp.stdout]
                del reverse_dict[bg_job.sp.stderr]
            else:
                all_jobs_finished = False

        if all_jobs_finished:
            return False

        if timeout:
            time_left = stop_time - time.time()

    # Kill all processes which did not complete prior to timeout
    for bg_job in bg_jobs:
        if bg_job.result.exit_status is not None:
            continue

        logging.warn('run process timeout (%s) fired on: %s', timeout,
                     bg_job.command)
        nuke_subprocess(bg_job.sp)
        bg_job.result.exit_status = bg_job.sp.poll()
        bg_job.result.duration = time.time() - start_time

    return True
811
812
def pid_is_alive(pid):
    """
    True if process pid exists and is not yet stuck in Zombie state.
    Zombies are impossible to move between cgroups, etc.
    pid can be integer, or text of integer.
    """
    stat_path = '/proc/%s/stat' % pid

    try:
        stat_contents = read_one_line(stat_path)
    except IOError:
        if os.path.exists(stat_path):
            # some other read failure, propagate it
            raise
        # the stat file vanished, so the process is gone
        return False

    # the third field of /proc/<pid>/stat is the process state letter
    return stat_contents.split()[2] != 'Z'
830
831
def signal_pid(pid, sig):
    """
    Sends a signal to a process id. Returns True if the process terminated
    successfully, False otherwise.
    """
    try:
        os.kill(pid, sig)
    except OSError:
        # The process may have died before we could kill it.
        pass

    # give the process up to five seconds to disappear
    for _ in range(5):
        if not pid_is_alive(pid):
            return True
        time.sleep(1)

    # The process is still alive
    return False
850
851
def nuke_subprocess(subproc):
    """
    Terminate a subprocess.Popen with an escalating series of signals.

    @param subproc: the subprocess.Popen instance to kill.
    @return the exit status if the process terminated, None if it survived
            even SIGKILL.
    """
    # nothing to do if the subprocess already exited
    exit_status = subproc.poll()
    if exit_status is not None:
        return exit_status

    # escalate from polite termination to a hard kill
    for sig in (signal.SIGTERM, signal.SIGKILL):
        signal_pid(subproc.pid, sig)
        exit_status = subproc.poll()
        if exit_status is not None:
            return exit_status
864
865
def nuke_pid(pid, signal_queue=(signal.SIGTERM, signal.SIGKILL)):
    """
    Kill the process identified by pid with an escalating series of signals.

    @param pid: process id to kill.
    @param signal_queue: sequence of signals to try, in order.
    @raises error.AutoservRunError if no signal managed to terminate the
            process.
    """
    for sig in signal_queue:
        if signal_pid(pid, sig):
            return

    # no signal successfully terminated the process
    raise error.AutoservRunError('Could not kill %d' % pid, None)
875
876
def system(command, timeout=None, ignore_status=False):
    """
    Run a command, teeing its stdout/stderr to the logs.

    @param command: command line string to run.
    @param timeout: timeout in seconds
    @param ignore_status: if ignore_status=False, throw an exception if the
            command's exit code is non-zero
            if ignore_status=True, return the exit code.

    @return exit status of command
            (note, this will always be zero unless ignore_status=True)
    """
    result = run(command, timeout=timeout, ignore_status=ignore_status,
                 stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return result.exit_status
891
892
def system_parallel(commands, timeout=None, ignore_status=False):
    """
    Run a list of commands in parallel.

    @return a list with the exit status of each command, in the same order
            as 'commands'.
    """
    results = run_parallel(commands, timeout=timeout,
                           ignore_status=ignore_status,
                           stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    return [result.exit_status for result in results]
899
900
def system_output(command, timeout=None, ignore_status=False,
                  retain_output=False, args=()):
    """
    Run a command and return the stdout output.

    @param command: command string to execute.
    @param timeout: time limit in seconds before attempting to kill the
            running process. The function will take a few seconds longer
            than 'timeout' to complete if it has to kill the process.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of the command is.
    @param retain_output: set to True to make stdout/stderr of the command
            output to be also sent to the logging system
    @param args: sequence of strings of arguments to be given to the command
            inside " quotes after they have been escaped for that; each
            element in the sequence will be given as a separate command
            argument

    @return a string with the stdout output of the command, with a single
            trailing newline removed.
    """
    if retain_output:
        result = run(command, timeout=timeout, ignore_status=ignore_status,
                     stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS,
                     args=args)
    else:
        result = run(command, timeout=timeout, ignore_status=ignore_status,
                     args=args)

    output = result.stdout
    # strip exactly one trailing newline, if present
    if output.endswith('\n'):
        output = output[:-1]
    return output
931
932
def system_output_parallel(commands, timeout=None, ignore_status=False,
                           retain_output=False):
    """
    Run a list of commands in parallel and return their stdout outputs.

    @param commands: list of command strings to execute.
    @param timeout: time limit in seconds applied to the whole batch.
    @param ignore_status: do not raise an exception, no matter what the exit
            code of a command is.
    @param retain_output: set to True to make stdout/stderr of the commands
            also be sent to the logging system.

    @return a list with the stdout output of each command, in the same order
            as 'commands', each with a single trailing newline removed.
    """
    if retain_output:
        results = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status,
                               stdout_tee=TEE_TO_LOGS, stderr_tee=TEE_TO_LOGS)
    else:
        results = run_parallel(commands, timeout=timeout,
                               ignore_status=ignore_status)
    out = [bg_job.stdout for bg_job in results]

    # Strip one trailing newline from each command's output, mirroring
    # system_output(). The previous loop tested the list object itself
    # (out[-1:] == '\n') instead of each string, so it never stripped
    # anything.
    stripped = []
    for output in out:
        if output.endswith('\n'):
            output = output[:-1]
        stripped.append(output)
    return stripped
946
947
def strip_unicode(input):
    """
    Recursively convert unicode strings inside input into plain byte strings.

    Lists and dicts are rebuilt with their contents converted; dict keys are
    passed through str(). Values of any other type are returned unchanged.
    """
    input_type = type(input)
    if input_type == list:
        return [strip_unicode(element) for element in input]
    elif input_type == dict:
        converted = {}
        for key, value in input.items():
            converted[str(key)] = strip_unicode(value)
        return converted
    elif input_type == unicode:
        return str(input)
    else:
        return input
960
961
def get_cpu_percentage(function, *args, **dargs):
    """Returns a tuple containing the CPU% and return value from function call.

    This function calculates the usage time by taking the difference of
    the user and system times both before and after the function call.

    @param function: the callable to time.
    @return (cpu_percent, to_return) where cpu_percent is the combined
            user+system CPU time (of this process and its children) divided
            by the elapsed wall-clock time, and to_return is whatever
            'function' returned.
    """
    child_pre = resource.getrusage(resource.RUSAGE_CHILDREN)
    self_pre = resource.getrusage(resource.RUSAGE_SELF)
    start = time.time()
    to_return = function(*args, **dargs)
    elapsed = time.time() - start
    self_post = resource.getrusage(resource.RUSAGE_SELF)
    child_post = resource.getrusage(resource.RUSAGE_CHILDREN)

    # Calculate CPU Percentage. Use the named rusage fields instead of
    # slicing zip(...) (which relies on zip returning a list and on the
    # first two struct fields happening to be the times).
    s_user = self_post.ru_utime - self_pre.ru_utime
    s_system = self_post.ru_stime - self_pre.ru_stime
    c_user = child_post.ru_utime - child_pre.ru_utime
    c_system = child_post.ru_stime - child_pre.ru_stime
    cpu_percent = (s_user + c_user + s_system + c_system) / elapsed

    return cpu_percent, to_return
982
983
class SystemLoad(object):
    """
    Get system and/or process values and return average value of load.
    """
    def __init__(self, pids, advanced=False, time_step=0.1, cpu_cont=False,
                 use_log=False):
        """
        @param pids: List of pids to be monitored. pid == 0 means the whole
          system will be monitored. An entry may also be a (pid, name) tuple.
        @param advanced: If True, also monitor the system IRQ and softirq
          counts (for processes: the minor and major page fault counts).
        @param time_step: Time step for continuous monitoring.
        @param cpu_cont: If True monitor CPU load continuously.
        @param use_log: If True every measurement is logged for dump().
        """
        self.pids = []
        self.stats = {}
        for pid in pids:
            if pid == 0:
                cpu = FileFieldMonitor("/proc/stat",
                                       [("cpu", 0), # User Time
                                        ("cpu", 2), # System Time
                                        ("intr", 0), # IRQ Count
                                        ("softirq", 0)], # Soft IRQ Count
                                       True,
                                       cpu_cont,
                                       use_log,
                                       " +",
                                       time_step)
                mem = FileFieldMonitor("/proc/meminfo",
                                       [("MemTotal:", 0), # Mem Total
                                        ("MemFree:", 0), # Mem Free
                                        ("Buffers:", 0), # Buffers
                                        ("Cached:", 0)], # Cached
                                       False,
                                       True,
                                       use_log,
                                       " +",
                                       time_step)
                self.stats[pid] = ["TOTAL", cpu, mem]
                self.pids.append(pid)
            else:
                name = ""
                if (type(pid) is int):
                    self.pids.append(pid)
                    name = get_process_name(pid)
                else:
                    # (pid, name) tuple: the caller supplies the display name
                    self.pids.append(pid[0])
                    name = pid[1]

                cpu = FileFieldMonitor("/proc/%d/stat" %
                                       self.pids[-1],
                                       [("", 13), # User Time
                                        ("", 14), # System Time
                                        ("", 9), # Minority Page Fault
                                        ("", 11)], # Majority Page Fault
                                       True,
                                       cpu_cont,
                                       use_log,
                                       " +",
                                       time_step)
                mem = FileFieldMonitor("/proc/%d/status" %
                                       self.pids[-1],
                                       [("VmSize:", 0), # Virtual Memory Size
                                        ("VmRSS:", 0), # Resident Set Size
                                        ("VmPeak:", 0), # Peak VM Size
                                        ("VmSwap:", 0)], # VM in Swap
                                       False,
                                       True,
                                       use_log,
                                       " +",
                                       time_step)
                self.stats[self.pids[-1]] = [name, cpu, mem]

        self.advanced = advanced


    def __str__(self):
        """
        Define format how to print
        """
        out = ""
        for pid in self.pids:
            for stat in self.stats[pid][1:]:
                out += str(stat.get_status()) + "\n"
        return out


    def start(self, pids=None):
        """
        Start monitoring of the process system usage.
        @param pids: List of PIDs you intend to control. Use pids=None (or an
            empty list) to control all defined PIDs.
        """
        # None default instead of a mutable [] default argument
        if not pids:
            pids = self.pids

        for pid in pids:
            for stat in self.stats[pid][1:]:
                stat.start()


    def stop(self, pids=None):
        """
        Stop monitoring of the process system usage.
        @param pids: List of PIDs you intend to control. Use pids=None (or an
            empty list) to control all defined PIDs.
        """
        if not pids:
            pids = self.pids

        for pid in pids:
            for stat in self.stats[pid][1:]:
                stat.stop()


    def dump(self, pids=None):
        """
        Get the status of monitoring.
        @param pids: List of PIDs you intend to control. Use pids=None (or an
            empty list) to control all defined PIDs.
        @return:
            tuple([cpu load], [memory load]):
                ([(PID1, (PID1_cpu_meas)), (PID2, (PID2_cpu_meas)), ...],
                 [(PID1, (PID1_mem_meas)), (PID2, (PID2_mem_meas)), ...])

            PID1_cpu_meas:
                average_values[], test_time, cont_meas_values[[]], time_step
            PID1_mem_meas:
                average_values[], test_time, cont_meas_values[[]], time_step
            where average_values[] are the measured values (mem_free,swap,...)
            which are described in SystemLoad.__init__()-FileFieldMonitor.
            cont_meas_values[[]] is a list of average_values in the sampling
            times.
        """
        if not pids:
            pids = self.pids

        cpus = []
        memory = []
        for pid in pids:
            cpus.append((pid, self.stats[pid][1].get_status()))
        for pid in pids:
            memory.append((pid, self.stats[pid][2].get_status()))

        return (cpus, memory)


    def get_cpu_status_string(self, pids=None):
        """
        Convert status to string array.
        @param pids: List of PIDs you intend to control. Use pids=None (or an
            empty list) to control all defined PIDs.
        @return: String format to table.
        """
        if not pids:
            pids = self.pids

        headers = ["NAME",
                   ("%7s") % "PID",
                   ("%5s") % "USER",
                   ("%5s") % "SYS",
                   ("%5s") % "SUM"]
        if self.advanced:
            headers.extend(["MINFLT/IRQC",
                            "MAJFLT/SOFTIRQ"])
        headers.append(("%11s") % "TIME")
        textstatus = []
        for pid in pids:
            stat = self.stats[pid][1].get_status()
            # named measure_time so the 'time' module is not shadowed
            measure_time = stat[1]
            stat = stat[0]
            textstatus.append(["%s" % self.stats[pid][0],
                               "%7s" % pid,
                               "%4.0f%%" % (stat[0] / measure_time),
                               "%4.0f%%" % (stat[1] / measure_time),
                               "%4.0f%%" % ((stat[0] + stat[1]) /
                                            measure_time),
                               "%10.3fs" % measure_time])
            if self.advanced:
                textstatus[-1].insert(-1, "%11d" % stat[2])
                textstatus[-1].insert(-1, "%14d" % stat[3])

        return matrix_to_string(textstatus, tuple(headers))


    def get_mem_status_string(self, pids=None):
        """
        Convert status to string array.
        @param pids: List of PIDs you intend to control. Use pids=None (or an
            empty list) to control all defined PIDs.
        @return: String format to table.
        """
        if not pids:
            pids = self.pids

        headers = ["NAME",
                   ("%7s") % "PID",
                   ("%8s") % "TOTAL/VMSIZE",
                   ("%8s") % "FREE/VMRSS",
                   ("%8s") % "BUFFERS/VMPEAK",
                   ("%8s") % "CACHED/VMSWAP",
                   ("%11s") % "TIME"]
        textstatus = []
        for pid in pids:
            stat = self.stats[pid][2].get_status()
            # named measure_time so the 'time' module is not shadowed
            measure_time = stat[1]
            stat = stat[0]
            textstatus.append(["%s" % self.stats[pid][0],
                               "%7s" % pid,
                               "%10dMB" % (stat[0] / 1024),
                               "%8dMB" % (stat[1] / 1024),
                               "%12dMB" % (stat[2] / 1024),
                               "%11dMB" % (stat[3] / 1024),
                               "%10.3fs" % measure_time])

        return matrix_to_string(textstatus, tuple(headers))
1202
1203
def get_arch(run_function=run):
    """
    Get the hardware architecture of the machine.
    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    machine_arch = run_function('/bin/uname -m').stdout.rstrip()
    # normalize all ix86 variants (i386, i486, i586, i686) to i386
    if re.match(r'i\d86$', machine_arch):
        return 'i386'
    return machine_arch
1216
1217
def get_num_logical_cpus_per_socket(run_function=run):
    """
    Get the number of cores (including hyperthreading) per cpu.
    run_function is used to execute the commands. It defaults to
    utils.run() but a custom method (if provided) should be of the
    same schema as utils.run. It should return a CmdResult object and
    throw a CmdError exception.
    """
    cpuinfo = run_function('grep "^siblings" /proc/cpuinfo').stdout.rstrip()
    siblings_counts = [int(value) for value in
                       re.findall(r'^siblings\s*:\s*(\d+)\s*$', cpuinfo, re.M)]
    if not siblings_counts:
        raise error.TestError('Unable to find siblings info in /proc/cpuinfo')
    # every socket must report the same sibling count
    if min(siblings_counts) != max(siblings_counts):
        raise error.TestError('Number of siblings differ %r' %
                              siblings_counts)
    return siblings_counts[0]
1236
1237
def merge_trees(src, dest):
    """
    Merges a source directory tree at 'src' into a destination tree at
    'dest'. If a path is a file in both trees than the file in the source
    tree is APPENDED to the one in the destination tree. If a path is
    a directory in both trees then the directories are recursively merged
    with this function. In any other case, the function will skip the
    paths that cannot be merged (instead of failing).
    """
    if not os.path.exists(src):
        # exists only in dest, nothing to merge
        return

    if not os.path.exists(dest):
        # exists only in src: copy the file or the whole tree across
        if os.path.isfile(src):
            shutil.copy2(src, dest)
        else:
            shutil.copytree(src, dest, symlinks=True)
        return

    if os.path.isfile(src) and os.path.isfile(dest):
        # both are files: append the source contents to the destination
        src_file = open(src)
        try:
            contents = src_file.read()
        finally:
            src_file.close()
        dest_file = open(dest, "a")
        try:
            dest_file.write(contents)
        finally:
            dest_file.close()
    elif os.path.isdir(src) and os.path.isdir(dest):
        # both are directories: merge each entry recursively
        for entry in os.listdir(src):
            merge_trees(os.path.join(src, entry), os.path.join(dest, entry))
    # otherwise both exist but are incompatible types; skip silently
1273
1274
class CmdResult(object):
    """
    Command execution result.

    command:     String containing the command line itself
    exit_status: Integer exit code of the process
    stdout:      String containing stdout of the process
    stderr:      String containing stderr of the process
    duration:    Elapsed wall clock time running the process
    """


    def __init__(self, command="", stdout="", stderr="",
                 exit_status=None, duration=0):
        self.command = command
        self.exit_status = exit_status
        self.stdout = stdout
        self.stderr = stderr
        self.duration = duration


    def __repr__(self):
        # wrap the command onto indented continuation lines for readability
        wrapper = textwrap.TextWrapper(width=78,
                                       initial_indent="\n    ",
                                       subsequent_indent="    ")

        text = ("* Command: %s\n"
                "Exit status: %s\n"
                "Duration: %s\n"
                % (wrapper.fill(self.command), self.exit_status,
                   self.duration))

        # only show the stdout/stderr sections when there is output
        stdout = self.stdout.rstrip()
        if stdout:
            text += "\nstdout:\n%s" % stdout
        stderr = self.stderr.rstrip()
        if stderr:
            text += "\nstderr:\n%s" % stderr

        return text
1316
1317
class run_randomly:
    """Collects queued test invocations and runs them in a random order
    (or in insertion order when debugging)."""

    def __init__(self, run_sequentially=False):
        # Run sequentially is for debugging control files
        self.test_list = []
        self.run_sequentially = run_sequentially


    def add(self, *args, **dargs):
        """Queue up one fn(*args, **dargs) invocation for run()."""
        self.test_list.append((args, dargs))


    def run(self, fn):
        """Invoke fn once for every queued test, consuming the queue."""
        while self.test_list:
            # always draw the random number so the global RNG state advances
            # the same way in both modes
            index = random.randint(0, len(self.test_list) - 1)
            if self.run_sequentially:
                index = 0
            args, dargs = self.test_list.pop(index)
            fn(*args, **dargs)
1337
1338
def import_site_module(path, module, dummy=None, modulefile=None):
    """
    Try to import the site specific module if it exists.

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific module or dummy

    @raises ImportError if the site file exists but imports fails
    """
    short_module = module.rsplit(".", 1)[-1]

    if not modulefile:
        modulefile = short_module + ".py"

    # only attempt the import when the site file sits next to the caller
    site_file = os.path.join(os.path.dirname(path), modulefile)
    if os.path.exists(site_file):
        return __import__(module, {}, {}, [short_module])
    return dummy
1360
1361
def import_site_symbol(path, module, name, dummy=None, modulefile=None):
    """
    Try to import site specific symbol from site specific file if it exists

    @param path full filename of the source file calling this (ie __file__)
    @param module full module name
    @param name symbol name to be imported from the site file
    @param dummy dummy value to return in case there is no symbol to import
    @param modulefile module filename

    @return site specific symbol or dummy

    @raises ImportError if the site file exists but imports fails
    """
    site_module = import_site_module(path, module, modulefile=modulefile)
    if not site_module:
        return dummy

    # special unique value to tell us if the symbol can't be imported
    cant_import = object()

    symbol = getattr(site_module, name, cant_import)
    if symbol is cant_import:
        logging.debug("unable to import site symbol '%s', using non-site "
                      "implementation", name)
        return dummy
    return symbol
1390
1391
def import_site_class(path, module, classname, baseclass, modulefile=None):
    """
    Try to import site specific class from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        classname: class name to be loaded from site file
        baseclass: base class object to return when no site file present or
            to mixin when site class exists but is not inherited from baseclass
        modulefile: module filename

    Returns: baseclass if site specific class does not exist, the site specific
        class if it exists and is inherited from baseclass or a mixin of the
        site specific class and baseclass when the site specific class exists
        and is not inherited from baseclass

    Raises: ImportError if the site file exists but imports fails
    """
    site_class = import_site_symbol(path, module, classname, None, modulefile)
    if not site_class:
        return baseclass
    if issubclass(site_class, baseclass):
        return site_class
    # the site class does not derive from baseclass, so mix baseclass into
    # it and return the combined type
    return type(classname, (site_class, baseclass), {})
1422
1423
def import_site_function(path, module, funcname, dummy, modulefile=None):
    """
    Try to import site specific function from site specific file if it exists

    Args:
        path: full filename of the source file calling this (ie __file__)
        module: full module name
        funcname: function name to be imported from site file
        dummy: dummy function to return in case there is no function to import
        modulefile: module filename

    Returns: site specific function object or dummy

    Raises: ImportError if the site file exists but imports fails
    """
    # functions are just module symbols, so delegate to the symbol importer
    return import_site_symbol(path, module, funcname, dummy, modulefile)
1441
1442
def _get_pid_path(program_name):
    """Return the absolute path of <program_name>.pid, located in the main
    autotest directory (two levels above this module)."""
    base_dir = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(base_dir, "..", "..",
                                        "%s.pid" % program_name))
1447
1448
def write_pid(program_name):
    """
    Try to drop <program_name>.pid in the main autotest directory.

    Args:
      program_name: prefix for file name
    """
    pid_path = _get_pid_path(program_name)
    pidfile = open(pid_path, "w")
    try:
        pidfile.write("%s\n" % os.getpid())
    finally:
        # always release the file handle, even if the write fails
        pidfile.close()
1461
1462
def delete_pid_file_if_exists(program_name):
    """
    Tries to remove <program_name>.pid from the main autotest directory.
    """
    pid_path = _get_pid_path(program_name)

    try:
        os.remove(pid_path)
    except OSError:
        # only swallow the error when the file is really gone
        if os.path.exists(pid_path):
            raise
1475
1476
def get_pid_from_file(program_name):
    """
    Reads the pid from <program_name>.pid in the autotest directory.

    @param program_name the name of the program
    @return the pid if the file exists, None otherwise.
    """
    pidfile_path = _get_pid_path(program_name)
    if not os.path.exists(pidfile_path):
        return None

    pidfile = open(pidfile_path, 'r')
    try:
        try:
            return int(pidfile.readline())
        except IOError:
            # the file disappeared between the existence check and the read
            if os.path.exists(pidfile_path):
                raise
            return None
    finally:
        pidfile.close()
1501
1502
def get_process_name(pid):
    """
    Get process name from PID.
    @param pid: PID of process.
    """
    stat_contents = read_file("/proc/%d/stat" % pid)
    # the second field of /proc/<pid>/stat is "(comm)"; drop the parentheses
    return get_field(stat_contents, 1)[1:-1]
1509
1510
def program_is_alive(program_name):
    """
    Checks if the process is alive and not in Zombie state.

    @param program_name the name of the program
    @return True if still alive, False otherwise
    """
    pid = get_pid_from_file(program_name)
    # no pid file means the program is not running
    return pid is not None and pid_is_alive(pid)
1522
1523
def signal_program(program_name, sig=signal.SIGTERM):
    """
    Sends a signal to the process listed in <program_name>.pid

    @param program_name the name of the program
    @param sig signal to send
    """
    pid = get_pid_from_file(program_name)
    if not pid:
        # no pid file (or pid 0): nothing to signal
        return
    signal_pid(pid, sig)
1534
1535
def get_relative_path(path, reference):
    """Given 2 absolute paths "path" and "reference", compute the path of
    "path" as relative to the directory "reference".

    @param path the absolute path to convert to a relative path
    @param reference an absolute directory path to which the relative
        path will be computed
    @return the relative path; os.curdir ('.') when path == reference
    """
    # normalize the paths (remove double slashes, etc)
    assert(os.path.isabs(path))
    assert(os.path.isabs(reference))

    path = os.path.normpath(path)
    reference = os.path.normpath(reference)

    # we could use os.path.split() but it splits from the end
    path_list = path.split(os.path.sep)[1:]
    ref_list = reference.split(os.path.sep)[1:]

    # count the longest run of matching leading components
    common = 0
    for path_part, ref_part in zip(path_list, ref_list):
        if path_part != ref_part:
            break
        common += 1

    # one ".." per uncommon reference component, then the uncommon
    # tail of path
    relative = ['..'] * (len(ref_list) - common) + path_list[common:]
    if not relative:
        # path == reference; os.path.join(*[]) would raise TypeError
        return os.curdir
    return os.path.join(*relative)
1570
1571
def sh_escape(command):
    """
    Escape special characters from a command so that it can be passed
    as a double quoted (" ") string in a (ba)sh command.

    Args:
            command: the command string to escape.

    Returns:
            The escaped command string. The required englobing double
            quotes are NOT added and so should be added at some point by
            the caller.

    See also: http://www.tldp.org/LDP/abs/html/escapingsection.html
    """
    # backslash must be escaped first so later escapes are not doubled
    for special in ('\\', '$', '"', '`'):
        command = command.replace(special, '\\' + special)
    return command
1592
1593
def configure(extra=None, configure='./configure'):
    """
    Run configure passing in the correct host, build, and target options.

    @param extra: extra command line arguments to pass to configure
    @param configure: which configure script to use
    """
    args = []
    # forward the toolchain triplets from the environment, when set
    for flag, env_var in (('--host', 'CHOST'),
                          ('--build', 'CBUILD'),
                          ('--target', 'CTARGET')):
        if env_var in os.environ:
            args.append(flag + '=' + os.environ[env_var])
    if extra:
        args.append(extra)

    system('%s %s' % (configure, ' '.join(args)))
1612
1613
def make(extra='', make='make', timeout=None, ignore_status=False):
    """
    Run make, adding MAKEOPTS to the list of options.

    @param extra: extra command line arguments to pass to make.
    @return the exit status reported by system().
    """
    makeopts = os.environ.get('MAKEOPTS', '')
    command = '%s %s %s' % (make, makeopts, extra)
    return system(command, timeout=timeout, ignore_status=ignore_status)
1622
1623
def compare_versions(ver1, ver2):
    """Version number comparison between ver1 and ver2 strings.

    >>> compare_versions("1", "2")
    -1
    >>> compare_versions("foo-1.1", "foo-1.2")
    -1
    >>> compare_versions("1.2", "1.2a")
    -1
    >>> compare_versions("1.2b", "1.2a")
    1
    >>> compare_versions("1.3.5.3a", "1.3.5.3b")
    -1

    Args:
        ver1: version string
        ver2: version string

    Returns:
        int:  1 if ver1 >  ver2
              0 if ver1 == ver2
             -1 if ver1 <  ver2
    """
    def _cmp(x, y):
        # portable replacement for the Python-2-only cmp() builtin
        return (x > y) - (x < y)

    ax = re.split('[.-]', ver1)
    ay = re.split('[.-]', ver2)
    while len(ax) > 0 and len(ay) > 0:
        cx = ax.pop(0)
        cy = ay.pop(0)
        # zero-pad the shorter component so "2" < "10" compares numerically
        maxlen = max(len(cx), len(cy))
        c = _cmp(cx.zfill(maxlen), cy.zfill(maxlen))
        if c != 0:
            return c
    # all shared components equal: the version with more components is newer
    return _cmp(len(ax), len(ay))
1657
1658
def args_to_dict(args):
    """Convert autoserv extra arguments in the form of key=val or key:val to a
    dictionary.  Each argument key is converted to lowercase dictionary key.

    Arguments that do not match the key=val / key:val pattern are skipped
    with a warning.

    Args:
        args - list of autoserv extra arguments.

    Returns:
        dictionary
    """
    arg_re = re.compile(r'(\w+)[:=](.*)$')
    # Named to avoid shadowing the builtin 'dict'.
    arg_dict = {}
    for arg in args:
        match = arg_re.match(arg)
        if match:
            arg_dict[match.group(1).lower()] = match.group(2)
        else:
            # Lazy %-args so the message is only formatted when emitted.
            logging.warning("args_to_dict: argument '%s' doesn't match "
                            "'%s' pattern. Ignored.", arg, arg_re.pattern)
    return arg_dict
1679
1680
def get_unused_port():
    """
    Finds a semi-random available port. A race condition is still
    possible after the port number is returned, if another process
    happens to bind it.

    Returns:
        A port number that is unused on both TCP and UDP.
    """

    def _attempt_bind(candidate, sock_type, sock_proto):
        # Returns the bound port number on success, None if the bind failed.
        sock = socket.socket(socket.AF_INET, sock_type, sock_proto)
        try:
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            sock.bind(('', candidate))
            return sock.getsockname()[1]
        except socket.error:
            return None
        finally:
            sock.close()

    # On the 2.6 kernel, binding port 0 on a UDP socket returns the
    # same port over and over, so let TCP pick the candidate first and
    # then verify it is also free on UDP.
    while True:
        candidate = _attempt_bind(0, socket.SOCK_STREAM, socket.IPPROTO_TCP)
        if candidate and _attempt_bind(candidate, socket.SOCK_DGRAM,
                                       socket.IPPROTO_UDP):
            return candidate
1711