base_utils.py revision 42b7a5d85e9467ae6f3408f20bfee5de93afcf45
1"""
2DO NOT import this file directly - import client/bin/utils.py,
3which will mix this in
4
5Convenience functions for use by tests or whomever.
6
7Note that this file is mixed in by utils.py - note very carefully the
8precedence order defined there
9"""
10import os, shutil, sys, signal, commands, pickle, glob, statvfs
11import math, re, string, fnmatch, logging
12from autotest_lib.client.common_lib import error, utils, magic
13
14
def grep(pattern, file):
    """
    This is mainly to fix the return code inversion from grep
    Also handles compressed files.

    returns True if the pattern is present in the file, False if not.
    """
    command = 'grep "%s" > /dev/null' % pattern
    ret = cat_file_to_cmd(file, command, ignore_status=True)
    return not ret


def difflist(list1, list2):
    """returns items in list2 that are not in list1"""
    diff = []
    for x in list2:
        if x not in list1:
            diff.append(x)
    return diff


def cat_file_to_cmd(file, command, ignore_status=0, return_output=False):
    """
    equivalent to 'cat file | command' but knows to use
    zcat or bzcat if appropriate
    """
    if not os.path.isfile(file):
        raise NameError('invalid file %s to cat to command %s'
                % (file, command))

    if return_output:
        run_cmd = utils.system_output
    else:
        run_cmd = utils.system

    if magic.guess_type(file) == 'application/x-bzip2':
        cat = 'bzcat'
    elif magic.guess_type(file) == 'application/x-gzip':
        cat = 'zcat'
    else:
        cat = 'cat'
    return run_cmd('%s %s | %s' % (cat, file, command),
                   ignore_status=ignore_status)
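
# A minimal usage sketch for the two helpers above (the paths below are
# illustrative, not part of this module):
#
#   if grep('panic', '/var/log/messages'):
#       logging.info('found at least one "panic" line')
#
#   # cat_file_to_cmd() transparently handles gzip/bzip2 compressed files:
#   tail = cat_file_to_cmd('/var/log/messages.1.gz', 'tail -n 20',
#                          return_output=True)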


def extract_tarball_to_dir(tarball, dir):
    """
    Extract a tarball to a specified directory name instead of whatever
    the top level of a tarball is - useful for versioned directory names, etc
    """
    if os.path.exists(dir):
        if os.path.isdir(dir):
            shutil.rmtree(dir)
        else:
            os.remove(dir)
    pwd = os.getcwd()
    os.chdir(os.path.dirname(os.path.abspath(dir)))
    newdir = extract_tarball(tarball)
    os.rename(newdir, dir)
    os.chdir(pwd)


def extract_tarball(tarball):
    """Returns the directory extracted by the tarball."""
    extracted = cat_file_to_cmd(tarball, 'tar xvf - 2>/dev/null',
                                return_output=True).splitlines()

    dir = None

    for line in extracted:
        line = re.sub(r'^\./', '', line)
        if not line or line == '.':
            continue
        topdir = line.split('/')[0]
        if os.path.isdir(topdir):
            if dir:
                assert(dir == topdir)
            else:
                dir = topdir
    if dir:
        return dir
    else:
        raise NameError('extracting tarball produced no dir')
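
# Usage sketch (hypothetical paths): unpack a versioned tarball into a
# predictable directory name, regardless of its internal top-level dir:
#
#   extract_tarball_to_dir('/tmp/linux-3.0.tar.bz2', '/tmp/linux')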


def hash_file(filename, size=None, method="md5"):
    """
    Calculate the hash of filename.
    If size is not None, limit to first size bytes.
    Throw exception if something is wrong with filename.
    Can be also implemented with bash one-liner (assuming size%1024==0):
    dd if=filename bs=1024 count=size/1024 | sha1sum -

    @param filename: Path of the file that will have its hash calculated.
    @param method: Method used to calculate the hash. Supported methods:
            * md5
            * sha1
    @returns: Hash of the file, if something goes wrong, return None.
    """
    chunksize = 4096
    fsize = os.path.getsize(filename)

    if not size or size > fsize:
        size = fsize
    f = open(filename, 'rb')

    try:
        hash = utils.hash(method)
    except ValueError:
        logging.error("Unknown hash type %s, returning None" % method)
        f.close()
        return None

    while size > 0:
        if chunksize > size:
            chunksize = size
        data = f.read(chunksize)
        if len(data) == 0:
            logging.debug("Nothing left to read but size=%d" % size)
            break
        hash.update(data)
        size -= len(data)
    f.close()
    return hash.hexdigest()
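
# Usage sketch (the path below is just an example):
#
#   md5sum = hash_file('/etc/services')
#   sha1sum = hash_file('/etc/services', method='sha1')
#   first_meg = hash_file('/etc/services', size=1024 * 1024)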


def unmap_url_cache(cachedir, url, expected_hash, method="md5"):
    """
    Downloads a file from a URL to a cache directory. If the file is already
    at the expected position and has the expected hash, let's not download it
    again.

    @param cachedir: Directory that might hold a copy of the file we want to
            download.
    @param url: URL for the file we want to download.
    @param expected_hash: Hash string that we expect the file downloaded to
            have.
    @param method: Method used to calculate the hash string (md5, sha1).
    """
    # Let's convert cachedir to a canonical path, if it's not already
    cachedir = os.path.realpath(cachedir)
    if not os.path.isdir(cachedir):
        try:
            os.makedirs(cachedir)
        except:
            raise ValueError('Could not create cache directory %s' % cachedir)
    file_from_url = os.path.basename(url)
    file_local_path = os.path.join(cachedir, file_from_url)

    file_hash = None
    failure_counter = 0
    while not file_hash == expected_hash:
        if os.path.isfile(file_local_path):
            file_hash = hash_file(file_local_path, method=method)
            if file_hash == expected_hash:
                # File is already at the expected position and ready to go
                src = file_from_url
            else:
                # Let's download the package again, it's corrupted...
                logging.error("Seems that file %s is corrupted, trying to "
                              "download it again" % file_from_url)
                src = url
                failure_counter += 1
        else:
            # File is not there, let's download it
            src = url
        if failure_counter > 1:
            raise EnvironmentError("Consistently failed to download the "
                                   "package %s. Aborting further download "
                                   "attempts. This might mean either the "
                                   "network connection has problems or the "
                                   "expected hash string that was determined "
                                   "for this file is wrong" % file_from_url)
        file_path = utils.unmap_url(cachedir, src, cachedir)

    return file_path
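
# Usage sketch (URL and hash are placeholders, not real values):
#
#   iso = unmap_url_cache('/var/cache/autotest',
#                         'http://example.com/images/test.iso',
#                         expected_hash='0123456789abcdef0123456789abcdef',
#                         method='md5')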


def force_copy(src, dest):
    """Replace dest with a new copy of src, even if it exists"""
    if os.path.isfile(dest):
        os.remove(dest)
    if os.path.isdir(dest):
        dest = os.path.join(dest, os.path.basename(src))
    shutil.copyfile(src, dest)
    return dest


def force_link(src, dest):
    """Link src to dest, overwriting it if it exists"""
    return utils.system("ln -sf %s %s" % (src, dest))


def file_contains_pattern(file, pattern):
    """Return true if file contains the specified egrep pattern"""
    if not os.path.isfile(file):
        raise NameError('file %s does not exist' % file)
    return not utils.system('egrep -q "' + pattern + '" ' + file,
                            ignore_status=True)


def list_grep(list, pattern):
    """True if any item in list matches the specified pattern."""
    compiled = re.compile(pattern)
    for line in list:
        match = compiled.search(line)
        if match:
            return 1
    return 0


def get_os_vendor():
    """Try to guess what the OS vendor is."""
    if os.path.isfile('/etc/SuSE-release'):
        return 'SUSE'

    issue = '/etc/issue'

    if not os.path.isfile(issue):
        return 'Unknown'

    if file_contains_pattern(issue, 'Red Hat'):
        return 'Red Hat'
    elif file_contains_pattern(issue, 'Fedora'):
        return 'Fedora Core'
    elif file_contains_pattern(issue, 'SUSE'):
        return 'SUSE'
    elif file_contains_pattern(issue, 'Ubuntu'):
        return 'Ubuntu'
    elif file_contains_pattern(issue, 'Debian'):
        return 'Debian'
    else:
        return 'Unknown'


def get_vmlinux():
    """Return the full path to vmlinux

    Ahem. This is crap. Pray harder. Bad Martin.
    """
    vmlinux = '/boot/vmlinux-%s' % utils.system_output('uname -r')
    if os.path.isfile(vmlinux):
        return vmlinux
    vmlinux = '/lib/modules/%s/build/vmlinux' % utils.system_output('uname -r')
    if os.path.isfile(vmlinux):
        return vmlinux
    return None


def get_systemmap():
    """Return the full path to System.map

    Ahem. This is crap. Pray harder. Bad Martin.
    """
    map = '/boot/System.map-%s' % utils.system_output('uname -r')
    if os.path.isfile(map):
        return map
    map = '/lib/modules/%s/build/System.map' % utils.system_output('uname -r')
    if os.path.isfile(map):
        return map
    return None


def get_modules_dir():
    """Return the modules dir for the running kernel version"""
    kernel_version = utils.system_output('uname -r')
    return '/lib/modules/%s/kernel' % kernel_version


def get_cpu_arch():
    """Work out which CPU architecture we're running on"""
    f = open('/proc/cpuinfo', 'r')
    cpuinfo = f.readlines()
    f.close()
    if list_grep(cpuinfo, '^cpu.*(RS64|POWER3|Broadband Engine)'):
        return 'power'
    elif list_grep(cpuinfo, '^cpu.*POWER4'):
        return 'power4'
    elif list_grep(cpuinfo, '^cpu.*POWER5'):
        return 'power5'
    elif list_grep(cpuinfo, '^cpu.*POWER6'):
        return 'power6'
    elif list_grep(cpuinfo, '^cpu.*POWER7'):
        return 'power7'
    elif list_grep(cpuinfo, '^cpu.*PPC970'):
        return 'power970'
    elif list_grep(cpuinfo, 'ARM'):
        return 'arm'
    elif list_grep(cpuinfo, '^flags.*:.* lm .*'):
        return 'x86_64'
    else:
        return 'i386'


def get_current_kernel_arch():
    """Get the machine architecture, now just a wrap of 'uname -m'."""
    return os.popen('uname -m').read().rstrip()


def get_file_arch(filename):
    # -L means follow symlinks
    file_data = utils.system_output('file -L ' + filename)
    if file_data.count('80386'):
        return 'i386'
    return None


def count_cpus():
    """number of CPUs in the local machine according to /proc/cpuinfo"""
    f = open('/proc/cpuinfo', 'r')
    cpus = 0
    for line in f.readlines():
        if line.lower().startswith('processor'):
            cpus += 1
    f.close()
    return cpus


# Helpers for reading values, in kB, out of /proc/meminfo
def read_from_meminfo(key):
    meminfo = utils.system_output('grep %s /proc/meminfo' % key)
    return int(re.search(r'\d+', meminfo).group(0))


def memtotal():
    return read_from_meminfo('MemTotal')


def freememtotal():
    return read_from_meminfo('MemFree')


def rounded_memtotal():
    # Get total of all physical mem, in kbytes
    usable_kbytes = memtotal()
    # usable_kbytes is system's usable DRAM in kbytes,
    #   as reported by memtotal() from device /proc/meminfo memtotal
    #   after Linux deducts 1.5% to 5.1% for system table overhead
    # Undo the unknown actual deduction by rounding up
    #   to next small multiple of a big power-of-two
    #   eg  12GB - 5.1% gets rounded back up to 12GB
    mindeduct = 0.015  # 1.5 percent
    maxdeduct = 0.055  # 5.5 percent
    # deduction range 1.5% .. 5.5% supports physical mem sizes
    #    6GB .. 12GB in steps of .5GB
    #   12GB .. 24GB in steps of 1 GB
    #   24GB .. 48GB in steps of 2 GB ...
    # Finer granularity in physical mem sizes would require
    #   tighter spread between min and max possible deductions

    # increase mem size by at least min deduction, without rounding
    min_kbytes   = int(usable_kbytes / (1.0 - mindeduct))
    # increase mem size further by 2**n rounding, by 0..roundKb or more
    round_kbytes = int(usable_kbytes / (1.0 - maxdeduct)) - min_kbytes
    # find least binary roundup 2**n that covers worst-case roundKb
    mod2n = 1 << int(math.ceil(math.log(round_kbytes, 2)))
    # have round_kbytes <= mod2n < round_kbytes*2
    # round min_kbytes up to next multiple of mod2n
    phys_kbytes = min_kbytes + mod2n - 1
    phys_kbytes = phys_kbytes - (phys_kbytes % mod2n)  # clear low bits
    return phys_kbytes
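
# Worked example of the rounding above (numbers are illustrative): if the
# kernel reports MemTotal of 12205425 kB (a 12 GiB machine after a ~3%
# deduction), then min_kbytes ~= 12391294, the worst-case spread
# round_kbytes ~= 524499, so mod2n = 2**20 = 1048576, and rounding
# min_kbytes up to the next multiple of mod2n gives 12582912 kB --
# exactly 12 GiB.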


def sysctl(key, value=None):
    """Generic implementation of sysctl, to read and write.

    @param key: A location under /proc/sys
    @param value: If not None, a value to write into the sysctl.

    @return The single-line sysctl value as a string.
    """
    path = '/proc/sys/%s' % key
    if value is not None:
        utils.write_one_line(path, str(value))
    return utils.read_one_line(path)


def sysctl_kernel(key, value=None):
    """(Very) partial implementation of sysctl, for kernel params"""
    if value is not None:
        # write
        utils.write_one_line('/proc/sys/kernel/%s' % key, str(value))
    else:
        # read
        out = utils.read_one_line('/proc/sys/kernel/%s' % key)
        return int(re.search(r'\d+', out).group(0))
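
# Usage sketch: sysctl() takes a path fragment under /proc/sys, while
# sysctl_kernel() takes just the name under /proc/sys/kernel:
#
#   swappiness = sysctl('vm/swappiness')   # read, returns a string
#   sysctl('vm/swappiness', 10)            # write
#   shmmax = sysctl_kernel('shmmax')       # read, returns an int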


def _convert_exit_status(sts):
    if os.WIFSIGNALED(sts):
        return -os.WTERMSIG(sts)
    elif os.WIFEXITED(sts):
        return os.WEXITSTATUS(sts)
    else:
        # impossible?
        raise RuntimeError("Unknown exit status %d!" % sts)


def where_art_thy_filehandles():
    """Dump the current list of filehandles"""
    os.system("ls -l /proc/%d/fd >> /dev/tty" % os.getpid())


def print_to_tty(string):
    """Output string straight to the tty"""
    open('/dev/tty', 'w').write(string + '\n')


def dump_object(object):
    """Dump an object's attributes and methods

    kind of like dir()
    """
    for item in object.__dict__.iteritems():
        print item
        try:
            (key, value) = item
            dump_object(value)
        except:
            continue


def environ(env_key):
    """return the requested environment variable, or '' if unset"""
    if os.environ.has_key(env_key):
        return os.environ[env_key]
    else:
        return ''


def prepend_path(newpath, oldpath):
    """prepend newpath to oldpath"""
    if oldpath:
        return newpath + ':' + oldpath
    else:
        return newpath


def append_path(oldpath, newpath):
    """append newpath to oldpath"""
    if oldpath:
        return oldpath + ':' + newpath
    else:
        return newpath


def avgtime_print(dir):
    """ Calculate some benchmarking statistics.
        Input is a directory containing a file called 'time'.
        File contains one-per-line results of /usr/bin/time.
        Output is average Elapsed, User, and System time in seconds,
          and average CPU percentage.
    """
    f = open(dir + "/time")
    user = system = elapsed = cpu = count = 0
    r = re.compile(r'([\d\.]*)user ([\d\.]*)system (\d*):([\d\.]*)elapsed (\d*)%CPU')
    for line in f.readlines():
        try:
            s = r.match(line)
            user += float(s.group(1))
            system += float(s.group(2))
            elapsed += (float(s.group(3)) * 60) + float(s.group(4))
            cpu += float(s.group(5))
            count += 1
        except:
            raise ValueError("badly formatted times")

    f.close()
    return "Elapsed: %0.2fs User: %0.2fs System: %0.2fs CPU: %0.0f%%" % \
          (elapsed/count, user/count, system/count, cpu/count)


def running_config():
    """
    Return path of config file of the currently running kernel
    """
    version = utils.system_output('uname -r')
    for config in ('/proc/config.gz',
                   '/boot/config-%s' % version,
                   '/lib/modules/%s/build/.config' % version):
        if os.path.isfile(config):
            return config
    return None


def check_for_kernel_feature(feature):
    config = running_config()

    if not config:
        raise TypeError("Can't find kernel config file")

    if magic.guess_type(config) == 'application/x-gzip':
        grep = 'zgrep'
    else:
        grep = 'grep'
    grep += ' ^CONFIG_%s= %s' % (feature, config)

    if not utils.system_output(grep, ignore_status=True):
        raise ValueError("Kernel doesn't have a %s feature" % (feature))
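
# Usage sketch: the feature name is the CONFIG_ option without its prefix;
# check_for_kernel_feature() raises ValueError when the option is not set:
#
#   check_for_kernel_feature('SMP')        # passes on CONFIG_SMP=y kernels
#   check_for_kernel_feature('HUGETLBFS')  # raises if CONFIG_HUGETLBFS is unset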


def cpu_online_map():
    """
    Check out the available cpu online map
    """
    cpus = []
    for line in open('/proc/cpuinfo', 'r').readlines():
        if line.startswith('processor'):
            cpus.append(line.split()[2]) # grab cpu number
    return cpus


def check_glibc_ver(ver):
    glibc_ver = commands.getoutput('ldd --version').splitlines()[0]
    glibc_ver = re.search(r'(\d+\.\d+(\.\d+)?)', glibc_ver).group()
    # In compare_versions, if v1 < v2, return value == -1
    if utils.compare_versions(glibc_ver, ver) == -1:
        raise error.TestError("Glibc too old (%s). Glibc >= %s is needed." %
                              (glibc_ver, ver))


def check_kernel_ver(ver):
    kernel_ver = utils.system_output('uname -r')
    kv_tmp = re.split(r'[-]', kernel_ver)[0:3]
    # In compare_versions, if v1 < v2, return value == -1
    if utils.compare_versions(kv_tmp[0], ver) == -1:
        raise error.TestError("Kernel too old (%s). Kernel >= %s is needed." %
                              (kernel_ver, ver))


def human_format(number):
    # Convert number to kilo / mega / giga format.
    if number < 1024:
        return "%d" % number
    kilo = float(number) / 1024.0
    if kilo < 1024:
        return "%.2fk" % kilo
    meg = kilo / 1024.0
    if meg < 1024:
        return "%.2fM" % meg
    gig = meg / 1024.0
    return "%.2fG" % gig


def numa_nodes():
    node_paths = glob.glob('/sys/devices/system/node/node*')
    nodes = [int(re.sub(r'.*node(\d+)', r'\1', x)) for x in node_paths]
    return (sorted(nodes))


def node_size():
    nodes = max(len(numa_nodes()), 1)
    return ((memtotal() * 1024) / nodes)


def to_seconds(time_string):
    """Converts a string in M+:SS.SS format to S+.SS"""
    elts = time_string.split(':')
    if len(elts) == 1:
        return time_string
    return str(int(elts[0]) * 60 + float(elts[1]))


def extract_all_time_results(results_string):
    """Extract user, system, and elapsed times into a list of tuples"""
    pattern = re.compile(r"(.*?)user (.*?)system (.*?)elapsed")
    results = []
    for result in pattern.findall(results_string):
        results.append(tuple([to_seconds(elt) for elt in result]))
    return results
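
# Usage sketch, assuming /usr/bin/time style output:
#
#   extract_all_time_results("1.50user 0.20system 1:03.10elapsed 2%CPU")
#   # -> [('1.50', '0.20', '63.1')]  (times as strings, in seconds)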


def pickle_load(filename):
    return pickle.load(open(filename, 'r'))


# Return the kernel version and build timestamp.
def running_os_release():
    return os.uname()[2:4]


def running_os_ident():
    (version, timestamp) = running_os_release()
    return version + '::' + timestamp


def running_os_full_version():
    (version, timestamp) = running_os_release()
    return version


# much like find . -name 'pattern'
def locate(pattern, root=None):
    if root is None:
        root = os.getcwd()
    for path, dirs, files in os.walk(root):
        for f in files:
            if fnmatch.fnmatch(f, pattern):
                yield os.path.abspath(os.path.join(path, f))


def freespace(path):
    """Return the disk free space, in bytes"""
    s = os.statvfs(path)
    return s.f_bavail * s.f_bsize


def disk_block_size(path):
    """Return the disk block size, in bytes"""
    return os.statvfs(path).f_bsize


def get_cpu_family():
    procinfo = utils.system_output('cat /proc/cpuinfo')
    CPU_FAMILY_RE = re.compile(r'^cpu family\s+:\s+(\S+)', re.M)
    matches = CPU_FAMILY_RE.findall(procinfo)
    if matches:
        return int(matches[0])
    else:
        raise error.TestError('Could not get valid cpu family data')


def get_disks():
    df_output = utils.system_output('df')
    disk_re = re.compile(r'^(/dev/hd[a-z]+)3', re.M)
    return disk_re.findall(df_output)


def load_module(module_name):
    # Checks if a module has already been loaded
    if module_is_loaded(module_name):
        return False

    utils.system('/sbin/modprobe ' + module_name)
    return True


def unload_module(module_name):
    """
    Removes a module. Handles dependencies. If it is not possible to remove
    one of the modules, it will throw an error.CmdError exception.

    @param module_name: Name of the module we want to remove.
    """
    l_raw = utils.system_output("/sbin/lsmod").splitlines()
    lsmod = [x for x in l_raw if x.split()[0] == module_name]
    if len(lsmod) > 0:
        line_parts = lsmod[0].split()
        if len(line_parts) == 4:
            submodules = line_parts[3].split(",")
            for submodule in submodules:
                unload_module(submodule)
        utils.system("/sbin/modprobe -r %s" % module_name)
        logging.info("Module %s unloaded" % module_name)
    else:
        logging.info("Module %s is already unloaded" % module_name)


def module_is_loaded(module_name):
    module_name = module_name.replace('-', '_')
    modules = utils.system_output('/sbin/lsmod').splitlines()
    for module in modules:
        if module.startswith(module_name) and module[len(module_name)] == ' ':
            return True
    return False


def get_loaded_modules():
    lsmod_output = utils.system_output('/sbin/lsmod').splitlines()[1:]
    return [line.split(None, 1)[0] for line in lsmod_output]
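
# Usage sketch: load a module for a test and unload it (and whatever came to
# depend on it) afterwards; the module name is illustrative:
#
#   if load_module('loop'):
#       logging.info('loop was loaded by this test')
#   ...
#   unload_module('loop')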


def get_huge_page_size():
    output = utils.system_output('grep Hugepagesize /proc/meminfo')
    return int(output.split()[1]) # Assumes units always in kB. :(


def get_num_huge_pages():
    raw_hugepages = utils.system_output('/sbin/sysctl vm.nr_hugepages')
    return int(raw_hugepages.split()[2])


def set_num_huge_pages(num):
    utils.system('/sbin/sysctl vm.nr_hugepages=%d' % num)
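
# Usage sketch: reserve enough huge pages to back, say, 256 MB, given that
# get_huge_page_size() reports the page size in kB:
#
#   hp_kb = get_huge_page_size()              # e.g. 2048 on x86_64
#   set_num_huge_pages((256 * 1024) / hp_kb)  # -> vm.nr_hugepages=128
#   logging.info('now %d huge pages', get_num_huge_pages())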


def get_cpu_vendor():
    cpuinfo = open('/proc/cpuinfo').read()
    vendors = re.findall(r'(?m)^vendor_id\s*:\s*(\S+)\s*$', cpuinfo)
    for i in xrange(1, len(vendors)):
        if vendors[i] != vendors[0]:
            raise error.TestError('multiple cpu vendors found: ' + str(vendors))
    return vendors[0]


def probe_cpus():
    """
    This routine returns a list of cpu devices found under
    /sys/devices/system/cpu.
    """
    cmd = 'find /sys/devices/system/cpu/ -maxdepth 1 -type d -name cpu*'
    return utils.system_output(cmd).splitlines()


def ping_default_gateway():
    """Ping the default gateway."""

    network = open('/etc/sysconfig/network')
    m = re.search(r'GATEWAY=(\S+)', network.read())

    if m:
        gw = m.group(1)
        cmd = 'ping %s -c 5 > /dev/null' % gw
        return utils.system(cmd, ignore_status=True)

    raise error.TestError('Unable to find default gateway')


def drop_caches():
    """Writes back all dirty pages to disk and clears all the caches."""
    utils.system("sync")
    # We ignore failures here as this will fail on 2.6.11 kernels.
    utils.system("echo 3 > /proc/sys/vm/drop_caches", ignore_status=True)


def process_is_alive(name_pattern):
    """
    'pgrep name' misses all python processes and also long process names.
    'pgrep -f name' gets all shell commands with name in args.
    So look only for command whose initial pathname ends with name.
    Name itself is an egrep pattern, so it can use | etc for variations.
    """
    return utils.system("pgrep -f '^([^ /]*/)*(%s)([ ]|$)'" % name_pattern,
                        ignore_status=True) == 0
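
# Usage sketch: the argument is an egrep pattern matched against the
# command's basename, so alternations work:
#
#   if process_is_alive('apache2|httpd'):
#       logging.info('a web server is running')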


def get_hwclock_seconds(utc=True):
    """
    Return the hardware clock in seconds as a floating point value.
    Use Coordinated Universal Time if utc is True, local time otherwise.
    Raise a ValueError if unable to read the hardware clock.
    """
    cmd = '/sbin/hwclock --debug'
    if utc:
        cmd += ' --utc'
    hwclock_output = utils.system_output(cmd, ignore_status=True)
    match = re.search(r'= ([0-9]+) seconds since .+ (-?[0-9.]+) seconds$',
                      hwclock_output, re.DOTALL)
    if match:
        seconds = int(match.group(1)) + float(match.group(2))
        logging.debug('hwclock seconds = %f' % seconds)
        return seconds

    raise ValueError('Unable to read the hardware clock -- ' +
                     hwclock_output)


def set_wake_alarm(alarm_time):
    """
    Set the hardware RTC-based wake alarm to 'alarm_time'.
    """
    utils.write_one_line('/sys/class/rtc/rtc0/wakealarm', str(alarm_time))


def set_power_state(state):
    """
    Set the system power state to 'state'.
    """
    utils.write_one_line('/sys/power/state', state)


def standby():
    """
    Power-on suspend (S1)
    """
    set_power_state('standby')


def suspend_to_ram():
    """
    Suspend the system to RAM (S3)
    """
    set_power_state('mem')


def suspend_to_disk():
    """
    Suspend the system to disk (S4)
    """
    set_power_state('disk')
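
# Usage sketch: arm the RTC to fire in 60 seconds and suspend to RAM, so the
# machine wakes itself back up (assumes rtc0 is wakeup-capable; 'time' is a
# standard-library import used only in this sketch):
#
#   import time
#   set_wake_alarm(int(time.time()) + 60)
#   suspend_to_ram()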
798