base_utils.py revision 171137407ec2d93db48123a4331929c8e9290cbb
1""" 2DO NOT import this file directly - import client/bin/utils.py, 3which will mix this in 4 5Convenience functions for use by tests or whomever. 6 7Note that this file is mixed in by utils.py - note very carefully the 8precedence order defined there 9""" 10import os, shutil, commands, pickle, glob 11import math, re, fnmatch, logging, multiprocessing 12from autotest_lib.client.common_lib import error, utils, magic 13 14 15def grep(pattern, file): 16 """ 17 This is mainly to fix the return code inversion from grep 18 Also handles compressed files. 19 20 returns 1 if the pattern is present in the file, 0 if not. 21 """ 22 command = 'grep "%s" > /dev/null' % pattern 23 ret = cat_file_to_cmd(file, command, ignore_status=True) 24 return not ret 25 26 27def difflist(list1, list2): 28 """returns items in list2 that are not in list1""" 29 diff = []; 30 for x in list2: 31 if x not in list1: 32 diff.append(x) 33 return diff 34 35 36def cat_file_to_cmd(file, command, ignore_status=0, return_output=False): 37 """ 38 equivalent to 'cat file | command' but knows to use 39 zcat or bzcat if appropriate 40 """ 41 if not os.path.isfile(file): 42 raise NameError('invalid file %s to cat to command %s' 43 % (file, command)) 44 45 if return_output: 46 run_cmd = utils.system_output 47 else: 48 run_cmd = utils.system 49 50 if magic.guess_type(file) == 'application/x-bzip2': 51 cat = 'bzcat' 52 elif magic.guess_type(file) == 'application/x-gzip': 53 cat = 'zcat' 54 else: 55 cat = 'cat' 56 return run_cmd('%s %s | %s' % (cat, file, command), 57 ignore_status=ignore_status) 58 59 60def extract_tarball_to_dir(tarball, dir): 61 """ 62 Extract a tarball to a specified directory name instead of whatever 63 the top level of a tarball is - useful for versioned directory names, etc 64 """ 65 if os.path.exists(dir): 66 if os.path.isdir(dir): 67 shutil.rmtree(dir) 68 else: 69 os.remove(dir) 70 pwd = os.getcwd() 71 os.chdir(os.path.dirname(os.path.abspath(dir))) 72 newdir = extract_tarball(tarball) 
73 os.rename(newdir, dir) 74 os.chdir(pwd) 75 76 77def extract_tarball(tarball): 78 """Returns the directory extracted by the tarball.""" 79 extracted = cat_file_to_cmd(tarball, 'tar xvf - 2>/dev/null', 80 return_output=True).splitlines() 81 82 dir = None 83 84 for line in extracted: 85 line = re.sub(r'^./', '', line) 86 if not line or line == '.': 87 continue 88 topdir = line.split('/')[0] 89 if os.path.isdir(topdir): 90 if dir: 91 assert(dir == topdir) 92 else: 93 dir = topdir 94 if dir: 95 return dir 96 else: 97 raise NameError('extracting tarball produced no dir') 98 99 100def hash_file(filename, size=None, method="md5"): 101 """ 102 Calculate the hash of filename. 103 If size is not None, limit to first size bytes. 104 Throw exception if something is wrong with filename. 105 Can be also implemented with bash one-liner (assuming size%1024==0): 106 dd if=filename bs=1024 count=size/1024 | sha1sum - 107 108 @param filename: Path of the file that will have its hash calculated. 109 @param method: Method used to calculate the hash. Supported methods: 110 * md5 111 * sha1 112 @returns: Hash of the file, if something goes wrong, return None. 113 """ 114 chunksize = 4096 115 fsize = os.path.getsize(filename) 116 117 if not size or size > fsize: 118 size = fsize 119 f = open(filename, 'rb') 120 121 try: 122 hash = utils.hash(method) 123 except ValueError: 124 logging.error("Unknown hash type %s, returning None", method) 125 126 while size > 0: 127 if chunksize > size: 128 chunksize = size 129 data = f.read(chunksize) 130 if len(data) == 0: 131 logging.debug("Nothing left to read but size=%d", size) 132 break 133 hash.update(data) 134 size -= len(data) 135 f.close() 136 return hash.hexdigest() 137 138 139def unmap_url_cache(cachedir, url, expected_hash, method="md5"): 140 """ 141 Downloads a file from a URL to a cache directory. If the file is already 142 at the expected position and has the expected hash, let's not download it 143 again. 
144 145 @param cachedir: Directory that might hold a copy of the file we want to 146 download. 147 @param url: URL for the file we want to download. 148 @param expected_hash: Hash string that we expect the file downloaded to 149 have. 150 @param method: Method used to calculate the hash string (md5, sha1). 151 """ 152 # Let's convert cachedir to a canonical path, if it's not already 153 cachedir = os.path.realpath(cachedir) 154 if not os.path.isdir(cachedir): 155 try: 156 os.makedirs(cachedir) 157 except: 158 raise ValueError('Could not create cache directory %s' % cachedir) 159 file_from_url = os.path.basename(url) 160 file_local_path = os.path.join(cachedir, file_from_url) 161 162 file_hash = None 163 failure_counter = 0 164 while not file_hash == expected_hash: 165 if os.path.isfile(file_local_path): 166 file_hash = hash_file(file_local_path, method) 167 if file_hash == expected_hash: 168 # File is already at the expected position and ready to go 169 src = file_from_url 170 else: 171 # Let's download the package again, it's corrupted... 172 logging.error("Seems that file %s is corrupted, trying to " 173 "download it again", file_from_url) 174 src = url 175 failure_counter += 1 176 else: 177 # File is not there, let's download it 178 src = url 179 if failure_counter > 1: 180 raise EnvironmentError("Consistently failed to download the " 181 "package %s. Aborting further download " 182 "attempts. 
This might mean either the " 183 "network connection has problems or the " 184 "expected hash string that was determined " 185 "for this file is wrong", file_from_url) 186 file_path = utils.unmap_url(cachedir, src, cachedir) 187 188 return file_path 189 190 191def force_copy(src, dest): 192 """Replace dest with a new copy of src, even if it exists""" 193 if os.path.isfile(dest): 194 os.remove(dest) 195 if os.path.isdir(dest): 196 dest = os.path.join(dest, os.path.basename(src)) 197 shutil.copyfile(src, dest) 198 return dest 199 200 201def force_link(src, dest): 202 """Link src to dest, overwriting it if it exists""" 203 return utils.system("ln -sf %s %s" % (src, dest)) 204 205 206def file_contains_pattern(file, pattern): 207 """Return true if file contains the specified egrep pattern""" 208 if not os.path.isfile(file): 209 raise NameError('file %s does not exist' % file) 210 return not utils.system('egrep -q "' + pattern + '" ' + file, ignore_status=True) 211 212 213def list_grep(list, pattern): 214 """True if any item in list matches the specified pattern.""" 215 compiled = re.compile(pattern) 216 for line in list: 217 match = compiled.search(line) 218 if (match): 219 return 1 220 return 0 221 222 223def get_os_vendor(): 224 """Try to guess what's the os vendor 225 """ 226 if os.path.isfile('/etc/SuSE-release'): 227 return 'SUSE' 228 229 issue = '/etc/issue' 230 231 if not os.path.isfile(issue): 232 return 'Unknown' 233 234 if file_contains_pattern(issue, 'Red Hat'): 235 return 'Red Hat' 236 elif file_contains_pattern(issue, 'Fedora'): 237 return 'Fedora Core' 238 elif file_contains_pattern(issue, 'SUSE'): 239 return 'SUSE' 240 elif file_contains_pattern(issue, 'Ubuntu'): 241 return 'Ubuntu' 242 elif file_contains_pattern(issue, 'Debian'): 243 return 'Debian' 244 else: 245 return 'Unknown' 246 247 248def get_cc(): 249 try: 250 return os.environ['CC'] 251 except KeyError: 252 return 'gcc' 253 254 255def get_vmlinux(): 256 """Return the full path to vmlinux 257 258 
Ahem. This is crap. Pray harder. Bad Martin. 259 """ 260 vmlinux = '/boot/vmlinux-%s' % utils.system_output('uname -r') 261 if os.path.isfile(vmlinux): 262 return vmlinux 263 vmlinux = '/lib/modules/%s/build/vmlinux' % utils.system_output('uname -r') 264 if os.path.isfile(vmlinux): 265 return vmlinux 266 return None 267 268 269def get_systemmap(): 270 """Return the full path to System.map 271 272 Ahem. This is crap. Pray harder. Bad Martin. 273 """ 274 map = '/boot/System.map-%s' % utils.system_output('uname -r') 275 if os.path.isfile(map): 276 return map 277 map = '/lib/modules/%s/build/System.map' % utils.system_output('uname -r') 278 if os.path.isfile(map): 279 return map 280 return None 281 282 283def get_modules_dir(): 284 """Return the modules dir for the running kernel version""" 285 kernel_version = utils.system_output('uname -r') 286 return '/lib/modules/%s/kernel' % kernel_version 287 288 289def get_cpu_arch(): 290 """Work out which CPU architecture we're running on""" 291 f = open('/proc/cpuinfo', 'r') 292 cpuinfo = f.readlines() 293 f.close() 294 if list_grep(cpuinfo, '^cpu.*(RS64|POWER3|Broadband Engine)'): 295 return 'power' 296 elif list_grep(cpuinfo, '^cpu.*POWER4'): 297 return 'power4' 298 elif list_grep(cpuinfo, '^cpu.*POWER5'): 299 return 'power5' 300 elif list_grep(cpuinfo, '^cpu.*POWER6'): 301 return 'power6' 302 elif list_grep(cpuinfo, '^cpu.*POWER7'): 303 return 'power7' 304 elif list_grep(cpuinfo, '^cpu.*PPC970'): 305 return 'power970' 306 elif list_grep(cpuinfo, 'ARM'): 307 return 'arm' 308 elif list_grep(cpuinfo, '^flags.*:.* lm .*'): 309 return 'x86_64' 310 else: 311 return 'i386' 312 313def get_arm_soc_family(): 314 """Work out which ARM SoC we're running on""" 315 f = open('/proc/cpuinfo', 'r') 316 cpuinfo = f.readlines() 317 f.close() 318 if list_grep(cpuinfo, 'EXYNOS5'): 319 return 'exynos5' 320 elif list_grep(cpuinfo, 'Tegra'): 321 return 'tegra' 322 return 'arm' 323 324def get_cpu_soc_family(): 325 """Like get_cpu_arch, but for ARM, 
returns the SoC family name""" 326 family = get_cpu_arch() 327 if family == 'arm': 328 family = get_arm_soc_family() 329 return family 330 331def get_current_kernel_arch(): 332 """Get the machine architecture, now just a wrap of 'uname -m'.""" 333 return os.popen('uname -m').read().rstrip() 334 335 336def get_file_arch(filename): 337 # -L means follow symlinks 338 file_data = utils.system_output('file -L ' + filename) 339 if file_data.count('80386'): 340 return 'i386' 341 return None 342 343 344def count_cpus(): 345 """number of CPUs in the local machine according to /proc/cpuinfo""" 346 try: 347 return multiprocessing.cpu_count() 348 except Exception as e: 349 logging.exception('can not get cpu count from' 350 ' multiprocessing.cpu_count()') 351 352 f = file('/proc/cpuinfo', 'r') 353 cpus = 0 354 for line in f.readlines(): 355 # Matches lines like "processor : 0" 356 if re.search(r'^processor\s*:\s*[0-9]+$', line): 357 cpus += 1 358 # Returns at least one cpu. Check comment #1 in crosbug.com/p/9582. 359 return cpus if cpus > 0 else 1 360 361 362# Returns total memory in kb 363def read_from_meminfo(key): 364 meminfo = utils.system_output('grep %s /proc/meminfo' % key) 365 return int(re.search(r'\d+', meminfo).group(0)) 366 367 368def memtotal(): 369 return read_from_meminfo('MemTotal') 370 371 372def freememtotal(): 373 return read_from_meminfo('MemFree') 374 375 376def rounded_memtotal(): 377 # Get total of all physical mem, in kbytes 378 usable_kbytes = memtotal() 379 # usable_kbytes is system's usable DRAM in kbytes, 380 # as reported by memtotal() from device /proc/meminfo memtotal 381 # after Linux deducts 1.5% to 5.1% for system table overhead 382 # Undo the unknown actual deduction by rounding up 383 # to next small multiple of a big power-of-two 384 # eg 12GB - 5.1% gets rounded back up to 12GB 385 mindeduct = 0.015 # 1.5 percent 386 maxdeduct = 0.055 # 5.5 percent 387 # deduction range 1.5% .. 5.5% supports physical mem sizes 388 # 6GB .. 
12GB in steps of .5GB 389 # 12GB .. 24GB in steps of 1 GB 390 # 24GB .. 48GB in steps of 2 GB ... 391 # Finer granularity in physical mem sizes would require 392 # tighter spread between min and max possible deductions 393 394 # increase mem size by at least min deduction, without rounding 395 min_kbytes = int(usable_kbytes / (1.0 - mindeduct)) 396 # increase mem size further by 2**n rounding, by 0..roundKb or more 397 round_kbytes = int(usable_kbytes / (1.0 - maxdeduct)) - min_kbytes 398 # find least binary roundup 2**n that covers worst-cast roundKb 399 mod2n = 1 << int(math.ceil(math.log(round_kbytes, 2))) 400 # have round_kbytes <= mod2n < round_kbytes*2 401 # round min_kbytes up to next multiple of mod2n 402 phys_kbytes = min_kbytes + mod2n - 1 403 phys_kbytes = phys_kbytes - (phys_kbytes % mod2n) # clear low bits 404 return phys_kbytes 405 406 407def sysctl(key, value=None): 408 """Generic implementation of sysctl, to read and write. 409 410 @param key: A location under /proc/sys 411 @param value: If not None, a value to write into the sysctl. 412 413 @return The single-line sysctl value as a string. 414 """ 415 path = '/proc/sys/%s' % key 416 if value is not None: 417 utils.write_one_line(path, str(value)) 418 return utils.read_one_line(path) 419 420 421def sysctl_kernel(key, value=None): 422 """(Very) partial implementation of sysctl, for kernel params""" 423 if value is not None: 424 # write 425 utils.write_one_line('/proc/sys/kernel/%s' % key, str(value)) 426 else: 427 # read 428 out = utils.read_one_line('/proc/sys/kernel/%s' % key) 429 return int(re.search(r'\d+', out).group(0)) 430 431 432def _convert_exit_status(sts): 433 if os.WIFSIGNALED(sts): 434 return -os.WTERMSIG(sts) 435 elif os.WIFEXITED(sts): 436 return os.WEXITSTATUS(sts) 437 else: 438 # impossible? 439 raise RuntimeError("Unknown exit status %d!" 
% sts) 440 441 442def where_art_thy_filehandles(): 443 """Dump the current list of filehandles""" 444 os.system("ls -l /proc/%d/fd >> /dev/tty" % os.getpid()) 445 446 447def print_to_tty(string): 448 """Output string straight to the tty""" 449 open('/dev/tty', 'w').write(string + '\n') 450 451 452def dump_object(object): 453 """Dump an object's attributes and methods 454 455 kind of like dir() 456 """ 457 for item in object.__dict__.iteritems(): 458 print item 459 try: 460 (key,value) = item 461 dump_object(value) 462 except: 463 continue 464 465 466def environ(env_key): 467 """return the requested environment variable, or '' if unset""" 468 if (os.environ.has_key(env_key)): 469 return os.environ[env_key] 470 else: 471 return '' 472 473 474def prepend_path(newpath, oldpath): 475 """prepend newpath to oldpath""" 476 if (oldpath): 477 return newpath + ':' + oldpath 478 else: 479 return newpath 480 481 482def append_path(oldpath, newpath): 483 """append newpath to oldpath""" 484 if (oldpath): 485 return oldpath + ':' + newpath 486 else: 487 return newpath 488 489 490def avgtime_print(dir): 491 """ Calculate some benchmarking statistics. 492 Input is a directory containing a file called 'time'. 493 File contains one-per-line results of /usr/bin/time. 494 Output is average Elapsed, User, and System time in seconds, 495 and average CPU percentage. 
496 """ 497 f = open(dir + "/time") 498 user = system = elapsed = cpu = count = 0 499 r = re.compile('([\d\.]*)user ([\d\.]*)system (\d*):([\d\.]*)elapsed (\d*)%CPU') 500 for line in f.readlines(): 501 try: 502 s = r.match(line); 503 user += float(s.group(1)) 504 system += float(s.group(2)) 505 elapsed += (float(s.group(3)) * 60) + float(s.group(4)) 506 cpu += float(s.group(5)) 507 count += 1 508 except: 509 raise ValueError("badly formatted times") 510 511 f.close() 512 return "Elapsed: %0.2fs User: %0.2fs System: %0.2fs CPU: %0.0f%%" % \ 513 (elapsed/count, user/count, system/count, cpu/count) 514 515 516def running_config(): 517 """ 518 Return path of config file of the currently running kernel 519 """ 520 version = utils.system_output('uname -r') 521 for config in ('/proc/config.gz', \ 522 '/boot/config-%s' % version, 523 '/lib/modules/%s/build/.config' % version): 524 if os.path.isfile(config): 525 return config 526 return None 527 528 529def check_for_kernel_feature(feature): 530 config = running_config() 531 532 if not config: 533 raise TypeError("Can't find kernel config file") 534 535 if magic.guess_type(config) == 'application/x-gzip': 536 grep = 'zgrep' 537 else: 538 grep = 'grep' 539 grep += ' ^CONFIG_%s= %s' % (feature, config) 540 541 if not utils.system_output(grep, ignore_status=True): 542 raise ValueError("Kernel doesn't have a %s feature" % (feature)) 543 544 545def cpu_online_map(): 546 """ 547 Check out the available cpu online map 548 """ 549 cpus = [] 550 for line in open('/proc/cpuinfo', 'r').readlines(): 551 if line.startswith('processor'): 552 cpus.append(line.split()[2]) # grab cpu number 553 return cpus 554 555 556def check_glibc_ver(ver): 557 glibc_ver = commands.getoutput('ldd --version').splitlines()[0] 558 glibc_ver = re.search(r'(\d+\.\d+(\.\d+)?)', glibc_ver).group() 559 if utils.compare_versions(glibc_ver, ver) == -1: 560 raise error.TestError("Glibc too old (%s). Glibc >= %s is needed." 
% 561 (glibc_ver, ver)) 562 563def check_kernel_ver(ver): 564 kernel_ver = utils.system_output('uname -r') 565 kv_tmp = re.split(r'[-]', kernel_ver)[0:3] 566 # In compare_versions, if v1 < v2, return value == -1 567 if utils.compare_versions(kv_tmp[0], ver) == -1: 568 raise error.TestError("Kernel too old (%s). Kernel > %s is needed." % 569 (kernel_ver, ver)) 570 571 572def human_format(number): 573 # Convert number to kilo / mega / giga format. 574 if number < 1024: 575 return "%d" % number 576 kilo = float(number) / 1024.0 577 if kilo < 1024: 578 return "%.2fk" % kilo 579 meg = kilo / 1024.0 580 if meg < 1024: 581 return "%.2fM" % meg 582 gig = meg / 1024.0 583 return "%.2fG" % gig 584 585 586def numa_nodes(): 587 node_paths = glob.glob('/sys/devices/system/node/node*') 588 nodes = [int(re.sub(r'.*node(\d+)', r'\1', x)) for x in node_paths] 589 return (sorted(nodes)) 590 591 592def node_size(): 593 nodes = max(len(numa_nodes()), 1) 594 return ((memtotal() * 1024) / nodes) 595 596 597def to_seconds(time_string): 598 """Converts a string in M+:SS.SS format to S+.SS""" 599 elts = time_string.split(':') 600 if len(elts) == 1: 601 return time_string 602 return str(int(elts[0]) * 60 + float(elts[1])) 603 604 605def extract_all_time_results(results_string): 606 """Extract user, system, and elapsed times into a list of tuples""" 607 pattern = re.compile(r"(.*?)user (.*?)system (.*?)elapsed") 608 results = [] 609 for result in pattern.findall(results_string): 610 results.append(tuple([to_seconds(elt) for elt in result])) 611 return results 612 613 614def pickle_load(filename): 615 return pickle.load(open(filename, 'r')) 616 617 618# Return the kernel version and build timestamp. 
def running_os_release():
    """Return (version, build timestamp) of the running kernel, per uname."""
    return os.uname()[2:4]


def running_os_ident():
    """Return a 'version::timestamp' identifier for the running kernel."""
    (version, timestamp) = running_os_release()
    return version + '::' + timestamp


def running_os_full_version():
    """Return just the version string of the running kernel."""
    (version, timestamp) = running_os_release()
    return version


# much like find . -name 'pattern'
def locate(pattern, root=None):
    """Yield absolute paths under root whose basename matches pattern.

    @param pattern: An fnmatch-style filename pattern.
    @param root: Directory to search; defaults to the current working
            directory at call time. (The old default of os.getcwd() was
            evaluated once at import time, silently pinning the
            import-time cwd for every later call.)
    """
    if root is None:
        root = os.getcwd()
    for path, dirs, files in os.walk(root):
        for f in files:
            if fnmatch.fnmatch(f, pattern):
                yield os.path.abspath(os.path.join(path, f))


def freespace(path):
    """Return the disk free space, in bytes"""
    s = os.statvfs(path)
    return s.f_bavail * s.f_bsize


def disk_block_size(path):
    """Return the disk block size, in bytes"""
    return os.statvfs(path).f_bsize


def get_cpu_family():
    """Return the 'cpu family' field of /proc/cpuinfo as an int.

    @raises error.TestError: If no cpu family field is found.
    """
    procinfo = utils.system_output('cat /proc/cpuinfo')
    CPU_FAMILY_RE = re.compile(r'^cpu family\s+:\s+(\S+)', re.M)
    matches = CPU_FAMILY_RE.findall(procinfo)
    if matches:
        return int(matches[0])
    else:
        raise error.TestError('Could not get valid cpu family data')


def get_disks():
    """Return the /dev/hd* devices that appear in 'df' output."""
    df_output = utils.system_output('df')
    disk_re = re.compile(r'^(/dev/hd[a-z]+)3', re.M)
    return disk_re.findall(df_output)


def load_module(module_name):
    """Load a kernel module via modprobe.

    @returns False if the module was already loaded, True otherwise.
    """
    # Checks if a module has already been loaded
    if module_is_loaded(module_name):
        return False

    utils.system('/sbin/modprobe ' + module_name)
    return True


def unload_module(module_name):
    """
    Removes a module. Handles dependencies. If even then it's not possible
    to remove one of the modules, it will throw an error.CmdError exception.

    @param module_name: Name of the module we want to remove.
    """
    l_raw = utils.system_output("/bin/lsmod").splitlines()
    lsmod = [x for x in l_raw if x.split()[0] == module_name]
    if len(lsmod) > 0:
        line_parts = lsmod[0].split()
        if len(line_parts) == 4:
            # The fourth lsmod field lists the modules that use this one;
            # unload them first so 'modprobe -r' can succeed.
            submodules = line_parts[3].split(",")
            for submodule in submodules:
                unload_module(submodule)
        utils.system("/sbin/modprobe -r %s" % module_name)
        logging.info("Module %s unloaded", module_name)
    else:
        logging.info("Module %s is already unloaded", module_name)


def module_is_loaded(module_name):
    """True if module_name (with '-' mapped to '_') appears in lsmod."""
    module_name = module_name.replace('-', '_')
    modules = utils.system_output('/bin/lsmod').splitlines()
    for module in modules:
        # Match the full name followed by a space; this is equivalent to
        # the old startswith + module[len(module_name)] == ' ' check but
        # cannot raise IndexError on a line of exactly the name's length.
        if module.startswith(module_name + ' '):
            return True
    return False


def get_loaded_modules():
    """Return the names of all currently loaded kernel modules."""
    lsmod_output = utils.system_output('/bin/lsmod').splitlines()[1:]
    return [line.split(None, 1)[0] for line in lsmod_output]


def get_huge_page_size():
    """Return the huge page size in kB, from /proc/meminfo."""
    output = utils.system_output('grep Hugepagesize /proc/meminfo')
    return int(output.split()[1])  # Assumes units always in kB. :(


def get_num_huge_pages():
    """Return the current vm.nr_hugepages value."""
    raw_hugepages = utils.system_output('/sbin/sysctl vm.nr_hugepages')
    return int(raw_hugepages.split()[2])


def set_num_huge_pages(num):
    """Set vm.nr_hugepages to num."""
    utils.system('/sbin/sysctl vm.nr_hugepages=%d' % num)


def get_cpu_vendor():
    """Return the vendor_id shared by all CPUs in /proc/cpuinfo.

    @raises error.TestError: If CPUs disagree on vendor_id.
    """
    f = open('/proc/cpuinfo')
    try:
        cpuinfo = f.read()
    finally:
        # The old code leaked this file handle.
        f.close()
    vendors = re.findall(r'(?m)^vendor_id\s*:\s*(\S+)\s*$', cpuinfo)
    for i in xrange(1, len(vendors)):
        if vendors[i] != vendors[0]:
            raise error.TestError('multiple cpu vendors found: ' + str(vendors))
    return vendors[0]


def probe_cpus():
    """
    This routine returns a list of cpu devices found under
    /sys/devices/system/cpu.
    """
    cmd = 'find /sys/devices/system/cpu/ -maxdepth 1 -type d -name cpu*'
    return utils.system_output(cmd).splitlines()


def ping_default_gateway():
    """Ping the default gateway."""

    network = open('/etc/sysconfig/network')
    try:
        m = re.search('GATEWAY=(\S+)', network.read())
    finally:
        network.close()

    if m:
        gw = m.group(1)
        cmd = 'ping %s -c 5 > /dev/null' % gw
        return utils.system(cmd, ignore_status=True)

    raise error.TestError('Unable to find default gateway')


def drop_caches():
    """Writes back all dirty pages to disk and clears all the caches."""
    utils.system("sync")
    # We ignore failures here as this will fail on 2.6.11 kernels.
    utils.system("echo 3 > /proc/sys/vm/drop_caches", ignore_status=True)


def process_is_alive(name_pattern):
    """
    'pgrep name' misses all python processes and also long process names.
    'pgrep -f name' gets all shell commands with name in args.
    So look only for command whose initial pathname ends with name.
    Name itself is an egrep pattern, so it can use | etc for variations.
    """
    return utils.system("pgrep -f '^([^ /]*/)*(%s)([ ]|$)'" % name_pattern,
                        ignore_status=True) == 0


def get_hwclock_seconds(utc=True):
    """
    Return the hardware clock in seconds as a floating point value.
    Use Coordinated Universal Time if utc is True, local time otherwise.
    Raise a ValueError if unable to read the hardware clock.
    """
    cmd = '/sbin/hwclock --debug'
    if utc:
        cmd += ' --utc'
    hwclock_output = utils.system_output(cmd, ignore_status=True)
    # The debug output ends with the whole seconds since the epoch plus a
    # fractional adjustment; sum both for the full value.
    match = re.search(r'= ([0-9]+) seconds since .+ (-?[0-9.]+) seconds$',
                      hwclock_output, re.DOTALL)
    if match:
        seconds = int(match.group(1)) + float(match.group(2))
        logging.debug('hwclock seconds = %f', seconds)
        return seconds

    raise ValueError('Unable to read the hardware clock -- ' +
                     hwclock_output)


def set_wake_alarm(alarm_time):
    """
    Set the hardware RTC-based wake alarm to 'alarm_time'.
    """
    utils.write_one_line('/sys/class/rtc/rtc0/wakealarm', str(alarm_time))


def set_power_state(state):
    """
    Set the system power state to 'state'.
    """
    utils.write_one_line('/sys/power/state', state)


def standby():
    """
    Power-on suspend (S1)
    """
    set_power_state('standby')


def suspend_to_ram():
    """
    Suspend the system to RAM (S3)
    """
    set_power_state('mem')


def suspend_to_disk():
    """
    Suspend the system to disk (S4)
    """
    set_power_state('disk')