# Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import glob, logging, re
from optparse import OptionParser

from autotest_lib.client.bin import test
from autotest_lib.client.bin import utils

class platform_LibCBench(test.test):
    version = 1

    iteration_output = []
    GOVERNOR_FILE = '/sys/devices/system/cpu/cpu*/cpufreq/scaling_governor'

    def run_once(self, args=[]):
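        """Run libc-bench and collect the raw output of each run.

        The CPU frequency scaling governor is switched to 'performance'
        while the benchmark runs and restored to its previous setting
        afterwards.

        @param args: command line style arguments; recognized options are
                     -i/--iterations (number of runs, default 3) and
                     --path (location of the libc-bench binary).
        """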
        parser = OptionParser()
        parser.add_option('-i',
                          '--iterations',
                          dest='iterations',
                          default=3,
                          help='Number of iterations to run.')
        parser.add_option('--path',
                          dest='path',
                          default='/usr/local/libc-bench/libc-bench',
                          help='Path to the libc-bench binary.')

        options, args = parser.parse_args(args)

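        # Force every CPU to the 'performance' scaling governor so that
        # frequency scaling does not skew the results; the current governor
        # settings are saved so they can be restored after the run.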
        last_governor_modes = []
        governor_paths = glob.glob(self.GOVERNOR_FILE)
        for path in governor_paths:
            mode = utils.system_output('cat %s' % path)
            last_governor_modes.append(mode)
            utils.system('sudo bash -c "echo performance > %s"' % path)

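        # Run the benchmark binary the requested number of times, saving the
        # raw output of each run for postprocess_iteration().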
        for i in xrange(int(options.iterations)):
            self.iteration_output.append(utils.system_output(options.path))

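        # Restore the scaling governor each CPU was using before the run.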
        for i in xrange(len(governor_paths)):
            utils.system('sudo bash -c "echo %s > %s"' %
                         (last_governor_modes[i], governor_paths[i]))

    def postprocess_iteration(self):
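        """Parse the collected benchmark output and report perf keyvals.

        For every benchmark the minimum, maximum, spread and average of the
        measured times across iterations are logged; the average is written
        out as the perf keyval for that benchmark.
        """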
        results = {}

        current_benchmark = None
        # Process the output of the benchmarks.
        # Output for each benchmark looks like the following:
        # b_<benchmark_1>
        #   time: ..., x: ..., y: ..., z: ...
        for output in self.iteration_output:
            for line in output.split('\n'):
                if line.startswith('b_'):
                    current_benchmark = line
                elif line.strip().startswith('time'):
                    time = float(line.strip().split(',')[0].split(' ')[1])
                    assert(current_benchmark is not None)
                    results.setdefault(current_benchmark, []).append(time)

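        # Summarize each benchmark across iterations and report the average
        # time as its perf keyval.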
        perf_results = {}
        for benchmark in results:
            average = sum(results[benchmark]) / len(results[benchmark])
            minimum = min(results[benchmark])
            maximum = max(results[benchmark])
            difference = maximum - minimum
            percent_difference = difference / average * 100

            logging.info('%s:\tmin=%s\tmax=%s\tdiff=%s\tavg=%s\tpercent=%s' %
                         (benchmark, minimum, maximum, difference, average,
                          percent_difference))

            key_string = re.sub(r'[^\w]', '_', benchmark)
            perf_results[key_string] = average

        self.write_perf_keyval(perf_results)