report.py revision 8720abc26202a8f25dce60d4bdefed5dfeb76b4f
#!/usr/bin/python
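"""
Generate energy/performance comparison reports for test results.

Load results.json from a results directory (generating it first from the
raw results, if needed) and print, for each workload and test id, a table
comparing every 'base' configuration against the matching 'test' ones.
"""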

import argparse
import fnmatch as fnm
import json
import math
import numpy as np
import os
import re
import sys

from collections import defaultdict
from colors import TestColors
from results import Results

# Configure logging: reload first to drop any configuration which may have
# been set up by an imported module
import logging
reload(logging)
logging.basicConfig(
    format='%(asctime)-9s %(levelname)-8s: %(message)s',
    level=logging.DEBUG,
    # level=logging.INFO,
    datefmt='%I:%M:%S')

# By default compare all the possible 'base_' vs 'test_' combinations
DEFAULT_COMPARE = [(r'base_', r'test_')]
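# For example, given the configurations 'base_noeas', 'test_eas1' and
# 'test_eas2' (hypothetical names), the default compares 'base_noeas'
# against each of the two 'test_' configurations.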

class Report(object):
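    """
    Generate energy vs performance comparison reports from a test results
    directory, for both RTApp and default workloads.
    """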
    def __init__(self, results_dir, compare=None, formats=['relative']):
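        """
        :param results_dir: directory containing results.json (generated
                            from the raw results if not already present)
        :param compare: list of (base_regexp, test_regexp) pairs; defaults
                        to DEFAULT_COMPARE, i.e. every 'base_' vs 'test_'
        :param formats: report formats, 'absolute' and/or 'relative'
        """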
        self.results_json = os.path.join(results_dir, 'results.json')
        self.results = {}

        self.compare = []

        # Parse results (if not already done)
        if not os.path.isfile(self.results_json):
            Results(results_dir)

        # Load results from file (if already parsed)
        logging.info('%14s - Load results from [%s]...',
                'Results', self.results_json)
        with open(self.results_json) as infile:
            self.results = json.load(infile)

        # Setup configuration comparisons
        if compare is None:
            compare = DEFAULT_COMPARE
            logging.warning('%14s - Comparing all the possible combinations',
                    'Results')
        for (base_rexp, test_rexp) in compare:
            logging.info('Configured regexps for comparisons (base, test): (%s, %s)',
                    base_rexp, test_rexp)
            base_rexp = re.compile(base_rexp, re.DOTALL)
            test_rexp = re.compile(test_rexp, re.DOTALL)
            self.compare.append((base_rexp, test_rexp))

        # Report all supported workload classes
        self.__rtapp_report(formats)
        self.__default_report(formats)

    ############################### REPORT RTAPP ###############################

    def __rtapp_report(self, formats):
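        """Dump a comparison table for all the RTApp workloads."""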

        if 'rtapp' not in self.results:
            logging.debug('%14s - No RTApp workloads to report', 'ReportRTApp')
            return

        logging.debug('%14s - Reporting RTApp workloads', 'ReportRTApp')

        # Set up labels depending on the requested report
        if 'absolute' in formats:
            nrg_label = 'Energy Indexes (Absolute)'
            prf_label = 'Performance Indexes (Absolute)'
            logging.info('')
            logging.info('%14s - Absolute comparisons:', 'Report')
            print ''
        else:
            nrg_label = 'Energy Indexes (Relative)'
            prf_label = 'Performance Indexes (Relative)'
            logging.info('')
            logging.info('%14s - Relative comparisons:', 'Report')
            print ''

        # Dump headers
        print '{:9s}   {:20s} |'\
                ' {:33s} | {:54s} |'\
                .format('Test Id', 'Comparison',
                        nrg_label, prf_label)
        print '{:9s}   {:20s} |'\
                ' {:>10s} {:>10s} {:>10s}  |'\
                ' {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} |'\
                .format('', '',
                        'LITTLE', 'big', 'Total',
                        'PerfIndex', 'NegSlacks', 'EDP1', 'EDP2', 'EDP3')

        # For each test...
        _results = self.results['rtapp']
        for tid in sorted(_results.keys()):
            new_test = True
            # For each configuration...
            for base_idx in sorted(_results[tid].keys()):
                # ...which matches at least one base regexp
                for (base_rexp, test_rexp) in self.compare:
                    if not base_rexp.match(base_idx):
                        continue
                    # Look for a configuration which matches the test regexp
                    for test_idx in sorted(_results[tid].keys()):
                        if test_idx == base_idx:
                            continue
                        if new_test:
                            print '{:-<33s}+{:-<35s}+{:-<56s}+'\
                                    .format('', '', '')
                            self.__rtapp_reference(tid, base_idx)
                            new_test = False
                        if not test_rexp.match(test_idx):
                            continue
                        self.__rtapp_compare(tid, base_idx, test_idx, formats)

        print ''

    def __rtapp_reference(self, tid, base_idx):
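        """Dump the reference (base) energy and performance values which
        the following comparison rows refer to."""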
        _results = self.results['rtapp']

        logging.debug('Test %s: compare against [%s] base',
                tid, base_idx)
        res_line = '{0:8s}: {1:22s} | '.format(tid, base_idx)

        # Dump all energy metrics
        for cpus in ['LITTLE', 'big', 'Total']:
            res_base = _results[tid][base_idx]['energy'][cpus]['avg']
            # Dump absolute values
            res_line += ' {0:10.3f}'.format(res_base)
        res_line += ' |'

        # If available, also dump performance results
        if 'performance' not in _results[tid][base_idx]:
            print res_line
            return

        for pidx in ['perf_avg', 'slack_pct', 'edp1', 'edp2', 'edp3']:
            res_base = _results[tid][base_idx]['performance'][pidx]['avg']

            logging.debug('idx: %s, base: %s', pidx, res_base)

            if pidx in ['perf_avg']:
                res_line += ' {0:s}'.format(TestColors.rate(res_base))
                continue
            if pidx in ['slack_pct']:
                res_line += ' {0:s}'.format(
                        TestColors.rate(res_base, positive_is_good=False))
                continue
            if 'edp' in pidx:
                res_line += ' {0:10.2e}'.format(res_base)
                continue
        res_line += ' |'
        print res_line

    def __rtapp_compare(self, tid, base_idx, test_idx, formats):
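        """Dump the comparison of a test configuration against the base
        one for an RTApp workload."""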
        _results = self.results['rtapp']

        logging.debug('Test %s: compare %s with %s',
                tid, base_idx, test_idx)
        res_line = '{0:8s}:   {1:20s} | '.format(tid, test_idx)

        # Dump all energy metrics
        for cpus in ['LITTLE', 'big', 'Total']:
            res_base = _results[tid][base_idx]['energy'][cpus]['avg']
            res_test = _results[tid][test_idx]['energy'][cpus]['avg']
            speedup_cnt = res_test - res_base
            if 'absolute' in formats:
                res_line += ' {0:10.2f}'.format(speedup_cnt)
            else:
                speedup_pct = 100.0 * speedup_cnt / res_base
                res_line += ' {0:s}'\
                        .format(TestColors.rate(
                            speedup_pct,
                            positive_is_good=False))
        res_line += ' |'

        # If available, also dump performance results
        if 'performance' not in _results[tid][base_idx]:
            print res_line
            return

        for pidx in ['perf_avg', 'slack_pct', 'edp1', 'edp2', 'edp3']:
            res_base = _results[tid][base_idx]['performance'][pidx]['avg']
            res_test = _results[tid][test_idx]['performance'][pidx]['avg']

            logging.debug('idx: %s, base: %s, test: %s',
                    pidx, res_base, res_test)

            if pidx in ['perf_avg']:
                res_line += ' {0:s}'.format(TestColors.rate(res_test))
                continue

            if pidx in ['slack_pct']:
                res_line += ' {0:s}'.format(
                        TestColors.rate(res_test, positive_is_good=False))
                continue

            # Compute the base-vs-test difference: EDP is better when
            # lower, thus a positive difference is an improvement
            if 'edp' in pidx:
                speedup_cnt = res_base - res_test
                if 'absolute' in formats:
                    res_line += ' {0:10.2e}'.format(speedup_cnt)
                else:
                    speedup_pct = 100.0 * speedup_cnt / res_base
                    res_line += ' {0:s}'.format(TestColors.rate(speedup_pct))

        res_line += ' |'
        print res_line

    ############################### REPORT DEFAULT #############################

    def __default_report(self, formats):
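        """Dump a comparison table for all the default workloads."""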

        # Build the list of workload types which can be rendered using the
        # default parser
        wtypes = []
        for supported_wtype in DEFAULT_WTYPES:
            if supported_wtype in self.results:
                wtypes.append(supported_wtype)

        if not wtypes:
            logging.debug('%14s - No Default workloads to report', 'ReportDefault')
            return

        logging.debug('%14s - Reporting Default workloads', 'ReportDefault')

        # Set up labels depending on the requested report
        if 'absolute' in formats:
            nrg_label = 'Energy Indexes (Absolute)'
            prf_label = 'Performance Indexes (Absolute)'
            logging.info('')
            logging.info('%14s - Absolute comparisons:', 'Report')
            print ''
        else:
            nrg_label = 'Energy Indexes (Relative)'
            prf_label = 'Performance Indexes (Relative)'
            logging.info('')
            logging.info('%14s - Relative comparisons:', 'Report')
            print ''

        # Dump headers
        print '{:9s}   {:20s} |'\
                ' {:33s} | {:54s} |'\
                .format('Test Id', 'Comparison',
                        nrg_label, prf_label)
        print '{:9s}   {:20s} |'\
                ' {:>10s} {:>10s} {:>10s}  |'\
                ' {:>10s} {:>10s} {:>10s} {:>10s} {:>10s} |'\
                .format('', '',
                        'LITTLE', 'big', 'Total',
                        'Perf', 'CTime', 'EDP1', 'EDP2', 'EDP3')

        # For each default test...
        for wtype in wtypes:
            _results = self.results[wtype]
            for tid in sorted(_results.keys()):
                new_test = True
                # For each configuration...
                for base_idx in sorted(_results[tid].keys()):
                    # ...which matches at least one base regexp
                    for (base_rexp, test_rexp) in self.compare:
                        if not base_rexp.match(base_idx):
                            continue
                        # Look for a configuration which matches the test regexp
                        for test_idx in sorted(_results[tid].keys()):
                            if test_idx == base_idx:
                                continue
                            if new_test:
                                print '{:-<33s}+{:-<35s}+{:-<56s}+'\
                                        .format('', '', '')
                                new_test = False
                            if not test_rexp.match(test_idx):
                                continue
                            self.__default_compare(wtype, tid, base_idx, test_idx, formats)

        print ''

    def __default_compare(self, wtype, tid, base_idx, test_idx, formats):
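        """Dump the comparison of a test configuration against the base
        one for a default workload."""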
        _results = self.results[wtype]

        logging.debug('Test %s: compare %s with %s',
                tid, base_idx, test_idx)
        res_comp = '{0:s} vs {1:s}'.format(test_idx, base_idx)
        res_line = '{0:8s}: {1:22s} | '.format(tid, res_comp)

        # Dump all energy metrics
        for cpus in ['LITTLE', 'big', 'Total']:

            # If either base or test has a 0 MAX energy, it means that
            # energy has not been collected
            base_max = _results[tid][base_idx]['energy'][cpus]['max']
            test_max = _results[tid][test_idx]['energy'][cpus]['max']
            if base_max == 0 or test_max == 0:
                res_line += ' {0:10s}'.format('NA')
                continue

            # Otherwise, report energy values
            res_base = _results[tid][base_idx]['energy'][cpus]['avg']
            res_test = _results[tid][test_idx]['energy'][cpus]['avg']

            speedup_cnt = res_test - res_base
            if 'absolute' in formats:
                res_line += ' {0:10.2f}'.format(speedup_cnt)
            else:
                speedup_pct = 100.0 * speedup_cnt / res_base
                res_line += ' {0:s}'\
                        .format(TestColors.rate(
                            speedup_pct,
                            positive_is_good=False))
        res_line += ' |'

        # If available, also dump performance results
        if 'performance' not in _results[tid][base_idx]:
            print res_line
            return

        for pidx in ['perf_avg', 'ctime_avg', 'edp1', 'edp2', 'edp3']:
            res_base = _results[tid][base_idx]['performance'][pidx]['avg']
            res_test = _results[tid][test_idx]['performance'][pidx]['avg']

            logging.debug('idx: %s, base: %s, test: %s',
                    pidx, res_base, res_test)

            # Compute the base-vs-test difference: performance is better
            # when higher, while completion time and EDP are better when
            # lower
            speedup_cnt = 0
            if res_base != 0:
                if pidx in ['perf_avg']:
                    speedup_cnt = res_test - res_base
                else:
                    speedup_cnt = res_base - res_test

            # Report the absolute difference or the relative speedup
            speedup_pct = 0
            if 'absolute' in formats:
                if 'edp' in pidx:
                    res_line += ' {0:10.2e}'.format(speedup_cnt)
                else:
                    res_line += ' {0:10.2f}'.format(speedup_cnt)
            else:
                if res_base != 0:
                    if pidx in ['perf_avg']:
                        # Report perf_avg as a plain difference
                        # speedup_pct = 100.0 * speedup_cnt / res_base
                        speedup_pct = speedup_cnt
                    else:
                        speedup_pct = 100.0 * speedup_cnt / res_base
                res_line += ' {0:s}'.format(TestColors.rate(speedup_pct))
        res_line += ' |'
        print res_line

# List of workload types which can be parsed using the default test parser
DEFAULT_WTYPES = ['perf_bench_messaging', 'perf_bench_pipe']
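
# Minimal usage sketch (an assumption, not part of the original module):
# drive the Report class from the command line, relying on its defaults
# for the comparison regexps and the output format.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
            description='Report energy/performance comparisons for a test results directory')
    parser.add_argument('results_dir',
            help='directory containing results.json (or the raw results to parse)')
    args = parser.parse_args()
    Report(args.results_dir)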

# vim: set tabstop=4 shiftwidth=4 expandtab :