1'''
2Created on May 16, 2011
3
4@author: bungeman
5'''
6import bench_util
7import getopt
8import httplib
9import itertools
10import json
11import os
12import re
13import sys
14import urllib
15import urllib2
16import xml.sax.saxutils
17
# Maximum number of characters expected in an svn revision.
19MAX_SVN_REV_LENGTH = 5
20
21# Indices for getting elements from bench expectation files.
22# See bench_expectations_<builder>.txt for details.
23EXPECTED_IDX = -3
24LB_IDX = -2
25UB_IDX = -1
26
27# Indices of the tuple of dictionaries containing slower and faster alerts.
28SLOWER = 0
29FASTER = 1
30
31# URL prefix for the bench dashboard page. Showing recent 15 days of data.
32DASHBOARD_URL_PREFIX = 'http://go/skpdash/#15'
33
34def usage():
35    """Prints simple usage information."""
36
37    print '-a <representation_alg> bench representation algorithm to use. '
38    print '   Defaults to "25th". See bench_util.py for details.'
39    print '-b <builder> name of the builder whose bench data we are checking.'
40    print '-d <dir> a directory containing bench_<revision>_<scalar> files.'
41    print '-e <file> file containing expected bench builder values/ranges.'
42    print '   Will raise exception if actual bench values are out of range.'
43    print '   See bench_expectations_<builder>.txt for data format / examples.'
44    print '-r <revision> the git commit hash or svn revision for checking '
45    print '   bench values.'
46
47
class Label:
    """Identity of a single bench measurement, usable as a dict key.

    Fields: bench (str), config (str), time_type (str), and
    settings ({str: str}). Equality and hashing cover all four fields so
    Label instances can key the bench dictionaries built elsewhere.
    """
    def __init__(self, bench, config, time_type, settings):
        self.bench = bench
        self.config = config
        self.time_type = time_type
        self.settings = settings

    def __repr__(self):
        return "Label(%s, %s, %s, %s)" % (
                   str(self.bench),
                   str(self.config),
                   str(self.time_type),
                   str(self.settings),
               )

    def __str__(self):
        return "%s_%s_%s_%s" % (
                   str(self.bench),
                   str(self.config),
                   str(self.time_type),
                   str(self.settings),
               )

    def __eq__(self, other):
        return (self.bench == other.bench and
                self.config == other.config and
                self.time_type == other.time_type and
                self.settings == other.settings)

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it
        # explicitly to keep == and != consistent.
        return not self.__eq__(other)

    def __hash__(self):
        # items() (rather than the Python-2-only iteritems()) is portable
        # and equivalent here; frozenset makes the dict contents hashable.
        return (hash(self.bench) ^
                hash(self.config) ^
                hash(self.time_type) ^
                hash(frozenset(self.settings.items())))
85
def create_bench_dict(revision_data_points):
    """Convert current revision data into a dictionary of line data.

    Args:
      revision_data_points: a list of bench data points

    Returns:
      a dictionary of this form:
          keys = Label objects
          values = the corresponding bench value

    Raises:
      Exception: if two data points map to the same Label.
    """
    result = {}
    for data_point in revision_data_points:
        label = Label(data_point.bench, data_point.config,
                      data_point.time_type, data_point.settings)
        # Each (bench, config, time_type, settings) combination must be
        # unique within one revision's data.
        if label in result:
            raise Exception('Duplicate expectation entry: ' + str(label))
        result[label] = data_point.time

    return result
107
def read_expectations(expectations, filename):
    """Reads expectations data from file and put in expectations dict.

    Args:
      expectations: dict to populate, mapping
        '<Bench_BmpConfig_TimeType>,<Platform-Alg>' strings to
        (lower_bound, upper_bound, expected) float tuples.
      filename: path of the expectations file to parse. Lines starting
        with '#' and lines with an empty first field are skipped.

    Raises:
      Exception: on a malformed line or a duplicate bench entry.
    """
    # Use a context manager so the file handle is closed even on error
    # (the original open(...).readlines() leaked the handle).
    with open(filename) as expectations_file:
        for expectation in expectations_file:
            elements = expectation.strip().split(',')
            if not elements[0] or elements[0].startswith('#'):
                continue
            if len(elements) != 5:
                raise Exception("Invalid expectation line format: %s" %
                                expectation)
            bench_entry = elements[0] + ',' + elements[1]
            if bench_entry in expectations:
                raise Exception("Dup entries for bench expectation %s" %
                                bench_entry)
            # [<Bench_BmpConfig_TimeType>,<Platform-Alg>] -> (LB, UB, EXPECTED)
            expectations[bench_entry] = (float(elements[LB_IDX]),
                                         float(elements[UB_IDX]),
                                         float(elements[EXPECTED_IDX]))
125
def check_expectations(lines, expectations, key_suffix):
    """Check if any bench results are outside of expected range.

    For each input line in lines, checks the expectations dictionary to see if
    the bench is out of the given range.

    Args:
      lines: dictionary mapping Label objects to the bench values.
      expectations: dictionary returned by read_expectations().
      key_suffix: string of <Platform>-<Alg> containing the bot platform and the
        bench representation algorithm.

    Returns:
      No return value.

    Raises:
      SystemExit (status 1) after writing out-of-range bench data to stderr,
      if any bench value falls outside its expected range.
    """
    # The platform for this bot, to pass to the dashboard plot.
    platform = key_suffix[ : key_suffix.rfind('-')]
    # Tuple of dictionaries recording exceptions that are slower and faster,
    # respectively. Each dictionary maps off_ratio (ratio of actual to expected)
    # to a list of corresponding exception messages.
    exceptions = ({}, {})
    for line in lines:
        line_str = str(line)
        line_str = line_str[ : line_str.find('_{')]
        # Extracts bench and config from line_str, which is in the format
        # <bench-picture-name>.skp_<config>_
        bench, config = line_str.strip('_').split('.skp_')
        bench_platform_key = line_str + ',' + key_suffix
        if bench_platform_key not in expectations:
            continue
        this_bench_value = lines[line]
        this_min, this_max, this_expected = expectations[bench_platform_key]
        if this_bench_value < this_min or this_bench_value > this_max:
            off_ratio = this_bench_value / this_expected
            exception = 'Bench %s out of range [%s, %s] (%s vs %s, %s%%).' % (
                bench_platform_key, this_min, this_max, this_bench_value,
                this_expected, (off_ratio - 1) * 100)
            exception += '\n' + '~'.join([
                DASHBOARD_URL_PREFIX, bench, platform, config])
            if off_ratio > 1:  # Bench is slower.
                exceptions[SLOWER].setdefault(off_ratio, []).append(exception)
            else:
                exceptions[FASTER].setdefault(off_ratio, []).append(exception)
    outputs = []
    for i in [SLOWER, FASTER]:
        if exceptions[i]:
            # Worst offenders (largest ratio of actual to expected) first.
            messages = []
            for ratio in sorted(exceptions[i].keys(), reverse=True):
                messages.extend(exceptions[i][ratio])
            header = '%s benches got slower (sorted by %% difference):' % (
                len(messages))
            if i == FASTER:
                header = header.replace('slower', 'faster')
            outputs.extend(['', header] + messages)

    if outputs:
        # Directly raising Exception will have stderr outputs tied to the line
        # number of the script, so use sys.stderr.write() instead.
        # Add a trailing newline to suppress new line checking errors.
        sys.stderr.write('\n'.join(['Exception:'] + outputs + ['\n']))
        # sys.exit rather than the site-injected builtin exit(), which is
        # not guaranteed to exist in non-interactive interpreters.
        sys.exit(1)
191
192
193def main():
194    """Parses command line and checks bench expectations."""
195    try:
196        opts, _ = getopt.getopt(sys.argv[1:],
197                                "a:b:d:e:r:",
198                                "default-setting=")
199    except getopt.GetoptError, err:
200        print str(err)
201        usage()
202        sys.exit(2)
203
204    directory = None
205    bench_expectations = {}
206    rep = '25th'  # bench representation algorithm, default to 25th
207    rev = None  # git commit hash or svn revision number
208    bot = None
209
210    try:
211        for option, value in opts:
212            if option == "-a":
213                rep = value
214            elif option == "-b":
215                bot = value
216            elif option == "-d":
217                directory = value
218            elif option == "-e":
219                read_expectations(bench_expectations, value)
220            elif option == "-r":
221                rev = value
222            else:
223                usage()
224                assert False, "unhandled option"
225    except ValueError:
226        usage()
227        sys.exit(2)
228
229    if directory is None or bot is None or rev is None:
230        usage()
231        sys.exit(2)
232
233    platform_and_alg = bot + '-' + rep
234
235    data_points = bench_util.parse_skp_bench_data(directory, rev, rep)
236
237    bench_dict = create_bench_dict(data_points)
238
239    if bench_expectations:
240        check_expectations(bench_dict, bench_expectations, platform_and_alg)
241
242
# Entry point when run as a standalone script.
if __name__ == "__main__":
    main()
245