#!/usr/local/bin/python -O

""" A Python Benchmark Suite

"""
# Note: Please keep this module compatible with Python 2.6.
#
# Tests may include features from later Python versions, but these
# should then be embedded in try-except clauses in the configuration
# module Setup.py.
#

from __future__ import print_function

# pybench Copyright
__copyright__ = """\
Copyright (c), 1997-2006, Marc-Andre Lemburg (mal@lemburg.com)
Copyright (c), 2000-2006, eGenix.com Software GmbH (info@egenix.com)

                   All Rights Reserved.

Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby
granted, provided that the above copyright notice appear in all copies
and that both that copyright notice and this permission notice appear
in supporting documentation or portions thereof, including
modifications, that you make.

THE AUTHOR MARC-ANDRE LEMBURG DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""

import sys
import time
import platform
import re
from CommandLine import *

try:
    import cPickle
    pickle = cPickle
except ImportError:
    import pickle

# Version number; version history: see README file !
__version__ = '2.1'

### Constants

# Second fractions
MILLI_SECONDS = 1e3
MICRO_SECONDS = 1e6

# Percent unit
PERCENT = 100

# Horizontal line length
LINE = 79

# Minimum test run-time in seconds
MIN_TEST_RUNTIME = 1e-3

# Number of calibration runs to use for calibrating the tests
CALIBRATION_RUNS = 20

# Number of calibration loops to run for each calibration run
CALIBRATION_LOOPS = 20

# Allow skipping calibration ?
ALLOW_SKIPPING_CALIBRATION = 1

# Timer types
TIMER_TIME_TIME = 'time.time'
TIMER_TIME_PROCESS_TIME = 'time.process_time'
TIMER_TIME_PERF_COUNTER = 'time.perf_counter'
TIMER_TIME_CLOCK = 'time.clock'
TIMER_SYSTIMES_PROCESSTIME = 'systimes.processtime'

# Choose platform default timer
if hasattr(time, 'perf_counter'):
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_PERF_COUNTER
elif sys.platform[:3] == 'win':
    # On WinXP this has 2.5ms resolution
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_CLOCK
else:
    # On Linux this has 1ms resolution
    TIMER_PLATFORM_DEFAULT = TIMER_TIME_TIME
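
# To inspect the resolution of the chosen timer on Python 3.3+, the
# standard time.get_clock_info() call can be used (illustrative snippet;
# PyBenchCmdline.main() below performs the same check):
#
#     import time
#     print(time.get_clock_info('perf_counter'))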

# Print debug information ?
_debug = 0

### Helpers

def get_timer(timertype):

    if timertype == TIMER_TIME_TIME:
        return time.time
    elif timertype == TIMER_TIME_PROCESS_TIME:
        return time.process_time
    elif timertype == TIMER_TIME_PERF_COUNTER:
        return time.perf_counter
    elif timertype == TIMER_TIME_CLOCK:
        return time.clock
    elif timertype == TIMER_SYSTIMES_PROCESSTIME:
        import systimes
        return systimes.processtime
    else:
        raise TypeError('unknown timer type: %s' % timertype)
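
# Usage sketch (illustrative): get_timer() returns a zero-argument
# callable; elapsed time is computed as the difference of two calls:
#
#     timer = get_timer(TIMER_PLATFORM_DEFAULT)
#     t0 = timer()
#     do_work()          # hypothetical function standing in for timed code
#     elapsed = timer() - t0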

def get_machine_details():

    if _debug:
        print('Getting machine details...')
    buildno, builddate = platform.python_build()
    python = platform.python_version()
    # XXX this is now always UCS4, maybe replace it with 'PEP393' in 3.3+?
    if sys.maxunicode == 65535:
        # UCS2 build (standard)
        unitype = 'UCS2'
    else:
        # UCS4 build (most recent Linux distros)
        unitype = 'UCS4'
    bits, linkage = platform.architecture()
    return {
        'platform': platform.platform(),
        'processor': platform.processor(),
        'executable': sys.executable,
        'implementation': getattr(platform, 'python_implementation',
                                  lambda:'n/a')(),
        'python': python,
        'compiler': platform.python_compiler(),
        'buildno': buildno,
        'builddate': builddate,
        'unicode': unitype,
        'bits': bits,
        }

def print_machine_details(d, indent=''):

    l = ['Machine Details:',
         '   Platform ID:    %s' % d.get('platform', 'n/a'),
         '   Processor:      %s' % d.get('processor', 'n/a'),
         '',
         'Python:',
         '   Implementation: %s' % d.get('implementation', 'n/a'),
         '   Executable:     %s' % d.get('executable', 'n/a'),
         '   Version:        %s' % d.get('python', 'n/a'),
         '   Compiler:       %s' % d.get('compiler', 'n/a'),
         '   Bits:           %s' % d.get('bits', 'n/a'),
         '   Build:          %s (#%s)' % (d.get('builddate', 'n/a'),
                                          d.get('buildno', 'n/a')),
         '   Unicode:        %s' % d.get('unicode', 'n/a'),
         ]
    joiner = '\n' + indent
    print(indent + joiner.join(l) + '\n')

### Test baseclass

class Test:

    """ All tests must use this class as their base class. It provides
        the necessary interface to the benchmark machinery.

        The tests must set .rounds to a value high enough to let the
        test run between 20-50 seconds. This is needed because
        clock()-timing only gives rather inaccurate values (on Linux,
        for example, it is accurate to a few hundredths of a
        second). If you don't want to wait that long, use a warp
        factor larger than 1.

        It is also important to set the .operations variable to a
        value representing the number of "virtual operations" done per
        call of .run().

        If you change a test in some way, don't forget to increase
        its version number.

    """

    ### Instance variables that each test should override

    # Version number of the test as float (x.yy); this is important
    # for comparisons of benchmark runs - tests with unequal version
    # number will not get compared.
    version = 2.1

    # The number of abstract operations done in each round of the
    # test. An operation is the basic unit of what you want to
    # measure. The benchmark will output the amount of run-time per
    # operation. Note that in order to raise the measured timings
    # significantly above noise level, it is often required to repeat
    # sets of operations more than once per test round. The measured
    # overhead per test round should be less than 1 second.
    operations = 1

    # Number of rounds to execute per test run. This should be
    # adjusted to a figure that results in a test run-time between
    # 1 and 2 seconds.
    rounds = 100000

    ### Internal variables

    # Mark this class as implementing a test
    is_a_test = 1

    # Last timing: (real, run, overhead)
    last_timing = (0.0, 0.0, 0.0)

    # Warp factor to use for this test
    warp = 1

    # Number of calibration runs to use
    calibration_runs = CALIBRATION_RUNS

    # List of calibration timings
    overhead_times = None

    # List of test run timings
    times = []

    # Timer used for the benchmark
    timer = TIMER_PLATFORM_DEFAULT

    def __init__(self, warp=None, calibration_runs=None, timer=None):

        # Set parameters
        if warp is not None:
            self.rounds = int(self.rounds / warp)
            if self.rounds == 0:
                raise ValueError('warp factor set too high')
            self.warp = warp
        if calibration_runs is not None:
            if (not ALLOW_SKIPPING_CALIBRATION and
                calibration_runs < 1):
                raise ValueError('at least one calibration run is required')
            self.calibration_runs = calibration_runs
        if timer is not None:
            self.timer = timer

        # Init variables
        self.times = []
        self.overhead_times = []

        # We want these to be in the instance dict, so that pickle
        # saves them
        self.version = self.version
        self.operations = self.operations
        self.rounds = self.rounds

    def get_timer(self):

        """ Return the timer function to use for the test.

        """
        return get_timer(self.timer)

    def compatible(self, other):

        """ Return 1/0 depending on whether the test is compatible
            with the other Test instance or not.

        """
        if self.version != other.version:
            return 0
        if self.rounds != other.rounds:
            return 0
        return 1

    def calibrate_test(self):

        if self.calibration_runs == 0:
            self.overhead_times = [0.0]
            return

        calibrate = self.calibrate
        timer = self.get_timer()
        calibration_loops = range(CALIBRATION_LOOPS)

        # Time the calibration loop overhead
        prep_times = []
        for i in range(self.calibration_runs):
            t = timer()
            for _ in calibration_loops:
                pass
            t = timer() - t
            prep_times.append(t / CALIBRATION_LOOPS)
        min_prep_time = min(prep_times)
        if _debug:
            print()
            print('Calib. prep time     = %.6fms' % (
                min_prep_time * MILLI_SECONDS))

        # Time the calibration runs (doing CALIBRATION_LOOPS loops of
        # .calibrate() method calls each)
        for i in range(self.calibration_runs):
            t = timer()
            for _ in calibration_loops:
                calibrate()
            t = timer() - t
            self.overhead_times.append(t / CALIBRATION_LOOPS
                                       - min_prep_time)

        # Check the measured times
        min_overhead = min(self.overhead_times)
        max_overhead = max(self.overhead_times)
        if _debug:
            print('Calib. overhead time = %.6fms' % (
                min_overhead * MILLI_SECONDS))
        if min_overhead < 0.0:
            raise ValueError('calibration setup did not work')
        if max_overhead - min_overhead > 0.1:
            raise ValueError(
                'overhead calibration timing range too inaccurate: '
                '%r - %r' % (min_overhead, max_overhead))

    def run(self):

        """ Run the test in two phases: first calibrate, then
            do the actual test. Be careful to keep the calibration
            timing low with respect to the test timing.

        """
        test = self.test
        timer = self.get_timer()

        # Get calibration
        min_overhead = min(self.overhead_times)

        # Test run
        t = timer()
        test()
        t = timer() - t
        if t < MIN_TEST_RUNTIME:
            raise ValueError('warp factor too high: '
                             'test times are < 1ms')
        eff_time = t - min_overhead
        if eff_time < 0:
            raise ValueError('wrong calibration')
        self.last_timing = (eff_time, t, min_overhead)
        self.times.append(eff_time)

    def calibrate(self):

        """ Calibrate the test.

            This method should execute everything that is needed to
            set up and run the test - except for the actual operations
            that you intend to measure. pybench uses this method to
            measure the test implementation overhead.

        """
        return

    def test(self):

        """ Run the test.

            The test needs to run self.rounds rounds, executing
            self.operations operations each.

        """
        return

    def stat(self):

        """ Return test run statistics as tuple:

            (minimum run time,
             average run time,
             total run time,
             average time per operation,
             minimum overhead time)

        """
        runs = len(self.times)
        if runs == 0:
            return 0.0, 0.0, 0.0, 0.0, 0.0
        min_time = min(self.times)
        total_time = sum(self.times)
        avg_time = total_time / float(runs)
        operation_avg = total_time / float(runs
                                           * self.rounds
                                           * self.operations)
        if self.overhead_times:
            min_overhead = min(self.overhead_times)
        else:
            min_overhead = self.last_timing[2]
        return min_time, avg_time, total_time, operation_avg, min_overhead

### Load Setup

# This has to be done after the definition of the Test class, since
# the Setup module will import subclasses using this class.

import Setup

### Benchmark base class

class Benchmark:

    # Name of the benchmark
    name = ''

    # Number of benchmark rounds to run
    rounds = 1

    # Warp factor used to run the tests
    warp = 1

    # Number of calibration runs to use per test
    calibration_runs = CALIBRATION_RUNS

    # Average benchmark round time
    roundtime = 0

    # Benchmark version number as float x.yy
    version = 2.1

    # Produce verbose output ?
    verbose = 0

    # Dictionary with the machine details
    machine_details = None

    # Timer used for the benchmark
    timer = TIMER_PLATFORM_DEFAULT

    def __init__(self, name, verbose=None, timer=None, warp=None,
                 calibration_runs=None):

        if name:
            self.name = name
        else:
            self.name = '%04i-%02i-%02i %02i:%02i:%02i' % \
                        (time.localtime(time.time())[:6])
        if verbose is not None:
            self.verbose = verbose
        if timer is not None:
            self.timer = timer
        if warp is not None:
            self.warp = warp
        if calibration_runs is not None:
            self.calibration_runs = calibration_runs

        # Init vars; get_machine_details() prints its own debug message
        self.tests = {}
        self.machine_details = get_machine_details()

        # Make .version an instance attribute to have it saved in the
        # Benchmark pickle
        self.version = self.version

    def get_timer(self):

        """ Return the timer function to use for the benchmark.

        """
        return get_timer(self.timer)

    def compatible(self, other):

        """ Return 1/0 depending on whether the benchmark is
            compatible with the other Benchmark instance or not.

        """
        if self.version != other.version:
            return 0
        if (self.machine_details == other.machine_details and
            self.timer != other.timer):
            return 0
        if (self.calibration_runs == 0 and
            other.calibration_runs != 0):
            return 0
        if (self.calibration_runs != 0 and
            other.calibration_runs == 0):
            return 0
        return 1

    def load_tests(self, setupmod, limitnames=None):

        # Add tests
        if self.verbose:
            print('Searching for tests ...')
            print('--------------------------------------')
        for testclass in setupmod.__dict__.values():
            if not hasattr(testclass, 'is_a_test'):
                continue
            name = testclass.__name__
            if name == 'Test':
                continue
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            self.tests[name] = testclass(
                warp=self.warp,
                calibration_runs=self.calibration_runs,
                timer=self.timer)
        l = sorted(self.tests)
        if self.verbose:
            for name in l:
                print('  %s' % name)
            print('--------------------------------------')
            print('  %i tests found' % len(l))
            print()

    def calibrate(self):

        print('Calibrating tests. Please wait...', end=' ')
        sys.stdout.flush()
        if self.verbose:
            print()
            print()
            print('Test                              min      max')
            print('-' * LINE)
        tests = sorted(self.tests.items())
        for name, test in tests:
            test.calibrate_test()
            if self.verbose:
                print('%30s:  %6.3fms  %6.3fms' % \
                      (name,
                       min(test.overhead_times) * MILLI_SECONDS,
                       max(test.overhead_times) * MILLI_SECONDS))
        if self.verbose:
            print()
            print('Done with the calibration.')
        else:
            print('done.')
        print()

    def run(self):

        tests = sorted(self.tests.items())
        timer = self.get_timer()
        print('Running %i round(s) of the suite at warp factor %i:' % \
              (self.rounds, self.warp))
        print()
        self.roundtimes = []
        for i in range(self.rounds):
            if self.verbose:
                print(' Round %-25i  effective   absolute  overhead' % (i+1))
            total_eff_time = 0.0
            for name, test in tests:
                if self.verbose:
                    print('%30s:' % name, end=' ')
                test.run()
                (eff_time, abs_time, min_overhead) = test.last_timing
                total_eff_time = total_eff_time + eff_time
                if self.verbose:
                    print('    %5.0fms    %5.0fms %7.3fms' % \
                          (eff_time * MILLI_SECONDS,
                           abs_time * MILLI_SECONDS,
                           min_overhead * MILLI_SECONDS))
            self.roundtimes.append(total_eff_time)
            if self.verbose:
                print('                   '
                      '               ------------------------------')
                print('                   '
                      '     Totals:    %6.0fms' %
                      (total_eff_time * MILLI_SECONDS))
                print()
            else:
                print('* Round %i done in %.3f seconds.' % (i+1,
                                                            total_eff_time))
        print()

    def stat(self):

        """ Return benchmark run statistics as tuple:

            (minimum round time,
             average round time,
             maximum round time)

            XXX Currently not used, since the benchmark does test
                statistics across all rounds.

        """
        runs = len(self.roundtimes)
        if runs == 0:
            return 0.0, 0.0, 0.0
        min_time = min(self.roundtimes)
        total_time = sum(self.roundtimes)
        avg_time = total_time / float(runs)
        max_time = max(self.roundtimes)
        return (min_time, avg_time, max_time)

    def print_header(self, title='Benchmark'):

        print('-' * LINE)
        print('%s: %s' % (title, self.name))
        print('-' * LINE)
        print()
        print('    Rounds: %s' % self.rounds)
        print('    Warp:   %s' % self.warp)
        print('    Timer:  %s' % self.timer)
        print()
        if self.machine_details:
            print_machine_details(self.machine_details, indent='    ')
            print()

    def print_benchmark(self, hidenoise=0, limitnames=None):

        print('Test                          '
              '   minimum  average  operation  overhead')
        print('-' * LINE)
        tests = sorted(self.tests.items())
        total_min_time = 0.0
        total_avg_time = 0.0
        for name, test in tests:
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            (min_time,
             avg_time,
             total_time,
             op_avg,
             min_overhead) = test.stat()
            total_min_time = total_min_time + min_time
            total_avg_time = total_avg_time + avg_time
            print('%30s:  %5.0fms  %5.0fms  %6.2fus  %7.3fms' % \
                  (name,
                   min_time * MILLI_SECONDS,
                   avg_time * MILLI_SECONDS,
                   op_avg * MICRO_SECONDS,
                   min_overhead * MILLI_SECONDS))
        print('-' * LINE)
        print('Totals:                        '
              ' %6.0fms %6.0fms' %
              (total_min_time * MILLI_SECONDS,
               total_avg_time * MILLI_SECONDS,
               ))
        print()

    def print_comparison(self, compare_to, hidenoise=0, limitnames=None):

        # Check benchmark versions
        if compare_to.version != self.version:
            print('* Benchmark versions differ: '
                  'cannot compare this benchmark to "%s" !' %
                  compare_to.name)
            print()
            self.print_benchmark(hidenoise=hidenoise,
                                 limitnames=limitnames)
            return

        # Print header
        compare_to.print_header('Comparing with')
        print('Test                          '
              '   minimum run-time        average  run-time')
        print('                              '
              '   this    other   diff    this    other   diff')
        print('-' * LINE)

        # Print test comparisons
        tests = sorted(self.tests.items())
        total_min_time = other_total_min_time = 0.0
        total_avg_time = other_total_avg_time = 0.0
        benchmarks_compatible = self.compatible(compare_to)
        tests_compatible = 1
        for name, test in tests:
            if (limitnames is not None and
                limitnames.search(name) is None):
                continue
            (min_time,
             avg_time,
             total_time,
             op_avg,
             min_overhead) = test.stat()
            total_min_time = total_min_time + min_time
            total_avg_time = total_avg_time + avg_time
            try:
                other = compare_to.tests[name]
            except KeyError:
                other = None
            if other is None:
                # Other benchmark doesn't include the given test
                min_diff, avg_diff = 'n/a', 'n/a'
                other_min_time = 0.0
                other_avg_time = 0.0
                tests_compatible = 0
            else:
                (other_min_time,
                 other_avg_time,
                 other_total_time,
                 other_op_avg,
                 other_min_overhead) = other.stat()
                other_total_min_time = other_total_min_time + other_min_time
                other_total_avg_time = other_total_avg_time + other_avg_time
                if (benchmarks_compatible and
                    test.compatible(other)):
                    # Both benchmark and tests are comparable
                    min_diff = ((min_time * self.warp) /
                                (other_min_time * other.warp) - 1.0)
                    avg_diff = ((avg_time * self.warp) /
                                (other_avg_time * other.warp) - 1.0)
                    # Hide differences below the 10% noise threshold
                    if hidenoise and abs(min_diff) < 0.10:
                        min_diff = ''
                    else:
                        min_diff = '%+5.1f%%' % (min_diff * PERCENT)
                    if hidenoise and abs(avg_diff) < 0.10:
                        avg_diff = ''
                    else:
                        avg_diff = '%+5.1f%%' % (avg_diff * PERCENT)
                else:
                    # Benchmark or tests are not comparable
                    min_diff, avg_diff = 'n/a', 'n/a'
                    tests_compatible = 0
            print('%30s: %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' % \
                  (name,
                   min_time * MILLI_SECONDS,
                   other_min_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   min_diff,
                   avg_time * MILLI_SECONDS,
                   other_avg_time * MILLI_SECONDS * compare_to.warp / self.warp,
                   avg_diff))
        print('-' * LINE)

        # Summarise test results
        if not benchmarks_compatible or not tests_compatible:
            min_diff, avg_diff = 'n/a', 'n/a'
        else:
            if other_total_min_time != 0.0:
                min_diff = '%+5.1f%%' % (
                    ((total_min_time * self.warp) /
                     (other_total_min_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                min_diff = 'n/a'
            if other_total_avg_time != 0.0:
                avg_diff = '%+5.1f%%' % (
                    ((total_avg_time * self.warp) /
                     (other_total_avg_time * compare_to.warp) - 1.0) * PERCENT)
            else:
                avg_diff = 'n/a'
        print('Totals:                       '
              '  %5.0fms %5.0fms %7s %5.0fms %5.0fms %7s' %
              (total_min_time * MILLI_SECONDS,
               (other_total_min_time * compare_to.warp/self.warp
                * MILLI_SECONDS),
               min_diff,
               total_avg_time * MILLI_SECONDS,
               (other_total_avg_time * compare_to.warp/self.warp
                * MILLI_SECONDS),
               avg_diff
               ))
        print()
        print('(this=%s, other=%s)' % (self.name,
                                       compare_to.name))
        print()

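# Programmatic use (illustrative sketch, mirroring what PyBenchCmdline.main()
# below does; 'demo' and the parameter values are arbitrary):
#
#     bench = Benchmark('demo', verbose=0, timer=TIMER_PLATFORM_DEFAULT,
#                       warp=10, calibration_runs=CALIBRATION_RUNS)
#     bench.rounds = 3
#     bench.load_tests(Setup)
#     bench.calibrate()
#     bench.run()
#     bench.print_header()
#     bench.print_benchmark()
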
class PyBenchCmdline(Application):

    header = ("PYBENCH - a benchmark test suite for Python "
              "interpreters/compilers.")

    version = __version__

    debug = _debug

    options = [ArgumentOption('-n',
                              'number of rounds',
                              Setup.Number_of_rounds),
               ArgumentOption('-f',
                              'save benchmark to file arg',
                              ''),
               ArgumentOption('-c',
                              'compare benchmark with the one in file arg',
                              ''),
               ArgumentOption('-s',
                              'show benchmark in file arg, then exit',
                              ''),
               ArgumentOption('-w',
                              'set warp factor to arg',
                              Setup.Warp_factor),
               ArgumentOption('-t',
                              'run only tests with names matching arg',
                              ''),
               ArgumentOption('-C',
                              'set the number of calibration runs to arg',
                              CALIBRATION_RUNS),
               SwitchOption('-d',
                            'hide noise in comparisons',
                            0),
               SwitchOption('-v',
                            'verbose output (not recommended)',
                            0),
               SwitchOption('--with-gc',
                            'enable garbage collection',
                            0),
               SwitchOption('--with-syscheck',
                            'use default sys check interval',
                            0),
               ArgumentOption('--timer',
                              'use given timer',
                              TIMER_PLATFORM_DEFAULT),
               ]
    about = """\
The normal operation is to run the suite and display the
results. Use -f to save them for later reuse or comparisons.

Available timers:

   time.time
   time.process_time
   time.perf_counter
   time.clock
   systimes.processtime

Examples:

python2.1 pybench.py -f p21.pybench
python2.5 pybench.py -f p25.pybench
python pybench.py -s p25.pybench -c p21.pybench
"""
    copyright = __copyright__

    def main(self):

        rounds = self.values['-n']
        reportfile = self.values['-f']
        show_bench = self.values['-s']
        compare_to = self.values['-c']
        hidenoise = self.values['-d']
        warp = int(self.values['-w'])
        withgc = self.values['--with-gc']
        limitnames = self.values['-t']
        if limitnames:
            if _debug:
                print('* limiting test names to ones matching "%s"' % \
                      limitnames)
            limitnames = re.compile(limitnames, re.I)
        else:
            limitnames = None
        verbose = self.verbose
        withsyscheck = self.values['--with-syscheck']
        calibration_runs = self.values['-C']
        timer = self.values['--timer']

        print('-' * LINE)
        print('PYBENCH %s' % __version__)
        print('-' * LINE)
        print('* using %s %s' % (
            getattr(platform, 'python_implementation', lambda:'Python')(),
            ' '.join(sys.version.split())))

        # Switch off garbage collection
        if not withgc:
            try:
                import gc
            except ImportError:
                print('* Python version doesn\'t support garbage collection')
            else:
                try:
                    gc.disable()
                except NotImplementedError:
                    print('* Python version doesn\'t support gc.disable')
                else:
                    print('* disabled garbage collection')

        # "Disable" sys check interval
        if not withsyscheck:
            # Too bad the check interval uses an int instead of a long...
            value = 2147483647
            try:
                sys.setcheckinterval(value)
            except (AttributeError, NotImplementedError):
                print('* Python version doesn\'t support sys.setcheckinterval')
            else:
                print('* system check interval set to maximum: %s' % value)

        if timer == TIMER_SYSTIMES_PROCESSTIME:
            import systimes
            print('* using timer: systimes.processtime (%s)' % \
                  systimes.SYSTIMES_IMPLEMENTATION)
        else:
            # Check that the timer function exists
            try:
                get_timer(timer)
            except TypeError:
                print("* Error: Unknown timer: %s" % timer)
                return

            print('* using timer: %s' % timer)
            if hasattr(time, 'get_clock_info'):
                info = time.get_clock_info(timer[5:])
                print('* timer: resolution=%s, implementation=%s'
                      % (info.resolution, info.implementation))

        print()
        if compare_to:
            try:
                f = open(compare_to,'rb')
                bench = pickle.load(f)
                bench.name = compare_to
                f.close()
                compare_to = bench
            except IOError as reason:
                print('* Error opening/reading file %s: %s' % (
                    repr(compare_to),
                    reason))
                compare_to = None

        if show_bench:
            try:
                f = open(show_bench,'rb')
                bench = pickle.load(f)
                bench.name = show_bench
                f.close()
                bench.print_header()
                if compare_to:
                    bench.print_comparison(compare_to,
                                           hidenoise=hidenoise,
                                           limitnames=limitnames)
                else:
                    bench.print_benchmark(hidenoise=hidenoise,
                                          limitnames=limitnames)
            except IOError as reason:
                print('* Error opening/reading file %s: %s' % (
                    repr(show_bench),
                    reason))
                print()
            return

        if reportfile:
            print('Creating benchmark: %s (rounds=%i, warp=%i)' % \
                  (reportfile, rounds, warp))
            print()

        # Create benchmark object
        bench = Benchmark(reportfile,
                          verbose=verbose,
                          timer=timer,
                          warp=warp,
                          calibration_runs=calibration_runs)
        bench.rounds = rounds
        bench.load_tests(Setup, limitnames=limitnames)
        try:
            bench.calibrate()
            bench.run()
        except KeyboardInterrupt:
            print()
            print('*** KeyboardInterrupt -- Aborting')
            print()
            return
        bench.print_header()
        if compare_to:
            bench.print_comparison(compare_to,
                                   hidenoise=hidenoise,
                                   limitnames=limitnames)
        else:
            bench.print_benchmark(hidenoise=hidenoise,
                                  limitnames=limitnames)

        # Ring bell
        sys.stderr.write('\007')

        if reportfile:
            try:
                f = open(reportfile,'wb')
                bench.name = reportfile
                pickle.dump(bench,f)
                f.close()
            except IOError as reason:
                print('* Error opening/writing reportfile %s: %s' % (
                    reportfile,
                    reason))
                print()

if __name__ == '__main__':
    PyBenchCmdline()