benchmark_metrics_experiment_unittest.py revision e9895d0f43b3078b847b8d495a7c1e8ae309d382
1#!/usr/bin/python2
2
3# Copyright 2016 The Chromium OS Authors. All rights reserved.
4# Use of this source code is governed by a BSD-style license that can be
5# found in the LICENSE file.
6"""Unit tests for the benchmark_metrics_experiment module."""
7
8from benchmark_metrics_experiment import MetricsExperiment
9
10import mock
11import os
12import tempfile
13import unittest
14
15
class MetricsExperimentTest(unittest.TestCase):
  """Test class for MetricsExperiment class."""

  def __init__(self, *args, **kwargs):
    super(MetricsExperimentTest, self).__init__(*args, **kwargs)
    # Input fixtures shared by the test methods below; paths are relative to
    # the directory from which the test is run.
    self._pairwise_inclusive_count_test_file = \
        'testdata/input/pairwise_inclusive_count_test.csv'
    self._pairwise_inclusive_count_reference_file = \
        'testdata/input/pairwise_inclusive_count_reference.csv'
    self._inclusive_count_test_file = \
        'testdata/input/inclusive_count_test.csv'
    self._inclusive_count_reference_file = \
        'testdata/input/inclusive_count_reference.csv'
    self._cwp_function_groups_file = \
        'testdata/input/cwp_function_groups'

  def testParseInclusiveStatisticsFile(self):
    """Checks parsing of the test and reference inclusive count fixtures."""
    expected_inclusive_statistics_test = {
        'func_f,/a/b/file_f': 2.3,
        'func_g,/a/b/file_g': 2.2,
        'func_h,/c/d/file_h': 3.3,
        'func_i,/c/d/file_i': 4.4,
        'func_j,/e/file_j': 5.5,
        'func_k,/e/file_k': 6.6
    }
    expected_inclusive_statistics_reference = {
        'func_f,/a/b/file_f': 1.0,
        'func_g,/a/b/file_g': 4.4,
        'func_h,/c/d/file_h': 3.0,
        'func_i,/c/d/file_i': 4.0,
        'func_j,/e/file_j': 5.0,
        'func_l,/e/file_l': 6.0
    }
    result_inclusive_statistics_test = \
        MetricsExperiment.ParseInclusiveStatisticsFile(
            self._inclusive_count_test_file)
    result_inclusive_statistics_reference = \
        MetricsExperiment.ParseInclusiveStatisticsFile(
            self._inclusive_count_reference_file)
    self.assertEqual(result_inclusive_statistics_test,
                     expected_inclusive_statistics_test)
    self.assertEqual(result_inclusive_statistics_reference,
                     expected_inclusive_statistics_reference)

  def testParsePairwiseInclusiveStatisticsFile(self):
    """Checks parsing of the pairwise inclusive count fixtures."""
    expected_pairwise_inclusive_statistics_test = {
        'func_f': {'func_g,/a/b/file_g2': 0.01,
                   'func_h,/c/d/file_h': 0.02,
                   'func_i,/c/d/file_i': 0.03},
        'func_g': {'func_j,/e/file_j': 0.4,
                   'func_m,/e/file_m': 0.6}
    }
    expected_pairwise_inclusive_statistics_reference = {
        'func_f': {'func_g,/a/b/file_g': 0.1,
                   'func_h,/c/d/file_h': 0.2,
                   'func_i,/c/d/file_i': 0.3},
        'func_g': {'func_j,/e/file_j': 0.4}
    }
    result_pairwise_inclusive_statistics_test = \
        MetricsExperiment.ParsePairwiseInclusiveStatisticsFile(
            self._pairwise_inclusive_count_test_file)
    result_pairwise_inclusive_statistics_reference = \
        MetricsExperiment.ParsePairwiseInclusiveStatisticsFile(
            self._pairwise_inclusive_count_reference_file)
    self.assertEqual(result_pairwise_inclusive_statistics_test,
                     expected_pairwise_inclusive_statistics_test)
    self.assertEqual(result_pairwise_inclusive_statistics_reference,
                     expected_pairwise_inclusive_statistics_reference)

  def _CheckFileContents(self, file_name, expected_content_lines):
    """Asserts that file_name contains exactly expected_content_lines."""
    with open(file_name, 'r') as input_file:
      result_content_lines = input_file.readlines()
      self.assertListEqual(expected_content_lines, result_content_lines)

  def testExperiment(self):
    """End-to-end check of the statistics files written by the experiment."""
    group_statistics_file, group_statistics_filename = tempfile.mkstemp()

    os.close(group_statistics_file)

    # Register cleanup immediately so the temporary file is removed even when
    # an assertion below fails; previously the trailing os.remove calls were
    # skipped on failure, leaking temp files.
    self.addCleanup(os.remove, group_statistics_filename)

    function_statistics_file, function_statistics_filename = tempfile.mkstemp()

    os.close(function_statistics_file)

    self.addCleanup(os.remove, function_statistics_filename)

    expected_group_statistics_lines = \
        ['group,file_path,function_count,distance_cum,distance_avg,score_cum,'
         'score_avg\n',
         'ab,/a/b,2.0,3.16,1.58,7.52435897436,3.76217948718\n',
         'e,/e,2.0,2.0,1.0,27.5,13.75\n',
         'cd,/c/d,2.0,2.0,1.0,27.5,13.75']
    expected_function_statistics_lines = \
        ['function,file,distance,score\n',
         'func_i,/c/d/file_i,1.0,17.6\n',
         'func_j,/e/file_j,1.0,27.5\n',
         'func_f,/a/b/file_f,1.56,1.47435897436\n',
         'func_h,/c/d/file_h,1.0,9.9\n',
         'func_k,/e/file_k,1.0,0.0\n',
         'func_g,/a/b/file_g,1.6,6.05']
    metric_experiment = \
        MetricsExperiment(self._pairwise_inclusive_count_reference_file,
                          self._pairwise_inclusive_count_test_file,
                          self._inclusive_count_reference_file,
                          self._inclusive_count_test_file,
                          self._cwp_function_groups_file,
                          group_statistics_filename,
                          function_statistics_filename)

    metric_experiment.PerformComputation()
    self._CheckFileContents(group_statistics_filename,
                            expected_group_statistics_lines)
    self._CheckFileContents(function_statistics_filename,
                            expected_function_statistics_lines)
129
130
# Allow running this file directly as a standalone test script.
if __name__ == '__main__':
  unittest.main()
133