// statistics_recorder.cc revision 2a99a7e74a7f215066514fe81d2bfa6639d9eddd
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/statistics_recorder.h"

#include <list>
#include <string>
#include <utility>
#include <vector>

#include "base/debug/leak_annotations.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/metrics/histogram.h"
#include "base/stringprintf.h"
#include "base/synchronization/lock.h"

using std::list;
using std::string;
16
namespace {
// Initialize histogram statistics gathering system.
// Leaky: the instance is deliberately never destroyed so that static methods
// called during shutdown do not race against its destruction.
base::LazyInstance<base::StatisticsRecorder>::Leaky g_statistics_recorder_ =
    LAZY_INSTANCE_INITIALIZER;
}  // namespace
22
23namespace base {
24
// Collect the number of histograms created.
static uint32 number_of_histograms_ = 0;
// Collect the number of vectors saved because of caching ranges.
static uint32 number_of_vectors_saved_ = 0;
// Collect the number of ranges_ elements saved because of caching ranges.
// NOTE(review): these counters are updated while holding lock_ (see
// RegisterOrDeleteDuplicate*) but read and reset without it in
// CollectHistogramStats() — confirm that tear-free reads are acceptable here.
static size_t saved_ranges_size_ = 0;
31
// static
void StatisticsRecorder::Initialize() {
  // Ensure that an instance of the StatisticsRecorder object is created.
  // Construction (see the constructor below) allocates lock_, histograms_
  // and ranges_, which the other static methods rely on.
  g_statistics_recorder_.Get();
}
37
38
39// static
40bool StatisticsRecorder::IsActive() {
41  if (lock_ == NULL)
42    return false;
43  base::AutoLock auto_lock(*lock_);
44  return NULL != histograms_;
45}
46
47// static
48HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
49    HistogramBase* histogram) {
50  // As per crbug.com/79322 the histograms are intentionally leaked, so we need
51  // to annotate them. Because ANNOTATE_LEAKING_OBJECT_PTR may be used only once
52  // for an object, the duplicates should not be annotated.
53  // Callers are responsible for not calling RegisterOrDeleteDuplicate(ptr)
54  // twice if (lock_ == NULL) || (!histograms_).
55  if (lock_ == NULL) {
56    ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
57    return histogram;
58  }
59
60  HistogramBase* histogram_to_delete = NULL;
61  HistogramBase* histogram_to_return = NULL;
62  {
63    base::AutoLock auto_lock(*lock_);
64    if (histograms_ == NULL) {
65      histogram_to_return = histogram;
66    } else {
67      const string& name = histogram->histogram_name();
68      HistogramMap::iterator it = histograms_->find(name);
69      if (histograms_->end() == it) {
70        (*histograms_)[name] = histogram;
71        ANNOTATE_LEAKING_OBJECT_PTR(histogram);  // see crbug.com/79322
72        ++number_of_histograms_;
73        histogram_to_return = histogram;
74      } else if (histogram == it->second) {
75        // The histogram was registered before.
76        histogram_to_return = histogram;
77      } else {
78        // We already have one histogram with this name.
79        histogram_to_return = it->second;
80        histogram_to_delete = histogram;
81      }
82    }
83  }
84  delete histogram_to_delete;
85  return histogram_to_return;
86}
87
88// static
89const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
90    const BucketRanges* ranges) {
91  DCHECK(ranges->HasValidChecksum());
92  scoped_ptr<const BucketRanges> ranges_deleter;
93
94  if (lock_ == NULL) {
95    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
96    return ranges;
97  }
98
99  base::AutoLock auto_lock(*lock_);
100  if (ranges_ == NULL) {
101    ANNOTATE_LEAKING_OBJECT_PTR(ranges);
102    return ranges;
103  }
104
105  list<const BucketRanges*>* checksum_matching_list;
106  RangesMap::iterator ranges_it = ranges_->find(ranges->checksum());
107  if (ranges_->end() == ranges_it) {
108    // Add a new matching list to map.
109    checksum_matching_list = new list<const BucketRanges*>();
110    ANNOTATE_LEAKING_OBJECT_PTR(checksum_matching_list);
111    (*ranges_)[ranges->checksum()] = checksum_matching_list;
112  } else {
113    checksum_matching_list = ranges_it->second;
114  }
115
116  list<const BucketRanges*>::iterator checksum_matching_list_it;
117  for (checksum_matching_list_it = checksum_matching_list->begin();
118       checksum_matching_list_it != checksum_matching_list->end();
119       ++checksum_matching_list_it) {
120    const BucketRanges* existing_ranges = *checksum_matching_list_it;
121    if (existing_ranges->Equals(ranges)) {
122      if (existing_ranges == ranges) {
123        return ranges;
124      } else {
125        ++number_of_vectors_saved_;
126        saved_ranges_size_ += ranges->size();
127        ranges_deleter.reset(ranges);
128        return existing_ranges;
129      }
130    }
131  }
132  // We haven't found a BucketRanges which has the same ranges. Register the
133  // new BucketRanges.
134  checksum_matching_list->push_front(ranges);
135  return ranges;
136}
137
// static
void StatisticsRecorder::CollectHistogramStats(const std::string& suffix) {
  // Reports the range-caching bookkeeping counters to UMA. The histogram
  // names encode whether this is the first, second, or a later upload, and
  // the counters are reset after each of the first two uploads so every
  // bucket reports only the delta since the previous one.
  // NOTE(review): each UMA_HISTOGRAM_* macro caches a histogram per call
  // site, which presumes |suffix| never varies within a process — confirm
  // with callers.
  static int uma_upload_attempt = 0;
  ++uma_upload_attempt;
  if (uma_upload_attempt == 1) {
    UMA_HISTOGRAM_COUNTS_10000(
        "Histogram.SharedRange.Count.FirstUpload." + suffix,
        number_of_histograms_);
    UMA_HISTOGRAM_COUNTS_10000(
        "Histogram.SharedRange.RangesSaved.FirstUpload." + suffix,
        number_of_vectors_saved_);
    UMA_HISTOGRAM_COUNTS(
        "Histogram.SharedRange.ElementsSaved.FirstUpload." + suffix,
        static_cast<int>(saved_ranges_size_));
    number_of_histograms_ = 0;
    number_of_vectors_saved_ = 0;
    saved_ranges_size_ = 0;
    return;
  }
  if (uma_upload_attempt == 2) {
    UMA_HISTOGRAM_COUNTS_10000(
        "Histogram.SharedRange.Count.SecondUpload." + suffix,
        number_of_histograms_);
    UMA_HISTOGRAM_COUNTS_10000(
        "Histogram.SharedRange.RangesSaved.SecondUpload." + suffix,
        number_of_vectors_saved_);
    UMA_HISTOGRAM_COUNTS(
        "Histogram.SharedRange.ElementsSaved.SecondUpload." + suffix,
        static_cast<int>(saved_ranges_size_));
    number_of_histograms_ = 0;
    number_of_vectors_saved_ = 0;
    saved_ranges_size_ = 0;
    return;
  }
  // Third and later uploads: report cumulative counts without resetting.
  UMA_HISTOGRAM_COUNTS_10000(
      "Histogram.SharedRange.Count.RestOfUploads." + suffix,
      number_of_histograms_);
  UMA_HISTOGRAM_COUNTS_10000(
      "Histogram.SharedRange.RangesSaved.RestOfUploads." + suffix,
      number_of_vectors_saved_);
  UMA_HISTOGRAM_COUNTS(
      "Histogram.SharedRange.ElementsSaved.RestOfUploads." + suffix,
      static_cast<int>(saved_ranges_size_));
}
182
183// static
184void StatisticsRecorder::WriteHTMLGraph(const std::string& query,
185                                        std::string* output) {
186  if (!IsActive())
187    return;
188
189  Histograms snapshot;
190  GetSnapshot(query, &snapshot);
191  for (Histograms::iterator it = snapshot.begin();
192       it != snapshot.end();
193       ++it) {
194    (*it)->WriteHTMLGraph(output);
195    output->append("<br><hr><br>");
196  }
197}
198
199// static
200void StatisticsRecorder::WriteGraph(const std::string& query,
201                                    std::string* output) {
202  if (!IsActive())
203    return;
204  if (query.length())
205    StringAppendF(output, "Collections of histograms for %s\n", query.c_str());
206  else
207    output->append("Collections of all histograms\n");
208
209  Histograms snapshot;
210  GetSnapshot(query, &snapshot);
211  for (Histograms::iterator it = snapshot.begin();
212       it != snapshot.end();
213       ++it) {
214    (*it)->WriteAscii(output);
215    output->append("\n");
216  }
217}
218
219// static
220void StatisticsRecorder::GetHistograms(Histograms* output) {
221  if (lock_ == NULL)
222    return;
223  base::AutoLock auto_lock(*lock_);
224  if (histograms_ == NULL)
225    return;
226
227  for (HistogramMap::iterator it = histograms_->begin();
228       histograms_->end() != it;
229       ++it) {
230    DCHECK_EQ(it->first, it->second->histogram_name());
231    output->push_back(it->second);
232  }
233}
234
235// static
236void StatisticsRecorder::GetBucketRanges(
237    std::vector<const BucketRanges*>* output) {
238  if (lock_ == NULL)
239    return;
240  base::AutoLock auto_lock(*lock_);
241  if (ranges_ == NULL)
242    return;
243
244  for (RangesMap::iterator it = ranges_->begin();
245       ranges_->end() != it;
246       ++it) {
247    list<const BucketRanges*>* ranges_list = it->second;
248    list<const BucketRanges*>::iterator ranges_list_it;
249    for (ranges_list_it = ranges_list->begin();
250         ranges_list_it != ranges_list->end();
251         ++ranges_list_it) {
252      output->push_back(*ranges_list_it);
253    }
254  }
255}
256
257// static
258HistogramBase* StatisticsRecorder::FindHistogram(const std::string& name) {
259  if (lock_ == NULL)
260    return NULL;
261  base::AutoLock auto_lock(*lock_);
262  if (histograms_ == NULL)
263    return NULL;
264
265  HistogramMap::iterator it = histograms_->find(name);
266  if (histograms_->end() == it)
267    return NULL;
268  return it->second;
269}
270
271// private static
272void StatisticsRecorder::GetSnapshot(const std::string& query,
273                                     Histograms* snapshot) {
274  if (lock_ == NULL)
275    return;
276  base::AutoLock auto_lock(*lock_);
277  if (histograms_ == NULL)
278    return;
279
280  for (HistogramMap::iterator it = histograms_->begin();
281       histograms_->end() != it;
282       ++it) {
283    if (it->first.find(query) != std::string::npos)
284      snapshot->push_back(it->second);
285  }
286}
287
// This singleton instance should be started during the single threaded portion
// of main(), and hence it is not thread safe.  It initializes globals to
// provide support for all future calls.
StatisticsRecorder::StatisticsRecorder() {
  DCHECK(!histograms_);
  if (lock_ == NULL) {
    // This will leak on purpose. It's the only way to make sure we won't race
    // against the static uninitialization of the module while one of our
    // static methods relying on the lock get called at an inappropriate time
    // during the termination phase. Since it's a static data member, we will
    // leak one per process, which would be similar to the instance allocated
    // during static initialization and released only on process termination.
    lock_ = new base::Lock;
  }
  base::AutoLock auto_lock(*lock_);
  // Non-NULL histograms_ is exactly what IsActive() reports as "active".
  histograms_ = new HistogramMap;
  ranges_ = new RangesMap;
}
306
StatisticsRecorder::~StatisticsRecorder() {
  DCHECK(histograms_ && ranges_ && lock_);
  // Optionally dump all histograms to the log before tearing down.
  if (dump_on_exit_) {
    string output;
    WriteGraph("", &output);
    DLOG(INFO) << output;
  }

  // Clean up.
  scoped_ptr<HistogramMap> histograms_deleter;
  scoped_ptr<RangesMap> ranges_deleter;
  // We don't delete lock_ on purpose to avoid having to properly protect
  // against it going away after we checked for NULL in the static methods.
  {
    base::AutoLock auto_lock(*lock_);
    // Null out the static pointers under the lock so concurrent static
    // methods observe the recorder as inactive; the map objects themselves
    // are deleted after the lock is released, when the scoped_ptrs above go
    // out of scope.
    histograms_deleter.reset(histograms_);
    ranges_deleter.reset(ranges_);
    histograms_ = NULL;
    ranges_ = NULL;
  }
  // We are going to leak the histograms and the ranges.
}
329
330
// static
StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL;
// static
StatisticsRecorder::RangesMap* StatisticsRecorder::ranges_ = NULL;
// static
base::Lock* StatisticsRecorder::lock_ = NULL;  // Leaked on purpose; see ctor.
// static
bool StatisticsRecorder::dump_on_exit_ = false;
339
340}  // namespace base
341