stats.cc revision 3345a6884c488ff3a535c2c9acdd33d74b37e311
1// Copyright (c) 2010 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "net/disk_cache/stats.h"
6
7#include "base/format_macros.h"
8#include "base/logging.h"
9#include "base/string_util.h"
10#include "base/stringprintf.h"
11#include "net/disk_cache/backend_impl.h"
12
namespace {

// Magic value stored at the start of the on-disk stats record so that
// LoadStats() can tell a valid record from a missing or foreign block.
const int32 kDiskSignature = 0xF01427E0;

// On-disk layout of the statistics record. The field order is the wire
// format; do not reorder.
struct OnDiskStats {
  int32 signature;
  int size;  // sizeof(OnDiskStats) at the time the record was written.
  int data_sizes[disk_cache::Stats::kDataSizesLength];
  int64 counters[disk_cache::Stats::MAX_COUNTER];
};

// Returns the "floor" (as opposed to "ceiling") of log base 2 of number.
// Classic branchy bit-scan: each pass tests the top half of the remaining
// bits and accumulates the shift amounts into the result.
int LogBase2(int32 number) {
  unsigned int value = static_cast<unsigned int>(number);
  const unsigned int mask[] = {0x2, 0xC, 0xF0, 0xFF00, 0xFFFF0000};
  const unsigned int s[] = {1, 2, 4, 8, 16};

  unsigned int result = 0;
  for (int i = 4; i >= 0; i--) {
    if (value & mask[i]) {
      value >>= s[i];
      result |= s[i];
    }
  }
  return static_cast<int>(result);
}

// Human-readable labels for Stats::Counters, used by Stats::GetItems().
// The order must match the Counters enum exactly.
static const char* kCounterNames[] = {
  "Open miss",
  "Open hit",
  "Create miss",
  "Create hit",
  "Resurrect hit",
  "Create error",
  "Trim entry",
  "Doom entry",
  "Doom cache",
  "Invalid entry",
  "Open entries",
  "Max entries",
  "Timer",
  "Read data",
  "Write data",
  "Open rankings",
  "Get rankings",
  "Fatal error",
  "Last report",
  "Last report timer"
};
COMPILE_ASSERT(arraysize(kCounterNames) == disk_cache::Stats::MAX_COUNTER,
               update_the_names);

}  // namespace
66
67namespace disk_cache {
68
69bool LoadStats(BackendImpl* backend, Addr address, OnDiskStats* stats) {
70  MappedFile* file = backend->File(address);
71  if (!file)
72    return false;
73
74  size_t offset = address.start_block() * address.BlockSize() +
75                  kBlockHeaderSize;
76  if (!file->Read(stats, sizeof(*stats), offset))
77    return false;
78
79  if (stats->signature != kDiskSignature)
80    return false;
81
82  // We don't want to discard the whole cache every time we have one extra
83  // counter; just reset them to zero.
84  if (stats->size != sizeof(*stats))
85    memset(stats, 0, sizeof(*stats));
86
87  return true;
88}
89
90bool StoreStats(BackendImpl* backend, Addr address, OnDiskStats* stats) {
91  MappedFile* file = backend->File(address);
92  if (!file)
93    return false;
94
95  size_t offset = address.start_block() * address.BlockSize() +
96                  kBlockHeaderSize;
97  return file->Write(stats, sizeof(*stats), offset);
98}
99
100bool CreateStats(BackendImpl* backend, Addr* address, OnDiskStats* stats) {
101  if (!backend->CreateBlock(BLOCK_256, 2, address))
102    return false;
103
104  // If we have more than 512 bytes of counters, change kDiskSignature so we
105  // don't overwrite something else (LoadStats must fail).
106  COMPILE_ASSERT(sizeof(*stats) <= 256 * 2, use_more_blocks);
107  memset(stats, 0, sizeof(*stats));
108  stats->signature = kDiskSignature;
109  stats->size = sizeof(*stats);
110
111  return StoreStats(backend, *address, stats);
112}
113
// Loads (or creates) the on-disk statistics record referenced by
// |*storage_addr| and copies its contents into this object. On creation, the
// new block address is written back through |storage_addr|. Returns false on
// any disk failure.
bool Stats::Init(BackendImpl* backend, uint32* storage_addr) {
  OnDiskStats stats;
  Addr address(*storage_addr);
  if (address.is_initialized()) {
    // A record from a previous session exists; reuse it.
    if (!LoadStats(backend, address, &stats))
      return false;
  } else {
    // No record yet: allocate one and report its address to the caller.
    if (!CreateStats(backend, &address, &stats))
      return false;
    *storage_addr = address.value();
  }

  storage_addr_ = address.value();
  backend_ = backend;

  memcpy(data_sizes_, stats.data_sizes, sizeof(data_sizes_));
  memcpy(counters_, stats.counters, sizeof(counters_));

  // It seems impossible to support this histogram for more than one
  // simultaneous objects with the current infrastructure.
  static bool first_time = true;
  if (first_time) {
    first_time = false;
    // ShouldReportAgain() will re-enter this object.
    if (!size_histogram_.get() && backend->cache_type() == net::DISK_CACHE &&
        backend->ShouldReportAgain()) {
      // Stats may be reused when the cache is re-created, but we want only one
      // histogram at any given time.
      size_histogram_ =
          StatsHistogram::StatsHistogramFactoryGet("DiskCache.SizeStats");
      size_histogram_->Init(this);
    }
  }

  return true;
}
150
// A Stats object is unusable until Init() succeeds; until then backend_
// stays NULL and Store() is a no-op.
Stats::Stats() : backend_(NULL) {
}
153
// Intentionally empty: nothing is flushed here; persisting the counters is
// done through explicit Store() calls.
Stats::~Stats() {
}
156
// The array will be filled this way:
//  index      size
//    0       [0, 1024)
//    1    [1024, 2048)
//    2    [2048, 4096)
//    3      [4K, 6K)
//      ...
//   10     [18K, 20K)
//   11     [20K, 24K)
//   12     [24k, 28K)
//      ...
//   15     [36k, 40K)
//   16     [40k, 64K)
//   17     [64K, 128K)
//   18    [128K, 256K)
//      ...
//   23      [4M, 8M)
//   24      [8M, 16M)
//   25     [16M, 32M)
//   26     [32M, 64M)
//   27     [64M, ...)
int Stats::GetStatsBucket(int32 size) {
  if (size < 1024)
    return 0;

  // 10 slots more, until 20K.
  if (size < 20 * 1024)
    return size / 2048 + 1;

  // 5 slots more, from 20K to 40K.
  if (size < 40 * 1024)
    return (size - 20 * 1024) / 4096 + 11;

  // From this point on, use a logarithmic scale.
  // LogBase2(40K) == 15, so the first logarithmic bucket is 16.
  int result =  LogBase2(size) + 1;

  COMPILE_ASSERT(kDataSizesLength > 16, update_the_scale);
  // Clamp everything too large into the final, open-ended bucket.
  if (result >= kDataSizesLength)
    result = kDataSizesLength - 1;

  return result;
}
199
200int Stats::GetBucketRange(size_t i) const {
201  if (i < 2)
202    return static_cast<int>(1024 * i);
203
204  if (i < 12)
205    return static_cast<int>(2048 * (i - 1));
206
207  if (i < 17)
208    return static_cast<int>(4096 * (i - 11)) + 20 * 1024;
209
210  int n = 64 * 1024;
211  if (i > static_cast<size_t>(kDataSizesLength)) {
212    NOTREACHED();
213    i = kDataSizesLength;
214  }
215
216  i -= 17;
217  n <<= i;
218  return n;
219}
220
221void Stats::Snapshot(StatsHistogram::StatsSamples* samples) const {
222  samples->GetCounts()->resize(kDataSizesLength);
223  for (int i = 0; i < kDataSizesLength; i++) {
224    int count = data_sizes_[i];
225    if (count < 0)
226      count = 0;
227    samples->GetCounts()->at(i) = count;
228  }
229}
230
231void Stats::ModifyStorageStats(int32 old_size, int32 new_size) {
232  // We keep a counter of the data block size on an array where each entry is
233  // the adjusted log base 2 of the size. The first entry counts blocks of 256
234  // bytes, the second blocks up to 512 bytes, etc. With 20 entries, the last
235  // one stores entries of more than 64 MB
236  int new_index = GetStatsBucket(new_size);
237  int old_index = GetStatsBucket(old_size);
238
239  if (new_size)
240    data_sizes_[new_index]++;
241
242  if (old_size)
243    data_sizes_[old_index]--;
244}
245
246void Stats::OnEvent(Counters an_event) {
247  DCHECK(an_event > MIN_COUNTER || an_event < MAX_COUNTER);
248  counters_[an_event]++;
249}
250
251void Stats::SetCounter(Counters counter, int64 value) {
252  DCHECK(counter > MIN_COUNTER || counter < MAX_COUNTER);
253  counters_[counter] = value;
254}
255
256int64 Stats::GetCounter(Counters counter) const {
257  DCHECK(counter > MIN_COUNTER || counter < MAX_COUNTER);
258  return counters_[counter];
259}
260
261void Stats::GetItems(StatsItems* items) {
262  std::pair<std::string, std::string> item;
263  for (int i = 0; i < kDataSizesLength; i++) {
264    item.first = base::StringPrintf("Size%02d", i);
265    item.second = base::StringPrintf("0x%08x", data_sizes_[i]);
266    items->push_back(item);
267  }
268
269  for (int i = MIN_COUNTER + 1; i < MAX_COUNTER; i++) {
270    item.first = kCounterNames[i];
271    item.second = base::StringPrintf("0x%" PRIx64, counters_[i]);
272    items->push_back(item);
273  }
274}
275
// Returns the percentage of successful opens (OPEN_HIT) over all open
// attempts (OPEN_HIT + OPEN_MISS) since the ratios were last reset.
int Stats::GetHitRatio() const {
  return GetRatio(OPEN_HIT, OPEN_MISS);
}
279
// Returns the percentage of RESURRECT_HIT events over all creations
// (RESURRECT_HIT + CREATE_HIT) since the ratios were last reset.
int Stats::GetResurrectRatio() const {
  return GetRatio(RESURRECT_HIT, CREATE_HIT);
}
283
284int Stats::GetRatio(Counters hit, Counters miss) const {
285  int64 ratio = GetCounter(hit) * 100;
286  if (!ratio)
287    return 0;
288
289  ratio /= (GetCounter(hit) + GetCounter(miss));
290  return static_cast<int>(ratio);
291}
292
293void Stats::ResetRatios() {
294  SetCounter(OPEN_HIT, 0);
295  SetCounter(OPEN_MISS, 0);
296  SetCounter(RESURRECT_HIT, 0);
297  SetCounter(CREATE_HIT, 0);
298}
299
300int Stats::GetLargeEntriesSize() {
301  int total = 0;
302  // data_sizes_[20] stores values between 512 KB and 1 MB (see comment before
303  // GetStatsBucket()).
304  for (int bucket = 20; bucket < kDataSizesLength; bucket++)
305    total += data_sizes_[bucket] * GetBucketRange(bucket);
306
307  return total;
308}
309
310void Stats::Store() {
311  if (!backend_)
312    return;
313
314  OnDiskStats stats;
315  stats.signature = kDiskSignature;
316  stats.size = sizeof(stats);
317  memcpy(stats.data_sizes, data_sizes_, sizeof(data_sizes_));
318  memcpy(stats.counters, counters_, sizeof(counters_));
319
320  Addr address(storage_addr_);
321  StoreStats(backend_, address, &stats);
322}
323
324}  // namespace disk_cache
325