backend_impl.cc revision 03b57e008b61dfcb1fbad3aea950ae0e001748b0
1// Copyright (c) 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "net/disk_cache/blockfile/backend_impl.h"
6
7#include "base/bind.h"
8#include "base/bind_helpers.h"
9#include "base/file_util.h"
10#include "base/files/file.h"
11#include "base/files/file_path.h"
12#include "base/hash.h"
13#include "base/message_loop/message_loop.h"
14#include "base/metrics/field_trial.h"
15#include "base/metrics/histogram.h"
16#include "base/metrics/stats_counters.h"
17#include "base/rand_util.h"
18#include "base/single_thread_task_runner.h"
19#include "base/strings/string_util.h"
20#include "base/strings/stringprintf.h"
21#include "base/sys_info.h"
22#include "base/threading/thread_restrictions.h"
23#include "base/time/time.h"
24#include "base/timer/timer.h"
25#include "net/base/net_errors.h"
26#include "net/disk_cache/blockfile/disk_format.h"
27#include "net/disk_cache/blockfile/entry_impl.h"
28#include "net/disk_cache/blockfile/errors.h"
29#include "net/disk_cache/blockfile/experiments.h"
30#include "net/disk_cache/blockfile/file.h"
31#include "net/disk_cache/blockfile/histogram_macros.h"
32#include "net/disk_cache/blockfile/webfonts_histogram.h"
33#include "net/disk_cache/cache_util.h"
34
35// Provide a BackendImpl object to macros from histogram_macros.h.
36#define CACHE_UMA_BACKEND_IMPL_OBJ this
37
38using base::Time;
39using base::TimeDelta;
40using base::TimeTicks;
41
42namespace {
43
44const char* kIndexName = "index";
45
46// It seems that ~240 MB corresponds to less than 50k entries for 99% of users.
47// Note that the actual target is to keep the index table load factor under 55%
48// for most users.
49const int k64kEntriesStore = 240 * 1000 * 1000;
50const int kBaseTableLen = 64 * 1024;
51
52// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
53const int kTrimDelay = 10;
54
55int DesiredIndexTableLen(int32 storage_size) {
56  if (storage_size <= k64kEntriesStore)
57    return kBaseTableLen;
58  if (storage_size <= k64kEntriesStore * 2)
59    return kBaseTableLen * 2;
60  if (storage_size <= k64kEntriesStore * 4)
61    return kBaseTableLen * 4;
62  if (storage_size <= k64kEntriesStore * 8)
63    return kBaseTableLen * 8;
64
65  // The biggest storage_size for int32 requires a 4 MB table.
66  return kBaseTableLen * 16;
67}
68
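// Roughly the inverse of DesiredIndexTableLen(): the largest storage size that
// a table of the given length is expected to handle.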
69int MaxStorageSizeForTable(int table_len) {
70  return table_len * (k64kEntriesStore / kBaseTableLen);
71}
72
73size_t GetIndexSize(int table_len) {
74  size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
75  return sizeof(disk_cache::IndexHeader) + table_size;
76}
77
78// ------------------------------------------------------------------------
79
80// Sets group for the current experiment. Returns false if the files should be
81// discarded.
82bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
83  if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
84      header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
85    // Discard current cache.
86    return false;
87  }
88
89  if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
90          "ExperimentControl") {
91    if (cache_created) {
92      header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
93      return true;
94    }
95    return header->experiment == disk_cache::EXPERIMENT_SIMPLE_CONTROL;
96  }
97
98  header->experiment = disk_cache::NO_EXPERIMENT;
99  return true;
100}
101
102// A callback to perform final cleanup on the background thread.
103void FinalCleanupCallback(disk_cache::BackendImpl* backend) {
104  backend->CleanupCache();
105}
106
107}  // namespace
108
109// ------------------------------------------------------------------------
110
111namespace disk_cache {
112
113BackendImpl::BackendImpl(
114    const base::FilePath& path,
115    const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
116    net::NetLog* net_log)
117    : background_queue_(this, cache_thread),
118      path_(path),
119      block_files_(path),
120      mask_(0),
121      max_size_(0),
122      up_ticks_(0),
123      cache_type_(net::DISK_CACHE),
124      uma_report_(0),
125      user_flags_(0),
126      init_(false),
127      restarted_(false),
128      unit_test_(false),
129      read_only_(false),
130      disabled_(false),
131      new_eviction_(false),
132      first_timer_(true),
133      user_load_(false),
134      net_log_(net_log),
135      done_(true, false),
136      ptr_factory_(this) {
137}
138
139BackendImpl::BackendImpl(
140    const base::FilePath& path,
141    uint32 mask,
142    const scoped_refptr<base::SingleThreadTaskRunner>& cache_thread,
143    net::NetLog* net_log)
144    : background_queue_(this, cache_thread),
145      path_(path),
146      block_files_(path),
147      mask_(mask),
148      max_size_(0),
149      up_ticks_(0),
150      cache_type_(net::DISK_CACHE),
151      uma_report_(0),
152      user_flags_(kMask),
153      init_(false),
154      restarted_(false),
155      unit_test_(false),
156      read_only_(false),
157      disabled_(false),
158      new_eviction_(false),
159      first_timer_(true),
160      user_load_(false),
161      net_log_(net_log),
162      done_(true, false),
163      ptr_factory_(this) {
164}
165
166BackendImpl::~BackendImpl() {
167  if (user_flags_ & kNoRandom) {
168    // This is a unit test, so we want to be strict about not leaking entries
169    // and completing all the work.
170    background_queue_.WaitForPendingIO();
171  } else {
172    // This is most likely not a test, so we want to do as little work as
173    // possible at this time, at the price of leaving dirty entries behind.
174    background_queue_.DropPendingIO();
175  }
176
177  if (background_queue_.BackgroundIsCurrentThread()) {
178    // Unit tests may use the same thread for everything.
179    CleanupCache();
180  } else {
181    background_queue_.background_thread()->PostTask(
182        FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
183    // http://crbug.com/74623
184    base::ThreadRestrictions::ScopedAllowWait allow_wait;
185    done_.Wait();
186  }
187}
188
189int BackendImpl::Init(const CompletionCallback& callback) {
190  background_queue_.Init(callback);
191  return net::ERR_IO_PENDING;
192}
193
194int BackendImpl::SyncInit() {
195#if defined(NET_BUILD_STRESS_CACHE)
196  // Start evictions right away.
197  up_ticks_ = kTrimDelay * 2;
198#endif
199  DCHECK(!init_);
200  if (init_)
201    return net::ERR_FAILED;
202
203  bool create_files = false;
204  if (!InitBackingStore(&create_files)) {
205    ReportError(ERR_STORAGE_ERROR);
206    return net::ERR_FAILED;
207  }
208
209  num_refs_ = num_pending_io_ = max_refs_ = 0;
210  entry_count_ = byte_count_ = 0;
211
212  bool should_create_timer = false;
213  if (!restarted_) {
214    buffer_bytes_ = 0;
215    trace_object_ = TraceObject::GetTraceObject();
216    should_create_timer = true;
217  }
218
219  init_ = true;
220  Trace("Init");
221
222  if (data_->header.experiment != NO_EXPERIMENT &&
223      cache_type_ != net::DISK_CACHE) {
224    // No experiment for other caches.
225    return net::ERR_FAILED;
226  }
227
228  if (!(user_flags_ & kNoRandom)) {
229    // The unit test controls directly what to test.
230    new_eviction_ = (cache_type_ == net::DISK_CACHE);
231  }
232
233  if (!CheckIndex()) {
234    ReportError(ERR_INIT_FAILED);
235    return net::ERR_FAILED;
236  }
237
238  if (!restarted_ && (create_files || !data_->header.num_entries))
239    ReportError(ERR_CACHE_CREATED);
240
241  if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
242      !InitExperiment(&data_->header, create_files)) {
243    return net::ERR_FAILED;
244  }
245
246  // We don't care if the value overflows. The only thing we care about is that
247  // the id cannot be zero, because that value is used as "not dirty".
248  // Increasing the value once per second gives us many years before we start
249  // having collisions.
250  data_->header.this_id++;
251  if (!data_->header.this_id)
252    data_->header.this_id++;
253
254  bool previous_crash = (data_->header.crash != 0);
255  data_->header.crash = 1;
256
257  if (!block_files_.Init(create_files))
258    return net::ERR_FAILED;
259
260  // We want to minimize the changes to the cache for an AppCache.
261  if (cache_type() == net::APP_CACHE) {
262    DCHECK(!new_eviction_);
263    read_only_ = true;
264  } else if (cache_type() == net::SHADER_CACHE) {
265    DCHECK(!new_eviction_);
266  }
267
268  eviction_.Init(this);
269
270  // stats_ and rankings_ may end up calling back to us so we better be enabled.
271  disabled_ = false;
272  if (!InitStats())
273    return net::ERR_FAILED;
274
275  disabled_ = !rankings_.Init(this, new_eviction_);
276
277#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
278  trace_object_->EnableTracing(false);
279  int sc = SelfCheck();
280  if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
281    NOTREACHED();
282  trace_object_->EnableTracing(true);
283#endif
284
285  if (previous_crash) {
286    ReportError(ERR_PREVIOUS_CRASH);
287  } else if (!restarted_) {
288    ReportError(ERR_NO_ERROR);
289  }
290
291  FlushIndex();
292
293  if (!disabled_ && should_create_timer) {
294    // Create a recurring timer that fires every 30 seconds (every second in
295    // unit tests).
295    int timer_delay = unit_test_ ? 1000 : 30000;
296    timer_.reset(new base::RepeatingTimer<BackendImpl>());
297    timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
298                  &BackendImpl::OnStatsTimer);
299  }
300
301  return disabled_ ? net::ERR_FAILED : net::OK;
302}
303
304void BackendImpl::CleanupCache() {
305  Trace("Backend Cleanup");
306  eviction_.Stop();
307  timer_.reset();
308
309  if (init_) {
310    StoreStats();
311    if (data_)
312      data_->header.crash = 0;
313
314    if (user_flags_ & kNoRandom) {
315      // This is a net_unittest, verify that we are not 'leaking' entries.
316      File::WaitForPendingIO(&num_pending_io_);
317      DCHECK(!num_refs_);
318    } else {
319      File::DropPendingIO();
320    }
321  }
322  block_files_.CloseFiles();
323  FlushIndex();
324  index_ = NULL;
325  ptr_factory_.InvalidateWeakPtrs();
326  done_.Signal();
327}
328
329// ------------------------------------------------------------------------
330
331int BackendImpl::OpenPrevEntry(void** iter, Entry** prev_entry,
332                               const CompletionCallback& callback) {
333  DCHECK(!callback.is_null());
334  background_queue_.OpenPrevEntry(iter, prev_entry, callback);
335  return net::ERR_IO_PENDING;
336}
337
338int BackendImpl::SyncOpenEntry(const std::string& key, Entry** entry) {
339  DCHECK(entry);
340  *entry = OpenEntryImpl(key);
341  return (*entry) ? net::OK : net::ERR_FAILED;
342}
343
344int BackendImpl::SyncCreateEntry(const std::string& key, Entry** entry) {
345  DCHECK(entry);
346  *entry = CreateEntryImpl(key);
347  return (*entry) ? net::OK : net::ERR_FAILED;
348}
349
350int BackendImpl::SyncDoomEntry(const std::string& key) {
351  if (disabled_)
352    return net::ERR_FAILED;
353
354  EntryImpl* entry = OpenEntryImpl(key);
355  if (!entry)
356    return net::ERR_FAILED;
357
358  entry->DoomImpl();
359  entry->Release();
360  return net::OK;
361}
362
363int BackendImpl::SyncDoomAllEntries() {
364  // This is not really an error, but it is an interesting condition.
365  ReportError(ERR_CACHE_DOOMED);
366  stats_.OnEvent(Stats::DOOM_CACHE);
367  if (!num_refs_) {
368    RestartCache(false);
369    return disabled_ ? net::ERR_FAILED : net::OK;
370  } else {
371    if (disabled_)
372      return net::ERR_FAILED;
373
374    eviction_.TrimCache(true);
375    return net::OK;
376  }
377}
378
379int BackendImpl::SyncDoomEntriesBetween(const base::Time initial_time,
380                                        const base::Time end_time) {
381  DCHECK_NE(net::APP_CACHE, cache_type_);
382  if (end_time.is_null())
383    return SyncDoomEntriesSince(initial_time);
384
385  DCHECK(end_time >= initial_time);
386
387  if (disabled_)
388    return net::ERR_FAILED;
389
390  EntryImpl* node;
391  void* iter = NULL;
392  EntryImpl* next = OpenNextEntryImpl(&iter);
393  if (!next)
394    return net::OK;
395
396  while (next) {
397    node = next;
398    next = OpenNextEntryImpl(&iter);
399
400    if (node->GetLastUsed() >= initial_time &&
401        node->GetLastUsed() < end_time) {
402      node->DoomImpl();
403    } else if (node->GetLastUsed() < initial_time) {
404      if (next)
405        next->Release();
406      next = NULL;
407      SyncEndEnumeration(iter);
408    }
409
410    node->Release();
411  }
412
413  return net::OK;
414}
415
416// We use OpenNextEntryImpl to retrieve elements from the cache, until we get
417// entries that are too old.
418int BackendImpl::SyncDoomEntriesSince(const base::Time initial_time) {
419  DCHECK_NE(net::APP_CACHE, cache_type_);
420  if (disabled_)
421    return net::ERR_FAILED;
422
423  stats_.OnEvent(Stats::DOOM_RECENT);
424  for (;;) {
425    void* iter = NULL;
426    EntryImpl* entry = OpenNextEntryImpl(&iter);
427    if (!entry)
428      return net::OK;
429
430    if (initial_time > entry->GetLastUsed()) {
431      entry->Release();
432      SyncEndEnumeration(iter);
433      return net::OK;
434    }
435
436    entry->DoomImpl();
437    entry->Release();
438    SyncEndEnumeration(iter);  // Dooming the entry invalidates the iterator.
439  }
440}
441
442int BackendImpl::SyncOpenNextEntry(void** iter, Entry** next_entry) {
443  *next_entry = OpenNextEntryImpl(iter);
444  return (*next_entry) ? net::OK : net::ERR_FAILED;
445}
446
447int BackendImpl::SyncOpenPrevEntry(void** iter, Entry** prev_entry) {
448  *prev_entry = OpenPrevEntryImpl(iter);
449  return (*prev_entry) ? net::OK : net::ERR_FAILED;
450}
451
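// Ends an enumeration by deleting the iterator: transferring it into a
// scoped_ptr destroys it as soon as the pointer goes out of scope.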
452void BackendImpl::SyncEndEnumeration(void* iter) {
453  scoped_ptr<Rankings::Iterator> iterator(
454      reinterpret_cast<Rankings::Iterator*>(iter));
455}
456
457void BackendImpl::SyncOnExternalCacheHit(const std::string& key) {
458  if (disabled_)
459    return;
460
461  uint32 hash = base::Hash(key);
462  bool error;
463  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
464  if (cache_entry) {
465    if (ENTRY_NORMAL == cache_entry->entry()->Data()->state) {
466      UpdateRank(cache_entry, cache_type() == net::SHADER_CACHE);
467    }
468    cache_entry->Release();
469  }
470}
471
472EntryImpl* BackendImpl::OpenEntryImpl(const std::string& key) {
473  if (disabled_)
474    return NULL;
475
476  TimeTicks start = TimeTicks::Now();
477  uint32 hash = base::Hash(key);
478  Trace("Open hash 0x%x", hash);
479
480  bool error;
481  EntryImpl* cache_entry = MatchEntry(key, hash, false, Addr(), &error);
482  if (cache_entry && ENTRY_NORMAL != cache_entry->entry()->Data()->state) {
483    // The entry was already evicted.
484    cache_entry->Release();
485    cache_entry = NULL;
486    web_fonts_histogram::RecordEvictedEntry(key);
487  } else if (!cache_entry) {
488    web_fonts_histogram::RecordCacheMiss(key);
489  }
490
491  int current_size = data_->header.num_bytes / (1024 * 1024);
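  // The stats timer ticks every 30 seconds (see SyncInit()), so 120 ticks
  // correspond to one hour.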
492  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
493  int64 no_use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
494  int64 use_hours = total_hours - no_use_hours;
495
496  if (!cache_entry) {
497    CACHE_UMA(AGE_MS, "OpenTime.Miss", 0, start);
498    CACHE_UMA(COUNTS_10000, "AllOpenBySize.Miss", 0, current_size);
499    CACHE_UMA(HOURS, "AllOpenByTotalHours.Miss", 0, total_hours);
500    CACHE_UMA(HOURS, "AllOpenByUseHours.Miss", 0, use_hours);
501    stats_.OnEvent(Stats::OPEN_MISS);
502    return NULL;
503  }
504
505  eviction_.OnOpenEntry(cache_entry);
506  entry_count_++;
507
508  Trace("Open hash 0x%x end: 0x%x", hash,
509        cache_entry->entry()->address().value());
510  CACHE_UMA(AGE_MS, "OpenTime", 0, start);
511  CACHE_UMA(COUNTS_10000, "AllOpenBySize.Hit", 0, current_size);
512  CACHE_UMA(HOURS, "AllOpenByTotalHours.Hit", 0, total_hours);
513  CACHE_UMA(HOURS, "AllOpenByUseHours.Hit", 0, use_hours);
514  stats_.OnEvent(Stats::OPEN_HIT);
515  web_fonts_histogram::RecordCacheHit(cache_entry);
516  SIMPLE_STATS_COUNTER("disk_cache.hit");
517  return cache_entry;
518}
519
520EntryImpl* BackendImpl::CreateEntryImpl(const std::string& key) {
521  if (disabled_ || key.empty())
522    return NULL;
523
524  TimeTicks start = TimeTicks::Now();
525  uint32 hash = base::Hash(key);
526  Trace("Create hash 0x%x", hash);
527
528  scoped_refptr<EntryImpl> parent;
529  Addr entry_address(data_->table[hash & mask_]);
530  if (entry_address.is_initialized()) {
531    // We have an entry already. It could be the one we are looking for, or just
532    // a hash conflict.
533    bool error;
534    EntryImpl* old_entry = MatchEntry(key, hash, false, Addr(), &error);
535    if (old_entry)
536      return ResurrectEntry(old_entry);
537
538    EntryImpl* parent_entry = MatchEntry(key, hash, true, Addr(), &error);
539    DCHECK(!error);
540    if (parent_entry) {
541      parent.swap(&parent_entry);
542    } else if (data_->table[hash & mask_]) {
543      // We should have corrected the problem.
544      NOTREACHED();
545      return NULL;
546    }
547  }
548
549  // The general flow is to allocate disk space and initialize the entry data,
550// followed by saving that to disk, then linking the entry through the index
551  // and finally through the lists. If there is a crash in this process, we may
552  // end up with:
553  // a. Used, unreferenced empty blocks on disk (basically just garbage).
554  // b. Used, unreferenced but meaningful data on disk (more garbage).
555  // c. A fully formed entry, reachable only through the index.
556  // d. A fully formed entry, also reachable through the lists, but still dirty.
557  //
558  // Anything after (b) can be automatically cleaned up. We may consider saving
559  // the current operation (as we do while manipulating the lists) so that we
560  // can detect and clean up (a) and (b).
561
562  int num_blocks = EntryImpl::NumBlocksForEntry(key.size());
563  if (!block_files_.CreateBlock(BLOCK_256, num_blocks, &entry_address)) {
564    LOG(ERROR) << "Create entry failed " << key.c_str();
565    stats_.OnEvent(Stats::CREATE_ERROR);
566    return NULL;
567  }
568
569  Addr node_address(0);
570  if (!block_files_.CreateBlock(RANKINGS, 1, &node_address)) {
571    block_files_.DeleteBlock(entry_address, false);
572    LOG(ERROR) << "Create entry failed " << key.c_str();
573    stats_.OnEvent(Stats::CREATE_ERROR);
574    return NULL;
575  }
576
577  scoped_refptr<EntryImpl> cache_entry(
578      new EntryImpl(this, entry_address, false));
579  IncreaseNumRefs();
580
581  if (!cache_entry->CreateEntry(node_address, key, hash)) {
582    block_files_.DeleteBlock(entry_address, false);
583    block_files_.DeleteBlock(node_address, false);
584    LOG(ERROR) << "Create entry failed " << key.c_str();
585    stats_.OnEvent(Stats::CREATE_ERROR);
586    return NULL;
587  }
588
589  cache_entry->BeginLogging(net_log_, true);
590
591  // We are not failing the operation; let's add this to the map.
592  open_entries_[entry_address.value()] = cache_entry.get();
593
594  // Save the entry.
595  cache_entry->entry()->Store();
596  cache_entry->rankings()->Store();
597  IncreaseNumEntries();
598  entry_count_++;
599
600  // Link this entry through the index.
601  if (parent.get()) {
602    parent->SetNextAddress(entry_address);
603  } else {
604    data_->table[hash & mask_] = entry_address.value();
605  }
606
607  // Link this entry through the lists.
608  eviction_.OnCreateEntry(cache_entry.get());
609
610  CACHE_UMA(AGE_MS, "CreateTime", 0, start);
611  stats_.OnEvent(Stats::CREATE_HIT);
612  SIMPLE_STATS_COUNTER("disk_cache.miss");
613  Trace("create entry hit ");
614  FlushIndex();
615  cache_entry->AddRef();
616  return cache_entry.get();
617}
618
619EntryImpl* BackendImpl::OpenNextEntryImpl(void** iter) {
620  return OpenFollowingEntry(true, iter);
621}
622
623EntryImpl* BackendImpl::OpenPrevEntryImpl(void** iter) {
624  return OpenFollowingEntry(false, iter);
625}
626
627bool BackendImpl::SetMaxSize(int max_bytes) {
628  COMPILE_ASSERT(sizeof(max_bytes) == sizeof(max_size_), unsupported_int_model);
629  if (max_bytes < 0)
630    return false;
631
632  // Zero size means use the default.
633  if (!max_bytes)
634    return true;
635
636  // Avoid a DCHECK later on.
637  if (max_bytes >= kint32max - kint32max / 10)
638    max_bytes = kint32max - kint32max / 10 - 1;
639
640  user_flags_ |= kMaxSize;
641  max_size_ = max_bytes;
642  return true;
643}
644
645void BackendImpl::SetType(net::CacheType type) {
646  DCHECK_NE(net::MEMORY_CACHE, type);
647  cache_type_ = type;
648}
649
650base::FilePath BackendImpl::GetFileName(Addr address) const {
651  if (!address.is_separate_file() || !address.is_initialized()) {
652    NOTREACHED();
653    return base::FilePath();
654  }
655
656  std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
657  return path_.AppendASCII(tmp);
658}
659
660MappedFile* BackendImpl::File(Addr address) {
661  if (disabled_)
662    return NULL;
663  return block_files_.GetFile(address);
664}
665
666base::WeakPtr<InFlightBackendIO> BackendImpl::GetBackgroundQueue() {
667  return background_queue_.GetWeakPtr();
668}
669
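// Creates a brand new external storage file (named f_xxxxxx), probing
// successive file numbers until one can be created exclusively.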
670bool BackendImpl::CreateExternalFile(Addr* address) {
671  int file_number = data_->header.last_file + 1;
672  Addr file_address(0);
673  bool success = false;
674  for (int i = 0; i < 0x0fffffff; i++, file_number++) {
675    if (!file_address.SetFileNumber(file_number)) {
676      file_number = 1;
677      continue;
678    }
679    base::FilePath name = GetFileName(file_address);
680    int flags = base::File::FLAG_READ | base::File::FLAG_WRITE |
681                base::File::FLAG_CREATE | base::File::FLAG_EXCLUSIVE_WRITE;
682    base::File file(name, flags);
683    if (!file.IsValid()) {
684      base::File::Error error = file.error_details();
685      if (error != base::File::FILE_ERROR_EXISTS) {
686        LOG(ERROR) << "Unable to create file: " << error;
687        return false;
688      }
689      continue;
690    }
691
692    success = true;
693    break;
694  }
695
696  DCHECK(success);
697  if (!success)
698    return false;
699
700  data_->header.last_file = file_number;
701  address->set_value(file_address.value());
702  return true;
703}
704
705bool BackendImpl::CreateBlock(FileType block_type, int block_count,
706                             Addr* block_address) {
707  return block_files_.CreateBlock(block_type, block_count, block_address);
708}
709
710void BackendImpl::DeleteBlock(Addr block_address, bool deep) {
711  block_files_.DeleteBlock(block_address, deep);
712}
713
714LruData* BackendImpl::GetLruData() {
715  return &data_->header.lru;
716}
717
718void BackendImpl::UpdateRank(EntryImpl* entry, bool modified) {
719  if (read_only_ || (!modified && cache_type() == net::SHADER_CACHE))
720    return;
721  eviction_.UpdateRank(entry, modified);
722}
723
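// Makes sure that an entry found through its rankings node is reachable from
// the index table, inserting it into its bucket when that bucket is empty.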
724void BackendImpl::RecoveredEntry(CacheRankingsBlock* rankings) {
725  Addr address(rankings->Data()->contents);
726  EntryImpl* cache_entry = NULL;
727  if (NewEntry(address, &cache_entry)) {
728    STRESS_NOTREACHED();
729    return;
730  }
731
732  uint32 hash = cache_entry->GetHash();
733  cache_entry->Release();
734
735  // Anything on the table means that this entry is there.
736  if (data_->table[hash & mask_])
737    return;
738
739  data_->table[hash & mask_] = address.value();
740  FlushIndex();
741}
742
743void BackendImpl::InternalDoomEntry(EntryImpl* entry) {
744  uint32 hash = entry->GetHash();
745  std::string key = entry->GetKey();
746  Addr entry_addr = entry->entry()->address();
747  bool error;
748  EntryImpl* parent_entry = MatchEntry(key, hash, true, entry_addr, &error);
749  CacheAddr child(entry->GetNextAddress());
750
751  Trace("Doom entry 0x%p", entry);
752
753  if (!entry->doomed()) {
754    // We may have doomed this entry from within MatchEntry.
755    eviction_.OnDoomEntry(entry);
756    entry->InternalDoom();
757    if (!new_eviction_) {
758      DecreaseNumEntries();
759    }
760    stats_.OnEvent(Stats::DOOM_ENTRY);
761  }
762
763  if (parent_entry) {
764    parent_entry->SetNextAddress(Addr(child));
765    parent_entry->Release();
766  } else if (!error) {
767    data_->table[hash & mask_] = child;
768  }
769
770  FlushIndex();
771}
772
773#if defined(NET_BUILD_STRESS_CACHE)
774
775CacheAddr BackendImpl::GetNextAddr(Addr address) {
776  EntriesMap::iterator it = open_entries_.find(address.value());
777  if (it != open_entries_.end()) {
778    EntryImpl* this_entry = it->second;
779    return this_entry->GetNextAddress();
780  }
781  DCHECK(block_files_.IsValid(address));
782  DCHECK(!address.is_separate_file() && address.file_type() == BLOCK_256);
783
784  CacheEntryBlock entry(File(address), address);
785  CHECK(entry.Load());
786  return entry.Data()->next;
787}
788
789void BackendImpl::NotLinked(EntryImpl* entry) {
790  Addr entry_addr = entry->entry()->address();
791  uint32 i = entry->GetHash() & mask_;
792  Addr address(data_->table[i]);
793  if (!address.is_initialized())
794    return;
795
796  for (;;) {
797    DCHECK(entry_addr.value() != address.value());
798    address.set_value(GetNextAddr(address));
799    if (!address.is_initialized())
800      break;
801  }
802}
803#endif  // NET_BUILD_STRESS_CACHE
804
805// An entry may be linked on the DELETED list for a while after being doomed.
806// This function is called when we want to remove it.
807void BackendImpl::RemoveEntry(EntryImpl* entry) {
808#if defined(NET_BUILD_STRESS_CACHE)
809  NotLinked(entry);
810#endif
811  if (!new_eviction_)
812    return;
813
814  DCHECK_NE(ENTRY_NORMAL, entry->entry()->Data()->state);
815
816  Trace("Remove entry 0x%p", entry);
817  eviction_.OnDestroyEntry(entry);
818  DecreaseNumEntries();
819}
820
821void BackendImpl::OnEntryDestroyBegin(Addr address) {
822  EntriesMap::iterator it = open_entries_.find(address.value());
823  if (it != open_entries_.end())
824    open_entries_.erase(it);
825}
826
827void BackendImpl::OnEntryDestroyEnd() {
828  DecreaseNumRefs();
829  if (data_->header.num_bytes > max_size_ && !read_only_ &&
830      (up_ticks_ > kTrimDelay || user_flags_ & kNoRandom))
831    eviction_.TrimCache(false);
832}
833
834EntryImpl* BackendImpl::GetOpenEntry(CacheRankingsBlock* rankings) const {
835  DCHECK(rankings->HasData());
836  EntriesMap::const_iterator it =
837      open_entries_.find(rankings->Data()->contents);
838  if (it != open_entries_.end()) {
839    // We have this entry in memory.
840    return it->second;
841  }
842
843  return NULL;
844}
845
846int32 BackendImpl::GetCurrentEntryId() const {
847  return data_->header.this_id;
848}
849
850int BackendImpl::MaxFileSize() const {
851  return cache_type() == net::PNACL_CACHE ? max_size_ : max_size_ / 8;
852}
853
854void BackendImpl::ModifyStorageSize(int32 old_size, int32 new_size) {
855  if (disabled_ || old_size == new_size)
856    return;
857  if (old_size > new_size)
858    SubstractStorageSize(old_size - new_size);
859  else
860    AddStorageSize(new_size - old_size);
861
862  FlushIndex();
863
864  // Update the usage statistics.
865  stats_.ModifyStorageStats(old_size, new_size);
866}
867
868void BackendImpl::TooMuchStorageRequested(int32 size) {
869  stats_.ModifyStorageStats(0, size);
870}
871
872bool BackendImpl::IsAllocAllowed(int current_size, int new_size) {
873  DCHECK_GT(new_size, current_size);
874  if (user_flags_ & kNoBuffering)
875    return false;
876
877  int to_add = new_size - current_size;
878  if (buffer_bytes_ + to_add > MaxBuffersSize())
879    return false;
880
881  buffer_bytes_ += to_add;
882  CACHE_UMA(COUNTS_50000, "BufferBytes", 0, buffer_bytes_ / 1024);
883  return true;
884}
885
886void BackendImpl::BufferDeleted(int size) {
887  buffer_bytes_ -= size;
888  DCHECK_GE(size, 0);
889}
890
891bool BackendImpl::IsLoaded() const {
892  CACHE_UMA(COUNTS, "PendingIO", 0, num_pending_io_);
893  if (user_flags_ & kNoLoadProtection)
894    return false;
895
896  return (num_pending_io_ > 5 || user_load_);
897}
898
899std::string BackendImpl::HistogramName(const char* name, int experiment) const {
900  if (!experiment)
901    return base::StringPrintf("DiskCache.%d.%s", cache_type_, name);
902  return base::StringPrintf("DiskCache.%d.%s_%d", cache_type_,
903                            name, experiment);
904}
905
906base::WeakPtr<BackendImpl> BackendImpl::GetWeakPtr() {
907  return ptr_factory_.GetWeakPtr();
908}
909
910// We want to remove biases from some histograms so we only send data once per
911// week.
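// uma_report_ caches the decision for this session: 1 means "do not report
// again", 2 means "report".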
912bool BackendImpl::ShouldReportAgain() {
913  if (uma_report_)
914    return uma_report_ == 2;
915
916  uma_report_++;
917  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
918  Time last_time = Time::FromInternalValue(last_report);
919  if (!last_report || (Time::Now() - last_time).InDays() >= 7) {
920    stats_.SetCounter(Stats::LAST_REPORT, Time::Now().ToInternalValue());
921    uma_report_++;
922    return true;
923  }
924  return false;
925}
926
927void BackendImpl::FirstEviction() {
928  DCHECK(data_->header.create_time);
929  if (!GetEntryCount())
930    return;  // This is just for unit tests.
931
932  Time create_time = Time::FromInternalValue(data_->header.create_time);
933  CACHE_UMA(AGE, "FillupAge", 0, create_time);
934
935  int64 use_time = stats_.GetCounter(Stats::TIMER);
936  CACHE_UMA(HOURS, "FillupTime", 0, static_cast<int>(use_time / 120));
937  CACHE_UMA(PERCENTAGE, "FirstHitRatio", 0, stats_.GetHitRatio());
938
939  if (!use_time)
940    use_time = 1;
941  CACHE_UMA(COUNTS_10000, "FirstEntryAccessRate", 0,
942            static_cast<int>(data_->header.num_entries / use_time));
943  CACHE_UMA(COUNTS, "FirstByteIORate", 0,
944            static_cast<int>((data_->header.num_bytes / 1024) / use_time));
945
946  int avg_size = data_->header.num_bytes / GetEntryCount();
947  CACHE_UMA(COUNTS, "FirstEntrySize", 0, avg_size);
948
949  int large_entries_bytes = stats_.GetLargeEntriesSize();
950  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
951  CACHE_UMA(PERCENTAGE, "FirstLargeEntriesRatio", 0, large_ratio);
952
953  if (new_eviction_) {
954    CACHE_UMA(PERCENTAGE, "FirstResurrectRatio", 0, stats_.GetResurrectRatio());
955    CACHE_UMA(PERCENTAGE, "FirstNoUseRatio", 0,
956              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
957    CACHE_UMA(PERCENTAGE, "FirstLowUseRatio", 0,
958              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
959    CACHE_UMA(PERCENTAGE, "FirstHighUseRatio", 0,
960              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
961  }
962
963  stats_.ResetRatios();
964}
965
966void BackendImpl::CriticalError(int error) {
967  STRESS_NOTREACHED();
968  LOG(ERROR) << "Critical error found " << error;
969  if (disabled_)
970    return;
971
972  stats_.OnEvent(Stats::FATAL_ERROR);
973  LogStats();
974  ReportError(error);
975
976  // Setting the index table length to an invalid value will force re-creation
977  // of the cache files.
978  data_->header.table_len = 1;
979  disabled_ = true;
980
981  if (!num_refs_)
982    base::MessageLoop::current()->PostTask(
983        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
984}
985
986void BackendImpl::ReportError(int error) {
987  STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
988                error == ERR_CACHE_CREATED);
989
990  // We transmit positive numbers, instead of direct error codes.
991  DCHECK_LE(error, 0);
992  CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
993}
994
995void BackendImpl::OnEvent(Stats::Counters an_event) {
996  stats_.OnEvent(an_event);
997}
998
999void BackendImpl::OnRead(int32 bytes) {
1000  DCHECK_GE(bytes, 0);
1001  byte_count_ += bytes;
1002  if (byte_count_ < 0)
1003    byte_count_ = kint32max;
1004}
1005
1006void BackendImpl::OnWrite(int32 bytes) {
1007  // We use the same implementation as OnRead... just log the number of bytes.
1008  OnRead(bytes);
1009}
1010
1011void BackendImpl::OnStatsTimer() {
1012  if (disabled_)
1013    return;
1014
1015  stats_.OnEvent(Stats::TIMER);
1016  int64 time = stats_.GetCounter(Stats::TIMER);
1017  int64 current = stats_.GetCounter(Stats::OPEN_ENTRIES);
1018
1019  // OPEN_ENTRIES is a sampled average of the number of open entries, avoiding
1020  // the bias towards 0.
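  // Nudge the stored value 1/50th of the way (and by at least one unit) toward
  // the current number of references, so the counter behaves as a running
  // average.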
1021  if (num_refs_ && (current != num_refs_)) {
1022    int64 diff = (num_refs_ - current) / 50;
1023    if (!diff)
1024      diff = num_refs_ > current ? 1 : -1;
1025    current = current + diff;
1026    stats_.SetCounter(Stats::OPEN_ENTRIES, current);
1027    stats_.SetCounter(Stats::MAX_ENTRIES, max_refs_);
1028  }
1029
1030  CACHE_UMA(COUNTS, "NumberOfReferences", 0, num_refs_);
1031
1032  CACHE_UMA(COUNTS_10000, "EntryAccessRate", 0, entry_count_);
1033  CACHE_UMA(COUNTS, "ByteIORate", 0, byte_count_ / 1024);
1034
1035  // These values cover about 99.5% of the population (Oct 2011).
1036  user_load_ = (entry_count_ > 300 || byte_count_ > 7 * 1024 * 1024);
1037  entry_count_ = 0;
1038  byte_count_ = 0;
1039  up_ticks_++;
1040
1041  if (!data_)
1042    first_timer_ = false;
1043  if (first_timer_) {
1044    first_timer_ = false;
1045    if (ShouldReportAgain())
1046      ReportStats();
1047  }
1048
1049  // Save stats to disk at 5 min intervals.
1050  if (time % 10 == 0)
1051    StoreStats();
1052}
1053
1054void BackendImpl::IncrementIoCount() {
1055  num_pending_io_++;
1056}
1057
1058void BackendImpl::DecrementIoCount() {
1059  num_pending_io_--;
1060}
1061
1062void BackendImpl::SetUnitTestMode() {
1063  user_flags_ |= kUnitTestMode;
1064  unit_test_ = true;
1065}
1066
1067void BackendImpl::SetUpgradeMode() {
1068  user_flags_ |= kUpgradeMode;
1069  read_only_ = true;
1070}
1071
1072void BackendImpl::SetNewEviction() {
1073  user_flags_ |= kNewEviction;
1074  new_eviction_ = true;
1075}
1076
1077void BackendImpl::SetFlags(uint32 flags) {
1078  user_flags_ |= flags;
1079}
1080
1081void BackendImpl::ClearRefCountForTest() {
1082  num_refs_ = 0;
1083}
1084
1085int BackendImpl::FlushQueueForTest(const CompletionCallback& callback) {
1086  background_queue_.FlushQueue(callback);
1087  return net::ERR_IO_PENDING;
1088}
1089
1090int BackendImpl::RunTaskForTest(const base::Closure& task,
1091                                const CompletionCallback& callback) {
1092  background_queue_.RunTask(task, callback);
1093  return net::ERR_IO_PENDING;
1094}
1095
1096void BackendImpl::TrimForTest(bool empty) {
1097  eviction_.SetTestMode();
1098  eviction_.TrimCache(empty);
1099}
1100
1101void BackendImpl::TrimDeletedListForTest(bool empty) {
1102  eviction_.SetTestMode();
1103  eviction_.TrimDeletedList(empty);
1104}
1105
1106base::RepeatingTimer<BackendImpl>* BackendImpl::GetTimerForTest() {
1107  return timer_.get();
1108}
1109
1110int BackendImpl::SelfCheck() {
1111  if (!init_) {
1112    LOG(ERROR) << "Init failed";
1113    return ERR_INIT_FAILED;
1114  }
1115
1116  int num_entries = rankings_.SelfCheck();
1117  if (num_entries < 0) {
1118    LOG(ERROR) << "Invalid rankings list, error " << num_entries;
1119#if !defined(NET_BUILD_STRESS_CACHE)
1120    return num_entries;
1121#endif
1122  }
1123
1124  if (num_entries != data_->header.num_entries) {
1125    LOG(ERROR) << "Number of entries mismatch";
1126#if !defined(NET_BUILD_STRESS_CACHE)
1127    return ERR_NUM_ENTRIES_MISMATCH;
1128#endif
1129  }
1130
1131  return CheckAllEntries();
1132}
1133
1134void BackendImpl::FlushIndex() {
1135  if (index_.get() && !disabled_)
1136    index_->Flush();
1137}
1138
1139// ------------------------------------------------------------------------
1140
1141net::CacheType BackendImpl::GetCacheType() const {
1142  return cache_type_;
1143}
1144
1145int32 BackendImpl::GetEntryCount() const {
1146  if (!index_.get() || disabled_)
1147    return 0;
1148  // num_entries includes entries already evicted.
1149  int32 not_deleted = data_->header.num_entries -
1150                      data_->header.lru.sizes[Rankings::DELETED];
1151
1152  if (not_deleted < 0) {
1153    NOTREACHED();
1154    not_deleted = 0;
1155  }
1156
1157  return not_deleted;
1158}
1159
1160int BackendImpl::OpenEntry(const std::string& key, Entry** entry,
1161                           const CompletionCallback& callback) {
1162  DCHECK(!callback.is_null());
1163  background_queue_.OpenEntry(key, entry, callback);
1164  return net::ERR_IO_PENDING;
1165}
1166
1167int BackendImpl::CreateEntry(const std::string& key, Entry** entry,
1168                             const CompletionCallback& callback) {
1169  DCHECK(!callback.is_null());
1170  background_queue_.CreateEntry(key, entry, callback);
1171  return net::ERR_IO_PENDING;
1172}
1173
1174int BackendImpl::DoomEntry(const std::string& key,
1175                           const CompletionCallback& callback) {
1176  DCHECK(!callback.is_null());
1177  background_queue_.DoomEntry(key, callback);
1178  return net::ERR_IO_PENDING;
1179}
1180
1181int BackendImpl::DoomAllEntries(const CompletionCallback& callback) {
1182  DCHECK(!callback.is_null());
1183  background_queue_.DoomAllEntries(callback);
1184  return net::ERR_IO_PENDING;
1185}
1186
1187int BackendImpl::DoomEntriesBetween(const base::Time initial_time,
1188                                    const base::Time end_time,
1189                                    const CompletionCallback& callback) {
1190  DCHECK(!callback.is_null());
1191  background_queue_.DoomEntriesBetween(initial_time, end_time, callback);
1192  return net::ERR_IO_PENDING;
1193}
1194
1195int BackendImpl::DoomEntriesSince(const base::Time initial_time,
1196                                  const CompletionCallback& callback) {
1197  DCHECK(!callback.is_null());
1198  background_queue_.DoomEntriesSince(initial_time, callback);
1199  return net::ERR_IO_PENDING;
1200}
1201
1202int BackendImpl::OpenNextEntry(void** iter, Entry** next_entry,
1203                               const CompletionCallback& callback) {
1204  DCHECK(!callback.is_null());
1205  background_queue_.OpenNextEntry(iter, next_entry, callback);
1206  return net::ERR_IO_PENDING;
1207}
1208
1209void BackendImpl::EndEnumeration(void** iter) {
1210  background_queue_.EndEnumeration(*iter);
1211  *iter = NULL;
1212}
1213
1214void BackendImpl::GetStats(StatsItems* stats) {
1215  if (disabled_)
1216    return;
1217
1218  std::pair<std::string, std::string> item;
1219
1220  item.first = "Entries";
1221  item.second = base::StringPrintf("%d", data_->header.num_entries);
1222  stats->push_back(item);
1223
1224  item.first = "Pending IO";
1225  item.second = base::StringPrintf("%d", num_pending_io_);
1226  stats->push_back(item);
1227
1228  item.first = "Max size";
1229  item.second = base::StringPrintf("%d", max_size_);
1230  stats->push_back(item);
1231
1232  item.first = "Current size";
1233  item.second = base::StringPrintf("%d", data_->header.num_bytes);
1234  stats->push_back(item);
1235
1236  item.first = "Cache type";
1237  item.second = "Blockfile Cache";
1238  stats->push_back(item);
1239
1240  stats_.GetItems(stats);
1241}
1242
1243void BackendImpl::OnExternalCacheHit(const std::string& key) {
1244  background_queue_.OnExternalCacheHit(key);
1245}
1246
1247// ------------------------------------------------------------------------
1248
1249// We just created a new file so we're going to write the header and set the
1250// file length to include the hash table (zero filled).
1251bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
1252  AdjustMaxCacheSize(0);
1253
1254  IndexHeader header;
1255  header.table_len = DesiredIndexTableLen(max_size_);
1256
1257  // We need file version 2.1 for the new eviction algorithm.
1258  if (new_eviction_)
1259    header.version = 0x20001;
1260
1261  header.create_time = Time::Now().ToInternalValue();
1262
1263  if (!file->Write(&header, sizeof(header), 0))
1264    return false;
1265
1266  return file->SetLength(GetIndexSize(header.table_len));
1267}
1268
1269bool BackendImpl::InitBackingStore(bool* file_created) {
1270  if (!base::CreateDirectory(path_))
1271    return false;
1272
1273  base::FilePath index_name = path_.AppendASCII(kIndexName);
1274
1275  int flags = base::File::FLAG_READ | base::File::FLAG_WRITE |
1276              base::File::FLAG_OPEN_ALWAYS | base::File::FLAG_EXCLUSIVE_WRITE;
1277  base::File base_file(index_name, flags);
1278  if (!base_file.IsValid())
1279    return false;
1280
1281  bool ret = true;
1282  *file_created = base_file.created();
1283
1284  scoped_refptr<disk_cache::File> file(new disk_cache::File(base_file.Pass()));
1285  if (*file_created)
1286    ret = CreateBackingStore(file.get());
1287
1288  file = NULL;
1289  if (!ret)
1290    return false;
1291
1292  index_ = new MappedFile();
1293  data_ = static_cast<Index*>(index_->Init(index_name, 0));
1294  if (!data_) {
1295    LOG(ERROR) << "Unable to map Index file";
1296    return false;
1297  }
1298
1299  if (index_->GetLength() < sizeof(Index)) {
1300    // We verify this again in CheckIndex(), but it's easier to make sure now
1301    // that the header is there.
1302    LOG(ERROR) << "Corrupt Index file";
1303    return false;
1304  }
1305
1306  return true;
1307}
1308
1309// The maximum cache size will be either set explicitly by the caller, or
1310// calculated by this code.
1311void BackendImpl::AdjustMaxCacheSize(int table_len) {
1312  if (max_size_)
1313    return;
1314
1315  // If table_len is provided, the index file exists.
1316  DCHECK(!table_len || data_->header.magic);
1317
1318  // The user is not setting the size, let's figure it out.
1319  int64 available = base::SysInfo::AmountOfFreeDiskSpace(path_);
1320  if (available < 0) {
1321    max_size_ = kDefaultCacheSize;
1322    return;
1323  }
1324
1325  if (table_len)
1326    available += data_->header.num_bytes;
1327
1328  max_size_ = PreferredCacheSize(available);
1329
1330  if (!table_len)
1331    return;
1332
1333  // If we already have a table, adjust the size to it.
1334  int current_max_size = MaxStorageSizeForTable(table_len);
1335  if (max_size_ > current_max_size)
1336    max_size_ = current_max_size;
1337}
1338
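// Loads the statistics block referenced by the index header, allocating a new
// block when the header does not point to one yet.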
1339bool BackendImpl::InitStats() {
1340  Addr address(data_->header.stats);
1341  int size = stats_.StorageSize();
1342
1343  if (!address.is_initialized()) {
1344    FileType file_type = Addr::RequiredFileType(size);
1345    DCHECK_NE(file_type, EXTERNAL);
1346    int num_blocks = Addr::RequiredBlocks(size, file_type);
1347
1348    if (!CreateBlock(file_type, num_blocks, &address))
1349      return false;
1350
1351    data_->header.stats = address.value();
1352    return stats_.Init(NULL, 0, address);
1353  }
1354
1355  if (!address.is_block_file()) {
1356    NOTREACHED();
1357    return false;
1358  }
1359
1360  // Load the required data.
1361  size = address.num_blocks() * address.BlockSize();
1362  MappedFile* file = File(address);
1363  if (!file)
1364    return false;
1365
1366  scoped_ptr<char[]> data(new char[size]);
1367  size_t offset = address.start_block() * address.BlockSize() +
1368                  kBlockHeaderSize;
1369  if (!file->Read(data.get(), size, offset))
1370    return false;
1371
1372  if (!stats_.Init(data.get(), size, address))
1373    return false;
1374  if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
1375    stats_.InitSizeHistogram();
1376  return true;
1377}
1378
1379void BackendImpl::StoreStats() {
1380  int size = stats_.StorageSize();
1381  scoped_ptr<char[]> data(new char[size]);
1382  Addr address;
1383  size = stats_.SerializeStats(data.get(), size, &address);
1384  DCHECK(size);
1385  if (!address.is_initialized())
1386    return;
1387
1388  MappedFile* file = File(address);
1389  if (!file)
1390    return;
1391
1392  size_t offset = address.start_block() * address.BlockSize() +
1393                  kBlockHeaderSize;
1394  file->Write(data.get(), size, offset);  // ignore result.
1395}
1396
1397void BackendImpl::RestartCache(bool failure) {
1398  int64 errors = stats_.GetCounter(Stats::FATAL_ERROR);
1399  int64 full_dooms = stats_.GetCounter(Stats::DOOM_CACHE);
1400  int64 partial_dooms = stats_.GetCounter(Stats::DOOM_RECENT);
1401  int64 last_report = stats_.GetCounter(Stats::LAST_REPORT);
1402
1403  PrepareForRestart();
1404  if (failure) {
1405    DCHECK(!num_refs_);
1406    DCHECK(!open_entries_.size());
1407    DelayedCacheCleanup(path_);
1408  } else {
1409    DeleteCache(path_, false);
1410  }
1411
1412  // Don't call Init() if directed by the unit test: we are simulating a failure
1413  // trying to re-enable the cache.
1414  if (unit_test_)
1415    init_ = true;  // Let the destructor do proper cleanup.
1416  else if (SyncInit() == net::OK) {
1417    stats_.SetCounter(Stats::FATAL_ERROR, errors);
1418    stats_.SetCounter(Stats::DOOM_CACHE, full_dooms);
1419    stats_.SetCounter(Stats::DOOM_RECENT, partial_dooms);
1420    stats_.SetCounter(Stats::LAST_REPORT, last_report);
1421  }
1422}
1423
1424void BackendImpl::PrepareForRestart() {
1425  // Reset the mask_ if it was not given by the user.
1426  if (!(user_flags_ & kMask))
1427    mask_ = 0;
1428
1429  if (!(user_flags_ & kNewEviction))
1430    new_eviction_ = false;
1431
1432  disabled_ = true;
1433  data_->header.crash = 0;
1434  index_->Flush();
1435  index_ = NULL;
1436  data_ = NULL;
1437  block_files_.CloseFiles();
1438  rankings_.Reset();
1439  init_ = false;
1440  restarted_ = true;
1441}
1442
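// Returns 0 on success or one of the ERR_* codes from errors.h. On success
// |*entry| receives a reference that the caller must Release().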
1443int BackendImpl::NewEntry(Addr address, EntryImpl** entry) {
1444  EntriesMap::iterator it = open_entries_.find(address.value());
1445  if (it != open_entries_.end()) {
1446    // Easy job. This entry is already in memory.
1447    EntryImpl* this_entry = it->second;
1448    this_entry->AddRef();
1449    *entry = this_entry;
1450    return 0;
1451  }
1452
1453  STRESS_DCHECK(block_files_.IsValid(address));
1454
1455  if (!address.SanityCheckForEntryV2()) {
1456    LOG(WARNING) << "Wrong entry address.";
1457    STRESS_NOTREACHED();
1458    return ERR_INVALID_ADDRESS;
1459  }
1460
1461  scoped_refptr<EntryImpl> cache_entry(
1462      new EntryImpl(this, address, read_only_));
1463  IncreaseNumRefs();
1464  *entry = NULL;
1465
1466  TimeTicks start = TimeTicks::Now();
1467  if (!cache_entry->entry()->Load())
1468    return ERR_READ_FAILURE;
1469
1470  if (IsLoaded()) {
1471    CACHE_UMA(AGE_MS, "LoadTime", 0, start);
1472  }
1473
1474  if (!cache_entry->SanityCheck()) {
1475    LOG(WARNING) << "Messed up entry found.";
1476    STRESS_NOTREACHED();
1477    return ERR_INVALID_ENTRY;
1478  }
1479
1480  STRESS_DCHECK(block_files_.IsValid(
1481                    Addr(cache_entry->entry()->Data()->rankings_node)));
1482
1483  if (!cache_entry->LoadNodeAddress())
1484    return ERR_READ_FAILURE;
1485
1486  if (!rankings_.SanityCheck(cache_entry->rankings(), false)) {
1487    STRESS_NOTREACHED();
1488    cache_entry->SetDirtyFlag(0);
1489    // Don't remove this from the list (it is not linked properly). Instead,
1490    // break the link back to the entry because it is going away, and leave the
1491    // rankings node to be deleted if we find it through a list.
1492    rankings_.SetContents(cache_entry->rankings(), 0);
1493  } else if (!rankings_.DataSanityCheck(cache_entry->rankings(), false)) {
1494    STRESS_NOTREACHED();
1495    cache_entry->SetDirtyFlag(0);
1496    rankings_.SetContents(cache_entry->rankings(), address.value());
1497  }
1498
1499  if (!cache_entry->DataSanityCheck()) {
1500    LOG(WARNING) << "Messed up entry found.";
1501    cache_entry->SetDirtyFlag(0);
1502    cache_entry->FixForDelete();
1503  }
1504
1505  // Prevent overwriting the dirty flag on the destructor.
1506  cache_entry->SetDirtyFlag(GetCurrentEntryId());
1507
1508  if (cache_entry->dirty()) {
1509    Trace("Dirty entry 0x%p 0x%x", reinterpret_cast<void*>(cache_entry.get()),
1510          address.value());
1511  }
1512
1513  open_entries_[address.value()] = cache_entry.get();
1514
1515  cache_entry->BeginLogging(net_log_, false);
1516  cache_entry.swap(entry);
1517  return 0;
1518}
1519
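// Walks the collision chain that starts at the index bucket for |hash|,
// looking for |key|. When |find_parent| is true, the entry that links to the
// match is returned instead of the match itself. Dirty entries found along
// the way are unlinked from the chain.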
1520EntryImpl* BackendImpl::MatchEntry(const std::string& key, uint32 hash,
1521                                   bool find_parent, Addr entry_addr,
1522                                   bool* match_error) {
1523  Addr address(data_->table[hash & mask_]);
1524  scoped_refptr<EntryImpl> cache_entry, parent_entry;
1525  EntryImpl* tmp = NULL;
1526  bool found = false;
1527  std::set<CacheAddr> visited;
1528  *match_error = false;
1529
1530  for (;;) {
1531    if (disabled_)
1532      break;
1533
1534    if (visited.find(address.value()) != visited.end()) {
1535      // It's possible for a buggy version of the code to write a loop. Just
1536      // break it.
1537      Trace("Hash collision loop 0x%x", address.value());
1538      address.set_value(0);
1539      parent_entry->SetNextAddress(address);
1540    }
1541    visited.insert(address.value());
1542
1543    if (!address.is_initialized()) {
1544      if (find_parent)
1545        found = true;
1546      break;
1547    }
1548
1549    int error = NewEntry(address, &tmp);
1550    cache_entry.swap(&tmp);
1551
1552    if (error || cache_entry->dirty()) {
1553      // This entry is dirty on disk (it was not properly closed): we cannot
1554      // trust it.
1555      Addr child(0);
1556      if (!error)
1557        child.set_value(cache_entry->GetNextAddress());
1558
1559      if (parent_entry.get()) {
1560        parent_entry->SetNextAddress(child);
1561        parent_entry = NULL;
1562      } else {
1563        data_->table[hash & mask_] = child.value();
1564      }
1565
1566      Trace("MatchEntry dirty %d 0x%x 0x%x", find_parent, entry_addr.value(),
1567            address.value());
1568
1569      if (!error) {
1570        // It is important to call DestroyInvalidEntry after removing this
1571        // entry from the table.
1572        DestroyInvalidEntry(cache_entry.get());
1573        cache_entry = NULL;
1574      } else {
1575        Trace("NewEntry failed on MatchEntry 0x%x", address.value());
1576      }
1577
1578      // Restart the search.
1579      address.set_value(data_->table[hash & mask_]);
1580      visited.clear();
1581      continue;
1582    }
1583
1584    DCHECK_EQ(hash & mask_, cache_entry->entry()->Data()->hash & mask_);
1585    if (cache_entry->IsSameEntry(key, hash)) {
1586      if (!cache_entry->Update())
1587        cache_entry = NULL;
1588      found = true;
1589      if (find_parent && entry_addr.value() != address.value()) {
1590        Trace("Entry not on the index 0x%x", address.value());
1591        *match_error = true;
1592        parent_entry = NULL;
1593      }
1594      break;
1595    }
1596    if (!cache_entry->Update())
1597      cache_entry = NULL;
1598    parent_entry = cache_entry;
1599    cache_entry = NULL;
1600    if (!parent_entry.get())
1601      break;
1602
1603    address.set_value(parent_entry->GetNextAddress());
1604  }
1605
1606  if (parent_entry.get() && (!find_parent || !found))
1607    parent_entry = NULL;
1608
1609  if (find_parent && entry_addr.is_initialized() && !cache_entry.get()) {
1610    *match_error = true;
1611    parent_entry = NULL;
1612  }
1613
1614  if (cache_entry.get() && (find_parent || !found))
1615    cache_entry = NULL;
1616
1617  find_parent ? parent_entry.swap(&tmp) : cache_entry.swap(&tmp);
1618  FlushIndex();
1619  return tmp;
1620}
1621
1622// This is the actual implementation for OpenNextEntry and OpenPrevEntry.
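// The iterator keeps one position per rankings list; every call refreshes the
// candidate from each list and returns the newest (forward) or oldest
// (backward) of them.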
1623EntryImpl* BackendImpl::OpenFollowingEntry(bool forward, void** iter) {
1624  if (disabled_)
1625    return NULL;
1626
1627  DCHECK(iter);
1628
1629  const int kListsToSearch = 3;
1630  scoped_refptr<EntryImpl> entries[kListsToSearch];
1631  scoped_ptr<Rankings::Iterator> iterator(
1632      reinterpret_cast<Rankings::Iterator*>(*iter));
1633  *iter = NULL;
1634
1635  if (!iterator.get()) {
1636    iterator.reset(new Rankings::Iterator(&rankings_));
1637    bool ret = false;
1638
1639    // Get an entry from each list.
1640    for (int i = 0; i < kListsToSearch; i++) {
1641      EntryImpl* temp = NULL;
1642      ret |= OpenFollowingEntryFromList(forward, static_cast<Rankings::List>(i),
1643                                        &iterator->nodes[i], &temp);
1644      entries[i].swap(&temp);  // The entry was already addref'd.
1645    }
1646    if (!ret)
1647      return NULL;
1648  } else {
1649    // Get the next entry from the last list, and the actual entries for the
1650    // elements on the other lists.
1651    for (int i = 0; i < kListsToSearch; i++) {
1652      EntryImpl* temp = NULL;
1653      if (iterator->list == i) {
1654          OpenFollowingEntryFromList(forward, iterator->list,
1655                                     &iterator->nodes[i], &temp);
1656      } else {
1657        temp = GetEnumeratedEntry(iterator->nodes[i],
1658                                  static_cast<Rankings::List>(i));
1659      }
1660
1661      entries[i].swap(&temp);  // The entry was already addref'd.
1662    }
1663  }
1664
1665  int newest = -1;
1666  int oldest = -1;
1667  Time access_times[kListsToSearch];
1668  for (int i = 0; i < kListsToSearch; i++) {
1669    if (entries[i].get()) {
1670      access_times[i] = entries[i]->GetLastUsed();
1671      if (newest < 0) {
1672        DCHECK_LT(oldest, 0);
1673        newest = oldest = i;
1674        continue;
1675      }
1676      if (access_times[i] > access_times[newest])
1677        newest = i;
1678      if (access_times[i] < access_times[oldest])
1679        oldest = i;
1680    }
1681  }
1682
1683  if (newest < 0 || oldest < 0)
1684    return NULL;
1685
1686  EntryImpl* next_entry;
1687  if (forward) {
1688    next_entry = entries[newest].get();
1689    iterator->list = static_cast<Rankings::List>(newest);
1690  } else {
1691    next_entry = entries[oldest].get();
1692    iterator->list = static_cast<Rankings::List>(oldest);
1693  }
1694
1695  *iter = iterator.release();
1696  next_entry->AddRef();
1697  return next_entry;
1698}
1699
1700bool BackendImpl::OpenFollowingEntryFromList(bool forward, Rankings::List list,
1701                                             CacheRankingsBlock** from_entry,
1702                                             EntryImpl** next_entry) {
1703  if (disabled_)
1704    return false;
1705
1706  if (!new_eviction_ && Rankings::NO_USE != list)
1707    return false;
1708
1709  Rankings::ScopedRankingsBlock rankings(&rankings_, *from_entry);
1710  CacheRankingsBlock* next_block = forward ?
1711      rankings_.GetNext(rankings.get(), list) :
1712      rankings_.GetPrev(rankings.get(), list);
1713  Rankings::ScopedRankingsBlock next(&rankings_, next_block);
1714  *from_entry = NULL;
1715
1716  *next_entry = GetEnumeratedEntry(next.get(), list);
1717  if (!*next_entry)
1718    return false;
1719
1720  *from_entry = next.release();
1721  return true;
1722}
1723
1724EntryImpl* BackendImpl::GetEnumeratedEntry(CacheRankingsBlock* next,
1725                                           Rankings::List list) {
1726  if (!next || disabled_)
1727    return NULL;
1728
1729  EntryImpl* entry;
1730  int rv = NewEntry(Addr(next->Data()->contents), &entry);
1731  if (rv) {
1732    STRESS_NOTREACHED();
1733    rankings_.Remove(next, list, false);
1734    if (rv == ERR_INVALID_ADDRESS) {
1735      // There is nothing linked from the index. Delete the rankings node.
1736      DeleteBlock(next->address(), true);
1737    }
1738    return NULL;
1739  }
1740
1741  if (entry->dirty()) {
1742    // We cannot trust this entry.
1743    InternalDoomEntry(entry);
1744    entry->Release();
1745    return NULL;
1746  }
1747
1748  if (!entry->Update()) {
1749    STRESS_NOTREACHED();
1750    entry->Release();
1751    return NULL;
1752  }
1753
1754  // Note that it is unfortunate (but possible) for this entry to be clean, but
1755  // not actually the real entry. In other words, we could have lost this entry
1756  // from the index, and it could have been replaced with a newer one. It's not
1757  // worth checking that this entry is "the real one", so we just return it and
1758  // let the enumeration continue; this entry will be evicted at some point, and
1759  // the regular path will work with the real entry. With time, this problem
1760  // will disappear because this scenario is just a bug.

  // Make sure that we save the key for later.
  entry->GetKey();

  return entry;
}

EntryImpl* BackendImpl::ResurrectEntry(EntryImpl* deleted_entry) {
  if (ENTRY_NORMAL == deleted_entry->entry()->Data()->state) {
    deleted_entry->Release();
    stats_.OnEvent(Stats::CREATE_MISS);
    Trace("create entry miss ");
    return NULL;
  }

  // We are attempting to create an entry and found out that the entry was
  // previously deleted.

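  // Bring the deleted entry back to life instead of allocating a new one; the
  // eviction code updates its rankings information.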
  eviction_.OnCreateEntry(deleted_entry);
  entry_count_++;

  stats_.OnEvent(Stats::RESURRECT_HIT);
  Trace("Resurrect entry hit ");
  return deleted_entry;
}

void BackendImpl::DestroyInvalidEntry(EntryImpl* entry) {
  LOG(WARNING) << "Destroying invalid entry.";
  Trace("Destroying invalid entry 0x%p", entry);

  entry->SetPointerForInvalidEntry(GetCurrentEntryId());

  eviction_.OnDoomEntry(entry);
  entry->InternalDoom();

  if (!new_eviction_)
    DecreaseNumEntries();
  stats_.OnEvent(Stats::INVALID_ENTRY);
}

void BackendImpl::AddStorageSize(int32 bytes) {
  data_->header.num_bytes += bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::SubstractStorageSize(int32 bytes) {
  data_->header.num_bytes -= bytes;
  DCHECK_GE(data_->header.num_bytes, 0);
}

void BackendImpl::IncreaseNumRefs() {
  num_refs_++;
  if (max_refs_ < num_refs_)
    max_refs_ = num_refs_;
}

void BackendImpl::DecreaseNumRefs() {
  DCHECK(num_refs_);
  num_refs_--;

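  // If the cache was disabled while entries were still in use, restart it now
  // that the last reference is gone.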
  if (!num_refs_ && disabled_)
    base::MessageLoop::current()->PostTask(
        FROM_HERE, base::Bind(&BackendImpl::RestartCache, GetWeakPtr(), true));
}

void BackendImpl::IncreaseNumEntries() {
  data_->header.num_entries++;
  DCHECK_GT(data_->header.num_entries, 0);
}

void BackendImpl::DecreaseNumEntries() {
  data_->header.num_entries--;
  if (data_->header.num_entries < 0) {
    NOTREACHED();
    data_->header.num_entries = 0;
  }
}

void BackendImpl::LogStats() {
  StatsItems stats;
  GetStats(&stats);

  for (size_t index = 0; index < stats.size(); index++)
    VLOG(1) << stats[index].first << ": " << stats[index].second;
}

void BackendImpl::ReportStats() {
  CACHE_UMA(COUNTS, "Entries", 0, data_->header.num_entries);

  int current_size = data_->header.num_bytes / (1024 * 1024);
  int max_size = max_size_ / (1024 * 1024);
  int hit_ratio_as_percentage = stats_.GetHitRatio();

  CACHE_UMA(COUNTS_10000, "Size2", 0, current_size);
  // For any bin in HitRatioBySize2, the hit ratio of caches of that size is the
  // ratio of that bin's total count to the count in the same bin in the Size2
  // histogram.
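  // Sampling with probability hit_ratio_as_percentage / 100 achieves that: for
  // example, caches with a 40% hit ratio contribute to HitRatioBySize2 on
  // about 40 of every 100 reports.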
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(COUNTS_10000, "HitRatioBySize2", 0, current_size);
  CACHE_UMA(COUNTS_10000, "MaxSize2", 0, max_size);
  if (!max_size)
    max_size++;
  CACHE_UMA(PERCENTAGE, "UsedSpace", 0, current_size * 100 / max_size);

  CACHE_UMA(COUNTS_10000, "AverageOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::OPEN_ENTRIES)));
  CACHE_UMA(COUNTS_10000, "MaxOpenEntries2", 0,
            static_cast<int>(stats_.GetCounter(Stats::MAX_ENTRIES)));
  stats_.SetCounter(Stats::MAX_ENTRIES, 0);

  CACHE_UMA(COUNTS_10000, "TotalFatalErrors", 0,
            static_cast<int>(stats_.GetCounter(Stats::FATAL_ERROR)));
  CACHE_UMA(COUNTS_10000, "TotalDoomCache", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_CACHE)));
  CACHE_UMA(COUNTS_10000, "TotalDoomRecentEntries", 0,
            static_cast<int>(stats_.GetCounter(Stats::DOOM_RECENT)));
  stats_.SetCounter(Stats::FATAL_ERROR, 0);
  stats_.SetCounter(Stats::DOOM_CACHE, 0);
  stats_.SetCounter(Stats::DOOM_RECENT, 0);

  int age = (Time::Now() -
             Time::FromInternalValue(data_->header.create_time)).InHours();
  if (age)
    CACHE_UMA(HOURS, "FilesAge", 0, age);

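  // The statistics timer fires every 30 seconds, so 120 Stats::TIMER ticks
  // correspond to one hour (e.g. a counter of 240 means roughly two hours of
  // use).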
  int64 total_hours = stats_.GetCounter(Stats::TIMER) / 120;
  if (!data_->header.create_time || !data_->header.lru.filled) {
    int cause = data_->header.create_time ? 0 : 1;
    if (!data_->header.lru.filled)
      cause |= 2;
    CACHE_UMA(CACHE_ERROR, "ShortReport", 0, cause);
    CACHE_UMA(HOURS, "TotalTimeNotFull", 0, static_cast<int>(total_hours));
    return;
  }

  // This is an up-to-date client that will report FirstEviction() data. After
  // that event, start reporting these histograms:

  CACHE_UMA(HOURS, "TotalTime", 0, static_cast<int>(total_hours));
  // For any bin in HitRatioByTotalTime, the hit ratio of caches of that total
  // time is the ratio of that bin's total count to the count in the same bin in
  // the TotalTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByTotalTime", 0, implicit_cast<int>(total_hours));

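  // LAST_REPORT_TIMER holds the value that Stats::TIMER had when the previous
  // report was generated, so after the subtraction below use_hours is the
  // number of hours elapsed since that report.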
  int64 use_hours = stats_.GetCounter(Stats::LAST_REPORT_TIMER) / 120;
  stats_.SetCounter(Stats::LAST_REPORT_TIMER, stats_.GetCounter(Stats::TIMER));

  // We may see users with no use_hours at this point if this is the first time
  // we are running this code.
  if (use_hours)
    use_hours = total_hours - use_hours;

  if (!use_hours || !GetEntryCount() || !data_->header.num_bytes)
    return;

  CACHE_UMA(HOURS, "UseTime", 0, static_cast<int>(use_hours));
  // For any bin in HitRatioByUseTime, the hit ratio of caches of that use time
  // is the ratio of that bin's total count to the count in the same bin in the
  // UseTime histogram.
  if (base::RandInt(0, 99) < hit_ratio_as_percentage)
    CACHE_UMA(HOURS, "HitRatioByUseTime", 0, implicit_cast<int>(use_hours));
  CACHE_UMA(PERCENTAGE, "HitRatio", 0, hit_ratio_as_percentage);

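  // Average number of entries trimmed per hour of use since the last report.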
  int64 trim_rate = stats_.GetCounter(Stats::TRIM_ENTRY) / use_hours;
  CACHE_UMA(COUNTS, "TrimRate", 0, static_cast<int>(trim_rate));

  int avg_size = data_->header.num_bytes / GetEntryCount();
  CACHE_UMA(COUNTS, "EntrySize", 0, avg_size);
  CACHE_UMA(COUNTS, "EntriesFull", 0, data_->header.num_entries);

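  // mask_ + 1 is the length of the index table, so this is the load factor of
  // the hash table, as a percentage.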
  CACHE_UMA(PERCENTAGE, "IndexLoad", 0,
            data_->header.num_entries * 100 / (mask_ + 1));

  int large_entries_bytes = stats_.GetLargeEntriesSize();
  int large_ratio = large_entries_bytes * 100 / data_->header.num_bytes;
  CACHE_UMA(PERCENTAGE, "LargeEntriesRatio", 0, large_ratio);

  if (new_eviction_) {
    CACHE_UMA(PERCENTAGE, "ResurrectRatio", 0, stats_.GetResurrectRatio());
    CACHE_UMA(PERCENTAGE, "NoUseRatio", 0,
              data_->header.lru.sizes[0] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "LowUseRatio", 0,
              data_->header.lru.sizes[1] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "HighUseRatio", 0,
              data_->header.lru.sizes[2] * 100 / data_->header.num_entries);
    CACHE_UMA(PERCENTAGE, "DeletedRatio", 0,
              data_->header.lru.sizes[4] * 100 / data_->header.num_entries);
  }

  stats_.ResetRatios();
  stats_.SetCounter(Stats::TRIM_ENTRY, 0);

  if (cache_type_ == net::DISK_CACHE)
    block_files_.ReportStats();
}

void BackendImpl::UpgradeTo2_1() {
  // 2.1 is basically the same as 2.0, except that new fields are actually
  // updated by the new eviction algorithm.
  DCHECK(0x20000 == data_->header.version);
  data_->header.version = 0x20001;
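  // None of the existing entries have been through the new algorithm yet, so
  // account for all of them on the NO_USE list.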
  data_->header.lru.sizes[Rankings::NO_USE] = data_->header.num_entries;
}

bool BackendImpl::CheckIndex() {
  DCHECK(data_);

  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  if (new_eviction_) {
    // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion >> 16 != data_->header.version >> 16) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
    if (kCurrentVersion != data_->header.version) {
      // We need file version 2.1 for the new eviction algorithm.
      UpgradeTo2_1();
    }
  } else {
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion != data_->header.version) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
  }

  if (!data_->header.table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

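  // The file must be big enough to hold the whole table, and the table length
  // must be a multiple of kBaseTableLen (every size produced by
  // DesiredIndexTableLen satisfies this).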
  if (current_size < GetIndexSize(data_->header.table_len) ||
      data_->header.table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(data_->header.table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  if (data_->header.num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }
#endif

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

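  // Tables created by this code always have a power-of-two length, so
  // table_len - 1 can be used directly as the hash mask.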
  if (!mask_)
    mask_ = data_->header.table_len - 1;

  // Load the table into memory.
  return index_->Preload();
}

int BackendImpl::CheckAllEntries() {
  int num_dirty = 0;
  int num_entries = 0;
  DCHECK(mask_ < kuint32max);
  for (unsigned int i = 0; i <= mask_; i++) {
    Addr address(data_->table[i]);
    if (!address.is_initialized())
      continue;
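    // Walk the chain of entries that hash to this table bucket.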
    for (;;) {
      EntryImpl* tmp;
      int ret = NewEntry(address, &tmp);
      if (ret) {
        STRESS_NOTREACHED();
        return ret;
      }
      scoped_refptr<EntryImpl> cache_entry;
      cache_entry.swap(&tmp);

      if (cache_entry->dirty())
        num_dirty++;
      else if (CheckEntry(cache_entry.get()))
        num_entries++;
      else
        return ERR_INVALID_ENTRY;

      DCHECK_EQ(i, cache_entry->entry()->Data()->hash & mask_);
      address.set_value(cache_entry->GetNextAddress());
      if (!address.is_initialized())
        break;
    }
  }

  Trace("CheckAllEntries End");
  if (num_entries + num_dirty != data_->header.num_entries) {
    LOG(ERROR) << "Number of entries " << num_entries << " " << num_dirty <<
                  " " << data_->header.num_entries;
    DCHECK_LT(num_entries, data_->header.num_entries);
    return ERR_NUM_ENTRIES_MISMATCH;
  }

  return num_dirty;
}

bool BackendImpl::CheckEntry(EntryImpl* cache_entry) {
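  // Verify that the entry block, its rankings node and any data streams stored
  // in block files all point to valid blocks.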
  bool ok = block_files_.IsValid(cache_entry->entry()->address());
  ok = ok && block_files_.IsValid(cache_entry->rankings()->address());
  EntryStore* data = cache_entry->entry()->Data();
  for (size_t i = 0; i < arraysize(data->data_addr); i++) {
    if (data->data_addr[i]) {
      Addr address(data->data_addr[i]);
      if (address.is_block_file())
        ok = ok && block_files_.IsValid(address);
    }
  }

  return ok && cache_entry->rankings()->VerifyHash();
}

int BackendImpl::MaxBuffersSize() {
  static int64 total_memory = base::SysInfo::AmountOfPhysicalMemory();
  static bool done = false;

  if (!done) {
    const int kMaxBuffersSize = 30 * 1024 * 1024;

    // We want to use up to 2% of the computer's memory.
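    // For example, with 1 GB of RAM this allows roughly 20 MB of buffers;
    // with about 1.5 GB or more, the 30 MB cap applies.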
    total_memory = total_memory * 2 / 100;
    if (total_memory > kMaxBuffersSize || total_memory <= 0)
      total_memory = kMaxBuffersSize;

    done = true;
  }

  return static_cast<int>(total_memory);
}

}  // namespace disk_cache