backend_worker_v3.cc revision effb81e5f8246d0db0270817048dc992db66e9fb
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/backend_worker_v3.h"

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/message_loop/message_loop.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/time/time.h"
#include "base/timer/timer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/blockfile/errors.h"
#include "net/disk_cache/blockfile/experiments.h"
#include "net/disk_cache/blockfile/file.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

#if defined(V3_NOT_JUST_YET_READY)

const char* kIndexName = "index";

// Roughly 240 MB corresponds to fewer than 50k entries for 99% of users.
// Note that the actual target is to keep the index table load factor under
// 55% for most users.
const int k64kEntriesStore = 240 * 1000 * 1000;
const int kBaseTableLen = 64 * 1024;
const int kDefaultCacheSize = 80 * 1024 * 1024;

// Avoid trimming the cache for the first 5 minutes (10 timer ticks).
const int kTrimDelay = 10;

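// Returns the index table length to use for a cache of |storage_size| bytes.
// The table length doubles with each doubling of the storage size, from the
// base 64k entries up to a cap of 1M entries (a 4 MB table). For example, a
// 500 MB cache falls in the 480-960 MB bucket and gets a 256k-entry table.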
int DesiredIndexTableLen(int32 storage_size) {
  if (storage_size <= k64kEntriesStore)
    return kBaseTableLen;
  if (storage_size <= k64kEntriesStore * 2)
    return kBaseTableLen * 2;
  if (storage_size <= k64kEntriesStore * 4)
    return kBaseTableLen * 4;
  if (storage_size <= k64kEntriesStore * 8)
    return kBaseTableLen * 8;

  // The biggest storage_size for int32 requires a 4 MB table.
  return kBaseTableLen * 16;
}

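// Roughly the inverse of DesiredIndexTableLen(): the largest storage size
// that maps to |table_len| (k64kEntriesStore bytes per kBaseTableLen
// entries).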
int MaxStorageSizeForTable(int table_len) {
  return table_len * (k64kEntriesStore / kBaseTableLen);
}

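// Returns the size in bytes of the index file: the header followed by one
// CacheAddr per table entry.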
size_t GetIndexSize(int table_len) {
  size_t table_size = sizeof(disk_cache::CacheAddr) * table_len;
  return sizeof(disk_cache::IndexHeader) + table_size;
}

// ------------------------------------------------------------------------

// Sets the group for the current experiment. Returns false if the files
// should be discarded.
bool InitExperiment(disk_cache::IndexHeader* header, bool cache_created) {
  if (header->experiment == disk_cache::EXPERIMENT_OLD_FILE1 ||
      header->experiment == disk_cache::EXPERIMENT_OLD_FILE2) {
    // Discard the current cache.
    return false;
  }

  if (base::FieldTrialList::FindFullName("SimpleCacheTrial") ==
          "ExperimentControl") {
    if (cache_created) {
      header->experiment = disk_cache::EXPERIMENT_SIMPLE_CONTROL;
      return true;
    } else if (header->experiment != disk_cache::EXPERIMENT_SIMPLE_CONTROL) {
      return false;
    }
  }

  header->experiment = disk_cache::NO_EXPERIMENT;
  return true;
}
#endif  // defined(V3_NOT_JUST_YET_READY)

}  // namespace

// ------------------------------------------------------------------------

namespace disk_cache {

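// A Worker encapsulates the file-backed state (cache path and block files)
// used by BackendImplV3.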
BackendImplV3::Worker::Worker(const base::FilePath& path,
                              base::MessageLoopProxy* main_thread)
      : path_(path),
        block_files_(path),
        init_(false) {
}

#if defined(V3_NOT_JUST_YET_READY)

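// Initializes the backend on the cache thread: opens the backing store,
// validates the index, and sets up eviction, stats, rankings and the stats
// timer. Returns a net error code.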
int BackendImpl::SyncInit() {
#if defined(NET_BUILD_STRESS_CACHE)
  // Start evictions right away.
  up_ticks_ = kTrimDelay * 2;
#endif
  DCHECK(!init_);
  if (init_)
    return net::ERR_FAILED;

  bool create_files = false;
  if (!InitBackingStore(&create_files)) {
    ReportError(ERR_STORAGE_ERROR);
    return net::ERR_FAILED;
  }

  num_refs_ = num_pending_io_ = max_refs_ = 0;
  entry_count_ = byte_count_ = 0;

  if (!restarted_) {
    buffer_bytes_ = 0;
    trace_object_ = TraceObject::GetTraceObject();
    // Create a recurring timer that fires every 30 seconds (every second in
    // unit tests).
    int timer_delay = unit_test_ ? 1000 : 30000;
    timer_.reset(new base::RepeatingTimer<BackendImpl>());
    timer_->Start(FROM_HERE, TimeDelta::FromMilliseconds(timer_delay), this,
                  &BackendImpl::OnStatsTimer);
  }

  init_ = true;
  Trace("Init");

  if (data_->header.experiment != NO_EXPERIMENT &&
      cache_type_ != net::DISK_CACHE) {
    // No experiment for other cache types.
    return net::ERR_FAILED;
  }

  if (!(user_flags_ & kNoRandom)) {
    // The unit test directly controls what to test.
    new_eviction_ = (cache_type_ == net::DISK_CACHE);
  }

  if (!CheckIndex()) {
    ReportError(ERR_INIT_FAILED);
    return net::ERR_FAILED;
  }

  if (!restarted_ && (create_files || !data_->header.num_entries))
    ReportError(ERR_CACHE_CREATED);

  if (!(user_flags_ & kNoRandom) && cache_type_ == net::DISK_CACHE &&
      !InitExperiment(&data_->header, create_files)) {
    return net::ERR_FAILED;
  }

  // We don't care if the value overflows. The only thing we care about is
  // that the id cannot be zero, because that value is used as "not dirty".
  // Increasing the value once per second gives us many years before we start
  // having collisions.
  data_->header.this_id++;
  if (!data_->header.this_id)
    data_->header.this_id++;

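  // The crash flag stays set while the backend is in use and is cleared on
  // clean shutdown, so finding it already set here means the previous
  // session did not shut down cleanly.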
  bool previous_crash = (data_->header.crash != 0);
  data_->header.crash = 1;

  if (!block_files_.Init(create_files))
    return net::ERR_FAILED;

  // We want to minimize the changes to the cache for an AppCache.
  if (cache_type() == net::APP_CACHE) {
    DCHECK(!new_eviction_);
    read_only_ = true;
  } else if (cache_type() == net::SHADER_CACHE) {
    DCHECK(!new_eviction_);
  }

  eviction_.Init(this);

  // stats_ and rankings_ may end up calling back to us, so we had better be
  // enabled by now.
  disabled_ = false;
  if (!InitStats())
    return net::ERR_FAILED;

  disabled_ = !rankings_.Init(this, new_eviction_);

#if defined(STRESS_CACHE_EXTENDED_VALIDATION)
  trace_object_->EnableTracing(false);
  int sc = SelfCheck();
  if (sc < 0 && sc != ERR_NUM_ENTRIES_MISMATCH)
    NOTREACHED();
  trace_object_->EnableTracing(true);
#endif

  if (previous_crash) {
    ReportError(ERR_PREVIOUS_CRASH);
  } else if (!restarted_) {
    ReportError(ERR_NO_ERROR);
  }

  FlushIndex();

  return disabled_ ? net::ERR_FAILED : net::OK;
}

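// Drops all in-memory state (index mapping, block files, rankings) so that
// the backend can be reinitialized from scratch.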
void BackendImpl::PrepareForRestart() {
  // Reset mask_ if it was not given by the user.
  if (!(user_flags_ & kMask))
    mask_ = 0;

  if (!(user_flags_ & kNewEviction))
    new_eviction_ = false;

  disabled_ = true;
  data_->header.crash = 0;
  index_->Flush();
  index_ = NULL;
  data_ = NULL;
  block_files_.CloseFiles();
  rankings_.Reset();
  init_ = false;
  restarted_ = true;
}

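// Cleanup must run on the background thread; when destroyed from another
// thread we post FinalCleanupCallback and block until it signals |done_|.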
BackendImpl::~BackendImpl() {
  if (user_flags_ & kNoRandom) {
    // This is a unit test, so we want to be strict about not leaking entries
    // and completing all the work.
    background_queue_.WaitForPendingIO();
  } else {
    // This is most likely not a test, so we want to do as little work as
    // possible at this time, at the price of leaving dirty entries behind.
    background_queue_.DropPendingIO();
  }

  if (background_queue_.BackgroundIsCurrentThread()) {
    // Unit tests may use the same thread for everything.
    CleanupCache();
  } else {
    background_queue_.background_thread()->PostTask(
        FROM_HERE, base::Bind(&FinalCleanupCallback, base::Unretained(this)));
    // http://crbug.com/74623
    base::ThreadRestrictions::ScopedAllowWait allow_wait;
    done_.Wait();
  }
}

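// Stops evictions and the stats timer, persists stats and the index, and
// releases all file handles.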
void BackendImpl::CleanupCache() {
  Trace("Backend Cleanup");
  eviction_.Stop();
  timer_.reset();

  if (init_) {
    StoreStats();
    if (data_)
      data_->header.crash = 0;

    if (user_flags_ & kNoRandom) {
      // This is a net unit test; verify that we are not 'leaking' entries.
      File::WaitForPendingIO(&num_pending_io_);
      DCHECK(!num_refs_);
    } else {
      File::DropPendingIO();
    }
  }
  block_files_.CloseFiles();
  FlushIndex();
  index_ = NULL;
  ptr_factory_.InvalidateWeakPtrs();
  done_.Signal();
}

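// Returns the full path of an external file; external files are named
// f_xxxxxx after their file number.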
base::FilePath BackendImpl::GetFileName(Addr address) const {
  if (!address.is_separate_file() || !address.is_initialized()) {
    NOTREACHED();
    return base::FilePath();
  }

  std::string tmp = base::StringPrintf("f_%06x", address.FileNumber());
  return path_.AppendASCII(tmp);
}

// We just created a new file, so we write the header and set the file length
// to include the (zero-filled) hash table.
bool BackendImpl::CreateBackingStore(disk_cache::File* file) {
  AdjustMaxCacheSize(0);

  IndexHeader header;
  header.table_len = DesiredIndexTableLen(max_size_);

  // We need file version 2.1 for the new eviction algorithm.
  if (new_eviction_)
    header.version = 0x20001;

  header.create_time = Time::Now().ToInternalValue();

  if (!file->Write(&header, sizeof(header), 0))
    return false;

  return file->SetLength(GetIndexSize(header.table_len));
}

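// Opens (or creates) the index file and maps it into memory, creating the
// cache directory and writing a fresh header when needed. |file_created| is
// set to true if a new index file was created.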
bool BackendImpl::InitBackingStore(bool* file_created) {
  if (!base::CreateDirectory(path_))
    return false;

  base::FilePath index_name = path_.AppendASCII(kIndexName);

  int flags = base::PLATFORM_FILE_READ |
              base::PLATFORM_FILE_WRITE |
              base::PLATFORM_FILE_OPEN_ALWAYS |
              base::PLATFORM_FILE_EXCLUSIVE_WRITE;
  scoped_refptr<disk_cache::File> file(new disk_cache::File(
      base::CreatePlatformFile(index_name, flags, file_created, NULL)));

  if (!file->IsValid())
    return false;

  bool ret = true;
  if (*file_created)
    ret = CreateBackingStore(file.get());

  file = NULL;
  if (!ret)
    return false;

  index_ = new MappedFile();
  data_ = reinterpret_cast<Index*>(index_->Init(index_name, 0));
  if (!data_) {
    LOG(ERROR) << "Unable to map Index file";
    return false;
  }

  if (index_->GetLength() < sizeof(Index)) {
    // We verify this again in CheckIndex(), but it's easier to make sure now
    // that the header is present.
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  return true;
}

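// Reports |error| through UMA. Error codes are zero or negative, so the
// histogram records the absolute value.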
void BackendImpl::ReportError(int error) {
  STRESS_DCHECK(!error || error == ERR_PREVIOUS_CRASH ||
                error == ERR_CACHE_CREATED);

  // We transmit positive numbers instead of direct error codes.
  DCHECK_LE(error, 0);
  CACHE_UMA(CACHE_ERROR, "Error", 0, error * -1);
}

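// Sanity-checks the mapped index: magic number, file version (upgrading 2.0
// to 2.1 when the new eviction algorithm is enabled), table length and the
// stored size and entry counts.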
bool BackendImpl::CheckIndex() {
  DCHECK(data_);

  size_t current_size = index_->GetLength();
  if (current_size < sizeof(Index)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  if (new_eviction_) {
    // We support versions 2.0 and 2.1, upgrading 2.0 to 2.1.
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion >> 16 != data_->header.version >> 16) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
    if (kCurrentVersion == data_->header.version) {
      // We need file version 2.1 for the new eviction algorithm.
      UpgradeTo2_1();
    }
  } else {
    if (kIndexMagic != data_->header.magic ||
        kCurrentVersion != data_->header.version) {
      LOG(ERROR) << "Invalid file version or magic";
      return false;
    }
  }

  if (!data_->header.table_len) {
    LOG(ERROR) << "Invalid table size";
    return false;
  }

  if (current_size < GetIndexSize(data_->header.table_len) ||
      data_->header.table_len & (kBaseTableLen - 1)) {
    LOG(ERROR) << "Corrupt Index file";
    return false;
  }

  AdjustMaxCacheSize(data_->header.table_len);

#if !defined(NET_BUILD_STRESS_CACHE)
  if (data_->header.num_bytes < 0 ||
      (max_size_ < kint32max - kDefaultCacheSize &&
       data_->header.num_bytes > max_size_ + kDefaultCacheSize)) {
    LOG(ERROR) << "Invalid cache (current) size";
    return false;
  }
#endif

  if (data_->header.num_entries < 0) {
    LOG(ERROR) << "Invalid number of entries";
    return false;
  }

  if (!mask_)
    mask_ = data_->header.table_len - 1;

  // Load the table into memory with a single read.
  scoped_ptr<char[]> buf(new char[current_size]);
  return index_->Read(buf.get(), current_size, 0);
}

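// Loads the usage statistics from their block-file record, allocating a new
// record if this is a brand new cache.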
bool BackendImpl::InitStats() {
  Addr address(data_->header.stats);
  int size = stats_.StorageSize();

  if (!address.is_initialized()) {
    FileType file_type = Addr::RequiredFileType(size);
    DCHECK_NE(file_type, EXTERNAL);
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!CreateBlock(file_type, num_blocks, &address))
      return false;
    return stats_.Init(NULL, 0, address);
  }

  if (!address.is_block_file()) {
    NOTREACHED();
    return false;
  }

  // Load the required data.
  size = address.num_blocks() * address.BlockSize();
  MappedFile* file = File(address);
  if (!file)
    return false;

  scoped_ptr<char[]> data(new char[size]);
  size_t offset = address.start_block() * address.BlockSize() +
                  kBlockHeaderSize;
  if (!file->Read(data.get(), size, offset))
    return false;

  if (!stats_.Init(data.get(), size, address))
    return false;
  if (cache_type_ == net::DISK_CACHE && ShouldReportAgain())
    stats_.InitSizeHistogram();
  return true;
}

#endif  // defined(V3_NOT_JUST_YET_READY)

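// Note: still a stub. The real initialization above is gated behind
// V3_NOT_JUST_YET_READY and is not wired up yet.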
int BackendImplV3::Worker::Init(const CompletionCallback& callback) {
  return net::ERR_FAILED;
}

BackendImplV3::Worker::~Worker() {
}

}  // namespace disk_cache