1// Copyright (c) 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "base/basictypes.h"
6#include "base/files/file_util.h"
7#include "base/metrics/field_trial.h"
8#include "base/port.h"
9#include "base/strings/string_util.h"
10#include "base/strings/stringprintf.h"
11#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
12#include "base/thread_task_runner_handle.h"
13#include "base/threading/platform_thread.h"
14#include "base/threading/thread_restrictions.h"
15#include "net/base/cache_type.h"
16#include "net/base/io_buffer.h"
17#include "net/base/net_errors.h"
18#include "net/base/test_completion_callback.h"
19#include "net/disk_cache/blockfile/backend_impl.h"
20#include "net/disk_cache/blockfile/entry_impl.h"
21#include "net/disk_cache/blockfile/experiments.h"
22#include "net/disk_cache/blockfile/histogram_macros.h"
23#include "net/disk_cache/blockfile/mapped_file.h"
24#include "net/disk_cache/cache_util.h"
25#include "net/disk_cache/disk_cache_test_base.h"
26#include "net/disk_cache/disk_cache_test_util.h"
27#include "net/disk_cache/memory/mem_backend_impl.h"
28#include "net/disk_cache/simple/simple_backend_impl.h"
29#include "net/disk_cache/simple/simple_entry_format.h"
30#include "net/disk_cache/simple/simple_test_util.h"
31#include "net/disk_cache/simple/simple_util.h"
32#include "testing/gtest/include/gtest/gtest.h"
33
34#if defined(OS_WIN)
35#include "base/win/scoped_handle.h"
36#endif
37
38// Provide a BackendImpl object to macros from histogram_macros.h.
39#define CACHE_UMA_BACKEND_IMPL_OBJ backend_
40
41using base::Time;
42
43namespace {
44
45const char kExistingEntryKey[] = "existing entry key";
46
47scoped_ptr<disk_cache::BackendImpl> CreateExistingEntryCache(
48    const base::Thread& cache_thread,
49    base::FilePath& cache_path) {
50  net::TestCompletionCallback cb;
51
52  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
53      cache_path, cache_thread.message_loop_proxy(), NULL));
54  int rv = cache->Init(cb.callback());
55  if (cb.GetResult(rv) != net::OK)
56    return scoped_ptr<disk_cache::BackendImpl>();
57
58  disk_cache::Entry* entry = NULL;
59  rv = cache->CreateEntry(kExistingEntryKey, &entry, cb.callback());
60  if (cb.GetResult(rv) != net::OK)
61    return scoped_ptr<disk_cache::BackendImpl>();
62  entry->Close();
63
64  return cache.Pass();
65}
66
67}  // namespace
68
69// Tests that can run with different types of caches.
70class DiskCacheBackendTest : public DiskCacheTestWithCache {
71 protected:
72  // Some utility methods:
73
74  // Perform IO operations on the cache until there is pending IO.
75  int GeneratePendingIO(net::TestCompletionCallback* cb);
76
77  // Adds 5 sparse entries. |doomed_start| and |doomed_end| if not NULL,
78  // will be filled with times, used by DoomEntriesSince and DoomEntriesBetween.
79  // There are 4 entries after doomed_start and 2 after doomed_end.
80  void InitSparseCache(base::Time* doomed_start, base::Time* doomed_end);
81
82  bool CreateSetOfRandomEntries(std::set<std::string>* key_pool);
83  bool EnumerateAndMatchKeys(int max_to_open,
84                             TestIterator* iter,
85                             std::set<std::string>* keys_to_match,
86                             size_t* count);
87
88  // Actual tests:
89  void BackendBasics();
90  void BackendKeying();
91  void BackendShutdownWithPendingFileIO(bool fast);
92  void BackendShutdownWithPendingIO(bool fast);
93  void BackendShutdownWithPendingCreate(bool fast);
94  void BackendSetSize();
95  void BackendLoad();
96  void BackendChain();
97  void BackendValidEntry();
98  void BackendInvalidEntry();
99  void BackendInvalidEntryRead();
100  void BackendInvalidEntryWithLoad();
101  void BackendTrimInvalidEntry();
102  void BackendTrimInvalidEntry2();
103  void BackendEnumerations();
104  void BackendEnumerations2();
105  void BackendInvalidEntryEnumeration();
106  void BackendFixEnumerators();
107  void BackendDoomRecent();
108  void BackendDoomBetween();
109  void BackendTransaction(const std::string& name, int num_entries, bool load);
110  void BackendRecoverInsert();
111  void BackendRecoverRemove();
112  void BackendRecoverWithEviction();
113  void BackendInvalidEntry2();
114  void BackendInvalidEntry3();
115  void BackendInvalidEntry7();
116  void BackendInvalidEntry8();
117  void BackendInvalidEntry9(bool eviction);
118  void BackendInvalidEntry10(bool eviction);
119  void BackendInvalidEntry11(bool eviction);
120  void BackendTrimInvalidEntry12();
121  void BackendDoomAll();
122  void BackendDoomAll2();
123  void BackendInvalidRankings();
124  void BackendInvalidRankings2();
125  void BackendDisable();
126  void BackendDisable2();
127  void BackendDisable3();
128  void BackendDisable4();
129};
130
131int DiskCacheBackendTest::GeneratePendingIO(net::TestCompletionCallback* cb) {
132  if (!use_current_thread_) {
133    ADD_FAILURE();
134    return net::ERR_FAILED;
135  }
136
137  disk_cache::Entry* entry;
138  int rv = cache_->CreateEntry("some key", &entry, cb->callback());
139  if (cb->GetResult(rv) != net::OK)
140    return net::ERR_CACHE_CREATE_FAILURE;
141
142  const int kSize = 25000;
143  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
144  CacheTestFillBuffer(buffer->data(), kSize, false);
145
146  for (int i = 0; i < 10 * 1024 * 1024; i += 64 * 1024) {
147    // We are using the current thread as the cache thread because we want to
148    // be able to call directly this method to make sure that the OS (instead
149    // of us switching thread) is returning IO pending.
150    if (!simple_cache_mode_) {
151      rv = static_cast<disk_cache::EntryImpl*>(entry)->WriteDataImpl(
152          0, i, buffer.get(), kSize, cb->callback(), false);
153    } else {
154      rv = entry->WriteData(0, i, buffer.get(), kSize, cb->callback(), false);
155    }
156
157    if (rv == net::ERR_IO_PENDING)
158      break;
159    if (rv != kSize)
160      rv = net::ERR_FAILED;
161  }
162
163  // Don't call Close() to avoid going through the queue or we'll deadlock
164  // waiting for the operation to finish.
165  if (!simple_cache_mode_)
166    static_cast<disk_cache::EntryImpl*>(entry)->Release();
167  else
168    entry->Close();
169
170  return rv;
171}
172
173void DiskCacheBackendTest::InitSparseCache(base::Time* doomed_start,
174                                           base::Time* doomed_end) {
175  InitCache();
176
177  const int kSize = 50;
178  // This must be greater then MemEntryImpl::kMaxSparseEntrySize.
179  const int kOffset = 10 + 1024 * 1024;
180
181  disk_cache::Entry* entry0 = NULL;
182  disk_cache::Entry* entry1 = NULL;
183  disk_cache::Entry* entry2 = NULL;
184
185  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
186  CacheTestFillBuffer(buffer->data(), kSize, false);
187
188  ASSERT_EQ(net::OK, CreateEntry("zeroth", &entry0));
189  ASSERT_EQ(kSize, WriteSparseData(entry0, 0, buffer.get(), kSize));
190  ASSERT_EQ(kSize,
191            WriteSparseData(entry0, kOffset + kSize, buffer.get(), kSize));
192  entry0->Close();
193
194  FlushQueueForTest();
195  AddDelay();
196  if (doomed_start)
197    *doomed_start = base::Time::Now();
198
199  // Order in rankings list:
200  // first_part1, first_part2, second_part1, second_part2
201  ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
202  ASSERT_EQ(kSize, WriteSparseData(entry1, 0, buffer.get(), kSize));
203  ASSERT_EQ(kSize,
204            WriteSparseData(entry1, kOffset + kSize, buffer.get(), kSize));
205  entry1->Close();
206
207  ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
208  ASSERT_EQ(kSize, WriteSparseData(entry2, 0, buffer.get(), kSize));
209  ASSERT_EQ(kSize,
210            WriteSparseData(entry2, kOffset + kSize, buffer.get(), kSize));
211  entry2->Close();
212
213  FlushQueueForTest();
214  AddDelay();
215  if (doomed_end)
216    *doomed_end = base::Time::Now();
217
218  // Order in rankings list:
219  // third_part1, fourth_part1, third_part2, fourth_part2
220  disk_cache::Entry* entry3 = NULL;
221  disk_cache::Entry* entry4 = NULL;
222  ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
223  ASSERT_EQ(kSize, WriteSparseData(entry3, 0, buffer.get(), kSize));
224  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
225  ASSERT_EQ(kSize, WriteSparseData(entry4, 0, buffer.get(), kSize));
226  ASSERT_EQ(kSize,
227            WriteSparseData(entry3, kOffset + kSize, buffer.get(), kSize));
228  ASSERT_EQ(kSize,
229            WriteSparseData(entry4, kOffset + kSize, buffer.get(), kSize));
230  entry3->Close();
231  entry4->Close();
232
233  FlushQueueForTest();
234  AddDelay();
235}
236
237// Creates entries based on random keys. Stores these keys in |key_pool|.
238bool DiskCacheBackendTest::CreateSetOfRandomEntries(
239    std::set<std::string>* key_pool) {
240  const int kNumEntries = 10;
241
242  for (int i = 0; i < kNumEntries; ++i) {
243    std::string key = GenerateKey(true);
244    disk_cache::Entry* entry;
245    if (CreateEntry(key, &entry) != net::OK)
246      return false;
247    key_pool->insert(key);
248    entry->Close();
249  }
250  return key_pool->size() == implicit_cast<size_t>(cache_->GetEntryCount());
251}
252
253// Performs iteration over the backend and checks that the keys of entries
254// opened are in |keys_to_match|, then erases them. Up to |max_to_open| entries
255// will be opened, if it is positive. Otherwise, iteration will continue until
256// OpenNextEntry stops returning net::OK.
257bool DiskCacheBackendTest::EnumerateAndMatchKeys(
258    int max_to_open,
259    TestIterator* iter,
260    std::set<std::string>* keys_to_match,
261    size_t* count) {
262  disk_cache::Entry* entry;
263
264  if (!iter)
265    return false;
266  while (iter->OpenNextEntry(&entry) == net::OK) {
267    if (!entry)
268      return false;
269    EXPECT_EQ(1U, keys_to_match->erase(entry->GetKey()));
270    entry->Close();
271    ++(*count);
272    if (max_to_open >= 0 && implicit_cast<int>(*count) >= max_to_open)
273      break;
274  };
275
276  return true;
277}
278
279void DiskCacheBackendTest::BackendBasics() {
280  InitCache();
281  disk_cache::Entry *entry1 = NULL, *entry2 = NULL;
282  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
283  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
284  ASSERT_TRUE(NULL != entry1);
285  entry1->Close();
286  entry1 = NULL;
287
288  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
289  ASSERT_TRUE(NULL != entry1);
290  entry1->Close();
291  entry1 = NULL;
292
293  EXPECT_NE(net::OK, CreateEntry("the first key", &entry1));
294  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
295  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
296  ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
297  ASSERT_TRUE(NULL != entry1);
298  ASSERT_TRUE(NULL != entry2);
299  EXPECT_EQ(2, cache_->GetEntryCount());
300
301  disk_cache::Entry* entry3 = NULL;
302  ASSERT_EQ(net::OK, OpenEntry("some other key", &entry3));
303  ASSERT_TRUE(NULL != entry3);
304  EXPECT_TRUE(entry2 == entry3);
305  EXPECT_EQ(2, cache_->GetEntryCount());
306
307  EXPECT_EQ(net::OK, DoomEntry("some other key"));
308  EXPECT_EQ(1, cache_->GetEntryCount());
309  entry1->Close();
310  entry2->Close();
311  entry3->Close();
312
313  EXPECT_EQ(net::OK, DoomEntry("the first key"));
314  EXPECT_EQ(0, cache_->GetEntryCount());
315
316  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry1));
317  ASSERT_EQ(net::OK, CreateEntry("some other key", &entry2));
318  entry1->Doom();
319  entry1->Close();
320  EXPECT_EQ(net::OK, DoomEntry("some other key"));
321  EXPECT_EQ(0, cache_->GetEntryCount());
322  entry2->Close();
323}
324
325TEST_F(DiskCacheBackendTest, Basics) {
326  BackendBasics();
327}
328
329TEST_F(DiskCacheBackendTest, NewEvictionBasics) {
330  SetNewEviction();
331  BackendBasics();
332}
333
334TEST_F(DiskCacheBackendTest, MemoryOnlyBasics) {
335  SetMemoryOnlyMode();
336  BackendBasics();
337}
338
339TEST_F(DiskCacheBackendTest, AppCacheBasics) {
340  SetCacheType(net::APP_CACHE);
341  BackendBasics();
342}
343
344TEST_F(DiskCacheBackendTest, ShaderCacheBasics) {
345  SetCacheType(net::SHADER_CACHE);
346  BackendBasics();
347}
348
349void DiskCacheBackendTest::BackendKeying() {
350  InitCache();
351  const char* kName1 = "the first key";
352  const char* kName2 = "the first Key";
353  disk_cache::Entry *entry1, *entry2;
354  ASSERT_EQ(net::OK, CreateEntry(kName1, &entry1));
355
356  ASSERT_EQ(net::OK, CreateEntry(kName2, &entry2));
357  EXPECT_TRUE(entry1 != entry2) << "Case sensitive";
358  entry2->Close();
359
360  char buffer[30];
361  base::strlcpy(buffer, kName1, arraysize(buffer));
362  ASSERT_EQ(net::OK, OpenEntry(buffer, &entry2));
363  EXPECT_TRUE(entry1 == entry2);
364  entry2->Close();
365
366  base::strlcpy(buffer + 1, kName1, arraysize(buffer) - 1);
367  ASSERT_EQ(net::OK, OpenEntry(buffer + 1, &entry2));
368  EXPECT_TRUE(entry1 == entry2);
369  entry2->Close();
370
371  base::strlcpy(buffer + 3,  kName1, arraysize(buffer) - 3);
372  ASSERT_EQ(net::OK, OpenEntry(buffer + 3, &entry2));
373  EXPECT_TRUE(entry1 == entry2);
374  entry2->Close();
375
376  // Now verify long keys.
377  char buffer2[20000];
378  memset(buffer2, 's', sizeof(buffer2));
379  buffer2[1023] = '\0';
380  ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on block file";
381  entry2->Close();
382
383  buffer2[1023] = 'g';
384  buffer2[19999] = '\0';
385  ASSERT_EQ(net::OK, CreateEntry(buffer2, &entry2)) << "key on external file";
386  entry2->Close();
387  entry1->Close();
388}
389
390TEST_F(DiskCacheBackendTest, Keying) {
391  BackendKeying();
392}
393
394TEST_F(DiskCacheBackendTest, NewEvictionKeying) {
395  SetNewEviction();
396  BackendKeying();
397}
398
399TEST_F(DiskCacheBackendTest, MemoryOnlyKeying) {
400  SetMemoryOnlyMode();
401  BackendKeying();
402}
403
404TEST_F(DiskCacheBackendTest, AppCacheKeying) {
405  SetCacheType(net::APP_CACHE);
406  BackendKeying();
407}
408
409TEST_F(DiskCacheBackendTest, ShaderCacheKeying) {
410  SetCacheType(net::SHADER_CACHE);
411  BackendKeying();
412}
413
414TEST_F(DiskCacheTest, CreateBackend) {
415  net::TestCompletionCallback cb;
416
417  {
418    ASSERT_TRUE(CleanupCacheDir());
419    base::Thread cache_thread("CacheThread");
420    ASSERT_TRUE(cache_thread.StartWithOptions(
421        base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
422
423    // Test the private factory method(s).
424    scoped_ptr<disk_cache::Backend> cache;
425    cache = disk_cache::MemBackendImpl::CreateBackend(0, NULL);
426    ASSERT_TRUE(cache.get());
427    cache.reset();
428
429    // Now test the public API.
430    int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
431                                            net::CACHE_BACKEND_DEFAULT,
432                                            cache_path_,
433                                            0,
434                                            false,
435                                            cache_thread.task_runner(),
436                                            NULL,
437                                            &cache,
438                                            cb.callback());
439    ASSERT_EQ(net::OK, cb.GetResult(rv));
440    ASSERT_TRUE(cache.get());
441    cache.reset();
442
443    rv = disk_cache::CreateCacheBackend(net::MEMORY_CACHE,
444                                        net::CACHE_BACKEND_DEFAULT,
445                                        base::FilePath(), 0,
446                                        false, NULL, NULL, &cache,
447                                        cb.callback());
448    ASSERT_EQ(net::OK, cb.GetResult(rv));
449    ASSERT_TRUE(cache.get());
450    cache.reset();
451  }
452
453  base::MessageLoop::current()->RunUntilIdle();
454}
455
456// Tests that |BackendImpl| fails to initialize with a missing file.
457TEST_F(DiskCacheBackendTest, CreateBackend_MissingFile) {
458  ASSERT_TRUE(CopyTestCache("bad_entry"));
459  base::FilePath filename = cache_path_.AppendASCII("data_1");
460  base::DeleteFile(filename, false);
461  base::Thread cache_thread("CacheThread");
462  ASSERT_TRUE(cache_thread.StartWithOptions(
463      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
464  net::TestCompletionCallback cb;
465
466  bool prev = base::ThreadRestrictions::SetIOAllowed(false);
467  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
468      cache_path_, cache_thread.task_runner(), NULL));
469  int rv = cache->Init(cb.callback());
470  EXPECT_EQ(net::ERR_FAILED, cb.GetResult(rv));
471  base::ThreadRestrictions::SetIOAllowed(prev);
472
473  cache.reset();
474  DisableIntegrityCheck();
475}
476
477TEST_F(DiskCacheBackendTest, ExternalFiles) {
478  InitCache();
479  // First, let's create a file on the folder.
480  base::FilePath filename = cache_path_.AppendASCII("f_000001");
481
482  const int kSize = 50;
483  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
484  CacheTestFillBuffer(buffer1->data(), kSize, false);
485  ASSERT_EQ(kSize, base::WriteFile(filename, buffer1->data(), kSize));
486
487  // Now let's create a file with the cache.
488  disk_cache::Entry* entry;
489  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
490  ASSERT_EQ(0, WriteData(entry, 0, 20000, buffer1.get(), 0, false));
491  entry->Close();
492
493  // And verify that the first file is still there.
494  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
495  ASSERT_EQ(kSize, base::ReadFile(filename, buffer2->data(), kSize));
496  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize));
497}
498
499// Tests that we deal with file-level pending operations at destruction time.
500void DiskCacheBackendTest::BackendShutdownWithPendingFileIO(bool fast) {
501  ASSERT_TRUE(CleanupCacheDir());
502  uint32 flags = disk_cache::kNoBuffering;
503  if (!fast)
504    flags |= disk_cache::kNoRandom;
505
506  UseCurrentThread();
507  CreateBackend(flags, NULL);
508
509  net::TestCompletionCallback cb;
510  int rv = GeneratePendingIO(&cb);
511
512  // The cache destructor will see one pending operation here.
513  cache_.reset();
514
515  if (rv == net::ERR_IO_PENDING) {
516    if (fast || simple_cache_mode_)
517      EXPECT_FALSE(cb.have_result());
518    else
519      EXPECT_TRUE(cb.have_result());
520  }
521
522  base::MessageLoop::current()->RunUntilIdle();
523
524#if !defined(OS_IOS)
525  // Wait for the actual operation to complete, or we'll keep a file handle that
526  // may cause issues later. Note that on iOS systems even though this test
527  // uses a single thread, the actual IO is posted to a worker thread and the
528  // cache destructor breaks the link to reach cb when the operation completes.
529  rv = cb.GetResult(rv);
530#endif
531}
532
533TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO) {
534  BackendShutdownWithPendingFileIO(false);
535}
536
537// Here and below, tests that simulate crashes are not compiled in LeakSanitizer
538// builds because they contain a lot of intentional memory leaks.
539// The wrapper scripts used to run tests under Valgrind Memcheck will also
540// disable these tests. See:
541// tools/valgrind/gtest_exclude/net_unittests.gtest-memcheck.txt
542#if !defined(LEAK_SANITIZER)
543// We'll be leaking from this test.
544TEST_F(DiskCacheBackendTest, ShutdownWithPendingFileIO_Fast) {
545  // The integrity test sets kNoRandom so there's a version mismatch if we don't
546  // force new eviction.
547  SetNewEviction();
548  BackendShutdownWithPendingFileIO(true);
549}
550#endif
551
552// See crbug.com/330074
553#if !defined(OS_IOS)
554// Tests that one cache instance is not affected by another one going away.
555TEST_F(DiskCacheBackendTest, MultipleInstancesWithPendingFileIO) {
556  base::ScopedTempDir store;
557  ASSERT_TRUE(store.CreateUniqueTempDir());
558
559  net::TestCompletionCallback cb;
560  scoped_ptr<disk_cache::Backend> extra_cache;
561  int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
562                                          net::CACHE_BACKEND_DEFAULT,
563                                          store.path(),
564                                          0,
565                                          false,
566                                          base::ThreadTaskRunnerHandle::Get(),
567                                          NULL,
568                                          &extra_cache,
569                                          cb.callback());
570  ASSERT_EQ(net::OK, cb.GetResult(rv));
571  ASSERT_TRUE(extra_cache.get() != NULL);
572
573  ASSERT_TRUE(CleanupCacheDir());
574  SetNewEviction();  // Match the expected behavior for integrity verification.
575  UseCurrentThread();
576
577  CreateBackend(disk_cache::kNoBuffering, NULL);
578  rv = GeneratePendingIO(&cb);
579
580  // cache_ has a pending operation, and extra_cache will go away.
581  extra_cache.reset();
582
583  if (rv == net::ERR_IO_PENDING)
584    EXPECT_FALSE(cb.have_result());
585
586  base::MessageLoop::current()->RunUntilIdle();
587
588  // Wait for the actual operation to complete, or we'll keep a file handle that
589  // may cause issues later.
590  rv = cb.GetResult(rv);
591}
592#endif
593
594// Tests that we deal with background-thread pending operations.
595void DiskCacheBackendTest::BackendShutdownWithPendingIO(bool fast) {
596  net::TestCompletionCallback cb;
597
598  {
599    ASSERT_TRUE(CleanupCacheDir());
600    base::Thread cache_thread("CacheThread");
601    ASSERT_TRUE(cache_thread.StartWithOptions(
602        base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
603
604    uint32 flags = disk_cache::kNoBuffering;
605    if (!fast)
606      flags |= disk_cache::kNoRandom;
607
608    CreateBackend(flags, &cache_thread);
609
610    disk_cache::Entry* entry;
611    int rv = cache_->CreateEntry("some key", &entry, cb.callback());
612    ASSERT_EQ(net::OK, cb.GetResult(rv));
613
614    entry->Close();
615
616    // The cache destructor will see one pending operation here.
617    cache_.reset();
618  }
619
620  base::MessageLoop::current()->RunUntilIdle();
621}
622
623TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO) {
624  BackendShutdownWithPendingIO(false);
625}
626
627#if !defined(LEAK_SANITIZER)
628// We'll be leaking from this test.
629TEST_F(DiskCacheBackendTest, ShutdownWithPendingIO_Fast) {
630  // The integrity test sets kNoRandom so there's a version mismatch if we don't
631  // force new eviction.
632  SetNewEviction();
633  BackendShutdownWithPendingIO(true);
634}
635#endif
636
637// Tests that we deal with create-type pending operations.
638void DiskCacheBackendTest::BackendShutdownWithPendingCreate(bool fast) {
639  net::TestCompletionCallback cb;
640
641  {
642    ASSERT_TRUE(CleanupCacheDir());
643    base::Thread cache_thread("CacheThread");
644    ASSERT_TRUE(cache_thread.StartWithOptions(
645        base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
646
647    disk_cache::BackendFlags flags =
648      fast ? disk_cache::kNone : disk_cache::kNoRandom;
649    CreateBackend(flags, &cache_thread);
650
651    disk_cache::Entry* entry;
652    int rv = cache_->CreateEntry("some key", &entry, cb.callback());
653    ASSERT_EQ(net::ERR_IO_PENDING, rv);
654
655    cache_.reset();
656    EXPECT_FALSE(cb.have_result());
657  }
658
659  base::MessageLoop::current()->RunUntilIdle();
660}
661
662TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate) {
663  BackendShutdownWithPendingCreate(false);
664}
665
666#if !defined(LEAK_SANITIZER)
667// We'll be leaking an entry from this test.
668TEST_F(DiskCacheBackendTest, ShutdownWithPendingCreate_Fast) {
669  // The integrity test sets kNoRandom so there's a version mismatch if we don't
670  // force new eviction.
671  SetNewEviction();
672  BackendShutdownWithPendingCreate(true);
673}
674#endif
675
676// Disabled on android since this test requires cache creator to create
677// blockfile caches.
678#if !defined(OS_ANDROID)
679TEST_F(DiskCacheTest, TruncatedIndex) {
680  ASSERT_TRUE(CleanupCacheDir());
681  base::FilePath index = cache_path_.AppendASCII("index");
682  ASSERT_EQ(5, base::WriteFile(index, "hello", 5));
683
684  base::Thread cache_thread("CacheThread");
685  ASSERT_TRUE(cache_thread.StartWithOptions(
686      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
687  net::TestCompletionCallback cb;
688
689  scoped_ptr<disk_cache::Backend> backend;
690  int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
691                                          net::CACHE_BACKEND_BLOCKFILE,
692                                          cache_path_,
693                                          0,
694                                          false,
695                                          cache_thread.task_runner(),
696                                          NULL,
697                                          &backend,
698                                          cb.callback());
699  ASSERT_NE(net::OK, cb.GetResult(rv));
700
701  ASSERT_FALSE(backend);
702}
703#endif
704
705void DiskCacheBackendTest::BackendSetSize() {
706  const int cache_size = 0x10000;  // 64 kB
707  SetMaxSize(cache_size);
708  InitCache();
709
710  std::string first("some key");
711  std::string second("something else");
712  disk_cache::Entry* entry;
713  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
714
715  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(cache_size));
716  memset(buffer->data(), 0, cache_size);
717  EXPECT_EQ(cache_size / 10,
718            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false))
719      << "normal file";
720
721  EXPECT_EQ(net::ERR_FAILED,
722            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false))
723      << "file size above the limit";
724
725  // By doubling the total size, we make this file cacheable.
726  SetMaxSize(cache_size * 2);
727  EXPECT_EQ(cache_size / 5,
728            WriteData(entry, 1, 0, buffer.get(), cache_size / 5, false));
729
730  // Let's fill up the cache!.
731  SetMaxSize(cache_size * 10);
732  EXPECT_EQ(cache_size * 3 / 4,
733            WriteData(entry, 0, 0, buffer.get(), cache_size * 3 / 4, false));
734  entry->Close();
735  FlushQueueForTest();
736
737  SetMaxSize(cache_size);
738
739  // The cache is 95% full.
740
741  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
742  EXPECT_EQ(cache_size / 10,
743            WriteData(entry, 0, 0, buffer.get(), cache_size / 10, false));
744
745  disk_cache::Entry* entry2;
746  ASSERT_EQ(net::OK, CreateEntry("an extra key", &entry2));
747  EXPECT_EQ(cache_size / 10,
748            WriteData(entry2, 0, 0, buffer.get(), cache_size / 10, false));
749  entry2->Close();  // This will trigger the cache trim.
750
751  EXPECT_NE(net::OK, OpenEntry(first, &entry2));
752
753  FlushQueueForTest();  // Make sure that we are done trimming the cache.
754  FlushQueueForTest();  // We may have posted two tasks to evict stuff.
755
756  entry->Close();
757  ASSERT_EQ(net::OK, OpenEntry(second, &entry));
758  EXPECT_EQ(cache_size / 10, entry->GetDataSize(0));
759  entry->Close();
760}
761
762TEST_F(DiskCacheBackendTest, SetSize) {
763  BackendSetSize();
764}
765
766TEST_F(DiskCacheBackendTest, NewEvictionSetSize) {
767  SetNewEviction();
768  BackendSetSize();
769}
770
771TEST_F(DiskCacheBackendTest, MemoryOnlySetSize) {
772  SetMemoryOnlyMode();
773  BackendSetSize();
774}
775
776void DiskCacheBackendTest::BackendLoad() {
777  InitCache();
778  int seed = static_cast<int>(Time::Now().ToInternalValue());
779  srand(seed);
780
781  disk_cache::Entry* entries[100];
782  for (int i = 0; i < 100; i++) {
783    std::string key = GenerateKey(true);
784    ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
785  }
786  EXPECT_EQ(100, cache_->GetEntryCount());
787
788  for (int i = 0; i < 100; i++) {
789    int source1 = rand() % 100;
790    int source2 = rand() % 100;
791    disk_cache::Entry* temp = entries[source1];
792    entries[source1] = entries[source2];
793    entries[source2] = temp;
794  }
795
796  for (int i = 0; i < 100; i++) {
797    disk_cache::Entry* entry;
798    ASSERT_EQ(net::OK, OpenEntry(entries[i]->GetKey(), &entry));
799    EXPECT_TRUE(entry == entries[i]);
800    entry->Close();
801    entries[i]->Doom();
802    entries[i]->Close();
803  }
804  FlushQueueForTest();
805  EXPECT_EQ(0, cache_->GetEntryCount());
806}
807
808TEST_F(DiskCacheBackendTest, Load) {
809  // Work with a tiny index table (16 entries)
810  SetMask(0xf);
811  SetMaxSize(0x100000);
812  BackendLoad();
813}
814
815TEST_F(DiskCacheBackendTest, NewEvictionLoad) {
816  SetNewEviction();
817  // Work with a tiny index table (16 entries)
818  SetMask(0xf);
819  SetMaxSize(0x100000);
820  BackendLoad();
821}
822
823TEST_F(DiskCacheBackendTest, MemoryOnlyLoad) {
824  SetMaxSize(0x100000);
825  SetMemoryOnlyMode();
826  BackendLoad();
827}
828
829TEST_F(DiskCacheBackendTest, AppCacheLoad) {
830  SetCacheType(net::APP_CACHE);
831  // Work with a tiny index table (16 entries)
832  SetMask(0xf);
833  SetMaxSize(0x100000);
834  BackendLoad();
835}
836
837TEST_F(DiskCacheBackendTest, ShaderCacheLoad) {
838  SetCacheType(net::SHADER_CACHE);
839  // Work with a tiny index table (16 entries)
840  SetMask(0xf);
841  SetMaxSize(0x100000);
842  BackendLoad();
843}
844
845// Tests the chaining of an entry to the current head.
846void DiskCacheBackendTest::BackendChain() {
847  SetMask(0x1);  // 2-entry table.
848  SetMaxSize(0x3000);  // 12 kB.
849  InitCache();
850
851  disk_cache::Entry* entry;
852  ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
853  entry->Close();
854  ASSERT_EQ(net::OK, CreateEntry("The Second key", &entry));
855  entry->Close();
856}
857
858TEST_F(DiskCacheBackendTest, Chain) {
859  BackendChain();
860}
861
862TEST_F(DiskCacheBackendTest, NewEvictionChain) {
863  SetNewEviction();
864  BackendChain();
865}
866
867TEST_F(DiskCacheBackendTest, AppCacheChain) {
868  SetCacheType(net::APP_CACHE);
869  BackendChain();
870}
871
872TEST_F(DiskCacheBackendTest, ShaderCacheChain) {
873  SetCacheType(net::SHADER_CACHE);
874  BackendChain();
875}
876
877TEST_F(DiskCacheBackendTest, NewEvictionTrim) {
878  SetNewEviction();
879  InitCache();
880
881  disk_cache::Entry* entry;
882  for (int i = 0; i < 100; i++) {
883    std::string name(base::StringPrintf("Key %d", i));
884    ASSERT_EQ(net::OK, CreateEntry(name, &entry));
885    entry->Close();
886    if (i < 90) {
887      // Entries 0 to 89 are in list 1; 90 to 99 are in list 0.
888      ASSERT_EQ(net::OK, OpenEntry(name, &entry));
889      entry->Close();
890    }
891  }
892
893  // The first eviction must come from list 1 (10% limit), the second must come
894  // from list 0.
895  TrimForTest(false);
896  EXPECT_NE(net::OK, OpenEntry("Key 0", &entry));
897  TrimForTest(false);
898  EXPECT_NE(net::OK, OpenEntry("Key 90", &entry));
899
900  // Double check that we still have the list tails.
901  ASSERT_EQ(net::OK, OpenEntry("Key 1", &entry));
902  entry->Close();
903  ASSERT_EQ(net::OK, OpenEntry("Key 91", &entry));
904  entry->Close();
905}
906
907// Before looking for invalid entries, let's check a valid entry.
908void DiskCacheBackendTest::BackendValidEntry() {
909  InitCache();
910
911  std::string key("Some key");
912  disk_cache::Entry* entry;
913  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
914
915  const int kSize = 50;
916  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
917  memset(buffer1->data(), 0, kSize);
918  base::strlcpy(buffer1->data(), "And the data to save", kSize);
919  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer1.get(), kSize, false));
920  entry->Close();
921  SimulateCrash();
922
923  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
924
925  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
926  memset(buffer2->data(), 0, kSize);
927  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer2.get(), kSize));
928  entry->Close();
929  EXPECT_STREQ(buffer1->data(), buffer2->data());
930}
931
932TEST_F(DiskCacheBackendTest, ValidEntry) {
933  BackendValidEntry();
934}
935
936TEST_F(DiskCacheBackendTest, NewEvictionValidEntry) {
937  SetNewEviction();
938  BackendValidEntry();
939}
940
941// The same logic of the previous test (ValidEntry), but this time force the
942// entry to be invalid, simulating a crash in the middle.
943// We'll be leaking memory from this test.
944void DiskCacheBackendTest::BackendInvalidEntry() {
945  InitCache();
946
947  std::string key("Some key");
948  disk_cache::Entry* entry;
949  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
950
951  const int kSize = 50;
952  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
953  memset(buffer->data(), 0, kSize);
954  base::strlcpy(buffer->data(), "And the data to save", kSize);
955  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
956  SimulateCrash();
957
958  EXPECT_NE(net::OK, OpenEntry(key, &entry));
959  EXPECT_EQ(0, cache_->GetEntryCount());
960}
961
962#if !defined(LEAK_SANITIZER)
963// We'll be leaking memory from this test.
964TEST_F(DiskCacheBackendTest, InvalidEntry) {
965  BackendInvalidEntry();
966}
967
968// We'll be leaking memory from this test.
969TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry) {
970  SetNewEviction();
971  BackendInvalidEntry();
972}
973
974// We'll be leaking memory from this test.
975TEST_F(DiskCacheBackendTest, AppCacheInvalidEntry) {
976  SetCacheType(net::APP_CACHE);
977  BackendInvalidEntry();
978}
979
980// We'll be leaking memory from this test.
981TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntry) {
982  SetCacheType(net::SHADER_CACHE);
983  BackendInvalidEntry();
984}
985
986// Almost the same test, but this time crash the cache after reading an entry.
987// We'll be leaking memory from this test.
988void DiskCacheBackendTest::BackendInvalidEntryRead() {
989  InitCache();
990
991  std::string key("Some key");
992  disk_cache::Entry* entry;
993  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
994
995  const int kSize = 50;
996  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
997  memset(buffer->data(), 0, kSize);
998  base::strlcpy(buffer->data(), "And the data to save", kSize);
999  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1000  entry->Close();
1001  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1002  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer.get(), kSize));
1003
1004  SimulateCrash();
1005
1006  if (type_ == net::APP_CACHE) {
1007    // Reading an entry and crashing should not make it dirty.
1008    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1009    EXPECT_EQ(1, cache_->GetEntryCount());
1010    entry->Close();
1011  } else {
1012    EXPECT_NE(net::OK, OpenEntry(key, &entry));
1013    EXPECT_EQ(0, cache_->GetEntryCount());
1014  }
1015}
1016
1017// We'll be leaking memory from this test.
1018TEST_F(DiskCacheBackendTest, InvalidEntryRead) {
1019  BackendInvalidEntryRead();
1020}
1021
1022// We'll be leaking memory from this test.
1023TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryRead) {
1024  SetNewEviction();
1025  BackendInvalidEntryRead();
1026}
1027
1028// We'll be leaking memory from this test.
1029TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryRead) {
1030  SetCacheType(net::APP_CACHE);
1031  BackendInvalidEntryRead();
1032}
1033
1034// We'll be leaking memory from this test.
1035TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryRead) {
1036  SetCacheType(net::SHADER_CACHE);
1037  BackendInvalidEntryRead();
1038}
1039
1040// We'll be leaking memory from this test.
1041void DiskCacheBackendTest::BackendInvalidEntryWithLoad() {
1042  // Work with a tiny index table (16 entries)
1043  SetMask(0xf);
1044  SetMaxSize(0x100000);
1045  InitCache();
1046
1047  int seed = static_cast<int>(Time::Now().ToInternalValue());
1048  srand(seed);
1049
1050  const int kNumEntries = 100;
1051  disk_cache::Entry* entries[kNumEntries];
1052  for (int i = 0; i < kNumEntries; i++) {
1053    std::string key = GenerateKey(true);
1054    ASSERT_EQ(net::OK, CreateEntry(key, &entries[i]));
1055  }
1056  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1057
1058  for (int i = 0; i < kNumEntries; i++) {
1059    int source1 = rand() % kNumEntries;
1060    int source2 = rand() % kNumEntries;
1061    disk_cache::Entry* temp = entries[source1];
1062    entries[source1] = entries[source2];
1063    entries[source2] = temp;
1064  }
1065
1066  std::string keys[kNumEntries];
1067  for (int i = 0; i < kNumEntries; i++) {
1068    keys[i] = entries[i]->GetKey();
1069    if (i < kNumEntries / 2)
1070      entries[i]->Close();
1071  }
1072
1073  SimulateCrash();
1074
1075  for (int i = kNumEntries / 2; i < kNumEntries; i++) {
1076    disk_cache::Entry* entry;
1077    EXPECT_NE(net::OK, OpenEntry(keys[i], &entry));
1078  }
1079
1080  for (int i = 0; i < kNumEntries / 2; i++) {
1081    disk_cache::Entry* entry;
1082    ASSERT_EQ(net::OK, OpenEntry(keys[i], &entry));
1083    entry->Close();
1084  }
1085
1086  EXPECT_EQ(kNumEntries / 2, cache_->GetEntryCount());
1087}
1088
1089// We'll be leaking memory from this test.
1090TEST_F(DiskCacheBackendTest, InvalidEntryWithLoad) {
1091  BackendInvalidEntryWithLoad();
1092}
1093
1094// We'll be leaking memory from this test.
1095TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryWithLoad) {
1096  SetNewEviction();
1097  BackendInvalidEntryWithLoad();
1098}
1099
1100// We'll be leaking memory from this test.
1101TEST_F(DiskCacheBackendTest, AppCacheInvalidEntryWithLoad) {
1102  SetCacheType(net::APP_CACHE);
1103  BackendInvalidEntryWithLoad();
1104}
1105
1106// We'll be leaking memory from this test.
1107TEST_F(DiskCacheBackendTest, ShaderCacheInvalidEntryWithLoad) {
1108  SetCacheType(net::SHADER_CACHE);
1109  BackendInvalidEntryWithLoad();
1110}
1111
1112// We'll be leaking memory from this test.
1113void DiskCacheBackendTest::BackendTrimInvalidEntry() {
1114  const int kSize = 0x3000;  // 12 kB
1115  SetMaxSize(kSize * 10);
1116  InitCache();
1117
1118  std::string first("some key");
1119  std::string second("something else");
1120  disk_cache::Entry* entry;
1121  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
1122
1123  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1124  memset(buffer->data(), 0, kSize);
1125  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1126
1127  // Simulate a crash.
1128  SimulateCrash();
1129
1130  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
1131  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1132
1133  EXPECT_EQ(2, cache_->GetEntryCount());
1134  SetMaxSize(kSize);
1135  entry->Close();  // Trim the cache.
1136  FlushQueueForTest();
1137
1138  // If we evicted the entry in less than 20mS, we have one entry in the cache;
1139  // if it took more than that, we posted a task and we'll delete the second
1140  // entry too.
1141  base::MessageLoop::current()->RunUntilIdle();
1142
1143  // This may be not thread-safe in general, but for now it's OK so add some
1144  // ThreadSanitizer annotations to ignore data races on cache_.
1145  // See http://crbug.com/55970
1146  ANNOTATE_IGNORE_READS_BEGIN();
1147  EXPECT_GE(1, cache_->GetEntryCount());
1148  ANNOTATE_IGNORE_READS_END();
1149
1150  EXPECT_NE(net::OK, OpenEntry(first, &entry));
1151}
1152
1153// We'll be leaking memory from this test.
1154TEST_F(DiskCacheBackendTest, TrimInvalidEntry) {
1155  BackendTrimInvalidEntry();
1156}
1157
1158// We'll be leaking memory from this test.
1159TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry) {
1160  SetNewEviction();
1161  BackendTrimInvalidEntry();
1162}
1163
1164// We'll be leaking memory from this test.
1165void DiskCacheBackendTest::BackendTrimInvalidEntry2() {
1166  SetMask(0xf);  // 16-entry table.
1167
1168  const int kSize = 0x3000;  // 12 kB
1169  SetMaxSize(kSize * 40);
1170  InitCache();
1171
1172  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
1173  memset(buffer->data(), 0, kSize);
1174  disk_cache::Entry* entry;
1175
1176  // Writing 32 entries to this cache chains most of them.
1177  for (int i = 0; i < 32; i++) {
1178    std::string key(base::StringPrintf("some key %d", i));
1179    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1180    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1181    entry->Close();
1182    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1183    // Note that we are not closing the entries.
1184  }
1185
1186  // Simulate a crash.
1187  SimulateCrash();
1188
1189  ASSERT_EQ(net::OK, CreateEntry("Something else", &entry));
1190  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
1191
1192  FlushQueueForTest();
1193  EXPECT_EQ(33, cache_->GetEntryCount());
1194  SetMaxSize(kSize);
1195
1196  // For the new eviction code, all corrupt entries are on the second list so
1197  // they are not going away that easy.
1198  if (new_eviction_) {
1199    EXPECT_EQ(net::OK, DoomAllEntries());
1200  }
1201
1202  entry->Close();  // Trim the cache.
1203  FlushQueueForTest();
1204
1205  // We may abort the eviction before cleaning up everything.
1206  base::MessageLoop::current()->RunUntilIdle();
1207  FlushQueueForTest();
1208  // If it's not clear enough: we may still have eviction tasks running at this
1209  // time, so the number of entries is changing while we read it.
1210  ANNOTATE_IGNORE_READS_AND_WRITES_BEGIN();
1211  EXPECT_GE(30, cache_->GetEntryCount());
1212  ANNOTATE_IGNORE_READS_AND_WRITES_END();
1213}
1214
1215// We'll be leaking memory from this test.
1216TEST_F(DiskCacheBackendTest, TrimInvalidEntry2) {
1217  BackendTrimInvalidEntry2();
1218}
1219
1220// We'll be leaking memory from this test.
1221TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry2) {
1222  SetNewEviction();
1223  BackendTrimInvalidEntry2();
1224}
1225#endif  // !defined(LEAK_SANITIZER)
1226
1227void DiskCacheBackendTest::BackendEnumerations() {
1228  InitCache();
1229  Time initial = Time::Now();
1230
1231  const int kNumEntries = 100;
1232  for (int i = 0; i < kNumEntries; i++) {
1233    std::string key = GenerateKey(true);
1234    disk_cache::Entry* entry;
1235    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1236    entry->Close();
1237  }
1238  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1239  Time final = Time::Now();
1240
1241  disk_cache::Entry* entry;
1242  scoped_ptr<TestIterator> iter = CreateIterator();
1243  int count = 0;
1244  Time last_modified[kNumEntries];
1245  Time last_used[kNumEntries];
1246  while (iter->OpenNextEntry(&entry) == net::OK) {
1247    ASSERT_TRUE(NULL != entry);
1248    if (count < kNumEntries) {
1249      last_modified[count] = entry->GetLastModified();
1250      last_used[count] = entry->GetLastUsed();
1251      EXPECT_TRUE(initial <= last_modified[count]);
1252      EXPECT_TRUE(final >= last_modified[count]);
1253    }
1254
1255    entry->Close();
1256    count++;
1257  };
1258  EXPECT_EQ(kNumEntries, count);
1259
1260  iter = CreateIterator();
1261  count = 0;
1262  // The previous enumeration should not have changed the timestamps.
1263  while (iter->OpenNextEntry(&entry) == net::OK) {
1264    ASSERT_TRUE(NULL != entry);
1265    if (count < kNumEntries) {
1266      EXPECT_TRUE(last_modified[count] == entry->GetLastModified());
1267      EXPECT_TRUE(last_used[count] == entry->GetLastUsed());
1268    }
1269    entry->Close();
1270    count++;
1271  };
1272  EXPECT_EQ(kNumEntries, count);
1273}
1274
1275TEST_F(DiskCacheBackendTest, Enumerations) {
1276  BackendEnumerations();
1277}
1278
1279TEST_F(DiskCacheBackendTest, NewEvictionEnumerations) {
1280  SetNewEviction();
1281  BackendEnumerations();
1282}
1283
1284TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations) {
1285  SetMemoryOnlyMode();
1286  BackendEnumerations();
1287}
1288
1289TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations) {
1290  SetCacheType(net::SHADER_CACHE);
1291  BackendEnumerations();
1292}
1293
1294TEST_F(DiskCacheBackendTest, AppCacheEnumerations) {
1295  SetCacheType(net::APP_CACHE);
1296  BackendEnumerations();
1297}
1298
1299// Verifies enumerations while entries are open.
1300void DiskCacheBackendTest::BackendEnumerations2() {
1301  InitCache();
1302  const std::string first("first");
1303  const std::string second("second");
1304  disk_cache::Entry *entry1, *entry2;
1305  ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
1306  entry1->Close();
1307  ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
1308  entry2->Close();
1309  FlushQueueForTest();
1310
1311  // Make sure that the timestamp is not the same.
1312  AddDelay();
1313  ASSERT_EQ(net::OK, OpenEntry(second, &entry1));
1314  scoped_ptr<TestIterator> iter = CreateIterator();
1315  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
1316  EXPECT_EQ(entry2->GetKey(), second);
1317
1318  // Two entries and the iterator pointing at "first".
1319  entry1->Close();
1320  entry2->Close();
1321
1322  // The iterator should still be valid, so we should not crash.
1323  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
1324  EXPECT_EQ(entry2->GetKey(), first);
1325  entry2->Close();
1326  iter = CreateIterator();
1327
1328  // Modify the oldest entry and get the newest element.
1329  ASSERT_EQ(net::OK, OpenEntry(first, &entry1));
1330  EXPECT_EQ(0, WriteData(entry1, 0, 200, NULL, 0, false));
1331  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
1332  if (type_ == net::APP_CACHE) {
1333    // The list is not updated.
1334    EXPECT_EQ(entry2->GetKey(), second);
1335  } else {
1336    EXPECT_EQ(entry2->GetKey(), first);
1337  }
1338
1339  entry1->Close();
1340  entry2->Close();
1341}
1342
1343TEST_F(DiskCacheBackendTest, Enumerations2) {
1344  BackendEnumerations2();
1345}
1346
1347TEST_F(DiskCacheBackendTest, NewEvictionEnumerations2) {
1348  SetNewEviction();
1349  BackendEnumerations2();
1350}
1351
1352TEST_F(DiskCacheBackendTest, MemoryOnlyEnumerations2) {
1353  SetMemoryOnlyMode();
1354  BackendEnumerations2();
1355}
1356
1357TEST_F(DiskCacheBackendTest, AppCacheEnumerations2) {
1358  SetCacheType(net::APP_CACHE);
1359  BackendEnumerations2();
1360}
1361
1362TEST_F(DiskCacheBackendTest, ShaderCacheEnumerations2) {
1363  SetCacheType(net::SHADER_CACHE);
1364  BackendEnumerations2();
1365}
1366
1367// Verify that ReadData calls do not update the LRU cache
1368// when using the SHADER_CACHE type.
1369TEST_F(DiskCacheBackendTest, ShaderCacheEnumerationReadData) {
1370  SetCacheType(net::SHADER_CACHE);
1371  InitCache();
1372  const std::string first("first");
1373  const std::string second("second");
1374  disk_cache::Entry *entry1, *entry2;
1375  const int kSize = 50;
1376  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1377
1378  ASSERT_EQ(net::OK, CreateEntry(first, &entry1));
1379  memset(buffer1->data(), 0, kSize);
1380  base::strlcpy(buffer1->data(), "And the data to save", kSize);
1381  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
1382
1383  ASSERT_EQ(net::OK, CreateEntry(second, &entry2));
1384  entry2->Close();
1385
1386  FlushQueueForTest();
1387
1388  // Make sure that the timestamp is not the same.
1389  AddDelay();
1390
1391  // Read from the last item in the LRU.
1392  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
1393  entry1->Close();
1394
1395  scoped_ptr<TestIterator> iter = CreateIterator();
1396  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry2));
1397  EXPECT_EQ(entry2->GetKey(), second);
1398  entry2->Close();
1399}
1400
1401#if !defined(LEAK_SANITIZER)
1402// Verify handling of invalid entries while doing enumerations.
1403// We'll be leaking memory from this test.
1404void DiskCacheBackendTest::BackendInvalidEntryEnumeration() {
1405  InitCache();
1406
1407  std::string key("Some key");
1408  disk_cache::Entry *entry, *entry1, *entry2;
1409  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
1410
1411  const int kSize = 50;
1412  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
1413  memset(buffer1->data(), 0, kSize);
1414  base::strlcpy(buffer1->data(), "And the data to save", kSize);
1415  EXPECT_EQ(kSize, WriteData(entry1, 0, 0, buffer1.get(), kSize, false));
1416  entry1->Close();
1417  ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
1418  EXPECT_EQ(kSize, ReadData(entry1, 0, 0, buffer1.get(), kSize));
1419
1420  std::string key2("Another key");
1421  ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
1422  entry2->Close();
1423  ASSERT_EQ(2, cache_->GetEntryCount());
1424
1425  SimulateCrash();
1426
1427  scoped_ptr<TestIterator> iter = CreateIterator();
1428  int count = 0;
1429  while (iter->OpenNextEntry(&entry) == net::OK) {
1430    ASSERT_TRUE(NULL != entry);
1431    EXPECT_EQ(key2, entry->GetKey());
1432    entry->Close();
1433    count++;
1434  };
1435  EXPECT_EQ(1, count);
1436  EXPECT_EQ(1, cache_->GetEntryCount());
1437}
1438
1439// We'll be leaking memory from this test.
1440TEST_F(DiskCacheBackendTest, InvalidEntryEnumeration) {
1441  BackendInvalidEntryEnumeration();
1442}
1443
1444// We'll be leaking memory from this test.
1445TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntryEnumeration) {
1446  SetNewEviction();
1447  BackendInvalidEntryEnumeration();
1448}
1449#endif  // !defined(LEAK_SANITIZER)
1450
1451// Tests that if for some reason entries are modified close to existing cache
1452// iterators, we don't generate fatal errors or reset the cache.
1453void DiskCacheBackendTest::BackendFixEnumerators() {
1454  InitCache();
1455
1456  int seed = static_cast<int>(Time::Now().ToInternalValue());
1457  srand(seed);
1458
1459  const int kNumEntries = 10;
1460  for (int i = 0; i < kNumEntries; i++) {
1461    std::string key = GenerateKey(true);
1462    disk_cache::Entry* entry;
1463    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1464    entry->Close();
1465  }
1466  EXPECT_EQ(kNumEntries, cache_->GetEntryCount());
1467
1468  disk_cache::Entry *entry1, *entry2;
1469  scoped_ptr<TestIterator> iter1 = CreateIterator(), iter2 = CreateIterator();
1470  ASSERT_EQ(net::OK, iter1->OpenNextEntry(&entry1));
1471  ASSERT_TRUE(NULL != entry1);
1472  entry1->Close();
1473  entry1 = NULL;
1474
1475  // Let's go to the middle of the list.
1476  for (int i = 0; i < kNumEntries / 2; i++) {
1477    if (entry1)
1478      entry1->Close();
1479    ASSERT_EQ(net::OK, iter1->OpenNextEntry(&entry1));
1480    ASSERT_TRUE(NULL != entry1);
1481
1482    ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2));
1483    ASSERT_TRUE(NULL != entry2);
1484    entry2->Close();
1485  }
1486
1487  // Messing up with entry1 will modify entry2->next.
1488  entry1->Doom();
1489  ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2));
1490  ASSERT_TRUE(NULL != entry2);
1491
1492  // The link entry2->entry1 should be broken.
1493  EXPECT_NE(entry2->GetKey(), entry1->GetKey());
1494  entry1->Close();
1495  entry2->Close();
1496
1497  // And the second iterator should keep working.
1498  ASSERT_EQ(net::OK, iter2->OpenNextEntry(&entry2));
1499  ASSERT_TRUE(NULL != entry2);
1500  entry2->Close();
1501}
1502
1503TEST_F(DiskCacheBackendTest, FixEnumerators) {
1504  BackendFixEnumerators();
1505}
1506
1507TEST_F(DiskCacheBackendTest, NewEvictionFixEnumerators) {
1508  SetNewEviction();
1509  BackendFixEnumerators();
1510}
1511
1512void DiskCacheBackendTest::BackendDoomRecent() {
1513  InitCache();
1514
1515  disk_cache::Entry *entry;
1516  ASSERT_EQ(net::OK, CreateEntry("first", &entry));
1517  entry->Close();
1518  ASSERT_EQ(net::OK, CreateEntry("second", &entry));
1519  entry->Close();
1520  FlushQueueForTest();
1521
1522  AddDelay();
1523  Time middle = Time::Now();
1524
1525  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
1526  entry->Close();
1527  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
1528  entry->Close();
1529  FlushQueueForTest();
1530
1531  AddDelay();
1532  Time final = Time::Now();
1533
1534  ASSERT_EQ(4, cache_->GetEntryCount());
1535  EXPECT_EQ(net::OK, DoomEntriesSince(final));
1536  ASSERT_EQ(4, cache_->GetEntryCount());
1537
1538  EXPECT_EQ(net::OK, DoomEntriesSince(middle));
1539  ASSERT_EQ(2, cache_->GetEntryCount());
1540
1541  ASSERT_EQ(net::OK, OpenEntry("second", &entry));
1542  entry->Close();
1543}
1544
1545TEST_F(DiskCacheBackendTest, DoomRecent) {
1546  BackendDoomRecent();
1547}
1548
1549TEST_F(DiskCacheBackendTest, NewEvictionDoomRecent) {
1550  SetNewEviction();
1551  BackendDoomRecent();
1552}
1553
1554TEST_F(DiskCacheBackendTest, MemoryOnlyDoomRecent) {
1555  SetMemoryOnlyMode();
1556  BackendDoomRecent();
1557}
1558
1559TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesSinceSparse) {
1560  SetMemoryOnlyMode();
1561  base::Time start;
1562  InitSparseCache(&start, NULL);
1563  DoomEntriesSince(start);
1564  EXPECT_EQ(1, cache_->GetEntryCount());
1565}
1566
1567TEST_F(DiskCacheBackendTest, DoomEntriesSinceSparse) {
1568  base::Time start;
1569  InitSparseCache(&start, NULL);
1570  DoomEntriesSince(start);
1571  // NOTE: BackendImpl counts child entries in its GetEntryCount(), while
1572  // MemBackendImpl does not. Thats why expected value differs here from
1573  // MemoryOnlyDoomEntriesSinceSparse.
1574  EXPECT_EQ(3, cache_->GetEntryCount());
1575}
1576
1577TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAllSparse) {
1578  SetMemoryOnlyMode();
1579  InitSparseCache(NULL, NULL);
1580  EXPECT_EQ(net::OK, DoomAllEntries());
1581  EXPECT_EQ(0, cache_->GetEntryCount());
1582}
1583
1584TEST_F(DiskCacheBackendTest, DoomAllSparse) {
1585  InitSparseCache(NULL, NULL);
1586  EXPECT_EQ(net::OK, DoomAllEntries());
1587  EXPECT_EQ(0, cache_->GetEntryCount());
1588}
1589
1590void DiskCacheBackendTest::BackendDoomBetween() {
1591  InitCache();
1592
1593  disk_cache::Entry *entry;
1594  ASSERT_EQ(net::OK, CreateEntry("first", &entry));
1595  entry->Close();
1596  FlushQueueForTest();
1597
1598  AddDelay();
1599  Time middle_start = Time::Now();
1600
1601  ASSERT_EQ(net::OK, CreateEntry("second", &entry));
1602  entry->Close();
1603  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
1604  entry->Close();
1605  FlushQueueForTest();
1606
1607  AddDelay();
1608  Time middle_end = Time::Now();
1609
1610  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
1611  entry->Close();
1612  ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
1613  entry->Close();
1614  FlushQueueForTest();
1615
1616  AddDelay();
1617  Time final = Time::Now();
1618
1619  ASSERT_EQ(4, cache_->GetEntryCount());
1620  EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, middle_end));
1621  ASSERT_EQ(2, cache_->GetEntryCount());
1622
1623  ASSERT_EQ(net::OK, OpenEntry("fourth", &entry));
1624  entry->Close();
1625
1626  EXPECT_EQ(net::OK, DoomEntriesBetween(middle_start, final));
1627  ASSERT_EQ(1, cache_->GetEntryCount());
1628
1629  ASSERT_EQ(net::OK, OpenEntry("first", &entry));
1630  entry->Close();
1631}
1632
1633TEST_F(DiskCacheBackendTest, DoomBetween) {
1634  BackendDoomBetween();
1635}
1636
1637TEST_F(DiskCacheBackendTest, NewEvictionDoomBetween) {
1638  SetNewEviction();
1639  BackendDoomBetween();
1640}
1641
1642TEST_F(DiskCacheBackendTest, MemoryOnlyDoomBetween) {
1643  SetMemoryOnlyMode();
1644  BackendDoomBetween();
1645}
1646
1647TEST_F(DiskCacheBackendTest, MemoryOnlyDoomEntriesBetweenSparse) {
1648  SetMemoryOnlyMode();
1649  base::Time start, end;
1650  InitSparseCache(&start, &end);
1651  DoomEntriesBetween(start, end);
1652  EXPECT_EQ(3, cache_->GetEntryCount());
1653
1654  start = end;
1655  end = base::Time::Now();
1656  DoomEntriesBetween(start, end);
1657  EXPECT_EQ(1, cache_->GetEntryCount());
1658}
1659
1660TEST_F(DiskCacheBackendTest, DoomEntriesBetweenSparse) {
1661  base::Time start, end;
1662  InitSparseCache(&start, &end);
1663  DoomEntriesBetween(start, end);
1664  EXPECT_EQ(9, cache_->GetEntryCount());
1665
1666  start = end;
1667  end = base::Time::Now();
1668  DoomEntriesBetween(start, end);
1669  EXPECT_EQ(3, cache_->GetEntryCount());
1670}
1671
1672void DiskCacheBackendTest::BackendTransaction(const std::string& name,
1673                                              int num_entries, bool load) {
1674  success_ = false;
1675  ASSERT_TRUE(CopyTestCache(name));
1676  DisableFirstCleanup();
1677
1678  uint32 mask;
1679  if (load) {
1680    mask = 0xf;
1681    SetMaxSize(0x100000);
1682  } else {
1683    // Clear the settings from the previous run.
1684    mask = 0;
1685    SetMaxSize(0);
1686  }
1687  SetMask(mask);
1688
1689  InitCache();
1690  ASSERT_EQ(num_entries + 1, cache_->GetEntryCount());
1691
1692  std::string key("the first key");
1693  disk_cache::Entry* entry1;
1694  ASSERT_NE(net::OK, OpenEntry(key, &entry1));
1695
1696  int actual = cache_->GetEntryCount();
1697  if (num_entries != actual) {
1698    ASSERT_TRUE(load);
1699    // If there is a heavy load, inserting an entry will make another entry
1700    // dirty (on the hash bucket) so two entries are removed.
1701    ASSERT_EQ(num_entries - 1, actual);
1702  }
1703
1704  cache_.reset();
1705  cache_impl_ = NULL;
1706
1707  ASSERT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask));
1708  success_ = true;
1709}
1710
1711void DiskCacheBackendTest::BackendRecoverInsert() {
1712  // Tests with an empty cache.
1713  BackendTransaction("insert_empty1", 0, false);
1714  ASSERT_TRUE(success_) << "insert_empty1";
1715  BackendTransaction("insert_empty2", 0, false);
1716  ASSERT_TRUE(success_) << "insert_empty2";
1717  BackendTransaction("insert_empty3", 0, false);
1718  ASSERT_TRUE(success_) << "insert_empty3";
1719
1720  // Tests with one entry on the cache.
1721  BackendTransaction("insert_one1", 1, false);
1722  ASSERT_TRUE(success_) << "insert_one1";
1723  BackendTransaction("insert_one2", 1, false);
1724  ASSERT_TRUE(success_) << "insert_one2";
1725  BackendTransaction("insert_one3", 1, false);
1726  ASSERT_TRUE(success_) << "insert_one3";
1727
1728  // Tests with one hundred entries on the cache, tiny index.
1729  BackendTransaction("insert_load1", 100, true);
1730  ASSERT_TRUE(success_) << "insert_load1";
1731  BackendTransaction("insert_load2", 100, true);
1732  ASSERT_TRUE(success_) << "insert_load2";
1733}
1734
1735TEST_F(DiskCacheBackendTest, RecoverInsert) {
1736  BackendRecoverInsert();
1737}
1738
1739TEST_F(DiskCacheBackendTest, NewEvictionRecoverInsert) {
1740  SetNewEviction();
1741  BackendRecoverInsert();
1742}
1743
1744void DiskCacheBackendTest::BackendRecoverRemove() {
1745  // Removing the only element.
1746  BackendTransaction("remove_one1", 0, false);
1747  ASSERT_TRUE(success_) << "remove_one1";
1748  BackendTransaction("remove_one2", 0, false);
1749  ASSERT_TRUE(success_) << "remove_one2";
1750  BackendTransaction("remove_one3", 0, false);
1751  ASSERT_TRUE(success_) << "remove_one3";
1752
1753  // Removing the head.
1754  BackendTransaction("remove_head1", 1, false);
1755  ASSERT_TRUE(success_) << "remove_head1";
1756  BackendTransaction("remove_head2", 1, false);
1757  ASSERT_TRUE(success_) << "remove_head2";
1758  BackendTransaction("remove_head3", 1, false);
1759  ASSERT_TRUE(success_) << "remove_head3";
1760
1761  // Removing the tail.
1762  BackendTransaction("remove_tail1", 1, false);
1763  ASSERT_TRUE(success_) << "remove_tail1";
1764  BackendTransaction("remove_tail2", 1, false);
1765  ASSERT_TRUE(success_) << "remove_tail2";
1766  BackendTransaction("remove_tail3", 1, false);
1767  ASSERT_TRUE(success_) << "remove_tail3";
1768
1769  // Removing with one hundred entries on the cache, tiny index.
1770  BackendTransaction("remove_load1", 100, true);
1771  ASSERT_TRUE(success_) << "remove_load1";
1772  BackendTransaction("remove_load2", 100, true);
1773  ASSERT_TRUE(success_) << "remove_load2";
1774  BackendTransaction("remove_load3", 100, true);
1775  ASSERT_TRUE(success_) << "remove_load3";
1776
1777  // This case cannot be reverted.
1778  BackendTransaction("remove_one4", 0, false);
1779  ASSERT_TRUE(success_) << "remove_one4";
1780  BackendTransaction("remove_head4", 1, false);
1781  ASSERT_TRUE(success_) << "remove_head4";
1782}
1783
1784#if defined(OS_WIN)
1785// http://crbug.com/396392
1786#define MAYBE_RecoverRemove DISABLED_RecoverRemove
1787#else
1788#define MAYBE_RecoverRemove RecoverRemove
1789#endif
1790TEST_F(DiskCacheBackendTest, MAYBE_RecoverRemove) {
1791  BackendRecoverRemove();
1792}
1793
1794#if defined(OS_WIN)
1795// http://crbug.com/396392
1796#define MAYBE_NewEvictionRecoverRemove DISABLED_NewEvictionRecoverRemove
1797#else
1798#define MAYBE_NewEvictionRecoverRemove NewEvictionRecoverRemove
1799#endif
1800TEST_F(DiskCacheBackendTest, MAYBE_NewEvictionRecoverRemove) {
1801  SetNewEviction();
1802  BackendRecoverRemove();
1803}
1804
1805void DiskCacheBackendTest::BackendRecoverWithEviction() {
1806  success_ = false;
1807  ASSERT_TRUE(CopyTestCache("insert_load1"));
1808  DisableFirstCleanup();
1809
1810  SetMask(0xf);
1811  SetMaxSize(0x1000);
1812
1813  // We should not crash here.
1814  InitCache();
1815  DisableIntegrityCheck();
1816}
1817
1818TEST_F(DiskCacheBackendTest, RecoverWithEviction) {
1819  BackendRecoverWithEviction();
1820}
1821
1822TEST_F(DiskCacheBackendTest, NewEvictionRecoverWithEviction) {
1823  SetNewEviction();
1824  BackendRecoverWithEviction();
1825}
1826
1827// Tests that the |BackendImpl| fails to start with the wrong cache version.
1828TEST_F(DiskCacheTest, WrongVersion) {
1829  ASSERT_TRUE(CopyTestCache("wrong_version"));
1830  base::Thread cache_thread("CacheThread");
1831  ASSERT_TRUE(cache_thread.StartWithOptions(
1832      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1833  net::TestCompletionCallback cb;
1834
1835  scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
1836      cache_path_, cache_thread.task_runner(), NULL));
1837  int rv = cache->Init(cb.callback());
1838  ASSERT_EQ(net::ERR_FAILED, cb.GetResult(rv));
1839}
1840
1841class BadEntropyProvider : public base::FieldTrial::EntropyProvider {
1842 public:
1843  virtual ~BadEntropyProvider() {}
1844
1845  virtual double GetEntropyForTrial(const std::string& trial_name,
1846                                    uint32 randomization_seed) const OVERRIDE {
1847    return 0.5;
1848  }
1849};
1850
1851// Tests that the disk cache successfully joins the control group, dropping the
1852// existing cache in favour of a new empty cache.
1853// Disabled on android since this test requires cache creator to create
1854// blockfile caches.
1855#if !defined(OS_ANDROID)
1856TEST_F(DiskCacheTest, SimpleCacheControlJoin) {
1857  base::Thread cache_thread("CacheThread");
1858  ASSERT_TRUE(cache_thread.StartWithOptions(
1859                  base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1860
1861  scoped_ptr<disk_cache::BackendImpl> cache =
1862      CreateExistingEntryCache(cache_thread, cache_path_);
1863  ASSERT_TRUE(cache.get());
1864  cache.reset();
1865
1866  // Instantiate the SimpleCacheTrial, forcing this run into the
1867  // ExperimentControl group.
1868  base::FieldTrialList field_trial_list(new BadEntropyProvider());
1869  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1870                                         "ExperimentControl");
1871  net::TestCompletionCallback cb;
1872  scoped_ptr<disk_cache::Backend> base_cache;
1873  int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
1874                                          net::CACHE_BACKEND_BLOCKFILE,
1875                                          cache_path_,
1876                                          0,
1877                                          true,
1878                                          cache_thread.task_runner(),
1879                                          NULL,
1880                                          &base_cache,
1881                                          cb.callback());
1882  ASSERT_EQ(net::OK, cb.GetResult(rv));
1883  EXPECT_EQ(0, base_cache->GetEntryCount());
1884}
1885#endif
1886
1887// Tests that the disk cache can restart in the control group preserving
1888// existing entries.
1889TEST_F(DiskCacheTest, SimpleCacheControlRestart) {
1890  // Instantiate the SimpleCacheTrial, forcing this run into the
1891  // ExperimentControl group.
1892  base::FieldTrialList field_trial_list(new BadEntropyProvider());
1893  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1894                                         "ExperimentControl");
1895
1896  base::Thread cache_thread("CacheThread");
1897  ASSERT_TRUE(cache_thread.StartWithOptions(
1898                  base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1899
1900  scoped_ptr<disk_cache::BackendImpl> cache =
1901      CreateExistingEntryCache(cache_thread, cache_path_);
1902  ASSERT_TRUE(cache.get());
1903
1904  net::TestCompletionCallback cb;
1905
1906  const int kRestartCount = 5;
1907  for (int i = 0; i < kRestartCount; ++i) {
1908    cache.reset(new disk_cache::BackendImpl(
1909        cache_path_, cache_thread.message_loop_proxy(), NULL));
1910    int rv = cache->Init(cb.callback());
1911    ASSERT_EQ(net::OK, cb.GetResult(rv));
1912    EXPECT_EQ(1, cache->GetEntryCount());
1913
1914    disk_cache::Entry* entry = NULL;
1915    rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1916    EXPECT_EQ(net::OK, cb.GetResult(rv));
1917    EXPECT_TRUE(entry);
1918    entry->Close();
1919  }
1920}
1921
1922// Tests that the disk cache can leave the control group preserving existing
1923// entries.
1924TEST_F(DiskCacheTest, SimpleCacheControlLeave) {
1925  base::Thread cache_thread("CacheThread");
1926  ASSERT_TRUE(cache_thread.StartWithOptions(
1927      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1928
1929  {
1930    // Instantiate the SimpleCacheTrial, forcing this run into the
1931    // ExperimentControl group.
1932    base::FieldTrialList field_trial_list(new BadEntropyProvider());
1933    base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial",
1934                                           "ExperimentControl");
1935
1936    scoped_ptr<disk_cache::BackendImpl> cache =
1937        CreateExistingEntryCache(cache_thread, cache_path_);
1938    ASSERT_TRUE(cache.get());
1939  }
1940
1941  // Instantiate the SimpleCacheTrial, forcing this run into the
1942  // ExperimentNo group.
1943  base::FieldTrialList field_trial_list(new BadEntropyProvider());
1944  base::FieldTrialList::CreateFieldTrial("SimpleCacheTrial", "ExperimentNo");
1945  net::TestCompletionCallback cb;
1946
1947  const int kRestartCount = 5;
1948  for (int i = 0; i < kRestartCount; ++i) {
1949    scoped_ptr<disk_cache::BackendImpl> cache(new disk_cache::BackendImpl(
1950        cache_path_, cache_thread.message_loop_proxy(), NULL));
1951    int rv = cache->Init(cb.callback());
1952    ASSERT_EQ(net::OK, cb.GetResult(rv));
1953    EXPECT_EQ(1, cache->GetEntryCount());
1954
1955    disk_cache::Entry* entry = NULL;
1956    rv = cache->OpenEntry(kExistingEntryKey, &entry, cb.callback());
1957    EXPECT_EQ(net::OK, cb.GetResult(rv));
1958    EXPECT_TRUE(entry);
1959    entry->Close();
1960  }
1961}
1962
1963// Tests that the cache is properly restarted on recovery error.
1964// Disabled on android since this test requires cache creator to create
1965// blockfile caches.
1966#if !defined(OS_ANDROID)
1967TEST_F(DiskCacheBackendTest, DeleteOld) {
1968  ASSERT_TRUE(CopyTestCache("wrong_version"));
1969  SetNewEviction();
1970  base::Thread cache_thread("CacheThread");
1971  ASSERT_TRUE(cache_thread.StartWithOptions(
1972      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
1973
1974  net::TestCompletionCallback cb;
1975  bool prev = base::ThreadRestrictions::SetIOAllowed(false);
1976  base::FilePath path(cache_path_);
1977  int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
1978                                          net::CACHE_BACKEND_BLOCKFILE,
1979                                          path,
1980                                          0,
1981                                          true,
1982                                          cache_thread.task_runner(),
1983                                          NULL,
1984                                          &cache_,
1985                                          cb.callback());
1986  path.clear();  // Make sure path was captured by the previous call.
1987  ASSERT_EQ(net::OK, cb.GetResult(rv));
1988  base::ThreadRestrictions::SetIOAllowed(prev);
1989  cache_.reset();
1990  EXPECT_TRUE(CheckCacheIntegrity(cache_path_, new_eviction_, mask_));
1991}
1992#endif
1993
1994// We want to be able to deal with messed up entries on disk.
1995void DiskCacheBackendTest::BackendInvalidEntry2() {
1996  ASSERT_TRUE(CopyTestCache("bad_entry"));
1997  DisableFirstCleanup();
1998  InitCache();
1999
2000  disk_cache::Entry *entry1, *entry2;
2001  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry1));
2002  EXPECT_NE(net::OK, OpenEntry("some other key", &entry2));
2003  entry1->Close();
2004
2005  // CheckCacheIntegrity will fail at this point.
2006  DisableIntegrityCheck();
2007}
2008
2009TEST_F(DiskCacheBackendTest, InvalidEntry2) {
2010  BackendInvalidEntry2();
2011}
2012
2013TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry2) {
2014  SetNewEviction();
2015  BackendInvalidEntry2();
2016}
2017
2018// Tests that we don't crash or hang when enumerating this cache.
2019void DiskCacheBackendTest::BackendInvalidEntry3() {
2020  SetMask(0x1);  // 2-entry table.
2021  SetMaxSize(0x3000);  // 12 kB.
2022  DisableFirstCleanup();
2023  InitCache();
2024
2025  disk_cache::Entry* entry;
2026  scoped_ptr<TestIterator> iter = CreateIterator();
2027  while (iter->OpenNextEntry(&entry) == net::OK) {
2028    entry->Close();
2029  }
2030}
2031
2032TEST_F(DiskCacheBackendTest, InvalidEntry3) {
2033  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2034  BackendInvalidEntry3();
2035}
2036
2037TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry3) {
2038  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2039  SetNewEviction();
2040  BackendInvalidEntry3();
2041  DisableIntegrityCheck();
2042}
2043
2044// Test that we handle a dirty entry on the LRU list, already replaced with
2045// the same key, and with hash collisions.
2046TEST_F(DiskCacheBackendTest, InvalidEntry4) {
2047  ASSERT_TRUE(CopyTestCache("dirty_entry3"));
2048  SetMask(0x1);  // 2-entry table.
2049  SetMaxSize(0x3000);  // 12 kB.
2050  DisableFirstCleanup();
2051  InitCache();
2052
2053  TrimForTest(false);
2054}
2055
2056// Test that we handle a dirty entry on the deleted list, already replaced with
2057// the same key, and with hash collisions.
2058TEST_F(DiskCacheBackendTest, InvalidEntry5) {
2059  ASSERT_TRUE(CopyTestCache("dirty_entry4"));
2060  SetNewEviction();
2061  SetMask(0x1);  // 2-entry table.
2062  SetMaxSize(0x3000);  // 12 kB.
2063  DisableFirstCleanup();
2064  InitCache();
2065
2066  TrimDeletedListForTest(false);
2067}
2068
2069TEST_F(DiskCacheBackendTest, InvalidEntry6) {
2070  ASSERT_TRUE(CopyTestCache("dirty_entry5"));
2071  SetMask(0x1);  // 2-entry table.
2072  SetMaxSize(0x3000);  // 12 kB.
2073  DisableFirstCleanup();
2074  InitCache();
2075
2076  // There is a dirty entry (but marked as clean) at the end, pointing to a
2077  // deleted entry through the hash collision list. We should not re-insert the
2078  // deleted entry into the index table.
2079
2080  TrimForTest(false);
2081  // The cache should be clean (as detected by CheckCacheIntegrity).
2082}
2083
2084// Tests that we don't hang when there is a loop on the hash collision list.
2085// The test cache could be a result of bug 69135.
2086TEST_F(DiskCacheBackendTest, BadNextEntry1) {
2087  ASSERT_TRUE(CopyTestCache("list_loop2"));
2088  SetMask(0x1);  // 2-entry table.
2089  SetMaxSize(0x3000);  // 12 kB.
2090  DisableFirstCleanup();
2091  InitCache();
2092
2093  // The second entry points at itselft, and the first entry is not accessible
2094  // though the index, but it is at the head of the LRU.
2095
2096  disk_cache::Entry* entry;
2097  ASSERT_EQ(net::OK, CreateEntry("The first key", &entry));
2098  entry->Close();
2099
2100  TrimForTest(false);
2101  TrimForTest(false);
2102  ASSERT_EQ(net::OK, OpenEntry("The first key", &entry));
2103  entry->Close();
2104  EXPECT_EQ(1, cache_->GetEntryCount());
2105}
2106
2107// Tests that we don't hang when there is a loop on the hash collision list.
2108// The test cache could be a result of bug 69135.
2109TEST_F(DiskCacheBackendTest, BadNextEntry2) {
2110  ASSERT_TRUE(CopyTestCache("list_loop3"));
2111  SetMask(0x1);  // 2-entry table.
2112  SetMaxSize(0x3000);  // 12 kB.
2113  DisableFirstCleanup();
2114  InitCache();
2115
2116  // There is a wide loop of 5 entries.
2117
2118  disk_cache::Entry* entry;
2119  ASSERT_NE(net::OK, OpenEntry("Not present key", &entry));
2120}
2121
2122TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry6) {
2123  ASSERT_TRUE(CopyTestCache("bad_rankings3"));
2124  DisableFirstCleanup();
2125  SetNewEviction();
2126  InitCache();
2127
2128  // The second entry is dirty, but removing it should not corrupt the list.
2129  disk_cache::Entry* entry;
2130  ASSERT_NE(net::OK, OpenEntry("the second key", &entry));
2131  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
2132
2133  // This should not delete the cache.
2134  entry->Doom();
2135  FlushQueueForTest();
2136  entry->Close();
2137
2138  ASSERT_EQ(net::OK, OpenEntry("some other key", &entry));
2139  entry->Close();
2140}
2141
2142// Tests handling of corrupt entries by keeping the rankings node around, with
2143// a fatal failure.
2144void DiskCacheBackendTest::BackendInvalidEntry7() {
2145  const int kSize = 0x3000;  // 12 kB.
2146  SetMaxSize(kSize * 10);
2147  InitCache();
2148
2149  std::string first("some key");
2150  std::string second("something else");
2151  disk_cache::Entry* entry;
2152  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2153  entry->Close();
2154  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2155
2156  // Corrupt this entry.
2157  disk_cache::EntryImpl* entry_impl =
2158      static_cast<disk_cache::EntryImpl*>(entry);
2159
2160  entry_impl->rankings()->Data()->next = 0;
2161  entry_impl->rankings()->Store();
2162  entry->Close();
2163  FlushQueueForTest();
2164  EXPECT_EQ(2, cache_->GetEntryCount());
2165
2166  // This should detect the bad entry.
2167  EXPECT_NE(net::OK, OpenEntry(second, &entry));
2168  EXPECT_EQ(1, cache_->GetEntryCount());
2169
2170  // We should delete the cache. The list still has a corrupt node.
2171  scoped_ptr<TestIterator> iter = CreateIterator();
2172  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2173  FlushQueueForTest();
2174  EXPECT_EQ(0, cache_->GetEntryCount());
2175}
2176
2177TEST_F(DiskCacheBackendTest, InvalidEntry7) {
2178  BackendInvalidEntry7();
2179}
2180
2181TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry7) {
2182  SetNewEviction();
2183  BackendInvalidEntry7();
2184}
2185
2186// Tests handling of corrupt entries by keeping the rankings node around, with
2187// a non fatal failure.
2188void DiskCacheBackendTest::BackendInvalidEntry8() {
2189  const int kSize = 0x3000;  // 12 kB
2190  SetMaxSize(kSize * 10);
2191  InitCache();
2192
2193  std::string first("some key");
2194  std::string second("something else");
2195  disk_cache::Entry* entry;
2196  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2197  entry->Close();
2198  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2199
2200  // Corrupt this entry.
2201  disk_cache::EntryImpl* entry_impl =
2202      static_cast<disk_cache::EntryImpl*>(entry);
2203
2204  entry_impl->rankings()->Data()->contents = 0;
2205  entry_impl->rankings()->Store();
2206  entry->Close();
2207  FlushQueueForTest();
2208  EXPECT_EQ(2, cache_->GetEntryCount());
2209
2210  // This should detect the bad entry.
2211  EXPECT_NE(net::OK, OpenEntry(second, &entry));
2212  EXPECT_EQ(1, cache_->GetEntryCount());
2213
2214  // We should not delete the cache.
2215  scoped_ptr<TestIterator> iter = CreateIterator();
2216  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2217  entry->Close();
2218  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2219  EXPECT_EQ(1, cache_->GetEntryCount());
2220}
2221
2222TEST_F(DiskCacheBackendTest, InvalidEntry8) {
2223  BackendInvalidEntry8();
2224}
2225
2226TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry8) {
2227  SetNewEviction();
2228  BackendInvalidEntry8();
2229}
2230
2231// Tests handling of corrupt entries detected by enumerations. Note that these
2232// tests (xx9 to xx11) are basically just going though slightly different
2233// codepaths so they are tighlty coupled with the code, but that is better than
2234// not testing error handling code.
2235void DiskCacheBackendTest::BackendInvalidEntry9(bool eviction) {
2236  const int kSize = 0x3000;  // 12 kB.
2237  SetMaxSize(kSize * 10);
2238  InitCache();
2239
2240  std::string first("some key");
2241  std::string second("something else");
2242  disk_cache::Entry* entry;
2243  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2244  entry->Close();
2245  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2246
2247  // Corrupt this entry.
2248  disk_cache::EntryImpl* entry_impl =
2249      static_cast<disk_cache::EntryImpl*>(entry);
2250
2251  entry_impl->entry()->Data()->state = 0xbad;
2252  entry_impl->entry()->Store();
2253  entry->Close();
2254  FlushQueueForTest();
2255  EXPECT_EQ(2, cache_->GetEntryCount());
2256
2257  if (eviction) {
2258    TrimForTest(false);
2259    EXPECT_EQ(1, cache_->GetEntryCount());
2260    TrimForTest(false);
2261    EXPECT_EQ(1, cache_->GetEntryCount());
2262  } else {
2263    // We should detect the problem through the list, but we should not delete
2264    // the entry, just fail the iteration.
2265    scoped_ptr<TestIterator> iter = CreateIterator();
2266    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2267
2268    // Now a full iteration will work, and return one entry.
2269    ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2270    entry->Close();
2271    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2272
2273    // This should detect what's left of the bad entry.
2274    EXPECT_NE(net::OK, OpenEntry(second, &entry));
2275    EXPECT_EQ(2, cache_->GetEntryCount());
2276  }
2277  DisableIntegrityCheck();
2278}
2279
2280TEST_F(DiskCacheBackendTest, InvalidEntry9) {
2281  BackendInvalidEntry9(false);
2282}
2283
2284TEST_F(DiskCacheBackendTest, NewEvictionInvalidEntry9) {
2285  SetNewEviction();
2286  BackendInvalidEntry9(false);
2287}
2288
2289TEST_F(DiskCacheBackendTest, TrimInvalidEntry9) {
2290  BackendInvalidEntry9(true);
2291}
2292
2293TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry9) {
2294  SetNewEviction();
2295  BackendInvalidEntry9(true);
2296}
2297
2298// Tests handling of corrupt entries detected by enumerations.
2299void DiskCacheBackendTest::BackendInvalidEntry10(bool eviction) {
2300  const int kSize = 0x3000;  // 12 kB.
2301  SetMaxSize(kSize * 10);
2302  SetNewEviction();
2303  InitCache();
2304
2305  std::string first("some key");
2306  std::string second("something else");
2307  disk_cache::Entry* entry;
2308  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2309  entry->Close();
2310  ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2311  EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2312  entry->Close();
2313  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2314
2315  // Corrupt this entry.
2316  disk_cache::EntryImpl* entry_impl =
2317      static_cast<disk_cache::EntryImpl*>(entry);
2318
2319  entry_impl->entry()->Data()->state = 0xbad;
2320  entry_impl->entry()->Store();
2321  entry->Close();
2322  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2323  entry->Close();
2324  EXPECT_EQ(3, cache_->GetEntryCount());
2325
2326  // We have:
2327  // List 0: third -> second (bad).
2328  // List 1: first.
2329
2330  if (eviction) {
2331    // Detection order: second -> first -> third.
2332    TrimForTest(false);
2333    EXPECT_EQ(3, cache_->GetEntryCount());
2334    TrimForTest(false);
2335    EXPECT_EQ(2, cache_->GetEntryCount());
2336    TrimForTest(false);
2337    EXPECT_EQ(1, cache_->GetEntryCount());
2338  } else {
2339    // Detection order: third -> second -> first.
2340    // We should detect the problem through the list, but we should not delete
2341    // the entry.
2342    scoped_ptr<TestIterator> iter = CreateIterator();
2343    ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2344    entry->Close();
2345    ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2346    EXPECT_EQ(first, entry->GetKey());
2347    entry->Close();
2348    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2349  }
2350  DisableIntegrityCheck();
2351}
2352
2353TEST_F(DiskCacheBackendTest, InvalidEntry10) {
2354  BackendInvalidEntry10(false);
2355}
2356
2357TEST_F(DiskCacheBackendTest, TrimInvalidEntry10) {
2358  BackendInvalidEntry10(true);
2359}
2360
2361// Tests handling of corrupt entries detected by enumerations.
2362void DiskCacheBackendTest::BackendInvalidEntry11(bool eviction) {
2363  const int kSize = 0x3000;  // 12 kB.
2364  SetMaxSize(kSize * 10);
2365  SetNewEviction();
2366  InitCache();
2367
2368  std::string first("some key");
2369  std::string second("something else");
2370  disk_cache::Entry* entry;
2371  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2372  entry->Close();
2373  ASSERT_EQ(net::OK, OpenEntry(first, &entry));
2374  EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2375  entry->Close();
2376  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2377  entry->Close();
2378  ASSERT_EQ(net::OK, OpenEntry(second, &entry));
2379  EXPECT_EQ(0, WriteData(entry, 0, 200, NULL, 0, false));
2380
2381  // Corrupt this entry.
2382  disk_cache::EntryImpl* entry_impl =
2383      static_cast<disk_cache::EntryImpl*>(entry);
2384
2385  entry_impl->entry()->Data()->state = 0xbad;
2386  entry_impl->entry()->Store();
2387  entry->Close();
2388  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2389  entry->Close();
2390  FlushQueueForTest();
2391  EXPECT_EQ(3, cache_->GetEntryCount());
2392
2393  // We have:
2394  // List 0: third.
2395  // List 1: second (bad) -> first.
2396
2397  if (eviction) {
2398    // Detection order: third -> first -> second.
2399    TrimForTest(false);
2400    EXPECT_EQ(2, cache_->GetEntryCount());
2401    TrimForTest(false);
2402    EXPECT_EQ(1, cache_->GetEntryCount());
2403    TrimForTest(false);
2404    EXPECT_EQ(1, cache_->GetEntryCount());
2405  } else {
2406    // Detection order: third -> second.
2407    // We should detect the problem through the list, but we should not delete
2408    // the entry, just fail the iteration.
2409    scoped_ptr<TestIterator> iter = CreateIterator();
2410    ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2411    entry->Close();
2412    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2413
2414    // Now a full iteration will work, and return two entries.
2415    ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2416    entry->Close();
2417    ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2418    entry->Close();
2419    EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2420  }
2421  DisableIntegrityCheck();
2422}
2423
2424TEST_F(DiskCacheBackendTest, InvalidEntry11) {
2425  BackendInvalidEntry11(false);
2426}
2427
2428TEST_F(DiskCacheBackendTest, TrimInvalidEntry11) {
2429  BackendInvalidEntry11(true);
2430}
2431
2432// Tests handling of corrupt entries in the middle of a long eviction run.
2433void DiskCacheBackendTest::BackendTrimInvalidEntry12() {
2434  const int kSize = 0x3000;  // 12 kB
2435  SetMaxSize(kSize * 10);
2436  InitCache();
2437
2438  std::string first("some key");
2439  std::string second("something else");
2440  disk_cache::Entry* entry;
2441  ASSERT_EQ(net::OK, CreateEntry(first, &entry));
2442  entry->Close();
2443  ASSERT_EQ(net::OK, CreateEntry(second, &entry));
2444
2445  // Corrupt this entry.
2446  disk_cache::EntryImpl* entry_impl =
2447      static_cast<disk_cache::EntryImpl*>(entry);
2448
2449  entry_impl->entry()->Data()->state = 0xbad;
2450  entry_impl->entry()->Store();
2451  entry->Close();
2452  ASSERT_EQ(net::OK, CreateEntry("third", &entry));
2453  entry->Close();
2454  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry));
2455  TrimForTest(true);
2456  EXPECT_EQ(1, cache_->GetEntryCount());
2457  entry->Close();
2458  DisableIntegrityCheck();
2459}
2460
2461TEST_F(DiskCacheBackendTest, TrimInvalidEntry12) {
2462  BackendTrimInvalidEntry12();
2463}
2464
2465TEST_F(DiskCacheBackendTest, NewEvictionTrimInvalidEntry12) {
2466  SetNewEviction();
2467  BackendTrimInvalidEntry12();
2468}
2469
2470// We want to be able to deal with messed up entries on disk.
2471void DiskCacheBackendTest::BackendInvalidRankings2() {
2472  ASSERT_TRUE(CopyTestCache("bad_rankings"));
2473  DisableFirstCleanup();
2474  InitCache();
2475
2476  disk_cache::Entry *entry1, *entry2;
2477  EXPECT_NE(net::OK, OpenEntry("the first key", &entry1));
2478  ASSERT_EQ(net::OK, OpenEntry("some other key", &entry2));
2479  entry2->Close();
2480
2481  // CheckCacheIntegrity will fail at this point.
2482  DisableIntegrityCheck();
2483}
2484
2485TEST_F(DiskCacheBackendTest, InvalidRankings2) {
2486  BackendInvalidRankings2();
2487}
2488
2489TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankings2) {
2490  SetNewEviction();
2491  BackendInvalidRankings2();
2492}
2493
2494// If the LRU is corrupt, we delete the cache.
2495void DiskCacheBackendTest::BackendInvalidRankings() {
2496  disk_cache::Entry* entry;
2497  scoped_ptr<TestIterator> iter = CreateIterator();
2498  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
2499  entry->Close();
2500  EXPECT_EQ(2, cache_->GetEntryCount());
2501
2502  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry));
2503  FlushQueueForTest();  // Allow the restart to finish.
2504  EXPECT_EQ(0, cache_->GetEntryCount());
2505}
2506
2507TEST_F(DiskCacheBackendTest, InvalidRankingsSuccess) {
2508  ASSERT_TRUE(CopyTestCache("bad_rankings"));
2509  DisableFirstCleanup();
2510  InitCache();
2511  BackendInvalidRankings();
2512}
2513
2514TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsSuccess) {
2515  ASSERT_TRUE(CopyTestCache("bad_rankings"));
2516  DisableFirstCleanup();
2517  SetNewEviction();
2518  InitCache();
2519  BackendInvalidRankings();
2520}
2521
2522TEST_F(DiskCacheBackendTest, InvalidRankingsFailure) {
2523  ASSERT_TRUE(CopyTestCache("bad_rankings"));
2524  DisableFirstCleanup();
2525  InitCache();
2526  SetTestMode();  // Fail cache reinitialization.
2527  BackendInvalidRankings();
2528}
2529
2530TEST_F(DiskCacheBackendTest, NewEvictionInvalidRankingsFailure) {
2531  ASSERT_TRUE(CopyTestCache("bad_rankings"));
2532  DisableFirstCleanup();
2533  SetNewEviction();
2534  InitCache();
2535  SetTestMode();  // Fail cache reinitialization.
2536  BackendInvalidRankings();
2537}
2538
2539// If the LRU is corrupt and we have open entries, we disable the cache.
2540void DiskCacheBackendTest::BackendDisable() {
2541  disk_cache::Entry *entry1, *entry2;
2542  scoped_ptr<TestIterator> iter = CreateIterator();
2543  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
2544
2545  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
2546  EXPECT_EQ(0, cache_->GetEntryCount());
2547  EXPECT_NE(net::OK, CreateEntry("Something new", &entry2));
2548
2549  entry1->Close();
2550  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
2551  FlushQueueForTest();  // This one actually allows that task to complete.
2552
2553  EXPECT_EQ(0, cache_->GetEntryCount());
2554}
2555
2556TEST_F(DiskCacheBackendTest, DisableSuccess) {
2557  ASSERT_TRUE(CopyTestCache("bad_rankings"));
2558  DisableFirstCleanup();
2559  InitCache();
2560  BackendDisable();
2561}
2562
2563TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess) {
2564  ASSERT_TRUE(CopyTestCache("bad_rankings"));
2565  DisableFirstCleanup();
2566  SetNewEviction();
2567  InitCache();
2568  BackendDisable();
2569}
2570
2571TEST_F(DiskCacheBackendTest, DisableFailure) {
2572  ASSERT_TRUE(CopyTestCache("bad_rankings"));
2573  DisableFirstCleanup();
2574  InitCache();
2575  SetTestMode();  // Fail cache reinitialization.
2576  BackendDisable();
2577}
2578
2579TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure) {
2580  ASSERT_TRUE(CopyTestCache("bad_rankings"));
2581  DisableFirstCleanup();
2582  SetNewEviction();
2583  InitCache();
2584  SetTestMode();  // Fail cache reinitialization.
2585  BackendDisable();
2586}
2587
2588// This is another type of corruption on the LRU; disable the cache.
2589void DiskCacheBackendTest::BackendDisable2() {
2590  EXPECT_EQ(8, cache_->GetEntryCount());
2591
2592  disk_cache::Entry* entry;
2593  scoped_ptr<TestIterator> iter = CreateIterator();
2594  int count = 0;
2595  while (iter->OpenNextEntry(&entry) == net::OK) {
2596    ASSERT_TRUE(NULL != entry);
2597    entry->Close();
2598    count++;
2599    ASSERT_LT(count, 9);
2600  };
2601
2602  FlushQueueForTest();
2603  EXPECT_EQ(0, cache_->GetEntryCount());
2604}
2605
2606TEST_F(DiskCacheBackendTest, DisableSuccess2) {
2607  ASSERT_TRUE(CopyTestCache("list_loop"));
2608  DisableFirstCleanup();
2609  InitCache();
2610  BackendDisable2();
2611}
2612
2613TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess2) {
2614  ASSERT_TRUE(CopyTestCache("list_loop"));
2615  DisableFirstCleanup();
2616  SetNewEviction();
2617  InitCache();
2618  BackendDisable2();
2619}
2620
2621TEST_F(DiskCacheBackendTest, DisableFailure2) {
2622  ASSERT_TRUE(CopyTestCache("list_loop"));
2623  DisableFirstCleanup();
2624  InitCache();
2625  SetTestMode();  // Fail cache reinitialization.
2626  BackendDisable2();
2627}
2628
2629TEST_F(DiskCacheBackendTest, NewEvictionDisableFailure2) {
2630  ASSERT_TRUE(CopyTestCache("list_loop"));
2631  DisableFirstCleanup();
2632  SetNewEviction();
2633  InitCache();
2634  SetTestMode();  // Fail cache reinitialization.
2635  BackendDisable2();
2636}
2637
2638// If the index size changes when we disable the cache, we should not crash.
2639void DiskCacheBackendTest::BackendDisable3() {
2640  disk_cache::Entry *entry1, *entry2;
2641  scoped_ptr<TestIterator> iter = CreateIterator();
2642  EXPECT_EQ(2, cache_->GetEntryCount());
2643  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
2644  entry1->Close();
2645
2646  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry2));
2647  FlushQueueForTest();
2648
2649  ASSERT_EQ(net::OK, CreateEntry("Something new", &entry2));
2650  entry2->Close();
2651
2652  EXPECT_EQ(1, cache_->GetEntryCount());
2653}
2654
2655TEST_F(DiskCacheBackendTest, DisableSuccess3) {
2656  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2657  DisableFirstCleanup();
2658  SetMaxSize(20 * 1024 * 1024);
2659  InitCache();
2660  BackendDisable3();
2661}
2662
2663TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess3) {
2664  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2665  DisableFirstCleanup();
2666  SetMaxSize(20 * 1024 * 1024);
2667  SetNewEviction();
2668  InitCache();
2669  BackendDisable3();
2670}
2671
2672// If we disable the cache, already open entries should work as far as possible.
2673void DiskCacheBackendTest::BackendDisable4() {
2674  disk_cache::Entry *entry1, *entry2, *entry3, *entry4;
2675  scoped_ptr<TestIterator> iter = CreateIterator();
2676  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry1));
2677
2678  char key2[2000];
2679  char key3[20000];
2680  CacheTestFillBuffer(key2, sizeof(key2), true);
2681  CacheTestFillBuffer(key3, sizeof(key3), true);
2682  key2[sizeof(key2) - 1] = '\0';
2683  key3[sizeof(key3) - 1] = '\0';
2684  ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));
2685  ASSERT_EQ(net::OK, CreateEntry(key3, &entry3));
2686
2687  const int kBufSize = 20000;
2688  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kBufSize));
2689  memset(buf->data(), 0, kBufSize);
2690  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2691  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2692
2693  // This line should disable the cache but not delete it.
2694  EXPECT_NE(net::OK, iter->OpenNextEntry(&entry4));
2695  EXPECT_EQ(0, cache_->GetEntryCount());
2696
2697  EXPECT_NE(net::OK, CreateEntry("cache is disabled", &entry4));
2698
2699  EXPECT_EQ(100, ReadData(entry2, 0, 0, buf.get(), 100));
2700  EXPECT_EQ(100, WriteData(entry2, 0, 0, buf.get(), 100, false));
2701  EXPECT_EQ(100, WriteData(entry2, 1, 0, buf.get(), 100, false));
2702
2703  EXPECT_EQ(kBufSize, ReadData(entry3, 0, 0, buf.get(), kBufSize));
2704  EXPECT_EQ(kBufSize, WriteData(entry3, 0, 0, buf.get(), kBufSize, false));
2705  EXPECT_EQ(kBufSize, WriteData(entry3, 1, 0, buf.get(), kBufSize, false));
2706
2707  std::string key = entry2->GetKey();
2708  EXPECT_EQ(sizeof(key2) - 1, key.size());
2709  key = entry3->GetKey();
2710  EXPECT_EQ(sizeof(key3) - 1, key.size());
2711
2712  entry1->Close();
2713  entry2->Close();
2714  entry3->Close();
2715  FlushQueueForTest();  // Flushing the Close posts a task to restart the cache.
2716  FlushQueueForTest();  // This one actually allows that task to complete.
2717
2718  EXPECT_EQ(0, cache_->GetEntryCount());
2719}
2720
2721TEST_F(DiskCacheBackendTest, DisableSuccess4) {
2722  ASSERT_TRUE(CopyTestCache("bad_rankings"));
2723  DisableFirstCleanup();
2724  InitCache();
2725  BackendDisable4();
2726}
2727
2728TEST_F(DiskCacheBackendTest, NewEvictionDisableSuccess4) {
2729  ASSERT_TRUE(CopyTestCache("bad_rankings"));
2730  DisableFirstCleanup();
2731  SetNewEviction();
2732  InitCache();
2733  BackendDisable4();
2734}
2735
2736TEST_F(DiskCacheTest, Backend_UsageStatsTimer) {
2737  MessageLoopHelper helper;
2738
2739  ASSERT_TRUE(CleanupCacheDir());
2740  scoped_ptr<disk_cache::BackendImpl> cache;
2741  cache.reset(new disk_cache::BackendImpl(
2742      cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL));
2743  ASSERT_TRUE(NULL != cache.get());
2744  cache->SetUnitTestMode();
2745  ASSERT_EQ(net::OK, cache->SyncInit());
2746
2747  // Wait for a callback that never comes... about 2 secs :). The message loop
2748  // has to run to allow invocation of the usage timer.
2749  helper.WaitUntilCacheIoFinished(1);
2750}
2751
2752TEST_F(DiskCacheBackendTest, TimerNotCreated) {
2753  ASSERT_TRUE(CopyTestCache("wrong_version"));
2754
2755  scoped_ptr<disk_cache::BackendImpl> cache;
2756  cache.reset(new disk_cache::BackendImpl(
2757      cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL));
2758  ASSERT_TRUE(NULL != cache.get());
2759  cache->SetUnitTestMode();
2760  ASSERT_NE(net::OK, cache->SyncInit());
2761
2762  ASSERT_TRUE(NULL == cache->GetTimerForTest());
2763
2764  DisableIntegrityCheck();
2765}
2766
2767TEST_F(DiskCacheBackendTest, Backend_UsageStats) {
2768  InitCache();
2769  disk_cache::Entry* entry;
2770  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
2771  entry->Close();
2772  FlushQueueForTest();
2773
2774  disk_cache::StatsItems stats;
2775  cache_->GetStats(&stats);
2776  EXPECT_FALSE(stats.empty());
2777
2778  disk_cache::StatsItems::value_type hits("Create hit", "0x1");
2779  EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2780
2781  cache_.reset();
2782
2783  // Now open the cache and verify that the stats are still there.
2784  DisableFirstCleanup();
2785  InitCache();
2786  EXPECT_EQ(1, cache_->GetEntryCount());
2787
2788  stats.clear();
2789  cache_->GetStats(&stats);
2790  EXPECT_FALSE(stats.empty());
2791
2792  EXPECT_EQ(1, std::count(stats.begin(), stats.end(), hits));
2793}
2794
2795void DiskCacheBackendTest::BackendDoomAll() {
2796  InitCache();
2797
2798  disk_cache::Entry *entry1, *entry2;
2799  ASSERT_EQ(net::OK, CreateEntry("first", &entry1));
2800  ASSERT_EQ(net::OK, CreateEntry("second", &entry2));
2801  entry1->Close();
2802  entry2->Close();
2803
2804  ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2805  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2806
2807  ASSERT_EQ(4, cache_->GetEntryCount());
2808  EXPECT_EQ(net::OK, DoomAllEntries());
2809  ASSERT_EQ(0, cache_->GetEntryCount());
2810
2811  // We should stop posting tasks at some point (if we post any).
2812  base::MessageLoop::current()->RunUntilIdle();
2813
2814  disk_cache::Entry *entry3, *entry4;
2815  EXPECT_NE(net::OK, OpenEntry("third", &entry3));
2816  ASSERT_EQ(net::OK, CreateEntry("third", &entry3));
2817  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry4));
2818
2819  EXPECT_EQ(net::OK, DoomAllEntries());
2820  ASSERT_EQ(0, cache_->GetEntryCount());
2821
2822  entry1->Close();
2823  entry2->Close();
2824  entry3->Doom();  // The entry should be already doomed, but this must work.
2825  entry3->Close();
2826  entry4->Close();
2827
2828  // Now try with all references released.
2829  ASSERT_EQ(net::OK, CreateEntry("third", &entry1));
2830  ASSERT_EQ(net::OK, CreateEntry("fourth", &entry2));
2831  entry1->Close();
2832  entry2->Close();
2833
2834  ASSERT_EQ(2, cache_->GetEntryCount());
2835  EXPECT_EQ(net::OK, DoomAllEntries());
2836  ASSERT_EQ(0, cache_->GetEntryCount());
2837
2838  EXPECT_EQ(net::OK, DoomAllEntries());
2839}
2840
2841TEST_F(DiskCacheBackendTest, DoomAll) {
2842  BackendDoomAll();
2843}
2844
2845TEST_F(DiskCacheBackendTest, NewEvictionDoomAll) {
2846  SetNewEviction();
2847  BackendDoomAll();
2848}
2849
2850TEST_F(DiskCacheBackendTest, MemoryOnlyDoomAll) {
2851  SetMemoryOnlyMode();
2852  BackendDoomAll();
2853}
2854
2855TEST_F(DiskCacheBackendTest, AppCacheOnlyDoomAll) {
2856  SetCacheType(net::APP_CACHE);
2857  BackendDoomAll();
2858}
2859
2860TEST_F(DiskCacheBackendTest, ShaderCacheOnlyDoomAll) {
2861  SetCacheType(net::SHADER_CACHE);
2862  BackendDoomAll();
2863}
2864
2865// If the index size changes when we doom the cache, we should not crash.
2866void DiskCacheBackendTest::BackendDoomAll2() {
2867  EXPECT_EQ(2, cache_->GetEntryCount());
2868  EXPECT_EQ(net::OK, DoomAllEntries());
2869
2870  disk_cache::Entry* entry;
2871  ASSERT_EQ(net::OK, CreateEntry("Something new", &entry));
2872  entry->Close();
2873
2874  EXPECT_EQ(1, cache_->GetEntryCount());
2875}
2876
2877TEST_F(DiskCacheBackendTest, DoomAll2) {
2878  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2879  DisableFirstCleanup();
2880  SetMaxSize(20 * 1024 * 1024);
2881  InitCache();
2882  BackendDoomAll2();
2883}
2884
2885TEST_F(DiskCacheBackendTest, NewEvictionDoomAll2) {
2886  ASSERT_TRUE(CopyTestCache("bad_rankings2"));
2887  DisableFirstCleanup();
2888  SetMaxSize(20 * 1024 * 1024);
2889  SetNewEviction();
2890  InitCache();
2891  BackendDoomAll2();
2892}
2893
2894// We should be able to create the same entry on multiple simultaneous instances
2895// of the cache.
2896TEST_F(DiskCacheTest, MultipleInstances) {
2897  base::ScopedTempDir store1, store2;
2898  ASSERT_TRUE(store1.CreateUniqueTempDir());
2899  ASSERT_TRUE(store2.CreateUniqueTempDir());
2900
2901  base::Thread cache_thread("CacheThread");
2902  ASSERT_TRUE(cache_thread.StartWithOptions(
2903      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
2904  net::TestCompletionCallback cb;
2905
2906  const int kNumberOfCaches = 2;
2907  scoped_ptr<disk_cache::Backend> cache[kNumberOfCaches];
2908
2909  int rv = disk_cache::CreateCacheBackend(net::DISK_CACHE,
2910                                          net::CACHE_BACKEND_DEFAULT,
2911                                          store1.path(),
2912                                          0,
2913                                          false,
2914                                          cache_thread.task_runner(),
2915                                          NULL,
2916                                          &cache[0],
2917                                          cb.callback());
2918  ASSERT_EQ(net::OK, cb.GetResult(rv));
2919  rv = disk_cache::CreateCacheBackend(net::MEDIA_CACHE,
2920                                      net::CACHE_BACKEND_DEFAULT,
2921                                      store2.path(),
2922                                      0,
2923                                      false,
2924                                      cache_thread.task_runner(),
2925                                      NULL,
2926                                      &cache[1],
2927                                      cb.callback());
2928  ASSERT_EQ(net::OK, cb.GetResult(rv));
2929
2930  ASSERT_TRUE(cache[0].get() != NULL && cache[1].get() != NULL);
2931
2932  std::string key("the first key");
2933  disk_cache::Entry* entry;
2934  for (int i = 0; i < kNumberOfCaches; i++) {
2935    rv = cache[i]->CreateEntry(key, &entry, cb.callback());
2936    ASSERT_EQ(net::OK, cb.GetResult(rv));
2937    entry->Close();
2938  }
2939}
2940
2941// Test the six regions of the curve that determines the max cache size.
2942TEST_F(DiskCacheTest, AutomaticMaxSize) {
2943  using disk_cache::kDefaultCacheSize;
2944  int64 large_size = kDefaultCacheSize;
2945
2946  // Region 1: expected = available * 0.8
2947  EXPECT_EQ((kDefaultCacheSize - 1) * 8 / 10,
2948            disk_cache::PreferredCacheSize(large_size - 1));
2949  EXPECT_EQ(kDefaultCacheSize * 8 / 10,
2950            disk_cache::PreferredCacheSize(large_size));
2951  EXPECT_EQ(kDefaultCacheSize - 1,
2952            disk_cache::PreferredCacheSize(large_size * 10 / 8 - 1));
2953
2954  // Region 2: expected = default_size
2955  EXPECT_EQ(kDefaultCacheSize,
2956            disk_cache::PreferredCacheSize(large_size * 10 / 8));
2957  EXPECT_EQ(kDefaultCacheSize,
2958            disk_cache::PreferredCacheSize(large_size * 10 - 1));
2959
2960  // Region 3: expected = available * 0.1
2961  EXPECT_EQ(kDefaultCacheSize,
2962            disk_cache::PreferredCacheSize(large_size * 10));
2963  EXPECT_EQ((kDefaultCacheSize * 25 - 1) / 10,
2964            disk_cache::PreferredCacheSize(large_size * 25 - 1));
2965
2966  // Region 4: expected = default_size * 2.5
2967  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2968            disk_cache::PreferredCacheSize(large_size * 25));
2969  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2970            disk_cache::PreferredCacheSize(large_size * 100 - 1));
2971  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2972            disk_cache::PreferredCacheSize(large_size * 100));
2973  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2974            disk_cache::PreferredCacheSize(large_size * 250 - 1));
2975
2976  // Region 5: expected = available * 0.1
2977  int64 largest_size = kDefaultCacheSize * 4;
2978  EXPECT_EQ(kDefaultCacheSize * 25 / 10,
2979            disk_cache::PreferredCacheSize(large_size * 250));
2980  EXPECT_EQ(largest_size - 1,
2981            disk_cache::PreferredCacheSize(largest_size * 100 - 1));
2982
2983  // Region 6: expected = largest possible size
2984  EXPECT_EQ(largest_size,
2985            disk_cache::PreferredCacheSize(largest_size * 100));
2986  EXPECT_EQ(largest_size,
2987            disk_cache::PreferredCacheSize(largest_size * 10000));
2988}
2989
2990// Tests that we can "migrate" a running instance from one experiment group to
2991// another.
2992TEST_F(DiskCacheBackendTest, Histograms) {
2993  InitCache();
2994  disk_cache::BackendImpl* backend_ = cache_impl_;  // Needed be the macro.
2995
2996  for (int i = 1; i < 3; i++) {
2997    CACHE_UMA(HOURS, "FillupTime", i, 28);
2998  }
2999}
3000
3001// Make sure that we keep the total memory used by the internal buffers under
3002// control.
3003TEST_F(DiskCacheBackendTest, TotalBuffersSize1) {
3004  InitCache();
3005  std::string key("the first key");
3006  disk_cache::Entry* entry;
3007  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3008
3009  const int kSize = 200;
3010  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3011  CacheTestFillBuffer(buffer->data(), kSize, true);
3012
3013  for (int i = 0; i < 10; i++) {
3014    SCOPED_TRACE(i);
3015    // Allocate 2MB for this entry.
3016    EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, true));
3017    EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer.get(), kSize, true));
3018    EXPECT_EQ(kSize,
3019              WriteData(entry, 0, 1024 * 1024, buffer.get(), kSize, false));
3020    EXPECT_EQ(kSize,
3021              WriteData(entry, 1, 1024 * 1024, buffer.get(), kSize, false));
3022
3023    // Delete one of the buffers and truncate the other.
3024    EXPECT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, true));
3025    EXPECT_EQ(0, WriteData(entry, 1, 10, buffer.get(), 0, true));
3026
3027    // Delete the second buffer, writing 10 bytes to disk.
3028    entry->Close();
3029    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3030  }
3031
3032  entry->Close();
3033  EXPECT_EQ(0, cache_impl_->GetTotalBuffersSize());
3034}
3035
3036// This test assumes at least 150MB of system memory.
3037TEST_F(DiskCacheBackendTest, TotalBuffersSize2) {
3038  InitCache();
3039
3040  const int kOneMB = 1024 * 1024;
3041  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3042  EXPECT_EQ(kOneMB, cache_impl_->GetTotalBuffersSize());
3043
3044  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3045  EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3046
3047  EXPECT_TRUE(cache_impl_->IsAllocAllowed(0, kOneMB));
3048  EXPECT_EQ(kOneMB * 3, cache_impl_->GetTotalBuffersSize());
3049
3050  cache_impl_->BufferDeleted(kOneMB);
3051  EXPECT_EQ(kOneMB * 2, cache_impl_->GetTotalBuffersSize());
3052
3053  // Check the upper limit.
3054  EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, 30 * kOneMB));
3055
3056  for (int i = 0; i < 30; i++)
3057    cache_impl_->IsAllocAllowed(0, kOneMB);  // Ignore the result.
3058
3059  EXPECT_FALSE(cache_impl_->IsAllocAllowed(0, kOneMB));
3060}
3061
3062// Tests that sharing of external files works and we are able to delete the
3063// files when we need to.
3064TEST_F(DiskCacheBackendTest, FileSharing) {
3065  InitCache();
3066
3067  disk_cache::Addr address(0x80000001);
3068  ASSERT_TRUE(cache_impl_->CreateExternalFile(&address));
3069  base::FilePath name = cache_impl_->GetFileName(address);
3070
3071  scoped_refptr<disk_cache::File> file(new disk_cache::File(false));
3072  file->Init(name);
3073
3074#if defined(OS_WIN)
3075  DWORD sharing = FILE_SHARE_READ | FILE_SHARE_WRITE;
3076  DWORD access = GENERIC_READ | GENERIC_WRITE;
3077  base::win::ScopedHandle file2(CreateFile(
3078      name.value().c_str(), access, sharing, NULL, OPEN_EXISTING, 0, NULL));
3079  EXPECT_FALSE(file2.IsValid());
3080
3081  sharing |= FILE_SHARE_DELETE;
3082  file2.Set(CreateFile(name.value().c_str(), access, sharing, NULL,
3083                       OPEN_EXISTING, 0, NULL));
3084  EXPECT_TRUE(file2.IsValid());
3085#endif
3086
3087  EXPECT_TRUE(base::DeleteFile(name, false));
3088
3089  // We should be able to use the file.
3090  const int kSize = 200;
3091  char buffer1[kSize];
3092  char buffer2[kSize];
3093  memset(buffer1, 't', kSize);
3094  memset(buffer2, 0, kSize);
3095  EXPECT_TRUE(file->Write(buffer1, kSize, 0));
3096  EXPECT_TRUE(file->Read(buffer2, kSize, 0));
3097  EXPECT_EQ(0, memcmp(buffer1, buffer2, kSize));
3098
3099  EXPECT_TRUE(disk_cache::DeleteCacheFile(name));
3100}
3101
3102TEST_F(DiskCacheBackendTest, UpdateRankForExternalCacheHit) {
3103  InitCache();
3104
3105  disk_cache::Entry* entry;
3106
3107  for (int i = 0; i < 2; ++i) {
3108    std::string key = base::StringPrintf("key%d", i);
3109    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3110    entry->Close();
3111  }
3112
3113  // Ping the oldest entry.
3114  cache_->OnExternalCacheHit("key0");
3115
3116  TrimForTest(false);
3117
3118  // Make sure the older key remains.
3119  EXPECT_EQ(1, cache_->GetEntryCount());
3120  ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3121  entry->Close();
3122}
3123
3124TEST_F(DiskCacheBackendTest, ShaderCacheUpdateRankForExternalCacheHit) {
3125  SetCacheType(net::SHADER_CACHE);
3126  InitCache();
3127
3128  disk_cache::Entry* entry;
3129
3130  for (int i = 0; i < 2; ++i) {
3131    std::string key = base::StringPrintf("key%d", i);
3132    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3133    entry->Close();
3134  }
3135
3136  // Ping the oldest entry.
3137  cache_->OnExternalCacheHit("key0");
3138
3139  TrimForTest(false);
3140
3141  // Make sure the older key remains.
3142  EXPECT_EQ(1, cache_->GetEntryCount());
3143  ASSERT_EQ(net::OK, OpenEntry("key0", &entry));
3144  entry->Close();
3145}
3146
3147// The Simple Cache backend requires a few guarantees from the filesystem like
3148// atomic renaming of recently open files. Those guarantees are not provided in
3149// general on Windows.
3150#if defined(OS_POSIX)
3151
3152TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingCreate) {
3153  SetCacheType(net::APP_CACHE);
3154  SetSimpleCacheMode();
3155  BackendShutdownWithPendingCreate(false);
3156}
3157
3158TEST_F(DiskCacheBackendTest, SimpleCacheShutdownWithPendingFileIO) {
3159  SetCacheType(net::APP_CACHE);
3160  SetSimpleCacheMode();
3161  BackendShutdownWithPendingFileIO(false);
3162}
3163
3164TEST_F(DiskCacheBackendTest, SimpleCacheBasics) {
3165  SetSimpleCacheMode();
3166  BackendBasics();
3167}
3168
3169TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheBasics) {
3170  SetCacheType(net::APP_CACHE);
3171  SetSimpleCacheMode();
3172  BackendBasics();
3173}
3174
3175TEST_F(DiskCacheBackendTest, SimpleCacheKeying) {
3176  SetSimpleCacheMode();
3177  BackendKeying();
3178}
3179
3180TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheKeying) {
3181  SetSimpleCacheMode();
3182  SetCacheType(net::APP_CACHE);
3183  BackendKeying();
3184}
3185
3186TEST_F(DiskCacheBackendTest, DISABLED_SimpleCacheSetSize) {
3187  SetSimpleCacheMode();
3188  BackendSetSize();
3189}
3190
3191// MacOS has a default open file limit of 256 files, which is incompatible with
3192// this simple cache test.
3193#if defined(OS_MACOSX)
3194#define SIMPLE_MAYBE_MACOS(TestName) DISABLED_ ## TestName
3195#else
3196#define SIMPLE_MAYBE_MACOS(TestName) TestName
3197#endif
3198
3199TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheLoad)) {
3200  SetMaxSize(0x100000);
3201  SetSimpleCacheMode();
3202  BackendLoad();
3203}
3204
3205TEST_F(DiskCacheBackendTest, SIMPLE_MAYBE_MACOS(SimpleCacheAppCacheLoad)) {
3206  SetCacheType(net::APP_CACHE);
3207  SetSimpleCacheMode();
3208  SetMaxSize(0x100000);
3209  BackendLoad();
3210}
3211
3212TEST_F(DiskCacheBackendTest, SimpleDoomRecent) {
3213  SetSimpleCacheMode();
3214  BackendDoomRecent();
3215}
3216
3217// crbug.com/330926, crbug.com/370677
3218TEST_F(DiskCacheBackendTest, DISABLED_SimpleDoomBetween) {
3219  SetSimpleCacheMode();
3220  BackendDoomBetween();
3221}
3222
3223TEST_F(DiskCacheBackendTest, SimpleCacheDoomAll) {
3224  SetSimpleCacheMode();
3225  BackendDoomAll();
3226}
3227
3228TEST_F(DiskCacheBackendTest, SimpleCacheAppCacheOnlyDoomAll) {
3229  SetCacheType(net::APP_CACHE);
3230  SetSimpleCacheMode();
3231  BackendDoomAll();
3232}
3233
3234TEST_F(DiskCacheBackendTest, SimpleCacheOpenMissingFile) {
3235  SetSimpleCacheMode();
3236  InitCache();
3237
3238  const char* key = "the first key";
3239  disk_cache::Entry* entry = NULL;
3240
3241  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3242  ASSERT_TRUE(entry != NULL);
3243  entry->Close();
3244  entry = NULL;
3245
3246  // To make sure the file creation completed we need to call open again so that
3247  // we block until it actually created the files.
3248  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3249  ASSERT_TRUE(entry != NULL);
3250  entry->Close();
3251  entry = NULL;
3252
3253  // Delete one of the files in the entry.
3254  base::FilePath to_delete_file = cache_path_.AppendASCII(
3255      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3256  EXPECT_TRUE(base::PathExists(to_delete_file));
3257  EXPECT_TRUE(disk_cache::DeleteCacheFile(to_delete_file));
3258
3259  // Failing to open the entry should delete the rest of these files.
3260  ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3261
3262  // Confirm the rest of the files are gone.
3263  for (int i = 1; i < disk_cache::kSimpleEntryFileCount; ++i) {
3264    base::FilePath should_be_gone_file(cache_path_.AppendASCII(
3265        disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i)));
3266    EXPECT_FALSE(base::PathExists(should_be_gone_file));
3267  }
3268}
3269
3270TEST_F(DiskCacheBackendTest, SimpleCacheOpenBadFile) {
3271  SetSimpleCacheMode();
3272  InitCache();
3273
3274  const char* key = "the first key";
3275  disk_cache::Entry* entry = NULL;
3276
3277  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3278  disk_cache::Entry* null = NULL;
3279  ASSERT_NE(null, entry);
3280  entry->Close();
3281  entry = NULL;
3282
3283  // To make sure the file creation completed we need to call open again so that
3284  // we block until it actually created the files.
3285  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3286  ASSERT_NE(null, entry);
3287  entry->Close();
3288  entry = NULL;
3289
3290  // Write an invalid header for stream 0 and stream 1.
3291  base::FilePath entry_file1_path = cache_path_.AppendASCII(
3292      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
3293
3294  disk_cache::SimpleFileHeader header;
3295  header.initial_magic_number = GG_UINT64_C(0xbadf00d);
3296  EXPECT_EQ(
3297      implicit_cast<int>(sizeof(header)),
3298      base::WriteFile(entry_file1_path, reinterpret_cast<char*>(&header),
3299                           sizeof(header)));
3300  ASSERT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
3301}
3302
3303// Tests that the Simple Cache Backend fails to initialize with non-matching
3304// file structure on disk.
3305TEST_F(DiskCacheBackendTest, SimpleCacheOverBlockfileCache) {
3306  // Create a cache structure with the |BackendImpl|.
3307  InitCache();
3308  disk_cache::Entry* entry;
3309  const int kSize = 50;
3310  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3311  CacheTestFillBuffer(buffer->data(), kSize, false);
3312  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3313  ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3314  entry->Close();
3315  cache_.reset();
3316
3317  // Check that the |SimpleBackendImpl| does not favor this structure.
3318  base::Thread cache_thread("CacheThread");
3319  ASSERT_TRUE(cache_thread.StartWithOptions(
3320      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3321  disk_cache::SimpleBackendImpl* simple_cache =
3322      new disk_cache::SimpleBackendImpl(
3323          cache_path_, 0, net::DISK_CACHE, cache_thread.task_runner(), NULL);
3324  net::TestCompletionCallback cb;
3325  int rv = simple_cache->Init(cb.callback());
3326  EXPECT_NE(net::OK, cb.GetResult(rv));
3327  delete simple_cache;
3328  DisableIntegrityCheck();
3329}
3330
3331// Tests that the |BackendImpl| refuses to initialize on top of the files
3332// generated by the Simple Cache Backend.
3333TEST_F(DiskCacheBackendTest, BlockfileCacheOverSimpleCache) {
3334  // Create a cache structure with the |SimpleBackendImpl|.
3335  SetSimpleCacheMode();
3336  InitCache();
3337  disk_cache::Entry* entry;
3338  const int kSize = 50;
3339  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3340  CacheTestFillBuffer(buffer->data(), kSize, false);
3341  ASSERT_EQ(net::OK, CreateEntry("key", &entry));
3342  ASSERT_EQ(0, WriteData(entry, 0, 0, buffer.get(), 0, false));
3343  entry->Close();
3344  cache_.reset();
3345
3346  // Check that the |BackendImpl| does not favor this structure.
3347  base::Thread cache_thread("CacheThread");
3348  ASSERT_TRUE(cache_thread.StartWithOptions(
3349      base::Thread::Options(base::MessageLoop::TYPE_IO, 0)));
3350  disk_cache::BackendImpl* cache = new disk_cache::BackendImpl(
3351      cache_path_, base::ThreadTaskRunnerHandle::Get(), NULL);
3352  cache->SetUnitTestMode();
3353  net::TestCompletionCallback cb;
3354  int rv = cache->Init(cb.callback());
3355  EXPECT_NE(net::OK, cb.GetResult(rv));
3356  delete cache;
3357  DisableIntegrityCheck();
3358}
3359
3360TEST_F(DiskCacheBackendTest, SimpleCacheFixEnumerators) {
3361  SetSimpleCacheMode();
3362  BackendFixEnumerators();
3363}
3364
3365// Tests basic functionality of the SimpleBackend implementation of the
3366// enumeration API.
3367TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationBasics) {
3368  SetSimpleCacheMode();
3369  InitCache();
3370  std::set<std::string> key_pool;
3371  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3372
3373  // Check that enumeration returns all entries.
3374  std::set<std::string> keys_to_match(key_pool);
3375  scoped_ptr<TestIterator> iter = CreateIterator();
3376  size_t count = 0;
3377  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3378  iter.reset();
3379  EXPECT_EQ(key_pool.size(), count);
3380  EXPECT_TRUE(keys_to_match.empty());
3381
3382  // Check that opening entries does not affect enumeration.
3383  keys_to_match = key_pool;
3384  iter = CreateIterator();
3385  count = 0;
3386  disk_cache::Entry* entry_opened_before;
3387  ASSERT_EQ(net::OK, OpenEntry(*(key_pool.begin()), &entry_opened_before));
3388  ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3389                                    iter.get(),
3390                                    &keys_to_match,
3391                                    &count));
3392
3393  disk_cache::Entry* entry_opened_middle;
3394  ASSERT_EQ(net::OK,
3395            OpenEntry(*(keys_to_match.begin()), &entry_opened_middle));
3396  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3397  iter.reset();
3398  entry_opened_before->Close();
3399  entry_opened_middle->Close();
3400
3401  EXPECT_EQ(key_pool.size(), count);
3402  EXPECT_TRUE(keys_to_match.empty());
3403}
3404
3405// Tests that the enumerations are not affected by dooming an entry in the
3406// middle.
3407TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationWhileDoomed) {
3408  SetSimpleCacheMode();
3409  InitCache();
3410  std::set<std::string> key_pool;
3411  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3412
3413  // Check that enumeration returns all entries but the doomed one.
3414  std::set<std::string> keys_to_match(key_pool);
3415  scoped_ptr<TestIterator> iter = CreateIterator();
3416  size_t count = 0;
3417  ASSERT_TRUE(EnumerateAndMatchKeys(key_pool.size()/2,
3418                                    iter.get(),
3419                                    &keys_to_match,
3420                                    &count));
3421
3422  std::string key_to_delete = *(keys_to_match.begin());
3423  DoomEntry(key_to_delete);
3424  keys_to_match.erase(key_to_delete);
3425  key_pool.erase(key_to_delete);
3426  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3427  iter.reset();
3428
3429  EXPECT_EQ(key_pool.size(), count);
3430  EXPECT_TRUE(keys_to_match.empty());
3431}
3432
3433// Tests that enumerations are not affected by corrupt files.
3434TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationCorruption) {
3435  SetSimpleCacheMode();
3436  InitCache();
3437  std::set<std::string> key_pool;
3438  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3439
3440  // Create a corrupt entry. The write/read sequence ensures that the entry will
3441  // have been created before corrupting the platform files, in the case of
3442  // optimistic operations.
3443  const std::string key = "the key";
3444  disk_cache::Entry* corrupted_entry;
3445
3446  ASSERT_EQ(net::OK, CreateEntry(key, &corrupted_entry));
3447  ASSERT_TRUE(corrupted_entry);
3448  const int kSize = 50;
3449  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3450  CacheTestFillBuffer(buffer->data(), kSize, false);
3451  ASSERT_EQ(kSize,
3452            WriteData(corrupted_entry, 0, 0, buffer.get(), kSize, false));
3453  ASSERT_EQ(kSize, ReadData(corrupted_entry, 0, 0, buffer.get(), kSize));
3454  corrupted_entry->Close();
3455
3456  EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
3457      key, cache_path_));
3458  EXPECT_EQ(key_pool.size() + 1,
3459            implicit_cast<size_t>(cache_->GetEntryCount()));
3460
3461  // Check that enumeration returns all entries but the corrupt one.
3462  std::set<std::string> keys_to_match(key_pool);
3463  scoped_ptr<TestIterator> iter = CreateIterator();
3464  size_t count = 0;
3465  ASSERT_TRUE(EnumerateAndMatchKeys(-1, iter.get(), &keys_to_match, &count));
3466  iter.reset();
3467
3468  EXPECT_EQ(key_pool.size(), count);
3469  EXPECT_TRUE(keys_to_match.empty());
3470}
3471
3472// Tests that enumerations don't leak memory when the backend is destructed
3473// mid-enumeration.
3474TEST_F(DiskCacheBackendTest, SimpleCacheEnumerationDestruction) {
3475  SetSimpleCacheMode();
3476  InitCache();
3477  std::set<std::string> key_pool;
3478  ASSERT_TRUE(CreateSetOfRandomEntries(&key_pool));
3479
3480  scoped_ptr<TestIterator> iter = CreateIterator();
3481  disk_cache::Entry* entry = NULL;
3482  ASSERT_EQ(net::OK, iter->OpenNextEntry(&entry));
3483  EXPECT_TRUE(entry);
3484  disk_cache::ScopedEntryPtr entry_closer(entry);
3485
3486  cache_.reset();
3487  // This test passes if we don't leak memory.
3488}
3489
3490#endif  // defined(OS_POSIX)
3491