1// Copyright (c) 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "base/basictypes.h"
6#include "base/bind.h"
7#include "base/bind_helpers.h"
8#include "base/files/file.h"
9#include "base/files/file_util.h"
10#include "base/strings/string_util.h"
11#include "base/strings/stringprintf.h"
12#include "base/threading/platform_thread.h"
13#include "base/timer/timer.h"
14#include "net/base/completion_callback.h"
15#include "net/base/io_buffer.h"
16#include "net/base/net_errors.h"
17#include "net/base/test_completion_callback.h"
18#include "net/disk_cache/blockfile/backend_impl.h"
19#include "net/disk_cache/blockfile/entry_impl.h"
20#include "net/disk_cache/disk_cache_test_base.h"
21#include "net/disk_cache/disk_cache_test_util.h"
22#include "net/disk_cache/memory/mem_entry_impl.h"
23#include "net/disk_cache/simple/simple_entry_format.h"
24#include "net/disk_cache/simple/simple_entry_impl.h"
25#include "net/disk_cache/simple/simple_synchronous_entry.h"
26#include "net/disk_cache/simple/simple_test_util.h"
27#include "net/disk_cache/simple/simple_util.h"
28#include "testing/gtest/include/gtest/gtest.h"
29
30using base::Time;
31using disk_cache::ScopedEntryPtr;
32
// Tests that can run with different types of caches.
class DiskCacheEntryTest : public DiskCacheTestWithCache {
 public:
  // Bodies of the synchronous IO tests. Public so they can be bound with
  // base::Bind and executed on the cache's thread via RunTaskForTest.
  void InternalSyncIOBackground(disk_cache::Entry* entry);
  void ExternalSyncIOBackground(disk_cache::Entry* entry);

 protected:
  // Each helper below implements one test scenario. The TEST_F bodies
  // configure a backend (default, memory-only, app-cache, ...) and then
  // invoke the matching helper, so every scenario runs against several
  // cache implementations. |stream_index| selects which data stream of the
  // entry the scenario operates on.
  void InternalSyncIO();
  void InternalAsyncIO();
  void ExternalSyncIO();
  void ExternalAsyncIO();
  void ReleaseBuffer(int stream_index);
  void StreamAccess();
  void GetKey();
  void GetTimes(int stream_index);
  void GrowData(int stream_index);
  void TruncateData(int stream_index);
  void ZeroLengthIO(int stream_index);
  void Buffering();
  void SizeAtCreate();
  void SizeChanges(int stream_index);
  void ReuseEntry(int size, int stream_index);
  void InvalidData(int stream_index);
  void ReadWriteDestroyBuffer(int stream_index);
  void DoomNormalEntry();
  void DoomEntryNextToOpenEntry();
  void DoomedEntry(int stream_index);
  void BasicSparseIO();
  void HugeSparseIO();
  void GetAvailableRange();
  void CouldBeSparse();
  void UpdateSparseEntry();
  void DoomSparseEntry();
  void PartialSparseEntry();
  // Helpers for the SimpleCache-specific tests (bodies defined later in the
  // file, outside this chunk).
  bool SimpleCacheMakeBadChecksumEntry(const std::string& key, int* data_size);
  bool SimpleCacheThirdStreamFileExists(const char* key);
  void SyncDoomEntry(const char* key);
};
71
// This part of the test runs on the background thread.
void DiskCacheEntryTest::InternalSyncIOBackground(disk_cache::Entry* entry) {
  // Stream 0: a small, 10-byte write/read round trip.
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  // A brand-new entry has no data, so the read returns 0 bytes.
  EXPECT_EQ(
      0,
      entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
  base::strlcpy(buffer1->data(), "the data", kSize1);
  EXPECT_EQ(10,
            entry->WriteData(
                0, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
  memset(buffer1->data(), 0, kSize1);
  EXPECT_EQ(
      10,
      entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
  EXPECT_STREQ("the data", buffer1->data());

  // Stream 1: bigger writes at non-zero offsets.
  const int kSize2 = 5000;
  const int kSize3 = 10000;
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
  memset(buffer3->data(), 0, kSize3);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  // Writing 5000 bytes at offset 1500 grows stream 1 to 6500 bytes; the
  // first 1500 bytes are a zero-filled hole.
  EXPECT_EQ(
      5000,
      entry->WriteData(
          1, 1500, buffer2.get(), kSize2, net::CompletionCallback(), false));
  memset(buffer2->data(), 0, kSize2);
  // Reading from offset 1511 yields the remaining 6500 - 1511 = 4989 bytes.
  EXPECT_EQ(4989,
            entry->ReadData(
                1, 1511, buffer2.get(), kSize2, net::CompletionCallback()));
  EXPECT_STREQ("big data goes here", buffer2->data());
  // A read from offset 0 is clipped to the 5000-byte buffer.
  EXPECT_EQ(
      5000,
      entry->ReadData(1, 0, buffer2.get(), kSize2, net::CompletionCallback()));
  // The hole before offset 1500 reads back as zeros (buffer3 is all zeros).
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
  // Reads are truncated at end-of-stream: 6500 - 5000 = 1500 bytes.
  EXPECT_EQ(1500,
            entry->ReadData(
                1, 5000, buffer2.get(), kSize2, net::CompletionCallback()));

  // Reading at end-of-stream returns 0 bytes.
  EXPECT_EQ(0,
            entry->ReadData(
                1, 6500, buffer2.get(), kSize2, net::CompletionCallback()));
  EXPECT_EQ(
      6500,
      entry->ReadData(1, 0, buffer3.get(), kSize3, net::CompletionCallback()));
  // Overwrite from offset 0 with 8192 bytes; GetDataSize(1) below confirms
  // the stream now reports 8192 bytes.
  EXPECT_EQ(8192,
            entry->WriteData(
                1, 0, buffer3.get(), 8192, net::CompletionCallback(), false));
  EXPECT_EQ(
      8192,
      entry->ReadData(1, 0, buffer3.get(), kSize3, net::CompletionCallback()));
  EXPECT_EQ(8192, entry->GetDataSize(1));

  // We need to delete the memory buffer on this thread.
  EXPECT_EQ(0, entry->WriteData(
      0, 0, NULL, 0, net::CompletionCallback(), true));
  EXPECT_EQ(0, entry->WriteData(
      1, 0, NULL, 0, net::CompletionCallback(), true));
}
134
135// We need to support synchronous IO even though it is not a supported operation
136// from the point of view of the disk cache's public interface, because we use
137// it internally, not just by a few tests, but as part of the implementation
138// (see sparse_control.cc, for example).
139void DiskCacheEntryTest::InternalSyncIO() {
140  disk_cache::Entry* entry = NULL;
141  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
142  ASSERT_TRUE(NULL != entry);
143
144  // The bulk of the test runs from within the callback, on the cache thread.
145  RunTaskForTest(base::Bind(&DiskCacheEntryTest::InternalSyncIOBackground,
146                            base::Unretained(this),
147                            entry));
148
149
150  entry->Doom();
151  entry->Close();
152  FlushQueueForTest();
153  EXPECT_EQ(0, cache_->GetEntryCount());
154}
155
// Synchronous IO scenario on the default cache backend.
TEST_F(DiskCacheEntryTest, InternalSyncIO) {
  InitCache();
  InternalSyncIO();
}

// Same scenario on the memory-only backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyInternalSyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  InternalSyncIO();
}
166
167void DiskCacheEntryTest::InternalAsyncIO() {
168  disk_cache::Entry* entry = NULL;
169  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
170  ASSERT_TRUE(NULL != entry);
171
172  // Avoid using internal buffers for the test. We have to write something to
173  // the entry and close it so that we flush the internal buffer to disk. After
174  // that, IO operations will be really hitting the disk. We don't care about
175  // the content, so just extending the entry is enough (all extensions zero-
176  // fill any holes).
177  EXPECT_EQ(0, WriteData(entry, 0, 15 * 1024, NULL, 0, false));
178  EXPECT_EQ(0, WriteData(entry, 1, 15 * 1024, NULL, 0, false));
179  entry->Close();
180  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
181
182  MessageLoopHelper helper;
183  // Let's verify that each IO goes to the right callback object.
184  CallbackTest callback1(&helper, false);
185  CallbackTest callback2(&helper, false);
186  CallbackTest callback3(&helper, false);
187  CallbackTest callback4(&helper, false);
188  CallbackTest callback5(&helper, false);
189  CallbackTest callback6(&helper, false);
190  CallbackTest callback7(&helper, false);
191  CallbackTest callback8(&helper, false);
192  CallbackTest callback9(&helper, false);
193  CallbackTest callback10(&helper, false);
194  CallbackTest callback11(&helper, false);
195  CallbackTest callback12(&helper, false);
196  CallbackTest callback13(&helper, false);
197
198  const int kSize1 = 10;
199  const int kSize2 = 5000;
200  const int kSize3 = 10000;
201  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
202  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
203  scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
204  CacheTestFillBuffer(buffer1->data(), kSize1, false);
205  CacheTestFillBuffer(buffer2->data(), kSize2, false);
206  CacheTestFillBuffer(buffer3->data(), kSize3, false);
207
208  EXPECT_EQ(0,
209            entry->ReadData(
210                0,
211                15 * 1024,
212                buffer1.get(),
213                kSize1,
214                base::Bind(&CallbackTest::Run, base::Unretained(&callback1))));
215  base::strlcpy(buffer1->data(), "the data", kSize1);
216  int expected = 0;
217  int ret = entry->WriteData(
218      0,
219      0,
220      buffer1.get(),
221      kSize1,
222      base::Bind(&CallbackTest::Run, base::Unretained(&callback2)),
223      false);
224  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
225  if (net::ERR_IO_PENDING == ret)
226    expected++;
227
228  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
229  memset(buffer2->data(), 0, kSize2);
230  ret = entry->ReadData(
231      0,
232      0,
233      buffer2.get(),
234      kSize1,
235      base::Bind(&CallbackTest::Run, base::Unretained(&callback3)));
236  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
237  if (net::ERR_IO_PENDING == ret)
238    expected++;
239
240  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
241  EXPECT_STREQ("the data", buffer2->data());
242
243  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
244  ret = entry->WriteData(
245      1,
246      1500,
247      buffer2.get(),
248      kSize2,
249      base::Bind(&CallbackTest::Run, base::Unretained(&callback4)),
250      true);
251  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
252  if (net::ERR_IO_PENDING == ret)
253    expected++;
254
255  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
256  memset(buffer3->data(), 0, kSize3);
257  ret = entry->ReadData(
258      1,
259      1511,
260      buffer3.get(),
261      kSize2,
262      base::Bind(&CallbackTest::Run, base::Unretained(&callback5)));
263  EXPECT_TRUE(4989 == ret || net::ERR_IO_PENDING == ret);
264  if (net::ERR_IO_PENDING == ret)
265    expected++;
266
267  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
268  EXPECT_STREQ("big data goes here", buffer3->data());
269  ret = entry->ReadData(
270      1,
271      0,
272      buffer2.get(),
273      kSize2,
274      base::Bind(&CallbackTest::Run, base::Unretained(&callback6)));
275  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
276  if (net::ERR_IO_PENDING == ret)
277    expected++;
278
279  memset(buffer3->data(), 0, kSize3);
280
281  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
282  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 1500));
283  ret = entry->ReadData(
284      1,
285      5000,
286      buffer2.get(),
287      kSize2,
288      base::Bind(&CallbackTest::Run, base::Unretained(&callback7)));
289  EXPECT_TRUE(1500 == ret || net::ERR_IO_PENDING == ret);
290  if (net::ERR_IO_PENDING == ret)
291    expected++;
292
293  ret = entry->ReadData(
294      1,
295      0,
296      buffer3.get(),
297      kSize3,
298      base::Bind(&CallbackTest::Run, base::Unretained(&callback9)));
299  EXPECT_TRUE(6500 == ret || net::ERR_IO_PENDING == ret);
300  if (net::ERR_IO_PENDING == ret)
301    expected++;
302
303  ret = entry->WriteData(
304      1,
305      0,
306      buffer3.get(),
307      8192,
308      base::Bind(&CallbackTest::Run, base::Unretained(&callback10)),
309      true);
310  EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
311  if (net::ERR_IO_PENDING == ret)
312    expected++;
313
314  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
315  ret = entry->ReadData(
316      1,
317      0,
318      buffer3.get(),
319      kSize3,
320      base::Bind(&CallbackTest::Run, base::Unretained(&callback11)));
321  EXPECT_TRUE(8192 == ret || net::ERR_IO_PENDING == ret);
322  if (net::ERR_IO_PENDING == ret)
323    expected++;
324
325  EXPECT_EQ(8192, entry->GetDataSize(1));
326
327  ret = entry->ReadData(
328      0,
329      0,
330      buffer1.get(),
331      kSize1,
332      base::Bind(&CallbackTest::Run, base::Unretained(&callback12)));
333  EXPECT_TRUE(10 == ret || net::ERR_IO_PENDING == ret);
334  if (net::ERR_IO_PENDING == ret)
335    expected++;
336
337  ret = entry->ReadData(
338      1,
339      0,
340      buffer2.get(),
341      kSize2,
342      base::Bind(&CallbackTest::Run, base::Unretained(&callback13)));
343  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
344  if (net::ERR_IO_PENDING == ret)
345    expected++;
346
347  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
348
349  EXPECT_FALSE(helper.callback_reused_error());
350
351  entry->Doom();
352  entry->Close();
353  FlushQueueForTest();
354  EXPECT_EQ(0, cache_->GetEntryCount());
355}
356
// Asynchronous IO scenario on the default cache backend.
TEST_F(DiskCacheEntryTest, InternalAsyncIO) {
  InitCache();
  InternalAsyncIO();
}

// Same scenario on the memory-only backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyInternalAsyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  InternalAsyncIO();
}
367
// This part of the test runs on the background thread.
void DiskCacheEntryTest::ExternalSyncIOBackground(disk_cache::Entry* entry) {
  // The buffers (17000 / 25000 bytes) are sized so the data does not fit in
  // internal block files ("external" IO path of the blockfile backend).
  const int kSize1 = 17000;
  const int kSize2 = 25000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  base::strlcpy(buffer1->data(), "the data", kSize1);
  // Stream 0: full-buffer write/read round trip.
  EXPECT_EQ(17000,
            entry->WriteData(
                0, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));
  memset(buffer1->data(), 0, kSize1);
  EXPECT_EQ(
      17000,
      entry->ReadData(0, 0, buffer1.get(), kSize1, net::CompletionCallback()));
  EXPECT_STREQ("the data", buffer1->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  // Stream 1: 25000 bytes at offset 10000 -> stream size 35000, with a
  // zero-filled hole in the first 10000 bytes.
  EXPECT_EQ(
      25000,
      entry->WriteData(
          1, 10000, buffer2.get(), kSize2, net::CompletionCallback(), false));
  memset(buffer2->data(), 0, kSize2);
  // 35000 - 10011 = 24989 bytes remain past this offset.
  EXPECT_EQ(24989,
            entry->ReadData(
                1, 10011, buffer2.get(), kSize2, net::CompletionCallback()));
  EXPECT_STREQ("big data goes here", buffer2->data());
  EXPECT_EQ(
      25000,
      entry->ReadData(1, 0, buffer2.get(), kSize2, net::CompletionCallback()));
  // Reads are truncated at end-of-stream: 35000 - 30000 = 5000 bytes.
  EXPECT_EQ(5000,
            entry->ReadData(
                1, 30000, buffer2.get(), kSize2, net::CompletionCallback()));

  // Reading at end-of-stream returns 0 bytes.
  EXPECT_EQ(0,
            entry->ReadData(
                1, 35000, buffer2.get(), kSize2, net::CompletionCallback()));
  EXPECT_EQ(
      17000,
      entry->ReadData(1, 0, buffer1.get(), kSize1, net::CompletionCallback()));
  // Writing 17000 bytes at offset 20000 extends the stream to 37000.
  EXPECT_EQ(
      17000,
      entry->WriteData(
          1, 20000, buffer1.get(), kSize1, net::CompletionCallback(), false));
  EXPECT_EQ(37000, entry->GetDataSize(1));

  // We need to delete the memory buffer on this thread.
  EXPECT_EQ(0, entry->WriteData(
      0, 0, NULL, 0, net::CompletionCallback(), true));
  EXPECT_EQ(0, entry->WriteData(
      1, 0, NULL, 0, net::CompletionCallback(), true));
}
421
422void DiskCacheEntryTest::ExternalSyncIO() {
423  disk_cache::Entry* entry;
424  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
425
426  // The bulk of the test runs from within the callback, on the cache thread.
427  RunTaskForTest(base::Bind(&DiskCacheEntryTest::ExternalSyncIOBackground,
428                            base::Unretained(this),
429                            entry));
430
431  entry->Doom();
432  entry->Close();
433  FlushQueueForTest();
434  EXPECT_EQ(0, cache_->GetEntryCount());
435}
436
// External-file synchronous IO on the default backend.
TEST_F(DiskCacheEntryTest, ExternalSyncIO) {
  InitCache();
  ExternalSyncIO();
}

// Same scenario with the backend's internal buffering disabled.
TEST_F(DiskCacheEntryTest, ExternalSyncIONoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ExternalSyncIO();
}

// Same scenario on the memory-only backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyExternalSyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  ExternalSyncIO();
}
453
// Asynchronous version of the external-file scenario: each operation may
// complete synchronously or return ERR_IO_PENDING; pending operations are
// counted in |expected| and waited on through the MessageLoopHelper.
void DiskCacheEntryTest::ExternalAsyncIO() {
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));

  // Number of asynchronous completions we still have to wait for.
  int expected = 0;

  MessageLoopHelper helper;
  // Let's verify that each IO goes to the right callback object.
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);
  CallbackTest callback3(&helper, false);
  CallbackTest callback4(&helper, false);
  CallbackTest callback5(&helper, false);
  CallbackTest callback6(&helper, false);
  CallbackTest callback7(&helper, false);
  CallbackTest callback8(&helper, false);
  CallbackTest callback9(&helper, false);

  // Buffers sized (17000 / 25000 bytes) so the data takes the external-file
  // path rather than internal block files.
  const int kSize1 = 17000;
  const int kSize2 = 25000;
  const int kSize3 = 25000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);
  CacheTestFillBuffer(buffer3->data(), kSize3, false);
  base::strlcpy(buffer1->data(), "the data", kSize1);
  // Stream 0: write the full 17000 bytes.
  int ret = entry->WriteData(
      0,
      0,
      buffer1.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback1)),
      false);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  // Read it back (only the first kSize1 bytes of buffer2 are used).
  memset(buffer2->data(), 0, kSize1);
  ret = entry->ReadData(
      0,
      0,
      buffer2.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback2)));
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("the data", buffer2->data());

  base::strlcpy(buffer2->data(), "The really big data goes here", kSize2);
  // Stream 1: 25000 bytes at offset 10000 -> stream size 35000, with a
  // zero-filled hole in the first 10000 bytes.
  ret = entry->WriteData(
      1,
      10000,
      buffer2.get(),
      kSize2,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback3)),
      false);
  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));

  memset(buffer3->data(), 0, kSize3);
  // 35000 - 10011 = 24989 bytes remain past this offset.
  ret = entry->ReadData(
      1,
      10011,
      buffer3.get(),
      kSize3,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback4)));
  EXPECT_TRUE(24989 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_STREQ("big data goes here", buffer3->data());
  ret = entry->ReadData(
      1,
      0,
      buffer2.get(),
      kSize2,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback5)));
  EXPECT_TRUE(25000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  memset(buffer3->data(), 0, kSize3);
  // The hole before offset 10000 reads back as zeros.
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer3->data(), 10000));
  // Reads are truncated at end-of-stream: 35000 - 30000 = 5000 bytes.
  ret = entry->ReadData(
      1,
      30000,
      buffer2.get(),
      kSize2,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback6)));
  EXPECT_TRUE(5000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  // Reading at end-of-stream is expected to return 0 immediately.
  EXPECT_EQ(0,
            entry->ReadData(
                1,
                35000,
                buffer2.get(),
                kSize2,
                base::Bind(&CallbackTest::Run, base::Unretained(&callback7))));
  ret = entry->ReadData(
      1,
      0,
      buffer1.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback8)));
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;
  // Writing 17000 bytes at offset 20000 extends the stream to 37000.
  ret = entry->WriteData(
      1,
      20000,
      buffer3.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback9)),
      false);
  EXPECT_TRUE(17000 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(37000, entry->GetDataSize(1));

  // No callback object may have been invoked more than once.
  EXPECT_FALSE(helper.callback_reused_error());

  entry->Doom();
  entry->Close();
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}
596
// External-file asynchronous IO on the default backend.
TEST_F(DiskCacheEntryTest, ExternalAsyncIO) {
  InitCache();
  ExternalAsyncIO();
}

// Same scenario with the backend's internal buffering disabled.
TEST_F(DiskCacheEntryTest, ExternalAsyncIONoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ExternalAsyncIO();
}

// Same scenario on the memory-only backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyExternalAsyncIO) {
  SetMemoryOnlyMode();
  InitCache();
  ExternalAsyncIO();
}
613
614// Tests that IOBuffers are not referenced after IO completes.
615void DiskCacheEntryTest::ReleaseBuffer(int stream_index) {
616  disk_cache::Entry* entry = NULL;
617  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
618  ASSERT_TRUE(NULL != entry);
619
620  const int kBufferSize = 1024;
621  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kBufferSize));
622  CacheTestFillBuffer(buffer->data(), kBufferSize, false);
623
624  net::ReleaseBufferCompletionCallback cb(buffer.get());
625  int rv = entry->WriteData(
626      stream_index, 0, buffer.get(), kBufferSize, cb.callback(), false);
627  EXPECT_EQ(kBufferSize, cb.GetResult(rv));
628  entry->Close();
629}
630
// Buffer-release check with backend buffering disabled.
TEST_F(DiskCacheEntryTest, ReleaseBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ReleaseBuffer(0);
}

// Same check on the memory-only backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyReleaseBuffer) {
  SetMemoryOnlyMode();
  InitCache();
  ReleaseBuffer(0);
}
642
643void DiskCacheEntryTest::StreamAccess() {
644  disk_cache::Entry* entry = NULL;
645  ASSERT_EQ(net::OK, CreateEntry("the first key", &entry));
646  ASSERT_TRUE(NULL != entry);
647
648  const int kBufferSize = 1024;
649  const int kNumStreams = 3;
650  scoped_refptr<net::IOBuffer> reference_buffers[kNumStreams];
651  for (int i = 0; i < kNumStreams; i++) {
652    reference_buffers[i] = new net::IOBuffer(kBufferSize);
653    CacheTestFillBuffer(reference_buffers[i]->data(), kBufferSize, false);
654  }
655  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kBufferSize));
656  for (int i = 0; i < kNumStreams; i++) {
657    EXPECT_EQ(
658        kBufferSize,
659        WriteData(entry, i, 0, reference_buffers[i].get(), kBufferSize, false));
660    memset(buffer1->data(), 0, kBufferSize);
661    EXPECT_EQ(kBufferSize, ReadData(entry, i, 0, buffer1.get(), kBufferSize));
662    EXPECT_EQ(
663        0, memcmp(reference_buffers[i]->data(), buffer1->data(), kBufferSize));
664  }
665  EXPECT_EQ(net::ERR_INVALID_ARGUMENT,
666            ReadData(entry, kNumStreams, 0, buffer1.get(), kBufferSize));
667  entry->Close();
668
669  // Open the entry and read it in chunks, including a read past the end.
670  ASSERT_EQ(net::OK, OpenEntry("the first key", &entry));
671  ASSERT_TRUE(NULL != entry);
672  const int kReadBufferSize = 600;
673  const int kFinalReadSize = kBufferSize - kReadBufferSize;
674  COMPILE_ASSERT(kFinalReadSize < kReadBufferSize, should_be_exactly_two_reads);
675  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kReadBufferSize));
676  for (int i = 0; i < kNumStreams; i++) {
677    memset(buffer2->data(), 0, kReadBufferSize);
678    EXPECT_EQ(kReadBufferSize,
679              ReadData(entry, i, 0, buffer2.get(), kReadBufferSize));
680    EXPECT_EQ(
681        0,
682        memcmp(reference_buffers[i]->data(), buffer2->data(), kReadBufferSize));
683
684    memset(buffer2->data(), 0, kReadBufferSize);
685    EXPECT_EQ(
686        kFinalReadSize,
687        ReadData(entry, i, kReadBufferSize, buffer2.get(), kReadBufferSize));
688    EXPECT_EQ(0,
689              memcmp(reference_buffers[i]->data() + kReadBufferSize,
690                     buffer2->data(),
691                     kFinalReadSize));
692  }
693
694  entry->Close();
695}
696
// Independent stream access on the default backend.
TEST_F(DiskCacheEntryTest, StreamAccess) {
  InitCache();
  StreamAccess();
}

// Same scenario on the memory-only backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyStreamAccess) {
  SetMemoryOnlyMode();
  InitCache();
  StreamAccess();
}
707
// Verifies that Entry::GetKey() returns the exact key the entry was created
// with, across several key lengths (short, 1000, 3000, ~20000 and 0x4000
// bytes) that can exercise different key-storage paths.
void DiskCacheEntryTest::GetKey() {
  // Short key.
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_EQ(key, entry->GetKey()) << "short key";
  entry->Close();

  // Randomize the key contents; seeded from the current time.
  int seed = static_cast<int>(Time::Now().ToInternalValue());
  srand(seed);
  char key_buffer[20000];

  CacheTestFillBuffer(key_buffer, 3000, true);
  key_buffer[1000] = '\0';

  // 1000-byte key.
  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "1000 bytes key";
  entry->Close();

  // 3000-byte key (reuses the same random fill, with the embedded NUL
  // replaced so the string extends further).
  key_buffer[1000] = 'p';
  key_buffer[3000] = '\0';
  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "medium size key";
  entry->Close();

  // 19999-byte key.
  CacheTestFillBuffer(key_buffer, sizeof(key_buffer), true);
  key_buffer[19999] = '\0';

  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "long key";
  entry->Close();

  // 0x4000 (16384) byte key -- a power-of-two size.
  CacheTestFillBuffer(key_buffer, 0x4000, true);
  key_buffer[0x4000] = '\0';

  key = key_buffer;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(key == entry->GetKey()) << "16KB key";
  entry->Close();
}
750
// GetKey() round trip on the default backend.
TEST_F(DiskCacheEntryTest, GetKey) {
  InitCache();
  GetKey();
}

// Same scenario on the memory-only backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyGetKey) {
  SetMemoryOnlyMode();
  InitCache();
  GetKey();
}
761
// Verifies GetLastUsed()/GetLastModified() behavior across create, write and
// read, including the cache types (APP_CACHE, SHADER_CACHE) whose branches
// below show they do not refresh the times on every operation.
void DiskCacheEntryTest::GetTimes(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;

  // Creating the entry stamps both times identically.
  Time t1 = Time::Now();
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(entry->GetLastModified() >= t1);
  EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());

  // A write advances the modification time, except for APP_CACHE where it
  // stays at the creation time.
  AddDelay();
  Time t2 = Time::Now();
  EXPECT_TRUE(t2 > t1);
  EXPECT_EQ(0, WriteData(entry, stream_index, 200, NULL, 0, false));
  if (type_ == net::APP_CACHE) {
    EXPECT_TRUE(entry->GetLastModified() < t2);
  } else {
    EXPECT_TRUE(entry->GetLastModified() >= t2);
  }
  EXPECT_TRUE(entry->GetLastModified() == entry->GetLastUsed());

  // A read advances the last-used time only for regular cache types;
  // APP_CACHE and SHADER_CACHE leave both times behind.
  AddDelay();
  Time t3 = Time::Now();
  EXPECT_TRUE(t3 > t2);
  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 0, buffer.get(), kSize));
  if (type_ == net::APP_CACHE) {
    EXPECT_TRUE(entry->GetLastUsed() < t2);
    EXPECT_TRUE(entry->GetLastModified() < t2);
  } else if (type_ == net::SHADER_CACHE) {
    EXPECT_TRUE(entry->GetLastUsed() < t3);
    EXPECT_TRUE(entry->GetLastModified() < t3);
  } else {
    EXPECT_TRUE(entry->GetLastUsed() >= t3);
    EXPECT_TRUE(entry->GetLastModified() < t3);
  }
  entry->Close();
}
800
// Time-stamping behavior on the default backend.
TEST_F(DiskCacheEntryTest, GetTimes) {
  InitCache();
  GetTimes(0);
}

// Same scenario on the memory-only backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyGetTimes) {
  SetMemoryOnlyMode();
  InitCache();
  GetTimes(0);
}

// APP_CACHE and SHADER_CACHE have different time-update policies; GetTimes
// branches on type_ to verify each.
TEST_F(DiskCacheEntryTest, AppCacheGetTimes) {
  SetCacheType(net::APP_CACHE);
  InitCache();
  GetTimes(0);
}

TEST_F(DiskCacheEntryTest, ShaderCacheGetTimes) {
  SetCacheType(net::SHADER_CACHE);
  InitCache();
  GetTimes(0);
}
823
// Verifies that an entry's stream can grow (10 -> 2000 -> 20000 bytes and
// beyond), both while the entry stays open and across close/reopen cycles,
// and that a zero-length write can extend the reported size.
void DiskCacheEntryTest::GrowData(int stream_index) {
  std::string key1("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key1, &entry));

  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  memset(buffer2->data(), 0, kSize);

  // Grow the stream while the entry stays open, checking contents and the
  // reported size at each step.
  base::strlcpy(buffer1->data(), "the data", kSize);
  EXPECT_EQ(10, WriteData(entry, stream_index, 0, buffer1.get(), 10, false));
  EXPECT_EQ(10, ReadData(entry, stream_index, 0, buffer2.get(), 10));
  EXPECT_STREQ("the data", buffer2->data());
  EXPECT_EQ(10, entry->GetDataSize(stream_index));

  EXPECT_EQ(2000,
            WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, entry->GetDataSize(stream_index));
  EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));

  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), kSize, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
  entry->Close();

  // Repeat the growth steps on a second entry, closing and reopening it
  // between steps so each write starts from on-disk state.
  memset(buffer2->data(), 0, kSize);
  std::string key2("Second key");
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry));
  EXPECT_EQ(10, WriteData(entry, stream_index, 0, buffer1.get(), 10, false));
  EXPECT_EQ(10, entry->GetDataSize(stream_index));
  entry->Close();

  // Go from an internal address to a bigger block size.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(2000,
            WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, entry->GetDataSize(stream_index));
  EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 2000));
  entry->Close();
  memset(buffer2->data(), 0, kSize);

  // Go from an internal address to an external one.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), kSize, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), kSize));
  entry->Close();

  // Double check the size from disk.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));

  // Now extend the entry without actual data.
  EXPECT_EQ(0, WriteData(entry, stream_index, 45500, buffer1.get(), 0, false));
  entry->Close();

  // And check again from disk.
  ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
  EXPECT_EQ(45500, entry->GetDataSize(stream_index));
  entry->Close();
}
893
// Stream growth on the default backend.
TEST_F(DiskCacheEntryTest, GrowData) {
  InitCache();
  GrowData(0);
}

// Same scenario with the backend's internal buffering disabled.
TEST_F(DiskCacheEntryTest, GrowDataNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  GrowData(0);
}

// Same scenario on the memory-only backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyGrowData) {
  SetMemoryOnlyMode();
  InitCache();
  GrowData(0);
}
910
// Writes and truncates data on |stream_index|, verifying the reported size as
// the payload moves between internal block storage and an external file.
void DiskCacheEntryTest::TruncateData(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize1 = 20000;
  const int kSize2 = 20000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));

  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  // Simple truncation:
  EXPECT_EQ(200, WriteData(entry, stream_index, 0, buffer1.get(), 200, false));
  EXPECT_EQ(200, entry->GetDataSize(stream_index));
  // A shorter write without the truncate flag must not shrink the entry...
  EXPECT_EQ(100, WriteData(entry, stream_index, 0, buffer1.get(), 100, false));
  EXPECT_EQ(200, entry->GetDataSize(stream_index));
  // ...but the same write with truncate == true does.
  EXPECT_EQ(100, WriteData(entry, stream_index, 0, buffer1.get(), 100, true));
  EXPECT_EQ(100, entry->GetDataSize(stream_index));
  // A zero-length truncating write cuts the entry at the write offset.
  EXPECT_EQ(0, WriteData(entry, stream_index, 50, buffer1.get(), 0, true));
  EXPECT_EQ(50, entry->GetDataSize(stream_index));
  EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer1.get(), 0, true));
  EXPECT_EQ(0, entry->GetDataSize(stream_index));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // Go to an external file.
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), 20000, true));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(20000, ReadData(entry, stream_index, 0, buffer2.get(), 20000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 20000));
  memset(buffer2->data(), 0, kSize2);

  // External file truncation
  // Again, without the truncate flag the size stays at 20000 bytes...
  EXPECT_EQ(18000,
            WriteData(entry, stream_index, 0, buffer1.get(), 18000, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  // ...and with it the entry shrinks to the end of the write.
  EXPECT_EQ(18000,
            WriteData(entry, stream_index, 0, buffer1.get(), 18000, true));
  EXPECT_EQ(18000, entry->GetDataSize(stream_index));
  EXPECT_EQ(0, WriteData(entry, stream_index, 17500, buffer1.get(), 0, true));
  EXPECT_EQ(17500, entry->GetDataSize(stream_index));

  // And back to an internal block.
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 1000, buffer1.get(), 600, true));
  EXPECT_EQ(1600, entry->GetDataSize(stream_index));
  EXPECT_EQ(600, ReadData(entry, stream_index, 1000, buffer2.get(), 600));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 600));
  // The first 1000 bytes written before the truncation must survive it.
  EXPECT_EQ(1000, ReadData(entry, stream_index, 0, buffer2.get(), 1000));
  EXPECT_TRUE(!memcmp(buffer1->data(), buffer2->data(), 1000))
      << "Preserves previous data";

  // Go from external file to zero length.
  EXPECT_EQ(20000,
            WriteData(entry, stream_index, 0, buffer1.get(), 20000, true));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer1.get(), 0, true));
  EXPECT_EQ(0, entry->GetDataSize(stream_index));

  entry->Close();
}
975
// TruncateData on stream 0 with the default blockfile cache.
TEST_F(DiskCacheEntryTest, TruncateData) {
  InitCache();
  TruncateData(0);
}

// TruncateData with the backend's internal buffering disabled.
TEST_F(DiskCacheEntryTest, TruncateDataNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  TruncateData(0);
}

// TruncateData against the in-memory cache backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyTruncateData) {
  SetMemoryOnlyMode();
  InitCache();
  TruncateData(0);
}
992
// Zero-length reads and writes (with a NULL buffer) must succeed, and a
// zero-length write past the current end must extend the entry, with the
// implicit gap reading back as zeros.
void DiskCacheEntryTest::ZeroLengthIO(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  EXPECT_EQ(0, ReadData(entry, stream_index, 0, NULL, 0));
  EXPECT_EQ(0, WriteData(entry, stream_index, 0, NULL, 0, false));

  // This write should extend the entry.
  EXPECT_EQ(0, WriteData(entry, stream_index, 1000, NULL, 0, false));
  EXPECT_EQ(0, ReadData(entry, stream_index, 500, NULL, 0));
  EXPECT_EQ(0, ReadData(entry, stream_index, 2000, NULL, 0));
  EXPECT_EQ(1000, entry->GetDataSize(stream_index));

  EXPECT_EQ(0, WriteData(entry, stream_index, 100000, NULL, 0, true));
  EXPECT_EQ(0, ReadData(entry, stream_index, 50000, NULL, 0));
  EXPECT_EQ(100000, entry->GetDataSize(stream_index));

  // Let's verify the actual content.
  const int kSize = 20;
  const char zeros[kSize] = {};
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));

  // Scribble over the buffer first so a passing comparison proves the read
  // really zero-filled it.
  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 500, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));

  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 5000, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));

  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 50000, buffer.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer->data(), zeros, kSize));

  entry->Close();
}
1030
// ZeroLengthIO on stream 0 with the default blockfile cache.
TEST_F(DiskCacheEntryTest, ZeroLengthIO) {
  InitCache();
  ZeroLengthIO(0);
}

// ZeroLengthIO with the backend's internal buffering disabled.
TEST_F(DiskCacheEntryTest, ZeroLengthIONoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  ZeroLengthIO(0);
}

// ZeroLengthIO against the in-memory cache backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyZeroLengthIO) {
  SetMemoryOnlyMode();
  InitCache();
  ZeroLengthIO(0);
}
1047
// Tests that we handle the content correctly when buffering, a feature of the
// standard cache that permits fast responses to certain reads.  The test
// interleaves writes and reads at offsets chosen so that some data is still
// held in memory while other data has already reached the backing store, and
// verifies every read regardless of where the bytes currently live.
void DiskCacheEntryTest::Buffering() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, true);
  CacheTestFillBuffer(buffer2->data(), kSize, true);

  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
  entry->Close();

  // Write a little more and read what we wrote before.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 1, 5000, buffer1.get(), kSize, false));
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  // Now go to an external file.
  EXPECT_EQ(kSize, WriteData(entry, 1, 18000, buffer1.get(), kSize, false));
  entry->Close();

  // Write something else and verify old data.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 1, 10000, buffer1.get(), kSize, false));
  // Each read target buffer is re-randomized so stale contents cannot make a
  // comparison pass by accident.
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 5000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 0, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  // Extend the file some more.
  EXPECT_EQ(kSize, WriteData(entry, 1, 23000, buffer1.get(), kSize, false));
  entry->Close();

  // And now make sure that we can deal with data in both places (ram/disk).
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, WriteData(entry, 1, 17000, buffer1.get(), kSize, false));

  // We should not overwrite the data at 18000 with this.
  EXPECT_EQ(kSize, WriteData(entry, 1, 19000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 18000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 17000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  // The write at 22900 overlaps the old end (23000 + kSize); reads near the
  // boundary should see the overlapping write's bytes.
  EXPECT_EQ(kSize, WriteData(entry, 1, 22900, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(100, ReadData(entry, 1, 23000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));

  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(100, ReadData(entry, 1, 23100, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + 100, 100));

  // Extend the file again and read before without closing the entry.
  EXPECT_EQ(kSize, WriteData(entry, 1, 25000, buffer1.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 1, 45000, buffer1.get(), kSize, false));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 25000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, 1, 45000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data(), kSize));

  entry->Close();
}
1125
// Buffering with the default blockfile cache.
TEST_F(DiskCacheEntryTest, Buffering) {
  InitCache();
  Buffering();
}

// Same scenario with the backend's internal buffering disabled; the content
// checks must still hold when everything goes straight to storage.
TEST_F(DiskCacheEntryTest, BufferingNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  Buffering();
}
1136
1137// Checks that entries are zero length when created.
1138void DiskCacheEntryTest::SizeAtCreate() {
1139  const char key[]  = "the first key";
1140  disk_cache::Entry* entry;
1141  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1142
1143  const int kNumStreams = 3;
1144  for (int i = 0; i < kNumStreams; ++i)
1145    EXPECT_EQ(0, entry->GetDataSize(i));
1146  entry->Close();
1147}
1148
// SizeAtCreate with the default blockfile cache.
TEST_F(DiskCacheEntryTest, SizeAtCreate) {
  InitCache();
  SizeAtCreate();
}

// SizeAtCreate against the in-memory cache backend.
TEST_F(DiskCacheEntryTest, MemoryOnlySizeAtCreate) {
  SetMemoryOnlyMode();
  InitCache();
  SizeAtCreate();
}
1159
// Some extra tests to make sure that buffering works properly when changing
// the entry size.  The entry is repeatedly grown and shrunk (with and without
// the truncate flag) while reads straddle the old/new end of the data,
// verifying that gaps read back as zeros and overlapping regions keep the
// written bytes.
void DiskCacheEntryTest::SizeChanges(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 200;
  const char zeros[kSize] = {};
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, true);
  CacheTestFillBuffer(buffer2->data(), kSize, true);

  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 0, buffer1.get(), kSize, true));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 17000, buffer1.get(), kSize, true));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 23000, buffer1.get(), kSize, true));
  entry->Close();

  // Extend the file and read between the old size and the new write.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(23000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 25000, buffer1.get(), kSize, true));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
  // The never-written gap at 24000 must read back as zeros.
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 24000, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, kSize));

  // Read at the end of the old file size.
  EXPECT_EQ(
      kSize,
      ReadData(entry, stream_index, 23000 + kSize - 35, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 35, 35));

  // Read slightly before the last write.
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 24900, buffer2.get(), kSize));
  // First 100 bytes are from the gap (zeros), the rest from the 25000 write.
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // Extend the entry a little more.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 26000, buffer1.get(), kSize, true));
  EXPECT_EQ(26000 + kSize, entry->GetDataSize(stream_index));
  CacheTestFillBuffer(buffer2->data(), kSize, true);
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 25900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // And now reduce the size.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 25000, buffer1.get(), kSize, true));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
  // Reads are clipped at the new, smaller end of the data.
  EXPECT_EQ(
      28,
      ReadData(entry, stream_index, 25000 + kSize - 28, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), buffer1->data() + kSize - 28, 28));

  // Reduce the size with a buffer that is not extending the size.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 24000, buffer1.get(), kSize, false));
  EXPECT_EQ(25000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 24500, buffer1.get(), kSize, true));
  EXPECT_EQ(24500 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 23900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // And now reduce the size below the old size.
  EXPECT_EQ(kSize,
            WriteData(entry, stream_index, 19000, buffer1.get(), kSize, true));
  EXPECT_EQ(19000 + kSize, entry->GetDataSize(stream_index));
  EXPECT_EQ(kSize, ReadData(entry, stream_index, 18900, buffer2.get(), kSize));
  EXPECT_TRUE(!memcmp(buffer2->data(), zeros, 100));
  EXPECT_TRUE(!memcmp(buffer2->data() + 100, buffer1->data(), kSize - 100));

  // Verify that the actual file is truncated.
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(19000 + kSize, entry->GetDataSize(stream_index));

  // Extend the newly opened file with a zero length write, expect zero fill.
  EXPECT_EQ(
      0,
      WriteData(entry, stream_index, 20000 + kSize, buffer1.get(), 0, false));
  EXPECT_EQ(kSize,
            ReadData(entry, stream_index, 19000 + kSize, buffer1.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer1->data(), zeros, kSize));

  entry->Close();
}
1255
// SizeChanges on stream 1 with the default blockfile cache.
TEST_F(DiskCacheEntryTest, SizeChanges) {
  InitCache();
  SizeChanges(1);
}

// SizeChanges with the backend's internal buffering disabled.
TEST_F(DiskCacheEntryTest, SizeChangesNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  SizeChanges(1);
}
1266
1267// Write more than the total cache capacity but to a single entry. |size| is the
1268// amount of bytes to write each time.
1269void DiskCacheEntryTest::ReuseEntry(int size, int stream_index) {
1270  std::string key1("the first key");
1271  disk_cache::Entry* entry;
1272  ASSERT_EQ(net::OK, CreateEntry(key1, &entry));
1273
1274  entry->Close();
1275  std::string key2("the second key");
1276  ASSERT_EQ(net::OK, CreateEntry(key2, &entry));
1277
1278  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size));
1279  CacheTestFillBuffer(buffer->data(), size, false);
1280
1281  for (int i = 0; i < 15; i++) {
1282    EXPECT_EQ(0, WriteData(entry, stream_index, 0, buffer.get(), 0, true));
1283    EXPECT_EQ(size,
1284              WriteData(entry, stream_index, 0, buffer.get(), size, false));
1285    entry->Close();
1286    ASSERT_EQ(net::OK, OpenEntry(key2, &entry));
1287  }
1288
1289  entry->Close();
1290  ASSERT_EQ(net::OK, OpenEntry(key1, &entry)) << "have not evicted this entry";
1291  entry->Close();
1292}
1293
// 20 KB writes into a 200 KB cache: the payload is large enough to live in an
// external file.
TEST_F(DiskCacheEntryTest, ReuseExternalEntry) {
  SetMaxSize(200 * 1024);
  InitCache();
  ReuseEntry(20 * 1024, 0);
}

// Same external-sized payload against the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyReuseExternalEntry) {
  SetMemoryOnlyMode();
  SetMaxSize(200 * 1024);
  InitCache();
  ReuseEntry(20 * 1024, 0);
}

// 10 KB writes into a 100 KB cache: the payload stays in internal blocks.
TEST_F(DiskCacheEntryTest, ReuseInternalEntry) {
  SetMaxSize(100 * 1024);
  InitCache();
  ReuseEntry(10 * 1024, 0);
}

// Same internal-sized payload against the in-memory backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyReuseInternalEntry) {
  SetMemoryOnlyMode();
  SetMaxSize(100 * 1024);
  InitCache();
  ReuseEntry(10 * 1024, 0);
}
1319
// Reading somewhere that was not written should return zeros.  The entry is
// grown past its previous end several times (buffered, reloaded from disk,
// truncating, internal blocks and external file) and in each configuration
// the unwritten gap is compared against a zeroed buffer.
void DiskCacheEntryTest::InvalidData(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize1 = 20000;
  const int kSize2 = 20000;
  const int kSize3 = 20000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer3(new net::IOBuffer(kSize3));

  // |buffer2| stays all-zero and serves as the expected value for gap reads.
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  // Simple data grow:
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 400, buffer1.get(), 200, false));
  EXPECT_EQ(600, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, ReadData(entry, stream_index, 300, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // The entry is now on disk. Load it and extend it.
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 800, buffer1.get(), 200, false));
  EXPECT_EQ(1000, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, ReadData(entry, stream_index, 700, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // This time using truncate.
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 1800, buffer1.get(), 200, true));
  EXPECT_EQ(2000, entry->GetDataSize(stream_index));
  EXPECT_EQ(100, ReadData(entry, stream_index, 1500, buffer3.get(), 100));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 100));

  // Go to an external file.
  EXPECT_EQ(200,
            WriteData(entry, stream_index, 19800, buffer1.get(), 200, false));
  EXPECT_EQ(20000, entry->GetDataSize(stream_index));
  EXPECT_EQ(4000, ReadData(entry, stream_index, 14000, buffer3.get(), 4000));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 4000));

  // And back to an internal block.
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 1000, buffer1.get(), 600, true));
  EXPECT_EQ(1600, entry->GetDataSize(stream_index));
  // This range was just written, so it must match the source data.
  EXPECT_EQ(600, ReadData(entry, stream_index, 1000, buffer3.get(), 600));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer1->data(), 600));

  // Extend it again.
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 2000, buffer1.get(), 600, false));
  EXPECT_EQ(2600, entry->GetDataSize(stream_index));
  EXPECT_EQ(200, ReadData(entry, stream_index, 1800, buffer3.get(), 200));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));

  // And again (with truncation flag).
  EXPECT_EQ(600,
            WriteData(entry, stream_index, 3000, buffer1.get(), 600, true));
  EXPECT_EQ(3600, entry->GetDataSize(stream_index));
  EXPECT_EQ(200, ReadData(entry, stream_index, 2800, buffer3.get(), 200));
  EXPECT_TRUE(!memcmp(buffer3->data(), buffer2->data(), 200));

  entry->Close();
}
1391
// InvalidData on stream 0 with the default blockfile cache.
TEST_F(DiskCacheEntryTest, InvalidData) {
  InitCache();
  InvalidData(0);
}

// InvalidData with the backend's internal buffering disabled.
TEST_F(DiskCacheEntryTest, InvalidDataNoBuffer) {
  InitCache();
  cache_impl_->SetFlags(disk_cache::kNoBuffering);
  InvalidData(0);
}

// InvalidData against the in-memory cache backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyInvalidData) {
  SetMemoryOnlyMode();
  InitCache();
  InvalidData(0);
}
1408
// Tests that the cache preserves the buffer of an IO operation.  The test's
// reference to the IOBuffer is dropped while the asynchronous operation is
// still pending; the operation must nevertheless complete successfully, which
// requires the cache to keep its own reference to the buffer.
void DiskCacheEntryTest::ReadWriteDestroyBuffer(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);

  net::TestCompletionCallback cb;
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteData(
                stream_index, 0, buffer.get(), kSize, cb.callback(), false));

  // Release our reference to the buffer.
  buffer = NULL;
  EXPECT_EQ(kSize, cb.WaitForResult());

  // And now test with a Read().
  buffer = new net::IOBuffer(kSize);
  CacheTestFillBuffer(buffer->data(), kSize, false);

  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->ReadData(stream_index, 0, buffer.get(), kSize, cb.callback()));
  // Again drop our reference while the read is in flight.
  buffer = NULL;
  EXPECT_EQ(kSize, cb.WaitForResult());

  entry->Close();
}
1440
// ReadWriteDestroyBuffer on stream 0 with the default blockfile cache.
TEST_F(DiskCacheEntryTest, ReadWriteDestroyBuffer) {
  InitCache();
  ReadWriteDestroyBuffer(0);
}
1445
// Dooms a freshly created entry and then an entry with a very long key and
// data on two streams; afterwards the cache must report zero entries.
void DiskCacheEntryTest::DoomNormalEntry() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  entry->Doom();
  entry->Close();

  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, true);
  // NUL-terminate so the random buffer can be used as a (~20000 char) key.
  buffer->data()[19999] = '\0';

  key = buffer->data();
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_EQ(20000, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  EXPECT_EQ(20000, WriteData(entry, 1, 0, buffer.get(), kSize, false));
  entry->Doom();
  entry->Close();

  // Both doomed entries must be gone once pending operations drain.
  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
}
1468
// DoomNormalEntry with the default blockfile cache.
TEST_F(DiskCacheEntryTest, DoomEntry) {
  InitCache();
  DoomNormalEntry();
}

// DoomNormalEntry against the in-memory cache backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyDoomEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomNormalEntry();
}
1479
// Tests dooming an entry that's linked to an open entry.  Three entries are
// created so they are linked together in the backend; "bar" is doomed while
// "foo" is still open, then "foo" itself is doomed while a second reference
// to it remains open.  Afterwards "fixed" must still be reachable.
void DiskCacheEntryTest::DoomEntryNextToOpenEntry() {
  disk_cache::Entry* entry1;
  disk_cache::Entry* entry2;
  ASSERT_EQ(net::OK, CreateEntry("fixed", &entry1));
  entry1->Close();
  ASSERT_EQ(net::OK, CreateEntry("foo", &entry1));
  entry1->Close();
  ASSERT_EQ(net::OK, CreateEntry("bar", &entry1));
  entry1->Close();

  // Keep "foo" open while dooming its neighbor "bar".
  ASSERT_EQ(net::OK, OpenEntry("foo", &entry1));
  ASSERT_EQ(net::OK, OpenEntry("bar", &entry2));
  entry2->Doom();
  entry2->Close();

  // Doom "foo" through a second handle while the first is still open.
  ASSERT_EQ(net::OK, OpenEntry("foo", &entry2));
  entry2->Doom();
  entry2->Close();
  entry1->Close();

  // The untouched entry must have survived all of the above.
  ASSERT_EQ(net::OK, OpenEntry("fixed", &entry1));
  entry1->Close();
}
1504
// DoomEntryNextToOpenEntry with the default blockfile cache.
TEST_F(DiskCacheEntryTest, DoomEntryNextToOpenEntry) {
  InitCache();
  DoomEntryNextToOpenEntry();
}

// Same scenario with the new eviction algorithm enabled.
TEST_F(DiskCacheEntryTest, NewEvictionDoomEntryNextToOpenEntry) {
  SetNewEviction();
  InitCache();
  DoomEntryNextToOpenEntry();
}

// Same scenario with the APP_CACHE cache type.
TEST_F(DiskCacheEntryTest, AppCacheDoomEntryNextToOpenEntry) {
  SetCacheType(net::APP_CACHE);
  InitCache();
  DoomEntryNextToOpenEntry();
}
1521
// Verify that basic operations work as expected with doomed entries.  A
// doomed-but-still-open entry must continue to support reads, writes, key
// retrieval and timestamp updates, even though it no longer counts toward
// the cache's entry count.
void DiskCacheEntryTest::DoomedEntry(int stream_index) {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  entry->Doom();

  FlushQueueForTest();
  EXPECT_EQ(0, cache_->GetEntryCount());
  // Remember "now" so the IO below provably refreshes the entry's timestamps.
  Time initial = Time::Now();
  AddDelay();

  const int kSize1 = 2000;
  const int kSize2 = 2000;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  memset(buffer2->data(), 0, kSize2);

  EXPECT_EQ(2000,
            WriteData(entry, stream_index, 0, buffer1.get(), 2000, false));
  EXPECT_EQ(2000, ReadData(entry, stream_index, 0, buffer2.get(), 2000));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kSize1));
  EXPECT_EQ(key, entry->GetKey());
  EXPECT_TRUE(initial < entry->GetLastModified());
  EXPECT_TRUE(initial < entry->GetLastUsed());

  entry->Close();
}
1551
// DoomedEntry on stream 0 with the default blockfile cache.
TEST_F(DiskCacheEntryTest, DoomedEntry) {
  InitCache();
  DoomedEntry(0);
}

// DoomedEntry against the in-memory cache backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyDoomedEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomedEntry(0);
}
1562
// Tests that we discard entries if the data is missing.  The data file
// backing an entry is deleted behind the cache's back; the subsequent read
// must fail with ERR_FILE_NOT_FOUND and the entry must then be dropped.
TEST_F(DiskCacheEntryTest, MissingData) {
  InitCache();

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Write to an external file.
  const int kSize = 20000;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  entry->Close();
  FlushQueueForTest();

  // Delete the file holding the entry's data.  NOTE(review): 0x80000001 looks
  // like the cache address of the first separate data file — confirm against
  // disk_cache::Addr's encoding.
  disk_cache::Addr address(0x80000001);
  base::FilePath name = cache_impl_->GetFileName(address);
  EXPECT_TRUE(base::DeleteFile(name, false));

  // Attempt to read the data.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(net::ERR_FILE_NOT_FOUND,
            ReadData(entry, 0, 0, buffer.get(), kSize));
  entry->Close();

  // The entry should be gone.
  ASSERT_NE(net::OK, OpenEntry(key, &entry));
}
1592
1593// Test that child entries in a memory cache backend are not visible from
1594// enumerations.
1595TEST_F(DiskCacheEntryTest, MemoryOnlyEnumerationWithSparseEntries) {
1596  SetMemoryOnlyMode();
1597  InitCache();
1598
1599  const int kSize = 4096;
1600  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
1601  CacheTestFillBuffer(buf->data(), kSize, false);
1602
1603  std::string key("the first key");
1604  disk_cache::Entry* parent_entry;
1605  ASSERT_EQ(net::OK, CreateEntry(key, &parent_entry));
1606
1607  // Writes to the parent entry.
1608  EXPECT_EQ(kSize,
1609            parent_entry->WriteSparseData(
1610                0, buf.get(), kSize, net::CompletionCallback()));
1611
1612  // This write creates a child entry and writes to it.
1613  EXPECT_EQ(kSize,
1614            parent_entry->WriteSparseData(
1615                8192, buf.get(), kSize, net::CompletionCallback()));
1616
1617  parent_entry->Close();
1618
1619  // Perform the enumerations.
1620  scoped_ptr<TestIterator> iter = CreateIterator();
1621  disk_cache::Entry* entry = NULL;
1622  int count = 0;
1623  while (iter->OpenNextEntry(&entry) == net::OK) {
1624    ASSERT_TRUE(entry != NULL);
1625    ++count;
1626    disk_cache::MemEntryImpl* mem_entry =
1627        reinterpret_cast<disk_cache::MemEntryImpl*>(entry);
1628    EXPECT_EQ(disk_cache::MemEntryImpl::kParentEntry, mem_entry->type());
1629    mem_entry->Close();
1630  }
1631  EXPECT_EQ(1, count);
1632}
1633
1634// Writes |buf_1| to offset and reads it back as |buf_2|.
1635void VerifySparseIO(disk_cache::Entry* entry, int64 offset,
1636                    net::IOBuffer* buf_1, int size, net::IOBuffer* buf_2) {
1637  net::TestCompletionCallback cb;
1638
1639  memset(buf_2->data(), 0, size);
1640  int ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
1641  EXPECT_EQ(0, cb.GetResult(ret));
1642
1643  ret = entry->WriteSparseData(offset, buf_1, size, cb.callback());
1644  EXPECT_EQ(size, cb.GetResult(ret));
1645
1646  ret = entry->ReadSparseData(offset, buf_2, size, cb.callback());
1647  EXPECT_EQ(size, cb.GetResult(ret));
1648
1649  EXPECT_EQ(0, memcmp(buf_1->data(), buf_2->data(), size));
1650}
1651
1652// Reads |size| bytes from |entry| at |offset| and verifies that they are the
1653// same as the content of the provided |buffer|.
1654void VerifyContentSparseIO(disk_cache::Entry* entry, int64 offset, char* buffer,
1655                           int size) {
1656  net::TestCompletionCallback cb;
1657
1658  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(size));
1659  memset(buf_1->data(), 0, size);
1660  int ret = entry->ReadSparseData(offset, buf_1.get(), size, cb.callback());
1661  EXPECT_EQ(size, cb.GetResult(ret));
1662  EXPECT_EQ(0, memcmp(buf_1->data(), buffer, size));
1663}
1664
1665void DiskCacheEntryTest::BasicSparseIO() {
1666  std::string key("the first key");
1667  disk_cache::Entry* entry;
1668  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
1669
1670  const int kSize = 2048;
1671  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
1672  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
1673  CacheTestFillBuffer(buf_1->data(), kSize, false);
1674
1675  // Write at offset 0.
1676  VerifySparseIO(entry, 0, buf_1.get(), kSize, buf_2.get());
1677
1678  // Write at offset 0x400000 (4 MB).
1679  VerifySparseIO(entry, 0x400000, buf_1.get(), kSize, buf_2.get());
1680
1681  // Write at offset 0x800000000 (32 GB).
1682  VerifySparseIO(entry, 0x800000000LL, buf_1.get(), kSize, buf_2.get());
1683
1684  entry->Close();
1685
1686  // Check everything again.
1687  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
1688  VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
1689  VerifyContentSparseIO(entry, 0x400000, buf_1->data(), kSize);
1690  VerifyContentSparseIO(entry, 0x800000000LL, buf_1->data(), kSize);
1691  entry->Close();
1692}
1693
// BasicSparseIO with the default blockfile cache.
TEST_F(DiskCacheEntryTest, BasicSparseIO) {
  InitCache();
  BasicSparseIO();
}

// BasicSparseIO against the in-memory cache backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyBasicSparseIO) {
  SetMemoryOnlyMode();
  InitCache();
  BasicSparseIO();
}
1704
// Round-trips a single 1.2 MB sparse write, large enough to span multiple
// internal child entries, and re-validates it after reopening.
void DiskCacheEntryTest::HugeSparseIO() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Write 1.2 MB so that we cover multiple entries.
  const int kSize = 1200 * 1024;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB).
  VerifySparseIO(entry, 0x20F0000, buf_1.get(), kSize, buf_2.get());
  entry->Close();

  // Check it again.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  VerifyContentSparseIO(entry, 0x20F0000, buf_1->data(), kSize);
  entry->Close();
}
1725
// HugeSparseIO with the default blockfile cache.
TEST_F(DiskCacheEntryTest, HugeSparseIO) {
  InitCache();
  HugeSparseIO();
}

// HugeSparseIO against the in-memory cache backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyHugeSparseIO) {
  SetMemoryOnlyMode();
  InitCache();
  HugeSparseIO();
}
1736
// Writes two adjacent 16 KB sparse ranges (leaving gaps before and after) and
// exercises GetAvailableRange: queries fully inside, fully outside, straddling
// the start, and clipped by the |len| argument.
void DiskCacheEntryTest::GetAvailableRange() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 16 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB), and 0x20F4400 (33 MB - 47 KB).
  EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F4400, buf.get(), kSize));

  // We stop at the first empty block.
  int64 start;
  net::TestCompletionCallback cb;
  int rv = entry->GetAvailableRange(
      0x20F0000, kSize * 2, &start, cb.callback());
  EXPECT_EQ(kSize, cb.GetResult(rv));
  EXPECT_EQ(0x20F0000, start);

  // Queries entirely before the written data must report nothing available.
  start = 0;
  rv = entry->GetAvailableRange(0, kSize, &start, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));
  rv = entry->GetAvailableRange(
      0x20F0000 - kSize, kSize, &start, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));
  // A long query starting at 0 finds the first written range.
  rv = entry->GetAvailableRange(0, 0x2100000, &start, cb.callback());
  EXPECT_EQ(kSize, cb.GetResult(rv));
  EXPECT_EQ(0x20F0000, start);

  // We should be able to Read based on the results of GetAvailableRange.
  start = -1;
  rv = entry->GetAvailableRange(0x2100000, kSize, &start, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));
  rv = entry->ReadSparseData(start, buf.get(), kSize, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));

  // A query starting inside the first range yields its remaining tail.
  start = 0;
  rv = entry->GetAvailableRange(0x20F2000, kSize, &start, cb.callback());
  EXPECT_EQ(0x2000, cb.GetResult(rv));
  EXPECT_EQ(0x20F2000, start);
  EXPECT_EQ(0x2000, ReadSparseData(entry, start, buf.get(), kSize));

  // Make sure that we respect the |len| argument.
  start = 0;
  rv = entry->GetAvailableRange(
      0x20F0001 - kSize, kSize, &start, cb.callback());
  EXPECT_EQ(1, cb.GetResult(rv));
  EXPECT_EQ(0x20F0000, start);

  entry->Close();
}
1790
// GetAvailableRange() on the default (on-disk) backend.
TEST_F(DiskCacheEntryTest, GetAvailableRange) {
  InitCache();
  GetAvailableRange();
}

// Same scenario on the memory-only backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyGetAvailableRange) {
  SetMemoryOnlyMode();
  InitCache();
  GetAvailableRange();
}
1801
// Verifies that CouldBeSparse() is true only for entries that have received
// sparse writes — both while the entry is still open and after it is closed
// and reopened — and stays false for entries written through the regular
// stream interface.
void DiskCacheEntryTest::CouldBeSparse() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 16 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  // Write at offset 0x20F0000 (33 MB - 64 KB).
  EXPECT_EQ(kSize, WriteSparseData(entry, 0x20F0000, buf.get(), kSize));

  EXPECT_TRUE(entry->CouldBeSparse());
  entry->Close();

  // The sparse flag must survive a close/reopen cycle.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_TRUE(entry->CouldBeSparse());
  entry->Close();

  // Now verify a regular entry.
  key.assign("another key");
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_FALSE(entry->CouldBeSparse());

  // Writing all three regular streams must not mark the entry sparse.
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buf.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buf.get(), kSize, false));
  EXPECT_EQ(kSize, WriteData(entry, 2, 0, buf.get(), kSize, false));

  EXPECT_FALSE(entry->CouldBeSparse());
  entry->Close();

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_FALSE(entry->CouldBeSparse());
  entry->Close();
}
1837
// CouldBeSparse() on the default (on-disk) backend.
TEST_F(DiskCacheEntryTest, CouldBeSparse) {
  InitCache();
  CouldBeSparse();
}

// Same scenario on the memory-only backend.
TEST_F(DiskCacheEntryTest, MemoryCouldBeSparse) {
  SetMemoryOnlyMode();
  InitCache();
  CouldBeSparse();
}
1848
// Performs 1 KB sparse reads and writes at offsets that are not aligned to
// the internal sparse block size (0 vs. 9000), on the memory-only backend,
// then verifies both whole regions and one large misaligned write.
TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedSparseIO) {
  SetMemoryOnlyMode();
  InitCache();

  const int kSize = 8192;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // This loop writes back to back starting from offset 0 and 9000.
  for (int i = 0; i < kSize; i += 1024) {
    // buf_3 aliases buf_1's storage at offset |i|; no copy is made.
    scoped_refptr<net::WrappedIOBuffer> buf_3(
      new net::WrappedIOBuffer(buf_1->data() + i));
    VerifySparseIO(entry, i, buf_3.get(), 1024, buf_2.get());
    VerifySparseIO(entry, 9000 + i, buf_3.get(), 1024, buf_2.get());
  }

  // Make sure we have data written.
  VerifyContentSparseIO(entry, 0, buf_1->data(), kSize);
  VerifyContentSparseIO(entry, 9000, buf_1->data(), kSize);

  // This tests a large write that spans 3 entries from a misaligned offset.
  VerifySparseIO(entry, 20481, buf_1.get(), 8192, buf_2.get());

  entry->Close();
}
1879
// GetAvailableRange() behavior on the memory-only backend with three 1 KB
// ranges (at 0, 5120, 10000) plus one 8 KB range (at 50000) that crosses a
// child-entry boundary. Each query below probes a different relationship
// between the query window and the filled regions.
TEST_F(DiskCacheEntryTest, MemoryOnlyMisalignedGetAvailableRange) {
  SetMemoryOnlyMode();
  InitCache();

  const int kSize = 8192;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  disk_cache::Entry* entry;
  std::string key("the first key");
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Writes in the middle of an entry.
  EXPECT_EQ(
      1024,
      entry->WriteSparseData(0, buf.get(), 1024, net::CompletionCallback()));
  EXPECT_EQ(
      1024,
      entry->WriteSparseData(5120, buf.get(), 1024, net::CompletionCallback()));
  EXPECT_EQ(1024,
            entry->WriteSparseData(
                10000, buf.get(), 1024, net::CompletionCallback()));

  // Writes in the middle of an entry and spans 2 child entries.
  EXPECT_EQ(8192,
            entry->WriteSparseData(
                50000, buf.get(), 8192, net::CompletionCallback()));

  int64 start;
  net::TestCompletionCallback cb;
  // Test that we stop at a discontinuous child at the second block.
  int rv = entry->GetAvailableRange(0, 10000, &start, cb.callback());
  EXPECT_EQ(1024, cb.GetResult(rv));
  EXPECT_EQ(0, start);

  // Test that number of bytes is reported correctly when we start from the
  // middle of a filled region.
  rv = entry->GetAvailableRange(512, 10000, &start, cb.callback());
  EXPECT_EQ(512, cb.GetResult(rv));
  EXPECT_EQ(512, start);

  // Test that we found bytes in the child of next block.
  rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
  EXPECT_EQ(1024, cb.GetResult(rv));
  EXPECT_EQ(5120, start);

  // Test that the desired length is respected. It starts within a filled
  // region.
  rv = entry->GetAvailableRange(5500, 512, &start, cb.callback());
  EXPECT_EQ(512, cb.GetResult(rv));
  EXPECT_EQ(5500, start);

  // Test that the desired length is respected. It starts before a filled
  // region.
  rv = entry->GetAvailableRange(5000, 620, &start, cb.callback());
  EXPECT_EQ(500, cb.GetResult(rv));
  EXPECT_EQ(5120, start);

  // Test that multiple blocks are scanned.
  rv = entry->GetAvailableRange(40000, 20000, &start, cb.callback());
  EXPECT_EQ(8192, cb.GetResult(rv));
  EXPECT_EQ(50000, start);

  entry->Close();
}
1945
// Writes two adjacent sparse ranges to the same entry across a close/reopen,
// then checks the backend's entry count. Memory-only and simple backends
// track only the two top-level entries; the blockfile backend also counts a
// sparse child entry, for a total of three.
void DiskCacheEntryTest::UpdateSparseEntry() {
  std::string key("the first key");
  disk_cache::Entry* entry1;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));

  const int kSize = 2048;
  scoped_refptr<net::IOBuffer> buf_1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buf_2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf_1->data(), kSize, false);

  // Write at offset 0.
  VerifySparseIO(entry1, 0, buf_1.get(), kSize, buf_2.get());
  entry1->Close();

  // Write at offset 2048.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry1));
  VerifySparseIO(entry1, 2048, buf_1.get(), kSize, buf_2.get());

  disk_cache::Entry* entry2;
  ASSERT_EQ(net::OK, CreateEntry("the second key", &entry2));

  entry1->Close();
  entry2->Close();
  FlushQueueForTest();
  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(2, cache_->GetEntryCount());
  else
    EXPECT_EQ(3, cache_->GetEntryCount());
}
1975
// Sparse entry updates with the media-cache flavor of the on-disk backend.
TEST_F(DiskCacheEntryTest, UpdateSparseEntry) {
  SetCacheType(net::MEDIA_CACHE);
  InitCache();
  UpdateSparseEntry();
}

// Same scenario on the memory-only backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyUpdateSparseEntry) {
  SetMemoryOnlyMode();
  SetCacheType(net::MEDIA_CACHE);
  InitCache();
  UpdateSparseEntry();
}
1988
// Creates two sparse entries with writes at exponentially growing offsets,
// then dooms one while it is still open and the other after it is closed.
// Either way, the cache should end up empty once all pending work runs.
void DiskCacheEntryTest::DoomSparseEntry() {
  std::string key1("the first key");
  std::string key2("the second key");
  disk_cache::Entry *entry1, *entry2;
  ASSERT_EQ(net::OK, CreateEntry(key1, &entry1));
  ASSERT_EQ(net::OK, CreateEntry(key2, &entry2));

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  int64 offset = 1024;
  // Write to a bunch of ranges (each 4x farther out than the last).
  for (int i = 0; i < 12; i++) {
    EXPECT_EQ(kSize, WriteSparseData(entry1, offset, buf.get(), kSize));
    // Keep the second map under the default size.
    if (i < 9)
      EXPECT_EQ(kSize, WriteSparseData(entry2, offset, buf.get(), kSize));

    offset *= 4;
  }

  // The blockfile backend also counts sparse child entries, hence 15 instead
  // of the 2 top-level entries seen by the memory-only and simple backends.
  if (memory_only_ || simple_cache_mode_)
    EXPECT_EQ(2, cache_->GetEntryCount());
  else
    EXPECT_EQ(15, cache_->GetEntryCount());

  // Doom the first entry while it's still open.
  entry1->Doom();
  entry1->Close();
  entry2->Close();

  // Doom the second entry after it's fully saved.
  EXPECT_EQ(net::OK, DoomEntry(key2));

  // Make sure we do all needed work. This may fail for entry2 if between Close
  // and DoomEntry the system decides to remove all traces of the file from the
  // system cache so we don't see that there is pending IO.
  base::MessageLoop::current()->RunUntilIdle();

  if (memory_only_) {
    EXPECT_EQ(0, cache_->GetEntryCount());
  } else {
    if (5 == cache_->GetEntryCount()) {
      // Most likely we are waiting for the result of reading the sparse info
      // (it's always async on Posix so it is easy to miss). Unfortunately we
      // don't have any signal to watch for so we can only wait.
      base::PlatformThread::Sleep(base::TimeDelta::FromMilliseconds(500));
      base::MessageLoop::current()->RunUntilIdle();
    }
    EXPECT_EQ(0, cache_->GetEntryCount());
  }
}
2042
// Doom of sparse entries on the default backend, with cache I/O running on
// this thread so the doom can be observed synchronously.
TEST_F(DiskCacheEntryTest, DoomSparseEntry) {
  UseCurrentThread();
  InitCache();
  DoomSparseEntry();
}

// Same scenario on the memory-only backend.
TEST_F(DiskCacheEntryTest, MemoryOnlyDoomSparseEntry) {
  SetMemoryOnlyMode();
  InitCache();
  DoomSparseEntry();
}
2054
2055// A CompletionCallback wrapper that deletes the cache from within the callback.
2056// The way a CompletionCallback works means that all tasks (even new ones)
2057// are executed by the message loop before returning to the caller so the only
2058// way to simulate a race is to execute what we want on the callback.
class SparseTestCompletionCallback: public net::TestCompletionCallback {
 public:
  // Takes ownership of |cache| so the backend can be destroyed from within
  // SetResult(), i.e. while one of its own operations is completing.
  explicit SparseTestCompletionCallback(scoped_ptr<disk_cache::Backend> cache)
      : cache_(cache.Pass()) {
  }

 private:
  // Deletes the backend first, then records the result, simulating the
  // backend being torn down during callback delivery.
  virtual void SetResult(int result) OVERRIDE {
    cache_.reset();
    TestCompletionCallback::SetResult(result);
  }

  scoped_ptr<disk_cache::Backend> cache_;
  DISALLOW_COPY_AND_ASSIGN(SparseTestCompletionCallback);
};
2074
2075// Tests that we don't crash when the backend is deleted while we are working
2076// deleting the sub-entries of a sparse entry.
TEST_F(DiskCacheEntryTest, DoomSparseEntry2) {
  UseCurrentThread();
  InitCache();
  std::string key("the key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  int64 offset = 1024;
  // Write to a bunch of ranges (each 4x farther out than the last).
  for (int i = 0; i < 12; i++) {
    EXPECT_EQ(kSize,
              entry->WriteSparseData(
                  offset, buf.get(), kSize, net::CompletionCallback()));
    offset *= 4;
  }
  // Top-level entry plus the sparse child entries created by the writes.
  EXPECT_EQ(9, cache_->GetEntryCount());

  entry->Close();
  // Keep a raw pointer to issue the doom; ownership of the backend moves into
  // the callback, which deletes it while DoomEntry is still completing.
  disk_cache::Backend* cache = cache_.get();
  SparseTestCompletionCallback cb(cache_.Pass());
  int rv = cache->DoomEntry(key, cb.callback());
  EXPECT_EQ(net::ERR_IO_PENDING, rv);
  EXPECT_EQ(net::OK, cb.WaitForResult());
}
2105
// Exercises sparse I/O that is not aligned to the internal block size:
// overlapping misaligned writes, reads that straddle filled and empty areas,
// and GetAvailableRange() queries whose expected results differ between the
// memory-only/simple backends (byte granularity) and the blockfile backend
// (1 KB block granularity).
void DiskCacheEntryTest::PartialSparseEntry() {
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // We should be able to deal with IO that is not aligned to the block size
  // of a sparse entry, at least to write a big range without leaving holes.
  const int kSize = 4 * 1024;
  const int kSmallSize = 128;
  scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf1->data(), kSize, false);

  // The first write is just to extend the entry. The third write occupies
  // a 1KB block partially, it may not be written internally depending on the
  // implementation.
  EXPECT_EQ(kSize, WriteSparseData(entry, 20000, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 500, buf1.get(), kSize));
  EXPECT_EQ(kSmallSize,
            WriteSparseData(entry, 1080321, buf1.get(), kSmallSize));
  entry->Close();
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  scoped_refptr<net::IOBuffer> buf2(new net::IOBuffer(kSize));
  memset(buf2->data(), 0, kSize);
  // [8000, 8000 + kSize) was never written.
  EXPECT_EQ(0, ReadSparseData(entry, 8000, buf2.get(), kSize));

  // The write at 500 covers [500, 500 + kSize); reading from kSize yields the
  // 500-byte tail of that write.
  EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
  EXPECT_EQ(0, ReadSparseData(entry, 0, buf2.get(), kSize));

  // This read should not change anything.
  EXPECT_EQ(96, ReadSparseData(entry, 24000, buf2.get(), kSize));
  EXPECT_EQ(500, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, ReadSparseData(entry, 99, buf2.get(), kSize));

  int rv;
  int64 start;
  net::TestCompletionCallback cb;
  if (memory_only_ || simple_cache_mode_) {
    // Byte-granular backends see the data starting exactly at offset 500.
    rv = entry->GetAvailableRange(0, 600, &start, cb.callback());
    EXPECT_EQ(100, cb.GetResult(rv));
    EXPECT_EQ(500, start);
  } else {
    // The blockfile backend only reports whole 1 KB blocks.
    rv = entry->GetAvailableRange(0, 2048, &start, cb.callback());
    EXPECT_EQ(1024, cb.GetResult(rv));
    EXPECT_EQ(1024, start);
  }
  rv = entry->GetAvailableRange(kSize, kSize, &start, cb.callback());
  EXPECT_EQ(500, cb.GetResult(rv));
  EXPECT_EQ(kSize, start);
  rv = entry->GetAvailableRange(20 * 1024, 10000, &start, cb.callback());
  EXPECT_EQ(3616, cb.GetResult(rv));
  EXPECT_EQ(20 * 1024, start);

  // 1. Query before a filled 1KB block.
  // 2. Query within a filled 1KB block.
  // 3. Query beyond a filled 1KB block.
  if (memory_only_ || simple_cache_mode_) {
    rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
    EXPECT_EQ(3496, cb.GetResult(rv));
    EXPECT_EQ(20000, start);
  } else {
    rv = entry->GetAvailableRange(19400, kSize, &start, cb.callback());
    EXPECT_EQ(3016, cb.GetResult(rv));
    EXPECT_EQ(20480, start);
  }
  rv = entry->GetAvailableRange(3073, kSize, &start, cb.callback());
  EXPECT_EQ(1523, cb.GetResult(rv));
  EXPECT_EQ(3073, start);
  rv = entry->GetAvailableRange(4600, kSize, &start, cb.callback());
  EXPECT_EQ(0, cb.GetResult(rv));
  EXPECT_EQ(4600, start);

  // Now make another write and verify that there is no hole in between.
  EXPECT_EQ(kSize, WriteSparseData(entry, 500 + kSize, buf1.get(), kSize));
  rv = entry->GetAvailableRange(1024, 10000, &start, cb.callback());
  EXPECT_EQ(7 * 1024 + 500, cb.GetResult(rv));
  EXPECT_EQ(1024, start);
  EXPECT_EQ(kSize, ReadSparseData(entry, kSize, buf2.get(), kSize));
  EXPECT_EQ(0, memcmp(buf2->data(), buf1->data() + kSize - 500, 500));
  EXPECT_EQ(0, memcmp(buf2->data() + 500, buf1->data(), kSize - 500));

  entry->Close();
}
2190
// Misaligned sparse I/O on the default (on-disk) backend.
TEST_F(DiskCacheEntryTest, PartialSparseEntry) {
  InitCache();
  PartialSparseEntry();
}

// Same scenario on the memory-only backend.
TEST_F(DiskCacheEntryTest, MemoryPartialSparseEntry) {
  SetMemoryOnlyMode();
  InitCache();
  PartialSparseEntry();
}
2201
2202// Tests that corrupt sparse children are removed automatically.
TEST_F(DiskCacheEntryTest, CleanupSparseEntry) {
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 4 * 1024;
  scoped_refptr<net::IOBuffer> buf1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf1->data(), kSize, false);

  // Three writes ~1 MB apart produce three separate child entries, so the
  // cache holds the parent plus three children.
  const int k1Meg = 1024 * 1024;
  EXPECT_EQ(kSize, WriteSparseData(entry, 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
  entry->Close();
  EXPECT_EQ(4, cache_->GetEntryCount());

  // Collect the keys of (up to two) child entries by iterating the cache.
  scoped_ptr<TestIterator> iter = CreateIterator();
  int count = 0;
  std::string child_key[2];
  while (iter->OpenNextEntry(&entry) == net::OK) {
    ASSERT_TRUE(entry != NULL);
    // Writing to an entry will alter the LRU list and invalidate the iterator.
    if (entry->GetKey() != key && count < 2)
      child_key[count++] = entry->GetKey();
    entry->Close();
  }
  for (int i = 0; i < 2; i++) {
    ASSERT_EQ(net::OK, OpenEntry(child_key[i], &entry));
    // Overwrite the header's magic and signature.
    EXPECT_EQ(12, WriteData(entry, 2, 0, buf1.get(), 12, false));
    entry->Close();
  }

  EXPECT_EQ(4, cache_->GetEntryCount());
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));

  // Two children should be gone. One while reading and one while writing.
  EXPECT_EQ(0, ReadSparseData(entry, 2 * k1Meg + 8192, buf1.get(), kSize));
  EXPECT_EQ(kSize, WriteSparseData(entry, k1Meg + 16384, buf1.get(), kSize));
  EXPECT_EQ(0, ReadSparseData(entry, k1Meg + 8192, buf1.get(), kSize));

  // We never touched this one.
  EXPECT_EQ(kSize, ReadSparseData(entry, 8192, buf1.get(), kSize));
  entry->Close();

  // We re-created one of the corrupt children.
  EXPECT_EQ(3, cache_->GetEntryCount());
}
2252
// Starts a large async sparse write, cancels it mid-flight with
// CancelSparseIO(), and verifies that all ReadyForSparseIO() registrations
// are notified. Parts of this test are inherently timing-dependent, hence
// the have_result() guards.
TEST_F(DiskCacheEntryTest, CancelSparseIO) {
  UseCurrentThread();
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  const int kSize = 40 * 1024;
  scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buf->data(), kSize, false);

  // This will open and write two "real" entries.
  net::TestCompletionCallback cb1, cb2, cb3, cb4, cb5;
  int rv = entry->WriteSparseData(
      1024 * 1024 - 4096, buf.get(), kSize, cb1.callback());
  EXPECT_EQ(net::ERR_IO_PENDING, rv);

  int64 offset = 0;
  rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
  rv = cb5.GetResult(rv);
  if (!cb1.have_result()) {
    // We may or may not have finished writing to the entry. If we have not,
    // we cannot start another operation at this time.
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED, rv);
  }

  // We cancel the pending operation, and register multiple notifications.
  entry->CancelSparseIO();
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb2.callback()));
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb3.callback()));
  entry->CancelSparseIO();  // Should be a no op at this point.
  EXPECT_EQ(net::ERR_IO_PENDING, entry->ReadyForSparseIO(cb4.callback()));

  if (!cb1.have_result()) {
    // While cancellation is pending, new sparse operations are rejected.
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
              entry->ReadSparseData(
                  offset, buf.get(), kSize, net::CompletionCallback()));
    EXPECT_EQ(net::ERR_CACHE_OPERATION_NOT_SUPPORTED,
              entry->WriteSparseData(
                  offset, buf.get(), kSize, net::CompletionCallback()));
  }

  // Now see if we receive all notifications. Note that we should not be able
  // to write everything (unless the timing of the system is really weird).
  rv = cb1.WaitForResult();
  EXPECT_TRUE(rv == 4096 || rv == kSize);
  EXPECT_EQ(net::OK, cb2.WaitForResult());
  EXPECT_EQ(net::OK, cb3.WaitForResult());
  EXPECT_EQ(net::OK, cb4.WaitForResult());

  // The canceled portion of the write left no available data at |offset|.
  rv = entry->GetAvailableRange(offset, kSize, &offset, cb5.callback());
  EXPECT_EQ(0, cb5.GetResult(rv));
  entry->Close();
}
2307
2308// Tests that we perform sanity checks on an entry's key. Note that there are
2309// other tests that exercise sanity checks by using saved corrupt files.
TEST_F(DiskCacheEntryTest, KeySanityCheck) {
  UseCurrentThread();
  InitCache();
  std::string key("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  // Reach into the blockfile entry's on-disk record to corrupt it directly.
  disk_cache::EntryImpl* entry_impl =
      static_cast<disk_cache::EntryImpl*>(entry);
  disk_cache::EntryStore* store = entry_impl->entry()->Data();

  // We have reserved space for a short key (one block), let's say that the key
  // takes more than one block, and remove the NULLs after the actual key.
  store->key_len = 800;
  memset(store->key + key.size(), 'k', sizeof(store->key) - key.size());
  entry_impl->entry()->set_modified();
  entry->Close();

  // We have a corrupt entry. Now reload it. We should NOT read beyond the
  // allocated buffer here.
  ASSERT_NE(net::OK, OpenEntry(key, &entry));
  // The backend will (correctly) report corruption; don't fail teardown on it.
  DisableIntegrityCheck();
}
2333
2334// The Simple Cache backend requires a few guarantees from the filesystem like
2335// atomic renaming of recently open files. Those guarantees are not provided in
2336// general on Windows.
2337#if defined(OS_POSIX)
2338
// Simple Cache flavors of the shared entry tests. Each wrapper enables the
// simple backend and re-runs a test body defined earlier in this file; the
// per-stream loops wipe the cache with DoomAllEntries() between streams.
TEST_F(DiskCacheEntryTest, SimpleCacheInternalAsyncIO) {
  SetSimpleCacheMode();
  InitCache();
  InternalAsyncIO();
}

TEST_F(DiskCacheEntryTest, SimpleCacheExternalAsyncIO) {
  SetSimpleCacheMode();
  InitCache();
  ExternalAsyncIO();
}

TEST_F(DiskCacheEntryTest, SimpleCacheReleaseBuffer) {
  SetSimpleCacheMode();
  InitCache();
  // Run once per stream, starting from a clean cache each time.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ReleaseBuffer(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheStreamAccess) {
  SetSimpleCacheMode();
  InitCache();
  StreamAccess();
}

TEST_F(DiskCacheEntryTest, SimpleCacheGetKey) {
  SetSimpleCacheMode();
  InitCache();
  GetKey();
}

TEST_F(DiskCacheEntryTest, SimpleCacheGetTimes) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    GetTimes(i);
  }
}
2380
// Simple Cache flavors of the data-manipulation tests, run once per stream
// with the cache wiped between iterations.
TEST_F(DiskCacheEntryTest, SimpleCacheGrowData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    GrowData(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheTruncateData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    TruncateData(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheZeroLengthIO) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ZeroLengthIO(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheSizeAtCreate) {
  SetSimpleCacheMode();
  InitCache();
  SizeAtCreate();
}
2413
// Entry reuse with a small max cache size: 20 KB entries against a 200 KB
// cache (external-file sized) and 10 KB entries against a 100 KB cache
// (internal sized), once per stream.
TEST_F(DiskCacheEntryTest, SimpleCacheReuseExternalEntry) {
  SetSimpleCacheMode();
  SetMaxSize(200 * 1024);
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ReuseEntry(20 * 1024, i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheReuseInternalEntry) {
  SetSimpleCacheMode();
  SetMaxSize(100 * 1024);
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ReuseEntry(10 * 1024, i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheSizeChanges) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    SizeChanges(i);
  }
}

TEST_F(DiskCacheEntryTest, SimpleCacheInvalidData) {
  SetSimpleCacheMode();
  InitCache();
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    InvalidData(i);
  }
}
2451
TEST_F(DiskCacheEntryTest, SimpleCacheReadWriteDestroyBuffer) {
  // Proving that the test works well with optimistic operations enabled is
  // subtle, instead run only in APP_CACHE mode to disable optimistic
  // operations. Stream 0 always uses optimistic operations, so the test is not
  // run on stream 0.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  for (int i = 1; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    ReadWriteDestroyBuffer(i);
  }
}

// Dooming a regular (non-sparse) entry on the simple backend.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntry) {
  SetSimpleCacheMode();
  InitCache();
  DoomNormalEntry();
}

// Dooming an entry while a neighboring entry is held open.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomEntryNextToOpenEntry) {
  SetSimpleCacheMode();
  InitCache();
  DoomEntryNextToOpenEntry();
}

TEST_F(DiskCacheEntryTest, SimpleCacheDoomedEntry) {
  SetSimpleCacheMode();
  InitCache();
  // Stream 2 is excluded because the implementation does not support writing to
  // it on a doomed entry, if it was previously lazily omitted.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount - 1; ++i) {
    EXPECT_EQ(net::OK, DoomAllEntries());
    DoomedEntry(i);
  }
}
2488
2489// Creates an entry with corrupted last byte in stream 0.
2490// Requires SimpleCacheMode.
bool DiskCacheEntryTest::SimpleCacheMakeBadChecksumEntry(const std::string& key,
                                                         int* data_size) {
  disk_cache::Entry* entry = NULL;

  if (CreateEntry(key, &entry) != net::OK || !entry) {
    LOG(ERROR) << "Could not create entry";
    return false;
  }

  // Note: arraysize() includes the trailing NUL, so kDataSize bytes (data
  // plus NUL) are written to stream 1.
  const char data[] = "this is very good data";
  const int kDataSize = arraysize(data);
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kDataSize));
  base::strlcpy(buffer->data(), data, kDataSize);

  EXPECT_EQ(kDataSize, WriteData(entry, 1, 0, buffer.get(), kDataSize, false));
  entry->Close();
  entry = NULL;

  // Corrupt the last byte of the data.
  base::FilePath entry_file0_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  base::File entry_file0(entry_file0_path,
                         base::File::FLAG_WRITE | base::File::FLAG_OPEN);
  if (!entry_file0.IsValid())
    return false;

  // -2 rather than -1 so the overwritten byte is the last data character,
  // not the trailing NUL included in kDataSize.
  int64 file_offset =
      sizeof(disk_cache::SimpleFileHeader) + key.size() + kDataSize - 2;
  EXPECT_EQ(1, entry_file0.Write(file_offset, "X", 1));
  *data_size = kDataSize;
  return true;
}
2523
2524// Tests that the simple cache can detect entries that have bad data.
TEST_F(DiskCacheEntryTest, SimpleCacheBadChecksum) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  int size_unused;
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size_unused));

  disk_cache::Entry* entry = NULL;

  // Open the entry. The corruption is only noticed when the data is read.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ScopedEntryPtr entry_closer(entry);

  const int kReadBufferSize = 200;
  EXPECT_GE(kReadBufferSize, entry->GetDataSize(1));
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
  // The full-stream read must fail the CRC check.
  EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
            ReadData(entry, 1, 0, read_buffer.get(), kReadBufferSize));
}
2545
2546// Tests that an entry that has had an IO error occur can still be Doomed().
TEST_F(DiskCacheEntryTest, SimpleCacheErrorThenDoom) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  int size_unused;
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size_unused));

  disk_cache::Entry* entry = NULL;

  // Open the entry, forcing an IO error.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ScopedEntryPtr entry_closer(entry);

  const int kReadBufferSize = 200;
  EXPECT_GE(kReadBufferSize, entry->GetDataSize(1));
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
  // Trigger the checksum failure so the entry is in an error state...
  EXPECT_EQ(net::ERR_CACHE_CHECKSUM_MISMATCH,
            ReadData(entry, 1, 0, read_buffer.get(), kReadBufferSize));

  // ...and verify dooming it afterwards is still safe.
  entry->Doom();  // Should not crash.
}
2569
2570bool TruncatePath(const base::FilePath& file_path, int64 length)  {
2571  base::File file(file_path, base::File::FLAG_WRITE | base::File::FLAG_OPEN);
2572  if (!file.IsValid())
2573    return false;
2574  return file.SetLength(length);
2575}
2576
// Truncates an entry file so that it is too short to contain the EOF record,
// and verifies that opening the entry fails instead of reading garbage.
TEST_F(DiskCacheEntryTest, SimpleCacheNoEOF) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  disk_cache::Entry* null = NULL;
  EXPECT_NE(null, entry);
  entry->Close();
  entry = NULL;

  // Force the entry to flush to disk, so subsequent platform file operations
  // succeed.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  entry->Close();
  entry = NULL;

  // Truncate the file such that the length isn't sufficient to have an EOF
  // record.
  int kTruncationBytes = -implicit_cast<int>(sizeof(disk_cache::SimpleFileEOF));
  const base::FilePath entry_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  const int64 invalid_size =
      disk_cache::simple_util::GetFileSizeFromKeyAndDataSize(key,
                                                             kTruncationBytes);
  EXPECT_TRUE(TruncatePath(entry_path, invalid_size));
  EXPECT_EQ(net::ERR_FAILED, OpenEntry(key, &entry));
  // The backend correctly reports the corruption; don't fail teardown on it.
  DisableIntegrityCheck();
}
2608
TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsBasic) {
  // Test sequence:
  // Create, Write, Read, Close.
  SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* const null_entry = NULL;

  disk_cache::Entry* entry = NULL;
  EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
  ASSERT_NE(null_entry, entry);
  // Closes |entry| automatically at the end of the test.
  ScopedEntryPtr entry_closer(entry);

  const int kBufferSize = 10;
  scoped_refptr<net::IOBufferWithSize> write_buffer(
      new net::IOBufferWithSize(kBufferSize));
  CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
  EXPECT_EQ(
      write_buffer->size(),
      WriteData(entry, 1, 0, write_buffer.get(), write_buffer->size(), false));

  // Read the data back through a separate buffer.
  scoped_refptr<net::IOBufferWithSize> read_buffer(
      new net::IOBufferWithSize(kBufferSize));
  EXPECT_EQ(read_buffer->size(),
            ReadData(entry, 1, 0, read_buffer.get(), read_buffer->size()));
}
2635
// Tests that in non-optimistic (APP_CACHE) mode a write returns
// ERR_IO_PENDING immediately instead of blocking the calling thread.
TEST_F(DiskCacheEntryTest, SimpleCacheNonOptimisticOperationsDontBlock) {
  // Test sequence:
  // Create, Write, Close.
  SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* const null_entry = NULL;

  MessageLoopHelper helper;
  CallbackTest create_callback(&helper, false);

  int expected_callback_runs = 0;
  const int kBufferSize = 10;
  scoped_refptr<net::IOBufferWithSize> write_buffer(
      new net::IOBufferWithSize(kBufferSize));

  disk_cache::Entry* entry = NULL;
  EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
  ASSERT_NE(null_entry, entry);
  ScopedEntryPtr entry_closer(entry);

  CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
  CallbackTest write_callback(&helper, false);
  // The write cannot complete synchronously in this mode, but it must return
  // right away with ERR_IO_PENDING and complete via the callback.
  int ret = entry->WriteData(
      1,
      0,
      write_buffer.get(),
      write_buffer->size(),
      base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)),
      false);
  ASSERT_EQ(net::ERR_IO_PENDING, ret);
  helper.WaitUntilCacheIoFinished(++expected_callback_runs);
}
2669
// Tests that a write and a read can be queued back-to-back without waiting in
// between, and that both complete with the expected data.
TEST_F(DiskCacheEntryTest,
       SimpleCacheNonOptimisticOperationsBasicsWithoutWaiting) {
  // Test sequence:
  // Create, Write, Read, Close.
  SetCacheType(net::APP_CACHE);  // APP_CACHE doesn't use optimistic operations.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* const null_entry = NULL;
  MessageLoopHelper helper;

  disk_cache::Entry* entry = NULL;
  // Note that |entry| is only set once CreateEntry() completed which is why we
  // have to wait (i.e. use the helper CreateEntry() function).
  EXPECT_EQ(net::OK, CreateEntry("my key", &entry));
  ASSERT_NE(null_entry, entry);
  ScopedEntryPtr entry_closer(entry);

  const int kBufferSize = 10;
  scoped_refptr<net::IOBufferWithSize> write_buffer(
      new net::IOBufferWithSize(kBufferSize));
  CacheTestFillBuffer(write_buffer->data(), write_buffer->size(), false);
  CallbackTest write_callback(&helper, false);
  int ret = entry->WriteData(
      1,
      0,
      write_buffer.get(),
      write_buffer->size(),
      base::Bind(&CallbackTest::Run, base::Unretained(&write_callback)),
      false);
  EXPECT_EQ(net::ERR_IO_PENDING, ret);
  int expected_callback_runs = 1;

  // Issue the read immediately, without waiting for the write to complete.
  scoped_refptr<net::IOBufferWithSize> read_buffer(
      new net::IOBufferWithSize(kBufferSize));
  CallbackTest read_callback(&helper, false);
  ret = entry->ReadData(
      1,
      0,
      read_buffer.get(),
      read_buffer->size(),
      base::Bind(&CallbackTest::Run, base::Unretained(&read_callback)));
  EXPECT_EQ(net::ERR_IO_PENDING, ret);
  ++expected_callback_runs;

  // Wait for both operations, then verify the read returned what was written.
  helper.WaitUntilCacheIoFinished(expected_callback_runs);
  ASSERT_EQ(read_buffer->size(), write_buffer->size());
  EXPECT_EQ(
      0,
      memcmp(read_buffer->data(), write_buffer->data(), read_buffer->size()));
}
2720
// Exercises the optimistic-operation path of the simple cache: Create returns
// OK synchronously, and writes may complete synchronously when the entry's
// pending-operation queue is empty.
TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic) {
  // Test sequence:
  // Create, Write, Read, Write, Read, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  MessageLoopHelper helper;
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);
  CallbackTest callback3(&helper, false);
  CallbackTest callback4(&helper, false);
  CallbackTest callback5(&helper, false);

  // |expected| counts the asynchronous completions we must wait for.
  int expected = 0;
  const int kSize1 = 10;
  const int kSize2 = 20;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize2));
  scoped_refptr<net::IOBuffer> buffer2_read(new net::IOBuffer(kSize2));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  CacheTestFillBuffer(buffer2->data(), kSize2, false);

  disk_cache::Entry* entry = NULL;
  // Create is optimistic, must return OK.
  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry,
                                base::Bind(&CallbackTest::Run,
                                           base::Unretained(&callback1))));
  EXPECT_NE(null, entry);
  ScopedEntryPtr entry_closer(entry);

  // This write may or may not be optimistic (it depends if the previous
  // optimistic create already finished by the time we call the write here).
  int ret = entry->WriteData(
      1,
      0,
      buffer1.get(),
      kSize1,
      base::Bind(&CallbackTest::Run, base::Unretained(&callback2)),
      false);
  EXPECT_TRUE(kSize1 == ret || net::ERR_IO_PENDING == ret);
  if (net::ERR_IO_PENDING == ret)
    expected++;

  // This Read must not be optimistic, since we don't support that yet.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(
                1,
                0,
                buffer1_read.get(),
                kSize1,
                base::Bind(&CallbackTest::Run, base::Unretained(&callback3))));
  expected++;
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));

  // At this point after waiting, the pending operations queue on the entry
  // should be empty, so the next Write operation must run as optimistic.
  EXPECT_EQ(kSize2,
            entry->WriteData(
                1,
                0,
                buffer2.get(),
                kSize2,
                base::Bind(&CallbackTest::Run, base::Unretained(&callback4)),
                false));

  // Let's do another read so we block until both the write and the read
  // operation finishes and we can then test for HasOneRef() below.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(
                1,
                0,
                buffer2_read.get(),
                kSize2,
                base::Bind(&CallbackTest::Run, base::Unretained(&callback5))));
  expected++;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(0, memcmp(buffer2->data(), buffer2_read->data(), kSize2));

  // Check that we are not leaking.
  EXPECT_NE(entry, null);
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
}
2810
// Tests that an Open racing with an optimistic Create yields the same entry
// object, and that the ref-counting works out when both are closed.
TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic2) {
  // Test sequence:
  // Create, Open, Close, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  MessageLoopHelper helper;
  CallbackTest callback1(&helper, false);
  CallbackTest callback2(&helper, false);

  disk_cache::Entry* entry = NULL;
  // Optimistic create: returns OK synchronously.
  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry,
                                base::Bind(&CallbackTest::Run,
                                           base::Unretained(&callback1))));
  EXPECT_NE(null, entry);
  ScopedEntryPtr entry_closer(entry);

  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::ERR_IO_PENDING,
            cache_->OpenEntry(key, &entry2,
                              base::Bind(&CallbackTest::Run,
                                         base::Unretained(&callback2))));
  ASSERT_TRUE(helper.WaitUntilCacheIoFinished(1));

  // The open must hand back the very same in-memory entry.
  EXPECT_NE(null, entry2);
  EXPECT_EQ(entry, entry2);

  // We have to call close twice, since we called create and open above.
  entry->Close();

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
}
2848
// Tests that an entry created optimistically and closed can be re-opened, and
// that the re-open produces the same entry object.
TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic3) {
  // Test sequence:
  // Create, Close, Open, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  disk_cache::Entry* entry = NULL;
  // Optimistic create with a null callback: returns OK synchronously.
  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
  EXPECT_NE(null, entry);
  entry->Close();

  net::TestCompletionCallback cb;
  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::ERR_IO_PENDING,
            cache_->OpenEntry(key, &entry2, cb.callback()));
  ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
  ScopedEntryPtr entry_closer(entry2);

  EXPECT_NE(null, entry2);
  EXPECT_EQ(entry, entry2);

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
}
2877
// Tests writes against closed and re-opened entries: a write queued after
// Close must fail, and an entry opened twice stays usable after one Close.
TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic4) {
  // Test sequence:
  // Create, Close, Write, Open, Open, Close, Write, Read, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
  EXPECT_NE(null, entry);
  entry->Close();

  // Let's do a Write so we block until both the Close and the Write
  // operation finishes. Write must fail since we are writing in a closed entry.
  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
  EXPECT_EQ(net::ERR_FAILED, cb.GetResult(net::ERR_IO_PENDING));

  // Finish running the pending tasks so that we fully complete the close
  // operation and destroy the entry object.
  base::MessageLoop::current()->RunUntilIdle();

  // At this point the |entry| must have been destroyed, and called
  // RemoveSelfFromBackend().
  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::ERR_IO_PENDING,
            cache_->OpenEntry(key, &entry2, cb.callback()));
  ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
  EXPECT_NE(null, entry2);

  // A second Open must return the same entry object.
  disk_cache::Entry* entry3 = NULL;
  ASSERT_EQ(net::ERR_IO_PENDING,
            cache_->OpenEntry(key, &entry3, cb.callback()));
  ASSERT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));
  EXPECT_NE(null, entry3);
  EXPECT_EQ(entry2, entry3);
  entry3->Close();

  // The previous Close doesn't actually close the entry since we opened it
  // twice, so the next Write operation must succeed and it must be able to
  // perform it optimistically, since there is no operation running on this
  // entry.
  EXPECT_EQ(kSize1,
            entry2->WriteData(
                1, 0, buffer1.get(), kSize1, net::CompletionCallback(), false));

  // Let's do another read so we block until both the write and the read
  // operation finishes and we can then test for HasOneRef() below.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry2->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry2)->HasOneRef());
  entry2->Close();
}
2943
// Tests that a doomed (but still open) entry continues to accept writes and
// serve reads until it is closed.
TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic5) {
  // Test sequence:
  // Create, Doom, Write, Read, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
  EXPECT_NE(null, entry);
  ScopedEntryPtr entry_closer(entry);
  entry->Doom();

  // I/O on the doomed-but-open entry must still succeed.
  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, buffer1.get(), kSize1, cb.callback()));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
}
2977
// Tests that redundant Doom calls are harmless: data written before the dooms
// is still readable, and a third Doom after the read does not crash.
TEST_F(DiskCacheEntryTest, SimpleCacheOptimistic6) {
  // Test sequence:
  // Create, Write, Doom, Doom, Read, Doom, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  scoped_refptr<net::IOBuffer> buffer1_read(new net::IOBuffer(kSize1));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
  EXPECT_NE(null, entry);
  ScopedEntryPtr entry_closer(entry);

  EXPECT_EQ(
      net::ERR_IO_PENDING,
      entry->WriteData(1, 0, buffer1.get(), kSize1, cb.callback(), false));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));

  // Doom twice in a row; the second one must be a no-op.
  entry->Doom();
  entry->Doom();

  // This Read must not be optimistic, since we don't support that yet.
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1, 0, buffer1_read.get(), kSize1, cb.callback()));
  EXPECT_EQ(kSize1, cb.GetResult(net::ERR_IO_PENDING));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read->data(), kSize1));

  entry->Doom();
}
3014
// Confirm that IO buffers are not referenced by the Simple Cache after a write
// completes.
TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticWriteReleases) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  disk_cache::Entry* entry = NULL;

  // First, an optimistic create.
  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
  ASSERT_TRUE(entry);
  ScopedEntryPtr entry_closer(entry);

  const int kWriteSize = 512;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kWriteSize));
  // |buffer1| has a single reference (ours) before any cache I/O.
  EXPECT_TRUE(buffer1->HasOneRef());
  CacheTestFillBuffer(buffer1->data(), kWriteSize, false);

  // An optimistic write happens only when there is an empty queue of pending
  // operations. To ensure the queue is empty, we issue a write and wait until
  // it completes.
  EXPECT_EQ(kWriteSize,
            WriteData(entry, 1, 0, buffer1.get(), kWriteSize, false));
  EXPECT_TRUE(buffer1->HasOneRef());

  // Finally, we should perform an optimistic write and confirm that all
  // references to the IO buffer have been released.
  EXPECT_EQ(
      kWriteSize,
      entry->WriteData(
          1, 0, buffer1.get(), kWriteSize, net::CompletionCallback(), false));
  EXPECT_TRUE(buffer1->HasOneRef());
}
3050
// Tests that dooming an entry right after creation removes its backing files
// once the entry is closed, even if a write happens in between.
TEST_F(DiskCacheEntryTest, SimpleCacheCreateDoomRace) {
  // Test sequence:
  // Create, Doom, Write, Close, Check files are not on disk anymore.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  net::TestCompletionCallback cb;
  const int kSize1 = 10;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize1));
  CacheTestFillBuffer(buffer1->data(), kSize1, false);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
  EXPECT_NE(null, entry);

  EXPECT_EQ(net::ERR_IO_PENDING, cache_->DoomEntry(key, cb.callback()));
  EXPECT_EQ(net::OK, cb.GetResult(net::ERR_IO_PENDING));

  // Writing to the doomed (still open) entry must still succeed.
  EXPECT_EQ(
      kSize1,
      entry->WriteData(0, 0, buffer1.get(), kSize1, cb.callback(), false));

  entry->Close();

  // Finish running the pending tasks so that we fully complete the close
  // operation and destroy the entry object.
  base::MessageLoop::current()->RunUntilIdle();

  // All of the entry's backing files must be gone from disk.
  for (int i = 0; i < disk_cache::kSimpleEntryFileCount; ++i) {
    base::FilePath entry_file_path = cache_path_.AppendASCII(
        disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, i));
    base::File::Info info;
    EXPECT_FALSE(base::GetFileInfo(entry_file_path, &info));
  }
}
3089
// Tests that creating an entry while a Doom for the same key is still in
// flight succeeds, and the pending Doom still completes with OK.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateRace) {
  // This test runs as APP_CACHE to make operations more synchronous. Test
  // sequence:
  // Create, Doom, Create.
  SetCacheType(net::APP_CACHE);
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  net::TestCompletionCallback create_callback;

  disk_cache::Entry* entry1 = NULL;
  ASSERT_EQ(net::OK,
            create_callback.GetResult(
                cache_->CreateEntry(key, &entry1, create_callback.callback())));
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(null, entry1);

  net::TestCompletionCallback doom_callback;
  EXPECT_EQ(net::ERR_IO_PENDING,
            cache_->DoomEntry(key, doom_callback.callback()));

  // Re-create under the same key while the doom is still pending.
  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::OK,
            create_callback.GetResult(
                cache_->CreateEntry(key, &entry2, create_callback.callback())));
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_EQ(net::OK, doom_callback.GetResult(net::ERR_IO_PENDING));
}
3120
// Tests that redundantly dooming an already-doomed entry does not delete a
// newer entry created under the same key.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomDoom) {
  // Test sequence:
  // Create, Doom, Create, Doom (1st entry), Open.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;

  const char key[] = "the first key";

  disk_cache::Entry* entry1 = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(null, entry1);

  EXPECT_EQ(net::OK, DoomEntry(key));

  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_NE(null, entry2);

  // Redundantly dooming entry1 should not delete entry2.
  disk_cache::SimpleEntryImpl* simple_entry1 =
      static_cast<disk_cache::SimpleEntryImpl*>(entry1);
  net::TestCompletionCallback cb;
  EXPECT_EQ(net::OK,
            cb.GetResult(simple_entry1->DoomEntry(cb.callback())));

  // entry2 must still be openable after the redundant doom.
  disk_cache::Entry* entry3 = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry3));
  ScopedEntryPtr entry3_closer(entry3);
  EXPECT_NE(null, entry3);
}
3154
3155TEST_F(DiskCacheEntryTest, SimpleCacheDoomCreateDoom) {
3156  // Test sequence:
3157  // Create, Doom, Create, Doom.
3158  SetSimpleCacheMode();
3159  InitCache();
3160
3161  disk_cache::Entry* null = NULL;
3162
3163  const char key[] = "the first key";
3164
3165  disk_cache::Entry* entry1 = NULL;
3166  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
3167  ScopedEntryPtr entry1_closer(entry1);
3168  EXPECT_NE(null, entry1);
3169
3170  entry1->Doom();
3171
3172  disk_cache::Entry* entry2 = NULL;
3173  ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
3174  ScopedEntryPtr entry2_closer(entry2);
3175  EXPECT_NE(null, entry2);
3176
3177  entry2->Doom();
3178
3179  // This test passes if it doesn't crash.
3180}
3181
3182TEST_F(DiskCacheEntryTest, SimpleCacheDoomCloseCreateCloseOpen) {
3183  // Test sequence: Create, Doom, Close, Create, Close, Open.
3184  SetSimpleCacheMode();
3185  InitCache();
3186
3187  disk_cache::Entry* null = NULL;
3188
3189  const char key[] = "this is a key";
3190
3191  disk_cache::Entry* entry1 = NULL;
3192  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
3193  ScopedEntryPtr entry1_closer(entry1);
3194  EXPECT_NE(null, entry1);
3195
3196  entry1->Doom();
3197  entry1_closer.reset();
3198  entry1 = NULL;
3199
3200  disk_cache::Entry* entry2 = NULL;
3201  ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
3202  ScopedEntryPtr entry2_closer(entry2);
3203  EXPECT_NE(null, entry2);
3204
3205  entry2_closer.reset();
3206  entry2 = NULL;
3207
3208  disk_cache::Entry* entry3 = NULL;
3209  ASSERT_EQ(net::OK, OpenEntry(key, &entry3));
3210  ScopedEntryPtr entry3_closer(entry3);
3211  EXPECT_NE(null, entry3);
3212}
3213
// Checks that an optimistic Create would fail later on a racing Open.
TEST_F(DiskCacheEntryTest, SimpleCacheOptimisticCreateFailsOnOpen) {
  SetSimpleCacheMode();
  InitCache();

  // Create a corrupt file in place of a future entry. Optimistic create should
  // initially succeed, but realize later that creation failed.
  const std::string key = "the key";
  net::TestCompletionCallback cb;
  disk_cache::Entry* entry = NULL;
  disk_cache::Entry* entry2 = NULL;

  EXPECT_TRUE(disk_cache::simple_util::CreateCorruptFileForTests(
      key, cache_path_));
  // The optimistic create reports OK before the on-disk failure is noticed.
  EXPECT_EQ(net::OK, cache_->CreateEntry(key, &entry, cb.callback()));
  ASSERT_TRUE(entry);
  ScopedEntryPtr entry_closer(entry);
  // The subsequent open must observe the failed create.
  ASSERT_NE(net::OK, OpenEntry(key, &entry2));

  // Check that we are not leaking.
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());

  DisableIntegrityCheck();
}
3239
// Tests that old entries are evicted while new entries remain in the index.
// This test relies on non-mandatory properties of the simple Cache Backend:
// LRU eviction, specific values of high-watermark and low-watermark etc.
// When changing the eviction algorithm, the test will have to be re-engineered.
TEST_F(DiskCacheEntryTest, SimpleCacheEvictOldEntries) {
  const int kMaxSize = 200 * 1024;
  const int kWriteSize = kMaxSize / 10;
  const int kNumExtraEntries = 12;
  SetSimpleCacheMode();
  SetMaxSize(kMaxSize);
  InitCache();

  // Write one "old" entry first; it should be the eviction victim.
  std::string key1("the first key");
  disk_cache::Entry* entry;
  ASSERT_EQ(net::OK, CreateEntry(key1, &entry));
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kWriteSize));
  CacheTestFillBuffer(buffer->data(), kWriteSize, false);
  EXPECT_EQ(kWriteSize,
            WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
  entry->Close();
  AddDelay();

  // Fill the cache past its limit with fresher entries.
  std::string key2("the key prefix");
  for (int i = 0; i < kNumExtraEntries; i++) {
    if (i == kNumExtraEntries - 2) {
      // Create a distinct timestamp for the last two entries. These entries
      // will be checked for outliving the eviction.
      AddDelay();
    }
    ASSERT_EQ(net::OK, CreateEntry(key2 + base::StringPrintf("%d", i), &entry));
    ScopedEntryPtr entry_closer(entry);
    EXPECT_EQ(kWriteSize,
              WriteData(entry, 1, 0, buffer.get(), kWriteSize, false));
  }

  // TODO(pasko): Find a way to wait for the eviction task(s) to finish by using
  // the internal knowledge about |SimpleBackendImpl|.
  ASSERT_NE(net::OK, OpenEntry(key1, &entry))
      << "Should have evicted the old entry";
  for (int i = 0; i < 2; i++) {
    int entry_no = kNumExtraEntries - i - 1;
    // Generally there is no guarantee that at this point the background
    // eviction is finished. We are testing the positive case, i.e. when the
    // eviction never reaches this entry, should be non-flaky.
    ASSERT_EQ(net::OK, OpenEntry(key2 + base::StringPrintf("%d", entry_no),
                                 &entry))
        << "Should not have evicted fresh entry " << entry_no;
    entry->Close();
  }
}
3290
// Tests that if a read and a following in-flight truncate are both in progress
// simultaneously that they both can occur successfully. See
// http://crbug.com/239223
TEST_F(DiskCacheEntryTest, SimpleCacheInFlightTruncate)  {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";

  const int kBufferSize = 1024;
  scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
  CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);

  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry));

  EXPECT_EQ(kBufferSize,
            WriteData(entry, 1, 0, write_buffer.get(), kBufferSize, false));
  entry->Close();
  entry = NULL;

  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  ScopedEntryPtr entry_closer(entry);

  MessageLoopHelper helper;
  // |expected| counts the asynchronous completions we must wait for.
  int expected = 0;

  // Make a short read.
  const int kReadBufferSize = 512;
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kReadBufferSize));
  CallbackTest read_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1,
                            0,
                            read_buffer.get(),
                            kReadBufferSize,
                            base::Bind(&CallbackTest::Run,
                                       base::Unretained(&read_callback))));
  ++expected;

  // Truncate the entry to the length of that read.
  scoped_refptr<net::IOBuffer>
      truncate_buffer(new net::IOBuffer(kReadBufferSize));
  CacheTestFillBuffer(truncate_buffer->data(), kReadBufferSize, false);
  CallbackTest truncate_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteData(1,
                             0,
                             truncate_buffer.get(),
                             kReadBufferSize,
                             base::Bind(&CallbackTest::Run,
                                        base::Unretained(&truncate_callback)),
                             true));
  ++expected;

  // Wait for both the read and truncation to finish, and confirm that both
  // succeeded.
  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(kReadBufferSize, read_callback.last_result());
  EXPECT_EQ(kReadBufferSize, truncate_callback.last_result());
  EXPECT_EQ(0,
            memcmp(write_buffer->data(), read_buffer->data(), kReadBufferSize));
}
3354
// Tests that if a write and a read dependent on it are both in flight
// simultaneously that they both can complete successfully without erroneous
// early returns. See http://crbug.com/239223
TEST_F(DiskCacheEntryTest, SimpleCacheInFlightRead) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "the first key";
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK,
            cache_->CreateEntry(key, &entry, net::CompletionCallback()));
  ScopedEntryPtr entry_closer(entry);

  const int kBufferSize = 1024;
  scoped_refptr<net::IOBuffer> write_buffer(new net::IOBuffer(kBufferSize));
  CacheTestFillBuffer(write_buffer->data(), kBufferSize, false);

  MessageLoopHelper helper;
  // |expected| counts the asynchronous completions we must wait for.
  int expected = 0;

  CallbackTest write_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->WriteData(1,
                             0,
                             write_buffer.get(),
                             kBufferSize,
                             base::Bind(&CallbackTest::Run,
                                        base::Unretained(&write_callback)),
                             true));
  ++expected;

  // Queue the read behind the still-pending write.
  scoped_refptr<net::IOBuffer> read_buffer(new net::IOBuffer(kBufferSize));
  CallbackTest read_callback(&helper, false);
  EXPECT_EQ(net::ERR_IO_PENDING,
            entry->ReadData(1,
                            0,
                            read_buffer.get(),
                            kBufferSize,
                            base::Bind(&CallbackTest::Run,
                                       base::Unretained(&read_callback))));
  ++expected;

  EXPECT_TRUE(helper.WaitUntilCacheIoFinished(expected));
  EXPECT_EQ(kBufferSize, write_callback.last_result());
  EXPECT_EQ(kBufferSize, read_callback.last_result());
  EXPECT_EQ(0, memcmp(write_buffer->data(), read_buffer->data(), kBufferSize));
}
3402
// Tests Open and Create racing before the index is initialized: the Open of a
// nonexistent key must fail and the Create must succeed.
TEST_F(DiskCacheEntryTest, SimpleCacheOpenCreateRaceWithNoIndex) {
  SetSimpleCacheMode();
  DisableSimpleCacheWaitForIndex();
  DisableIntegrityCheck();
  InitCache();

  // Assume the index is not initialized, which is likely, since we are blocking
  // the IO thread from executing the index finalization step.
  disk_cache::Entry* entry1;
  net::TestCompletionCallback cb1;
  disk_cache::Entry* entry2;
  net::TestCompletionCallback cb2;
  int rv1 = cache_->OpenEntry("key", &entry1, cb1.callback());
  int rv2 = cache_->CreateEntry("key", &entry2, cb2.callback());

  EXPECT_EQ(net::ERR_FAILED, cb1.GetResult(rv1));
  ASSERT_EQ(net::OK, cb2.GetResult(rv2));
  entry2->Close();
}
3422
// Checks that reading two entries simultaneously does not discard a CRC check.
// TODO(pasko): make it work with Simple Cache.
TEST_F(DiskCacheEntryTest, DISABLED_SimpleCacheMultipleReadersCheckCRC) {
  SetSimpleCacheMode();
  InitCache();

  const char key[] = "key";

  int size;
  // The entry's stored checksum is deliberately wrong.
  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size));

  scoped_refptr<net::IOBuffer> read_buffer1(new net::IOBuffer(size));
  scoped_refptr<net::IOBuffer> read_buffer2(new net::IOBuffer(size));

  // Advance the first reader a little.
  disk_cache::Entry* entry = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(1, ReadData(entry, 0, 0, read_buffer1.get(), 1));

  // Make the second reader pass the point where the first one is, and close.
  disk_cache::Entry* entry2 = NULL;
  EXPECT_EQ(net::OK, OpenEntry(key, &entry2));
  EXPECT_EQ(1, ReadData(entry2, 0, 0, read_buffer2.get(), 1));
  EXPECT_EQ(1, ReadData(entry2, 0, 1, read_buffer2.get(), 1));
  entry2->Close();

  // Reading the data till the end should produce an error (the bad checksum
  // must still be detected despite the overlapping reader).
  EXPECT_GT(0, ReadData(entry, 0, 1, read_buffer1.get(), size));
  entry->Close();
  DisableIntegrityCheck();
}
3454
3455// Checking one more scenario of overlapped reading of a bad entry.
3456// Differs from the |SimpleCacheMultipleReadersCheckCRC| only by the order of
3457// last two reads.
3458TEST_F(DiskCacheEntryTest, SimpleCacheMultipleReadersCheckCRC2) {
3459  SetSimpleCacheMode();
3460  InitCache();
3461
3462  const char key[] = "key";
3463  int size;
3464  ASSERT_TRUE(SimpleCacheMakeBadChecksumEntry(key, &size));
3465
3466  scoped_refptr<net::IOBuffer> read_buffer1(new net::IOBuffer(size));
3467  scoped_refptr<net::IOBuffer> read_buffer2(new net::IOBuffer(size));
3468
3469  // Advance the first reader a little.
3470  disk_cache::Entry* entry = NULL;
3471  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3472  ScopedEntryPtr entry_closer(entry);
3473  EXPECT_EQ(1, ReadData(entry, 1, 0, read_buffer1.get(), 1));
3474
3475  // Advance the 2nd reader by the same amount.
3476  disk_cache::Entry* entry2 = NULL;
3477  EXPECT_EQ(net::OK, OpenEntry(key, &entry2));
3478  ScopedEntryPtr entry2_closer(entry2);
3479  EXPECT_EQ(1, ReadData(entry2, 1, 0, read_buffer2.get(), 1));
3480
3481  // Continue reading 1st.
3482  EXPECT_GT(0, ReadData(entry, 1, 1, read_buffer1.get(), size));
3483
3484  // This read should fail as well because we have previous read failures.
3485  EXPECT_GT(0, ReadData(entry2, 1, 1, read_buffer2.get(), 1));
3486  DisableIntegrityCheck();
3487}
3488
// Test if we can sequentially read each subset of the data until all the data
// is read, then the CRC is calculated correctly and the reads are successful.
TEST_F(DiskCacheEntryTest, SimpleCacheReadCombineCRC) {
  // Test sequence:
  // Create, Write, Read (first half of data), Read (second half of data),
  // Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kHalfSize = 200;
  const int kSize = 2 * kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  disk_cache::Entry* entry = NULL;

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_NE(null, entry);

  // Write the full payload to stream 1, then close so the data reaches the
  // synchronous entry.
  EXPECT_EQ(kSize, WriteData(entry, 1, 0, buffer1.get(), kSize, false));
  entry->Close();

  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry2));
  // NOTE(review): |entry| was already closed above; this comparison presumably
  // relies on the backend handing back the same in-memory object on reopen —
  // confirm this is intentional rather than a stale-pointer check.
  EXPECT_EQ(entry, entry2);

  // Read the first half of the data.
  int offset = 0;
  int buf_len = kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(buf_len));
  EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read1.get(), buf_len));
  EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), buf_len));

  // Read the second half of the data.  The two reads together cover the whole
  // stream, which is what lets the CRC be combined and verified.
  offset = buf_len;
  buf_len = kHalfSize;
  scoped_refptr<net::IOBuffer> buffer1_read2(new net::IOBuffer(buf_len));
  EXPECT_EQ(buf_len, ReadData(entry2, 1, offset, buffer1_read2.get(), buf_len));
  char* buffer1_data = buffer1->data() + offset;
  EXPECT_EQ(0, memcmp(buffer1_data, buffer1_read2->data(), buf_len));

  // Check that we are not leaking.
  EXPECT_NE(entry, null);
  EXPECT_TRUE(
      static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
  entry->Close();
  entry = NULL;
}
3538
3539// Test if we can write the data not in sequence and read correctly. In
3540// this case the CRC will not be present.
3541TEST_F(DiskCacheEntryTest, SimpleCacheNonSequentialWrite) {
3542  // Test sequence:
3543  // Create, Write (second half of data), Write (first half of data), Read,
3544  // Close.
3545  SetSimpleCacheMode();
3546  InitCache();
3547  disk_cache::Entry* null = NULL;
3548  const char key[] = "the first key";
3549
3550  const int kHalfSize = 200;
3551  const int kSize = 2 * kHalfSize;
3552  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3553  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
3554  CacheTestFillBuffer(buffer1->data(), kSize, false);
3555  char* buffer1_data = buffer1->data() + kHalfSize;
3556  memcpy(buffer2->data(), buffer1_data, kHalfSize);
3557
3558  disk_cache::Entry* entry = NULL;
3559  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3560  entry->Close();
3561  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
3562    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3563    EXPECT_NE(null, entry);
3564
3565    int offset = kHalfSize;
3566    int buf_len = kHalfSize;
3567
3568    EXPECT_EQ(buf_len,
3569              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
3570    offset = 0;
3571    buf_len = kHalfSize;
3572    EXPECT_EQ(buf_len,
3573              WriteData(entry, i, offset, buffer1.get(), buf_len, false));
3574    entry->Close();
3575
3576    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3577
3578    scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(kSize));
3579    EXPECT_EQ(kSize, ReadData(entry, i, 0, buffer1_read1.get(), kSize));
3580    EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kSize));
3581    // Check that we are not leaking.
3582    ASSERT_NE(entry, null);
3583    EXPECT_TRUE(static_cast<disk_cache::SimpleEntryImpl*>(entry)->HasOneRef());
3584    entry->Close();
3585  }
3586}
3587
// Test that changing stream1 size does not affect stream0 (stream0 and stream1
// are stored in the same file in Simple Cache).
TEST_F(DiskCacheEntryTest, SimpleCacheStream1SizeChanges) {
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* entry = NULL;
  const char key[] = "the key";
  const int kSize = 100;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer_read(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer->data(), kSize, false);

  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
  EXPECT_TRUE(entry);

  // Write something into stream0 and verify it reads back.
  EXPECT_EQ(kSize, WriteData(entry, 0, 0, buffer.get(), kSize, false));
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
  entry->Close();

  // Extend stream1 with a zero-length write at offset |stream1_size|, which
  // grows the stream without writing any data.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  int stream1_size = 100;
  EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, false));
  EXPECT_EQ(stream1_size, entry->GetDataSize(1));
  entry->Close();

  // Check that stream0 data has not been modified and that the EOF record for
  // stream 0 contains a crc.
  // The entry needs to be reopened before checking the crc: Open will perform
  // the synchronization with the previous Close. This ensures the EOF records
  // have been written to disk before we attempt to read them independently.
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  base::FilePath entry_file0_path = cache_path_.AppendASCII(
      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(key, 0));
  base::File entry_file0(entry_file0_path,
                         base::File::FLAG_READ | base::File::FLAG_OPEN);
  ASSERT_TRUE(entry_file0.IsValid());

  // Reconstruct the current stream sizes so SimpleEntryStat can compute where
  // stream 0's EOF record lives inside file 0.
  int data_size[disk_cache::kSimpleEntryStreamCount] = {kSize, stream1_size, 0};
  int sparse_data_size = 0;
  disk_cache::SimpleEntryStat entry_stat(
      base::Time::Now(), base::Time::Now(), data_size, sparse_data_size);
  int eof_offset = entry_stat.GetEOFOffsetInFile(key, 0);
  disk_cache::SimpleFileEOF eof_record;
  ASSERT_EQ(static_cast<int>(sizeof(eof_record)),
            entry_file0.Read(eof_offset, reinterpret_cast<char*>(&eof_record),
                             sizeof(eof_record)));
  EXPECT_EQ(disk_cache::kSimpleFinalMagicNumber, eof_record.final_magic_number);
  EXPECT_TRUE((eof_record.flags & disk_cache::SimpleFileEOF::FLAG_HAS_CRC32) ==
              disk_cache::SimpleFileEOF::FLAG_HAS_CRC32);

  // Stream 0 must still hold the original payload.
  buffer_read = new net::IOBuffer(kSize);
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));

  // Shrink stream1 with a truncating zero-length write.
  stream1_size = 50;
  EXPECT_EQ(0, WriteData(entry, 1, stream1_size, buffer.get(), 0, true));
  EXPECT_EQ(stream1_size, entry->GetDataSize(1));
  entry->Close();

  // Check that stream0 data has not been modified.
  buffer_read = new net::IOBuffer(kSize);
  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
  EXPECT_EQ(kSize, ReadData(entry, 0, 0, buffer_read.get(), kSize));
  EXPECT_EQ(0, memcmp(buffer->data(), buffer_read->data(), kSize));
  entry->Close();
  entry = NULL;
}
3659
3660// Test that writing within the range for which the crc has already been
3661// computed will properly invalidate the computed crc.
3662TEST_F(DiskCacheEntryTest, SimpleCacheCRCRewrite) {
3663  // Test sequence:
3664  // Create, Write (big data), Write (small data in the middle), Close.
3665  // Open, Read (all), Close.
3666  SetSimpleCacheMode();
3667  InitCache();
3668  disk_cache::Entry* null = NULL;
3669  const char key[] = "the first key";
3670
3671  const int kHalfSize = 200;
3672  const int kSize = 2 * kHalfSize;
3673  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3674  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kHalfSize));
3675  CacheTestFillBuffer(buffer1->data(), kSize, false);
3676  CacheTestFillBuffer(buffer2->data(), kHalfSize, false);
3677
3678  disk_cache::Entry* entry = NULL;
3679  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3680  EXPECT_NE(null, entry);
3681  entry->Close();
3682
3683  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
3684    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3685    int offset = 0;
3686    int buf_len = kSize;
3687
3688    EXPECT_EQ(buf_len,
3689              WriteData(entry, i, offset, buffer1.get(), buf_len, false));
3690    offset = kHalfSize;
3691    buf_len = kHalfSize;
3692    EXPECT_EQ(buf_len,
3693              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
3694    entry->Close();
3695
3696    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3697
3698    scoped_refptr<net::IOBuffer> buffer1_read1(new net::IOBuffer(kSize));
3699    EXPECT_EQ(kSize, ReadData(entry, i, 0, buffer1_read1.get(), kSize));
3700    EXPECT_EQ(0, memcmp(buffer1->data(), buffer1_read1->data(), kHalfSize));
3701    EXPECT_EQ(
3702        0,
3703        memcmp(buffer2->data(), buffer1_read1->data() + kHalfSize, kHalfSize));
3704
3705    entry->Close();
3706  }
3707}
3708
3709bool DiskCacheEntryTest::SimpleCacheThirdStreamFileExists(const char* key) {
3710  int third_stream_file_index =
3711      disk_cache::simple_util::GetFileIndexFromStreamIndex(2);
3712  base::FilePath third_stream_file_path = cache_path_.AppendASCII(
3713      disk_cache::simple_util::GetFilenameFromKeyAndFileIndex(
3714          key, third_stream_file_index));
3715  return PathExists(third_stream_file_path);
3716}
3717
3718void DiskCacheEntryTest::SyncDoomEntry(const char* key) {
3719  net::TestCompletionCallback callback;
3720  cache_->DoomEntry(key, callback.callback());
3721  callback.WaitForResult();
3722}
3723
3724// Check that a newly-created entry with no third-stream writes omits the
3725// third stream file.
3726TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream1) {
3727  SetSimpleCacheMode();
3728  InitCache();
3729
3730  const char key[] = "key";
3731
3732  disk_cache::Entry* entry;
3733
3734  // Create entry and close without writing: third stream file should be
3735  // omitted, since the stream is empty.
3736  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3737  entry->Close();
3738  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3739
3740  SyncDoomEntry(key);
3741  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3742}
3743
3744// Check that a newly-created entry with only a single zero-offset, zero-length
3745// write omits the third stream file.
3746TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream2) {
3747  SetSimpleCacheMode();
3748  InitCache();
3749
3750  const int kHalfSize = 8;
3751  const int kSize = kHalfSize * 2;
3752  const char key[] = "key";
3753  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3754  CacheTestFillBuffer(buffer->data(), kHalfSize, false);
3755
3756  disk_cache::Entry* entry;
3757
3758  // Create entry, write empty buffer to third stream, and close: third stream
3759  // should still be omitted, since the entry ignores writes that don't modify
3760  // data or change the length.
3761  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3762  EXPECT_EQ(0, WriteData(entry, 2, 0, buffer.get(), 0, true));
3763  entry->Close();
3764  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3765
3766  SyncDoomEntry(key);
3767  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3768}
3769
3770// Check that we can read back data written to the third stream.
3771TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream3) {
3772  SetSimpleCacheMode();
3773  InitCache();
3774
3775  const int kHalfSize = 8;
3776  const int kSize = kHalfSize * 2;
3777  const char key[] = "key";
3778  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3779  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
3780  CacheTestFillBuffer(buffer1->data(), kHalfSize, false);
3781
3782  disk_cache::Entry* entry;
3783
3784  // Create entry, write data to third stream, and close: third stream should
3785  // not be omitted, since it contains data.  Re-open entry and ensure there
3786  // are that many bytes in the third stream.
3787  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3788  EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1.get(), kHalfSize, true));
3789  entry->Close();
3790  EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
3791
3792  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3793  EXPECT_EQ(kHalfSize, ReadData(entry, 2, 0, buffer2.get(), kSize));
3794  EXPECT_EQ(0, memcmp(buffer1->data(), buffer2->data(), kHalfSize));
3795  entry->Close();
3796  EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
3797
3798  SyncDoomEntry(key);
3799  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3800}
3801
3802// Check that we remove the third stream file upon opening an entry and finding
3803// the third stream empty.  (This is the upgrade path for entries written
3804// before the third stream was optional.)
3805TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream4) {
3806  SetSimpleCacheMode();
3807  InitCache();
3808
3809  const int kHalfSize = 8;
3810  const int kSize = kHalfSize * 2;
3811  const char key[] = "key";
3812  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
3813  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
3814  CacheTestFillBuffer(buffer1->data(), kHalfSize, false);
3815
3816  disk_cache::Entry* entry;
3817
3818  // Create entry, write data to third stream, truncate third stream back to
3819  // empty, and close: third stream will not initially be omitted, since entry
3820  // creates the file when the first significant write comes in, and only
3821  // removes it on open if it is empty.  Reopen, ensure that the file is
3822  // deleted, and that there's no data in the third stream.
3823  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3824  EXPECT_EQ(kHalfSize, WriteData(entry, 2, 0, buffer1.get(), kHalfSize, true));
3825  EXPECT_EQ(0, WriteData(entry, 2, 0, buffer1.get(), 0, true));
3826  entry->Close();
3827  EXPECT_TRUE(SimpleCacheThirdStreamFileExists(key));
3828
3829  ASSERT_EQ(net::OK, OpenEntry(key, &entry));
3830  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3831  EXPECT_EQ(0, ReadData(entry, 2, 0, buffer2.get(), kSize));
3832  entry->Close();
3833  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3834
3835  SyncDoomEntry(key);
3836  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3837}
3838
3839// Check that we don't accidentally create the third stream file once the entry
3840// has been doomed.
3841TEST_F(DiskCacheEntryTest, SimpleCacheOmittedThirdStream5) {
3842  SetSimpleCacheMode();
3843  InitCache();
3844
3845  const int kHalfSize = 8;
3846  const int kSize = kHalfSize * 2;
3847  const char key[] = "key";
3848  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
3849  CacheTestFillBuffer(buffer->data(), kHalfSize, false);
3850
3851  disk_cache::Entry* entry;
3852
3853  // Create entry, doom entry, write data to third stream, and close: third
3854  // stream should not exist.  (Note: We don't care if the write fails, just
3855  // that it doesn't cause the file to be created on disk.)
3856  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
3857  entry->Doom();
3858  WriteData(entry, 2, 0, buffer.get(), kHalfSize, true);
3859  entry->Close();
3860  EXPECT_FALSE(SimpleCacheThirdStreamFileExists(key));
3861}
3862
// There could be a race between Doom and an optimistic write.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomOptimisticWritesRace) {
  // Test sequence:
  // Create, first Write, second Write, Close.
  // Open, Close.
  SetSimpleCacheMode();
  InitCache();
  disk_cache::Entry* null = NULL;
  const char key[] = "the first key";

  const int kSize = 200;
  scoped_refptr<net::IOBuffer> buffer1(new net::IOBuffer(kSize));
  scoped_refptr<net::IOBuffer> buffer2(new net::IOBuffer(kSize));
  CacheTestFillBuffer(buffer1->data(), kSize, false);
  CacheTestFillBuffer(buffer2->data(), kSize, false);

  // The race only happens on stream 1 and stream 2.
  // NOTE(review): the loop nevertheless starts at stream 0 — presumably to
  // cover all streams anyway; confirm whether the comment or the loop bound
  // is the intended one.
  for (int i = 0; i < disk_cache::kSimpleEntryStreamCount; ++i) {
    ASSERT_EQ(net::OK, DoomAllEntries());
    disk_cache::Entry* entry = NULL;

    // Create and close once so the entry exists on disk before the doomed
    // re-create below.
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    EXPECT_NE(null, entry);
    entry->Close();
    entry = NULL;

    ASSERT_EQ(net::OK, DoomAllEntries());
    ASSERT_EQ(net::OK, CreateEntry(key, &entry));
    EXPECT_NE(null, entry);

    int offset = 0;
    int buf_len = kSize;
    // This write should not be optimistic (since create is).
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer1.get(), buf_len, false));

    offset = kSize;
    // This write should be optimistic.
    EXPECT_EQ(buf_len,
              WriteData(entry, i, offset, buffer2.get(), buf_len, false));
    entry->Close();

    // The entry must still be openable after the doom/write interleaving.
    ASSERT_EQ(net::OK, OpenEntry(key, &entry));
    EXPECT_NE(null, entry);

    entry->Close();
    entry = NULL;
  }
}
3912
// Tests for a regression in crbug.com/317138 , in which deleting an already
// doomed entry was removing the active entry from the index.
TEST_F(DiskCacheEntryTest, SimpleCachePreserveActiveEntries) {
  SetSimpleCacheMode();
  InitCache();

  disk_cache::Entry* null = NULL;

  const char key[] = "this is a key";

  // Create an entry and doom it, so a second entry for the same key becomes
  // the active one while this doomed one is still alive.
  disk_cache::Entry* entry1 = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry1));
  ScopedEntryPtr entry1_closer(entry1);
  EXPECT_NE(null, entry1);
  entry1->Doom();

  disk_cache::Entry* entry2 = NULL;
  ASSERT_EQ(net::OK, CreateEntry(key, &entry2));
  ScopedEntryPtr entry2_closer(entry2);
  EXPECT_NE(null, entry2);
  entry2_closer.reset();

  // Closing then reopening entry2 insures that entry2 is serialized, and so
  // it can be opened from files without error.
  entry2 = NULL;
  ASSERT_EQ(net::OK, OpenEntry(key, &entry2));
  EXPECT_NE(null, entry2);
  entry2_closer.reset(entry2);

  // Keep a reference to entry1 beyond its Close so we can observe when the
  // close operation has fully completed (see the spin-wait below).
  scoped_refptr<disk_cache::SimpleEntryImpl>
      entry1_refptr = static_cast<disk_cache::SimpleEntryImpl*>(entry1);

  // If crbug.com/317138 has regressed, this will remove |entry2| from
  // the backend's |active_entries_| while |entry2| is still alive and its
  // files are still on disk.
  entry1_closer.reset();
  entry1 = NULL;

  // Close does not have a callback. However, we need to be sure the close is
  // finished before we continue the test. We can take advantage of how the ref
  // counting of a SimpleEntryImpl works to fake out a callback: When the
  // last Close() call is made to an entry, an IO operation is sent to the
  // synchronous entry to close the platform files. This IO operation holds a
  // ref pointer to the entry, which expires when the operation is done. So,
  // we take a refpointer, and watch the SimpleEntry object until it has only
  // one ref; this indicates the IO operation is complete.
  while (!entry1_refptr->HasOneRef()) {
    base::PlatformThread::YieldCurrentThread();
    base::MessageLoop::current()->RunUntilIdle();
  }
  entry1_refptr = NULL;

  // In the bug case, this new entry ends up being a duplicate object pointing
  // at the same underlying files.
  disk_cache::Entry* entry3 = NULL;
  EXPECT_EQ(net::OK, OpenEntry(key, &entry3));
  ScopedEntryPtr entry3_closer(entry3);
  EXPECT_NE(null, entry3);

  // The test passes if these two dooms do not crash.
  entry2->Doom();
  entry3->Doom();
}
3976
// Runs the shared BasicSparseIO scenario against the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheBasicSparseIO) {
  SetSimpleCacheMode();
  InitCache();
  BasicSparseIO();
}
3982
// Runs the shared HugeSparseIO scenario against the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheHugeSparseIO) {
  SetSimpleCacheMode();
  InitCache();
  HugeSparseIO();
}
3988
// Runs the shared GetAvailableRange scenario against the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheGetAvailableRange) {
  SetSimpleCacheMode();
  InitCache();
  GetAvailableRange();
}
3994
// Runs the shared CouldBeSparse scenario against the simple cache backend.
// Disabled; presumably CouldBeSparse is not yet supported there — confirm
// before re-enabling.
TEST_F(DiskCacheEntryTest, DISABLED_SimpleCacheCouldBeSparse) {
  SetSimpleCacheMode();
  InitCache();
  CouldBeSparse();
}
4000
// Runs the shared UpdateSparseEntry scenario against the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheUpdateSparseEntry) {
  SetSimpleCacheMode();
  InitCache();
  UpdateSparseEntry();
}
4006
// Runs the shared DoomSparseEntry scenario against the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCacheDoomSparseEntry) {
  SetSimpleCacheMode();
  InitCache();
  DoomSparseEntry();
}
4012
// Runs the shared PartialSparseEntry scenario against the simple cache backend.
TEST_F(DiskCacheEntryTest, SimpleCachePartialSparseEntry) {
  SetSimpleCacheMode();
  InitCache();
  PartialSparseEntry();
}
4018
4019TEST_F(DiskCacheEntryTest, SimpleCacheTruncateLargeSparseFile) {
4020  const int kSize = 1024;
4021
4022  SetSimpleCacheMode();
4023  // An entry is allowed sparse data 1/10 the size of the cache, so this size
4024  // allows for one |kSize|-sized range plus overhead, but not two ranges.
4025  SetMaxSize(kSize * 15);
4026  InitCache();
4027
4028  const char key[] = "key";
4029  disk_cache::Entry* null = NULL;
4030  disk_cache::Entry* entry;
4031  ASSERT_EQ(net::OK, CreateEntry(key, &entry));
4032  EXPECT_NE(null, entry);
4033
4034  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(kSize));
4035  CacheTestFillBuffer(buffer->data(), kSize, false);
4036  net::TestCompletionCallback callback;
4037  int ret;
4038
4039  // Verify initial conditions.
4040  ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
4041  EXPECT_EQ(0, callback.GetResult(ret));
4042
4043  ret = entry->ReadSparseData(kSize, buffer.get(), kSize, callback.callback());
4044  EXPECT_EQ(0, callback.GetResult(ret));
4045
4046  // Write a range and make sure it reads back.
4047  ret = entry->WriteSparseData(0, buffer.get(), kSize, callback.callback());
4048  EXPECT_EQ(kSize, callback.GetResult(ret));
4049
4050  ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
4051  EXPECT_EQ(kSize, callback.GetResult(ret));
4052
4053  // Write another range and make sure it reads back.
4054  ret = entry->WriteSparseData(kSize, buffer.get(), kSize, callback.callback());
4055  EXPECT_EQ(kSize, callback.GetResult(ret));
4056
4057  ret = entry->ReadSparseData(kSize, buffer.get(), kSize, callback.callback());
4058  EXPECT_EQ(kSize, callback.GetResult(ret));
4059
4060  // Make sure the first range was removed when the second was written.
4061  ret = entry->ReadSparseData(0, buffer.get(), kSize, callback.callback());
4062  EXPECT_EQ(0, callback.GetResult(ret));
4063
4064  entry->Close();
4065}
4066
4067#endif  // defined(OS_POSIX)
4068