entry_impl.cc revision 3345a6884c488ff3a535c2c9acdd33d74b37e311
// Copyright (c) 2006-2010 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/entry_impl.h"

#include "base/histogram.h"
#include "base/message_loop.h"
#include "base/string_util.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/backend_impl.h"
#include "net/disk_cache/bitmap.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/histogram_macros.h"
#include "net/disk_cache/sparse_control.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
const int kKeyFileIndex = 3;

// This class implements FileIOCallback to buffer the completion of a file IO
// operation and forward it to the actual net callback.
class SyncCallback: public disk_cache::FileIOCallback {
 public:
  SyncCallback(disk_cache::EntryImpl* entry, net::IOBuffer* buffer,
               net::CompletionCallback* callback)
      : entry_(entry), callback_(callback), buf_(buffer),
        start_(TimeTicks::Now()) {
    entry->AddRef();
    entry->IncrementIoCount();
  }
  ~SyncCallback() {}

  virtual void OnFileIOComplete(int bytes_copied);
  void Discard();
 private:
  disk_cache::EntryImpl* entry_;
  net::CompletionCallback* callback_;
  scoped_refptr<net::IOBuffer> buf_;
  TimeTicks start_;

  DISALLOW_COPY_AND_ASSIGN(SyncCallback);
};

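// Note: a SyncCallback deletes itself. OnFileIOComplete() runs exactly once,
// either on IO completion or via Discard(), and ends with |delete this|,
// balancing the AddRef() and IncrementIoCount() made in the constructor.
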
void SyncCallback::OnFileIOComplete(int bytes_copied) {
  entry_->DecrementIoCount();
  if (callback_) {
    entry_->ReportIOTime(disk_cache::EntryImpl::kAsyncIO, start_);
    callback_->Run(bytes_copied);
  }
  entry_->Release();
  delete this;
}

void SyncCallback::Discard() {
  callback_ = NULL;
  buf_ = NULL;
  OnFileIOComplete(0);
}

const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace

namespace disk_cache {

// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
// zero. The buffer grows up to a size determined by the backend, to keep the
// total memory used under control.
class EntryImpl::UserBuffer {
 public:
  explicit UserBuffer(BackendImpl* backend)
      : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) {
    buffer_.reserve(kMaxBlockSize);
  }
  ~UserBuffer() {
    if (backend_)
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }

  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, net::IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
  bool PreRead(int eof, int offset, int* len);

  // Reads up to |len| bytes into |buf| from the given |offset|. Returns the
  // number of bytes copied, including any leading zero-fill.
  int Read(int offset, net::IOBuffer* buf, int len);

  // Prepare this buffer for reuse.
  void Reset();

  char* Data() { return buffer_.size() ? &buffer_[0] : NULL; }
  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }
  int End() { return offset_ + Size(); }

 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImpl> backend_;
  int offset_;
  std::vector<char> buffer_;
  bool grow_allowed_;
  DISALLOW_COPY_AND_ASSIGN(UserBuffer);
};

bool EntryImpl::UserBuffer::PreWrite(int offset, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // We don't want to write before our current start.
  if (offset < offset_)
    return false;

  // Let's get the common case out of the way.
  if (offset + len <= capacity())
    return true;

  // If the buffer is empty and the write starts beyond the first 16KB
  // (kMaxBlockSize), the buffer will start at |offset| (see Write()), so we
  // only need room for |len| bytes. Writes within the first 16KB keep the
  // buffer offset_ at 0.
  if (!Size() && offset > kMaxBlockSize)
    return GrowBuffer(len, kMaxBufferSize);

  int required = offset - offset_ + len;
  return GrowBuffer(required, kMaxBufferSize * 6 / 5);
}

void EntryImpl::UserBuffer::Truncate(int offset) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(offset, offset_);

  offset -= offset_;
  if (Size() >= offset)
    buffer_.resize(offset);
}

void EntryImpl::UserBuffer::Write(int offset, net::IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);
  DCHECK_GE(offset, offset_);

  if (!Size() && offset > kMaxBlockSize)
    offset_ = offset;

  offset -= offset_;

  if (offset > Size())
    buffer_.resize(offset);

  if (!len)
    return;

  char* buffer = buf->data();
  int valid_len = Size() - offset;
  int copy_len = std::min(valid_len, len);
  if (copy_len) {
    memcpy(&buffer_[offset], buffer, copy_len);
    len -= copy_len;
    buffer += copy_len;
  }
  if (!len)
    return;

  buffer_.insert(buffer_.end(), buffer, buffer + len);
}

bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(*len, 0);

  if (offset < offset_) {
    // We are reading before this buffer.
    if (offset >= eof)
      return true;

    // If the read overlaps with the buffer, change its length so that there is
    // no overlap.
    *len = std::min(*len, offset_ - offset);
    *len = std::min(*len, eof - offset);

    // We should read from disk.
    return false;
  }

  if (!Size())
    return false;

  // See if we can fulfill the first part of the operation.
  return (offset - offset_ < Size());
}

int EntryImpl::UserBuffer::Read(int offset, net::IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(len, 0);
  DCHECK(Size() || offset < offset_);

  int clean_bytes = 0;
  if (offset < offset_) {
    // There is no data on disk before this buffer, so let's fill the first
    // part with zeros.
    clean_bytes = std::min(offset_ - offset, len);
    memset(buf->data(), 0, clean_bytes);
    if (len == clean_bytes)
      return len;
    offset = offset_;
    len -= clean_bytes;
  }

  int start = offset - offset_;
  int available = Size() - start;
  DCHECK_GE(start, 0);
  DCHECK_GE(available, 0);
  len = std::min(len, available);
  memcpy(buf->data() + clean_bytes, &buffer_[start], len);
  return len + clean_bytes;
}
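
// Example of the zero-fill above: if this buffer holds bytes [100, 200) of
// the stream (offset_ == 100, Size() == 100) and the caller asks for 100
// bytes at offset 50, the first 50 bytes of |buf| are zeroed (that range was
// never written) and the remaining 50 come from the start of the buffer.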

void EntryImpl::UserBuffer::Reset() {
  if (!grow_allowed_) {
    if (backend_)
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
    grow_allowed_ = true;
    std::vector<char> tmp;
    buffer_.swap(tmp);
    buffer_.reserve(kMaxBlockSize);
  }
  offset_ = 0;
  buffer_.clear();
}

bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
  DCHECK_GE(required, 0);
  int current_size = capacity();
  if (required <= current_size)
    return true;

  if (required > limit)
    return false;

  if (!backend_)
    return false;

  int to_add = std::max(required - current_size, kMaxBlockSize * 4);
  to_add = std::max(current_size, to_add);
  required = std::min(current_size + to_add, limit);

  grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
  if (!grow_allowed_)
    return false;

  buffer_.reserve(required);
  return true;
}
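
// Worked example of the growth policy above: with the initial 16KB capacity
// (kMaxBlockSize) and |required| == 20KB, to_add becomes
// max(20KB - 16KB, 64KB) = 64KB, then max(16KB, 64KB) = 64KB, so we request
// min(16KB + 64KB, limit) = 80KB. Growing by at least the current capacity
// keeps the number of reallocations logarithmic in the final size.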

// ------------------------------------------------------------------------

EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
    : entry_(NULL, Addr(0)), node_(NULL, Addr(0)), read_only_(read_only) {
  entry_.LazyInit(backend->File(address), address);
  doomed_ = false;
  backend_ = backend;
  for (int i = 0; i < kNumStreams; i++) {
    unreported_size_[i] = 0;
  }
}

// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that
// any unused block is filled with zeros), and to simplify handling of partial
// reads and writes of an entry (we don't have to worry about returning data
// that belongs to a previous cache entry because the range was never fully
// written).
EntryImpl::~EntryImpl() {
  Log("~EntryImpl in");
  backend_->OnEntryDestroyBegin(entry_.address());

  // Save the sparse info to disk before deleting this entry.
  sparse_.reset();

  if (doomed_) {
    DeleteEntryData(true);
  } else {
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      if (user_buffers_[index].get()) {
        // Don't let a later successful Flush() mask an earlier failure.
        if (!Flush(index, 0)) {
          ret = false;
          LOG(ERROR) << "Failed to save user data";
        }
      }
      if (unreported_size_[index]) {
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && node_.Data()->dirty) {
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
  backend_->OnEntryDestroyEnd();
}

void EntryImpl::Doom() {
  backend_->background_queue()->DoomEntryImpl(this);
}

void EntryImpl::Close() {
  backend_->background_queue()->CloseEntryImpl(this);
}

std::string EntryImpl::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  if (entry->Data()->key_len <= kMaxInternalKeyLength)
    return std::string(entry->Data()->key);

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
                                                                kKeyFileIndex);

  if (!key_file ||
      !key_file->Read(WriteInto(&key_, entry->Data()->key_len + 1),
                      entry->Data()->key_len + 1, offset))
    key_.clear();
  return key_;
}
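
// Key storage recap: keys up to kMaxInternalKeyLength bytes live inline in
// the entry block; longer keys are stored NUL-terminated at the address kept
// in |long_key| and read back through the stream-3 backing file
// (kKeyFileIndex), as done above and in CreateEntry() below.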

Time EntryImpl::GetLastUsed() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_used);
}

Time EntryImpl::GetLastModified() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_modified);
}

int32 EntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;

  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  return entry->Data()->data_size[index];
}

int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
                        net::CompletionCallback* callback) {
  if (!callback)
    return ReadDataImpl(index, offset, buf, buf_len, callback);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  backend_->background_queue()->ReadData(this, index, offset, buf, buf_len,
                                         callback);
  return net::ERR_IO_PENDING;
}
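
// Usage sketch (hypothetical caller, not part of this file; by convention
// the HTTP cache keeps response headers in stream 0 and the body in
// stream 1):
//
//   scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(1024));
//   int rv = entry->ReadData(1, 0, buf, 1024, &my_callback);
//   // rv >= 0: the read completed synchronously with |rv| bytes.
//   // rv == net::ERR_IO_PENDING: |my_callback| runs later with the result.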

int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
                         CompletionCallback* callback, bool truncate) {
  if (!callback)
    return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  backend_->background_queue()->WriteData(this, index, offset, buf, buf_len,
                                          truncate, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
                              net::CompletionCallback* callback) {
  if (!callback)
    return ReadSparseDataImpl(offset, buf, buf_len, callback);

  backend_->background_queue()->ReadSparseData(this, offset, buf, buf_len,
                                               callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
                               net::CompletionCallback* callback) {
  if (!callback)
    return WriteSparseDataImpl(offset, buf, buf_len, callback);

  backend_->background_queue()->WriteSparseData(this, offset, buf, buf_len,
                                                callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
                                 CompletionCallback* callback) {
  backend_->background_queue()->GetAvailableRange(this, offset, len, start,
                                                  callback);
  return net::ERR_IO_PENDING;
}

bool EntryImpl::CouldBeSparse() const {
  if (sparse_.get())
    return true;

  scoped_ptr<SparseControl> sparse;
  sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
  return sparse->CouldBeSparse();
}

void EntryImpl::CancelSparseIO() {
  backend_->background_queue()->CancelSparseIO(this);
}

int EntryImpl::ReadyForSparseIO(net::CompletionCallback* callback) {
  if (!sparse_.get())
    return net::OK;

  backend_->background_queue()->ReadyForSparseIO(this, callback);
  return net::ERR_IO_PENDING;
}
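
// Note on the pattern above: each public entry point either runs the
// operation synchronously when no callback is given (presumably because the
// caller is already on the cache thread), or posts it to the background
// queue and returns net::ERR_IO_PENDING, leaving the real work to the
// *Impl() methods below.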

// ------------------------------------------------------------------------

void EntryImpl::DoomImpl() {
  if (doomed_)
    return;

  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
  backend_->InternalDoomEntry(this);
}

int EntryImpl::ReadDataImpl(int index, int offset, net::IOBuffer* buf,
                            int buf_len, CompletionCallback* callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  TimeTicks start = TimeTicks::Now();

  if (offset + buf_len > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  backend_->OnEvent(Stats::READ_DATA);
  backend_->OnRead(buf_len);

  // We need the current size on disk.
  int eof = entry_size - unreported_size_[index];
  if (user_buffers_[index].get() &&
      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
    // Complete the operation locally.
    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
    ReportIOTime(kRead, start);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(address.is_initialized());
  if (!address.is_initialized())
    return net::ERR_FAILED;

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FAILED;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  }

  SyncCallback* io_callback = NULL;
  if (callback)
    io_callback = new SyncCallback(this, buf, callback);

  bool completed;
  if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_FAILED;
  }

  if (io_callback && completed)
    io_callback->Discard();

  ReportIOTime(kRead, start);
  return (completed || !callback) ? buf_len : net::ERR_IO_PENDING;
}

int EntryImpl::WriteDataImpl(int index, int offset, net::IOBuffer* buf,
                             int buf_len, CompletionCallback* callback,
                             bool truncate) {
  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  int max_file_size = backend_->MaxFileSize();

  // offset + buf_len could have overflowed to a negative value (both values
  // are known to be non-negative at this point).
  if (offset > max_file_size || buf_len > max_file_size ||
      offset + buf_len > max_file_size) {
    int size = offset + buf_len;
    if (size <= max_file_size)
      size = kint32max;
    backend_->TooMuchStorageRequested(size);
    return net::ERR_FAILED;
  }

  TimeTicks start = TimeTicks::Now();

  // Read the size at this point (it may change inside prepare).
  int entry_size = entry_.Data()->data_size[index];
  bool extending = entry_size < offset + buf_len;
  truncate = truncate && entry_size > offset + buf_len;
  Trace("To PrepareTarget 0x%x", entry_.address().value());
  if (!PrepareTarget(index, offset, buf_len, truncate))
    return net::ERR_FAILED;

  Trace("From PrepareTarget 0x%x", entry_.address().value());
  if (extending || truncate)
    UpdateSize(index, entry_size, offset + buf_len);

  UpdateRank(true);

  backend_->OnEvent(Stats::WRITE_DATA);
  backend_->OnWrite(buf_len);

  if (user_buffers_[index].get()) {
    // Complete the operation locally.
    user_buffers_[index]->Write(offset, buf, buf_len);
    ReportIOTime(kWrite, start);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
  if (truncate && offset + buf_len == 0) {
    DCHECK(!address.is_initialized());
    return 0;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FAILED;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  } else if (truncate || (extending && !buf_len)) {
    if (!file->SetLength(offset + buf_len))
      return net::ERR_FAILED;
  }

  if (!buf_len)
    return 0;

  SyncCallback* io_callback = NULL;
  if (callback)
    io_callback = new SyncCallback(this, buf, callback);

  bool completed;
  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
                   &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_FAILED;
  }

  if (io_callback && completed)
    io_callback->Discard();

  ReportIOTime(kWrite, start);
  return (completed || !callback) ? buf_len : net::ERR_IO_PENDING;
}

int EntryImpl::ReadSparseDataImpl(int64 offset, net::IOBuffer* buf, int buf_len,
                                  CompletionCallback* callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
                            callback);
  ReportIOTime(kSparseRead, start);
  return result;
}

int EntryImpl::WriteSparseDataImpl(int64 offset, net::IOBuffer* buf,
                                   int buf_len, CompletionCallback* callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
                            buf_len, callback);
  ReportIOTime(kSparseWrite, start);
  return result;
}

int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  return sparse_->GetAvailableRange(offset, len, start);
}

void EntryImpl::CancelSparseIOImpl() {
  if (!sparse_.get())
    return;

  sparse_->CancelIO();
}

int EntryImpl::ReadyForSparseIOImpl(CompletionCallback* callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(callback);
}

// ------------------------------------------------------------------------

uint32 EntryImpl::GetHash() {
  return entry_.Data()->hash;
}

bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
                            uint32 hash) {
  Trace("Create entry In");
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
  memset(node, 0, sizeof(RankingsNode));
  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    if (!key_file || !key_file->Write(key.data(), key.size(), offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    memcpy(entry_store->key, key.data(), key.size());
    entry_store->key[key.size()] = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
  CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  Log("Create Entry ");
  return true;
}

bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) {
  if (entry_.Data()->hash != hash ||
      static_cast<size_t>(entry_.Data()->key_len) != key.size())
    return false;

  std::string my_key = GetKey();
  return my_key == key;
}

void EntryImpl::InternalDoom() {
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}

void EntryImpl::DeleteEntryData(bool everything) {
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  if (GetDataSize(0))
    CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
  if (GetDataSize(1))
    CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                      unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  Addr address(entry_.Data()->long_key);
  DeleteData(address, kKeyFileIndex);
  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);

  memset(node_.buffer(), 0, node_.size());
  memset(entry_.buffer(), 0, entry_.size());
  node_.Store();
  entry_.Store();

  backend_->DeleteBlock(node_.address(), false);
  backend_->DeleteBlock(entry_.address(), false);
}

CacheAddr EntryImpl::GetNextAddress() {
  return entry_.Data()->next;
}

void EntryImpl::SetNextAddress(Addr address) {
  entry_.Data()->next = address.value();
  bool success = entry_.Store();
  DCHECK(success);
}

bool EntryImpl::LoadNodeAddress() {
  Addr address(entry_.Data()->rankings_node);
  if (!node_.LazyInit(backend_->File(address), address))
    return false;
  return node_.Load();
}

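// Dirty-flag protocol (see Update(), IsDirty() and ClearDirtyFlag() below):
// while an entry is in use, |dirty| holds the current session id; a clean
// shutdown resets it to 0. A nonzero |dirty| written by a different session
// therefore means the entry was not properly closed and is treated as dirty.
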
bool EntryImpl::Update() {
  DCHECK(node_.HasData());

  if (read_only_)
    return true;

  RankingsNode* rankings = node_.Data();
  if (!rankings->dirty) {
    rankings->dirty = backend_->GetCurrentEntryId();
    if (!node_.Store())
      return false;
  }
  return true;
}

bool EntryImpl::IsDirty(int32 current_id) {
  DCHECK(node_.HasData());
  // We are checking if the entry is valid or not. If there is a pointer here,
  // we should not be checking the entry.
  if (node_.Data()->dummy)
    return true;

  return node_.Data()->dirty && current_id != node_.Data()->dirty;
}

void EntryImpl::ClearDirtyFlag() {
  node_.Data()->dirty = 0;
}

void EntryImpl::SetPointerForInvalidEntry(int32 new_id) {
  node_.Data()->dirty = new_id;
  node_.Data()->dummy = 0;
  node_.Store();
}

bool EntryImpl::SanityCheck() {
  if (!entry_.Data()->rankings_node || !entry_.Data()->key_len)
    return false;

  Addr rankings_addr(entry_.Data()->rankings_node);
  if (!rankings_addr.is_initialized() || rankings_addr.is_separate_file() ||
      rankings_addr.file_type() != RANKINGS)
    return false;

  Addr next_addr(entry_.Data()->next);
  if (next_addr.is_initialized() &&
      (next_addr.is_separate_file() || next_addr.file_type() != BLOCK_256))
    return false;

  return true;
}

void EntryImpl::IncrementIoCount() {
  backend_->IncrementIoCount();
}

void EntryImpl::DecrementIoCount() {
  backend_->DecrementIoCount();
}

void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
  node_.Data()->last_used = last_used.ToInternalValue();
  node_.Data()->last_modified = last_modified.ToInternalValue();
  node_.set_modified();
}

void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
  int group = backend_->GetSizeGroup();
  switch (op) {
    case kRead:
      CACHE_UMA(AGE_MS, "ReadTime", group, start);
      break;
    case kWrite:
      CACHE_UMA(AGE_MS, "WriteTime", group, start);
      break;
    case kSparseRead:
      CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
      break;
    case kSparseWrite:
      CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
      break;
    case kAsyncIO:
      CACHE_UMA(AGE_MS, "AsyncIOTime", group, start);
      break;
    default:
      NOTREACHED();
  }
}

// ------------------------------------------------------------------------

bool EntryImpl::CreateDataBlock(int index, int size) {
  DCHECK(index >= 0 && index < kNumStreams);

  Addr address(entry_.Data()->data_addr[index]);
  if (!CreateBlock(size, &address))
    return false;

  entry_.Data()->data_addr[index] = address.value();
  entry_.Store();
  return true;
}

bool EntryImpl::CreateBlock(int size, Addr* address) {
  DCHECK(!address->is_initialized());

  FileType file_type = Addr::RequiredFileType(size);
  if (EXTERNAL == file_type) {
    if (size > backend_->MaxFileSize())
      return false;
    if (!backend_->CreateExternalFile(address))
      return false;
  } else {
    int num_blocks = (size + Addr::BlockSizeForFileType(file_type) - 1) /
                     Addr::BlockSizeForFileType(file_type);

    if (!backend_->CreateBlock(file_type, num_blocks, address))
      return false;
  }
  return true;
}
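
// Example of the block math above: for BLOCK_1K (1024-byte blocks) and a
// |size| of 2500 bytes, num_blocks = (2500 + 1023) / 1024 = 3, so three
// consecutive blocks are requested from the block file.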

// Note that this method may end up modifying a block file so upon return the
// involved block will be free, and could be reused for something else. If there
// is a crash after that point (and maybe before returning to the caller), the
// entry will be left dirty... and at some point it will be discarded; it is
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImpl::DeleteData(Addr address, int index) {
  if (!address.is_initialized())
    return;
  if (address.is_separate_file()) {
    int failure = !DeleteCacheFile(backend_->GetFileName(address));
    CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
    if (failure) {
      LOG(ERROR) << "Failed to delete " <<
          backend_->GetFileName(address).value() << " from the cache.";
    }
    if (files_[index])
      files_[index] = NULL;  // Releases the object.
  } else {
    backend_->DeleteBlock(address, true);
  }
}

void EntryImpl::UpdateRank(bool modified) {
  if (!doomed_) {
    // Everything is handled by the backend.
    backend_->UpdateRank(this, modified);
    return;
  }

  Time current = Time::Now();
  node_.Data()->last_used = current.ToInternalValue();

  if (modified)
    node_.Data()->last_modified = current.ToInternalValue();
}

File* EntryImpl::GetBackingFile(Addr address, int index) {
  File* file;
  if (address.is_separate_file())
    file = GetExternalFile(address, index);
  else
    file = backend_->File(address);
  return file;
}

File* EntryImpl::GetExternalFile(Addr address, int index) {
  DCHECK(index >= 0 && index <= kKeyFileIndex);
  if (!files_[index].get()) {
    // For a key file, use mixed mode IO.
    scoped_refptr<File> file(new File(kKeyFileIndex == index));
    if (file->Init(backend_->GetFileName(address)))
      files_[index].swap(file);
  }
  return files_[index].get();
}

// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't know yet the final data size), and for some of the data
// that ends up on external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to
// date. This means that as soon as there is a chance to get confused about what
// is the most recent version of some part of a file, we'll flush the buffer and
// reuse it for the new data. Keep in mind that the normal use pattern is quite
// simple (write sequentially from the beginning), so we optimize for handling
// that case.
bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
                              bool truncate) {
  if (truncate)
    return HandleTruncation(index, offset, buf_len);

  Addr address(entry_.Data()->data_addr[index]);
  if (address.is_initialized()) {
    if (address.is_block_file() && !MoveToLocalBuffer(index))
      return false;

    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
      // We are about to create a buffer for the first 16KB, so make sure that
      // we preserve existing data.
      if (!CopyToLocalBuffer(index))
        return false;
    }
  }

  if (!user_buffers_[index].get())
    user_buffers_[index].reset(new UserBuffer(backend_));

  return PrepareBuffer(index, offset, buf_len);
}

// We get to this function with some data already stored. If the truncation
// results in data that can be stored internally, we'll explicitly handle the
// case here.
bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  if (!new_size) {
    // This is by far the most common scenario.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Just truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);
        return true;
      }

      // Just discard our buffer.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

  return ImportSeparateFile(index, offset + buf_len);
}
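
// Truncation recap: the cases above are (1) truncation to zero, which
// releases the data address and the buffer; (2) a truncation satisfied
// entirely by the in-memory buffer; and (3) a truncation that overlaps data
// on disk, which flushes the buffer and, if the result is small enough,
// re-imports it through ImportSeparateFile().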

bool EntryImpl::CopyToLocalBuffer(int index) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
  user_buffers_[index].reset(new UserBuffer(backend_));
  user_buffers_[index]->Write(len, NULL, 0);

  File* file = GetBackingFile(address, index);
  int offset = 0;

  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  if (!file ||
      !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
    user_buffers_[index].reset();
    return false;
  }
  return true;
}

bool EntryImpl::MoveToLocalBuffer(int index) {
  if (!CopyToLocalBuffer(index))
    return false;

  Addr address(entry_.Data()->data_addr[index]);
  entry_.Data()->data_addr[index] = 0;
  entry_.Store();
  DeleteData(address, index);

  // If we lose this entry we'll see it as zero sized.
  int len = entry_.Data()->data_size[index];
  backend_->ModifyStorageSize(len - unreported_size_[index], 0);
  unreported_size_[index] = len;
  return true;
}

bool EntryImpl::ImportSeparateFile(int index, int new_size) {
  if (entry_.Data()->data_size[index] > new_size)
    UpdateSize(index, entry_.Data()->data_size[index], new_size);

  return MoveToLocalBuffer(index);
}

bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if (offset > user_buffers_[index]->End()) {
    // We are about to extend the buffer (with zeros), so make sure that we are
    // not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      int eof = entry_.Data()->data_size[index];
      if (eof > user_buffers_[index]->Start() && !Flush(index, 0))
        return false;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    if (!Flush(index, offset + buf_len))
      return false;

    // Let's try again.
    if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}

bool EntryImpl::Flush(int index, int min_len) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(user_buffers_[index].get());
  DCHECK(!address.is_initialized() || address.is_separate_file());

  int size = std::max(entry_.Data()->data_size[index], min_len);
  if (!address.is_initialized() && !CreateDataBlock(index, size))
    return false;

  if (!entry_.Data()->data_size[index]) {
    DCHECK(!user_buffers_[index]->Size());
    return true;
  }

  address.set_value(entry_.Data()->data_addr[index]);

  int len = user_buffers_[index]->Size();
  int offset = user_buffers_[index]->Start();
  if (!len && !offset)
    return true;

  if (address.is_block_file()) {
    DCHECK_EQ(len, entry_.Data()->data_size[index]);
    DCHECK(!offset);
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return false;

  if (!file->Write(user_buffers_[index]->Data(), len, offset, NULL, NULL))
    return false;
  user_buffers_[index]->Reset();

  return true;
}
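
// Note: a successful Flush() only Reset()s the buffer; the UserBuffer object
// itself survives, and PrepareBuffer() above decides whether it can serve
// the next write or must be discarded.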

void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
  if (entry_.Data()->data_size[index] == new_size)
    return;

  unreported_size_[index] += new_size - old_size;
  entry_.Data()->data_size[index] = new_size;
  entry_.set_modified();
}

int EntryImpl::InitSparseData() {
  if (sparse_.get())
    return net::OK;

  // Use a local variable so that sparse_ never goes from 'valid' to NULL.
  scoped_ptr<SparseControl> sparse(new SparseControl(this));
  int result = sparse->Init();
  if (net::OK == result)
    sparse_.swap(sparse);

  return result;
}

void EntryImpl::SetEntryFlags(uint32 flags) {
  entry_.Data()->flags |= flags;
  entry_.set_modified();
}

uint32 EntryImpl::GetEntryFlags() {
  return entry_.Data()->flags;
}

void EntryImpl::GetData(int index, char** buffer, Addr* address) {
  if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
      !user_buffers_[index]->Start()) {
    // The data is already in memory, so just copy it and we're done.
    int data_len = entry_.Data()->data_size[index];
    if (data_len <= user_buffers_[index]->Size()) {
      DCHECK(!user_buffers_[index]->Start());
      *buffer = new char[data_len];
      memcpy(*buffer, user_buffers_[index]->Data(), data_len);
      return;
    }
  }

  // Bad news: we'd have to read the info from disk, so instead we'll just
  // tell the caller where to read it from.
  *buffer = NULL;
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                    unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}

void EntryImpl::Log(const char* msg) {
  int dirty = 0;
  if (node_.HasData()) {
    dirty = node_.Data()->dirty;
  }

  Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
        entry_.address().value(), node_.address().value());

  Trace("  data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0],
        entry_.Data()->data_addr[1], entry_.Data()->long_key);

  Trace("  doomed: %d 0x%x", doomed_, dirty);
}

}  // namespace disk_cache