// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/entry_impl.h"

#include "base/hash.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_util.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/blockfile/backend_impl.h"
#include "net/disk_cache/blockfile/bitmap.h"
#include "net/disk_cache/blockfile/disk_format.h"
#include "net/disk_cache/blockfile/histogram_macros.h"
#include "net/disk_cache/blockfile/sparse_control.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/net_log_parameters.h"

// Provide a BackendImpl object to macros from histogram_macros.h.
#define CACHE_UMA_BACKEND_IMPL_OBJ backend_

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
const int kKeyFileIndex = 3;

// This class implements FileIOCallback to buffer the result of a file IO
// operation before delivering it to the actual net completion callback.
class SyncCallback: public disk_cache::FileIOCallback {
 public:
  // |end_event_type| is the event type to log on completion.  Logs nothing on
  // discard, or when the NetLog is not set to log all events.
  SyncCallback(disk_cache::EntryImpl* entry, net::IOBuffer* buffer,
               const net::CompletionCallback& callback,
               net::NetLog::EventType end_event_type)
      : entry_(entry), callback_(callback), buf_(buffer),
        start_(TimeTicks::Now()), end_event_type_(end_event_type) {
    entry->AddRef();
    entry->IncrementIoCount();
  }
  virtual ~SyncCallback() {}

  virtual void OnFileIOComplete(int bytes_copied) OVERRIDE;
  void Discard();

 private:
  disk_cache::EntryImpl* entry_;
  net::CompletionCallback callback_;
  scoped_refptr<net::IOBuffer> buf_;
  TimeTicks start_;
  const net::NetLog::EventType end_event_type_;

  DISALLOW_COPY_AND_ASSIGN(SyncCallback);
};

void SyncCallback::OnFileIOComplete(int bytes_copied) {
  entry_->DecrementIoCount();
  if (!callback_.is_null()) {
    if (entry_->net_log().IsLogging()) {
      entry_->net_log().EndEvent(
          end_event_type_,
          disk_cache::CreateNetLogReadWriteCompleteCallback(bytes_copied));
    }
    entry_->ReportIOTime(disk_cache::EntryImpl::kAsyncIO, start_);
    buf_ = NULL;  // Release the buffer before invoking the callback.
    callback_.Run(bytes_copied);
  }
  entry_->Release();
  delete this;
}

void SyncCallback::Discard() {
  callback_.Reset();
  buf_ = NULL;
  OnFileIOComplete(0);
}

const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace

namespace disk_cache {

// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write to
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
// zero. The buffer grows up to a size determined by the backend, to keep the
// total memory used under control.
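//
// For example (per the Write() implementation below), writing 100 bytes at
// offset 20000 to an empty buffer leaves Start() == 20000 and End() == 20100,
// while the same write at offset 1000 keeps Start() == 0 and simply grows the
// buffer to 1100 bytes (the gap is zero-filled).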
class EntryImpl::UserBuffer {
 public:
  explicit UserBuffer(BackendImpl* backend)
      : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) {
    buffer_.reserve(kMaxBlockSize);
  }
  ~UserBuffer() {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }

  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
  bool PreRead(int eof, int offset, int* len);

  // Reads up to |len| bytes into |buf| starting at the given |offset|, and
  // returns the number of bytes copied.
  int Read(int offset, IOBuffer* buf, int len);

  // Prepares this buffer for reuse.
  void Reset();

  char* Data() { return buffer_.size() ? &buffer_[0] : NULL; }
  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }
  int End() { return offset_ + Size(); }

 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImpl> backend_;
  int offset_;
  std::vector<char> buffer_;
  bool grow_allowed_;
  DISALLOW_COPY_AND_ASSIGN(UserBuffer);
};

bool EntryImpl::UserBuffer::PreWrite(int offset, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // We don't want to write before our current start.
  if (offset < offset_)
    return false;

  // Let's get the common case out of the way.
  if (offset + len <= capacity())
    return true;

  // If the first write is fully beyond the first 16KB (kMaxBlockSize), the
  // buffer will float to |offset| (see Write()), so we only need room for
  // |len| bytes.
  if (!Size() && offset > kMaxBlockSize)
    return GrowBuffer(len, kMaxBufferSize);

  int required = offset - offset_ + len;
  return GrowBuffer(required, kMaxBufferSize * 6 / 5);
}

void EntryImpl::UserBuffer::Truncate(int offset) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;

  offset -= offset_;
  if (Size() >= offset)
    buffer_.resize(offset);
}

void EntryImpl::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer write at " << offset << " current " << offset_;

  if (!Size() && offset > kMaxBlockSize)
    offset_ = offset;

  offset -= offset_;

  if (offset > Size())
    buffer_.resize(offset);

  if (!len)
    return;

  char* buffer = buf->data();
  int valid_len = Size() - offset;
  int copy_len = std::min(valid_len, len);
  if (copy_len) {
    memcpy(&buffer_[offset], buffer, copy_len);
    len -= copy_len;
    buffer += copy_len;
  }
  if (!len)
    return;

  buffer_.insert(buffer_.end(), buffer, buffer + len);
}

bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(*len, 0);

  if (offset < offset_) {
    // We are reading before this buffer.
    if (offset >= eof)
      return true;

    // If the read overlaps with the buffer, change its length so that there is
    // no overlap.
    *len = std::min(*len, offset_ - offset);
    *len = std::min(*len, eof - offset);

    // We should read from disk.
    return false;
  }

  if (!Size())
    return false;

  // See if we can fulfill the first part of the operation.
  return (offset - offset_ < Size());
}

int EntryImpl::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(len, 0);
  DCHECK(Size() || offset < offset_);

  int clean_bytes = 0;
  if (offset < offset_) {
    // We don't have data for this range, so fill the first part with zeros.
    clean_bytes = std::min(offset_ - offset, len);
    memset(buf->data(), 0, clean_bytes);
    if (len == clean_bytes)
      return len;
    offset = offset_;
    len -= clean_bytes;
  }

  int start = offset - offset_;
  int available = Size() - start;
  DCHECK_GE(start, 0);
  DCHECK_GE(available, 0);
  len = std::min(len, available);
  memcpy(buf->data() + clean_bytes, &buffer_[start], len);
  return len + clean_bytes;
}

void EntryImpl::UserBuffer::Reset() {
  if (!grow_allowed_) {
    if (backend_.get())
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
    grow_allowed_ = true;
    std::vector<char> tmp;
    buffer_.swap(tmp);
    buffer_.reserve(kMaxBlockSize);
  }
  offset_ = 0;
  buffer_.clear();
}

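// Grows the buffer so it can hold at least |required| bytes, failing if that
// would exceed |limit| or if the backend does not approve the extra memory
// use. A successful growth at least doubles the current capacity and adds at
// least kMaxBlockSize * 4 bytes, capped at |limit|, to amortize repeated
// small extensions.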
bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
  DCHECK_GE(required, 0);
  int current_size = capacity();
  if (required <= current_size)
    return true;

  if (required > limit)
    return false;

  if (!backend_.get())
    return false;

  int to_add = std::max(required - current_size, kMaxBlockSize * 4);
  to_add = std::max(current_size, to_add);
  required = std::min(current_size + to_add, limit);

  grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
  if (!grow_allowed_)
    return false;

  DVLOG(3) << "Buffer grow to " << required;

  buffer_.reserve(required);
  return true;
}

// ------------------------------------------------------------------------

EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
    : entry_(NULL, Addr(0)), node_(NULL, Addr(0)),
      backend_(backend->GetWeakPtr()), doomed_(false), read_only_(read_only),
      dirty_(false) {
  entry_.LazyInit(backend->File(address), address);
  for (int i = 0; i < kNumStreams; i++) {
    unreported_size_[i] = 0;
  }
}

void EntryImpl::DoomImpl() {
  if (doomed_ || !backend_.get())
    return;

  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
  backend_->InternalDoomEntry(this);
}

int EntryImpl::ReadDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                            const CompletionCallback& callback) {
  if (net_log_.IsLogging()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
  }

  int result = InternalReadData(index, offset, buf, buf_len, callback);

  if (result != net::ERR_IO_PENDING && net_log_.IsLogging()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImpl::WriteDataImpl(int index, int offset, IOBuffer* buf, int buf_len,
                             const CompletionCallback& callback,
                             bool truncate) {
  if (net_log_.IsLogging()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
  }

  int result = InternalWriteData(index, offset, buf, buf_len, callback,
                                 truncate);

  if (result != net::ERR_IO_PENDING && net_log_.IsLogging()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImpl::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                  const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
                            callback);
  ReportIOTime(kSparseRead, start);
  return result;
}

int EntryImpl::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                   const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
                            buf_len, callback);
  ReportIOTime(kSparseWrite, start);
  return result;
}

int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  return sparse_->GetAvailableRange(offset, len, start);
}

void EntryImpl::CancelSparseIOImpl() {
  if (!sparse_.get())
    return;

  sparse_->CancelIO();
}

int EntryImpl::ReadyForSparseIOImpl(const CompletionCallback& callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(callback);
}

uint32 EntryImpl::GetHash() {
  return entry_.Data()->hash;
}

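// Note: the key is stored inline in the entry block when it fits (up to
// kMaxInternalKeyLength bytes); longer keys go to a separate block or
// external file referenced by |long_key|.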
bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
                            uint32 hash) {
  Trace("Create entry In");
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
  memset(node, 0, sizeof(RankingsNode));
  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    if (!key_file || !key_file->Write(key.data(), key.size(), offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    memcpy(entry_store->key, key.data(), key.size());
    entry_store->key[key.size()] = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
  CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  Log("Create Entry ");
  return true;
}

bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) {
  if (entry_.Data()->hash != hash ||
      static_cast<size_t>(entry_.Data()->key_len) != key.size())
    return false;

  return (key.compare(GetKey()) == 0);
}

void EntryImpl::InternalDoom() {
  net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}

void EntryImpl::DeleteEntryData(bool everything) {
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  if (GetDataSize(0))
    CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
  if (GetDataSize(1))
    CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                      unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  // Note that at this point node_ and entry_ are just two blocks of data, and
  // even if they reference each other, nobody should be referencing them.

  Addr address(entry_.Data()->long_key);
  DeleteData(address, kKeyFileIndex);
  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);

  backend_->DeleteBlock(entry_.address(), true);
  entry_.Discard();

  if (!LeaveRankingsBehind()) {
    backend_->DeleteBlock(node_.address(), true);
    node_.Discard();
  }
}

CacheAddr EntryImpl::GetNextAddress() {
  return entry_.Data()->next;
}

void EntryImpl::SetNextAddress(Addr address) {
  DCHECK_NE(address.value(), entry_.address().value());
  entry_.Data()->next = address.value();
  bool success = entry_.Store();
  DCHECK(success);
}

bool EntryImpl::LoadNodeAddress() {
  Addr address(entry_.Data()->rankings_node);
  if (!node_.LazyInit(backend_->File(address), address))
    return false;
  return node_.Load();
}

bool EntryImpl::Update() {
  DCHECK(node_.HasData());

  if (read_only_)
    return true;

  RankingsNode* rankings = node_.Data();
  if (!rankings->dirty) {
    rankings->dirty = backend_->GetCurrentEntryId();
    if (!node_.Store())
      return false;
  }
  return true;
}

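// Sets |dirty_| when the rankings node was last written by a different
// instance of the cache (|node_.Data()->dirty| stores that instance's id),
// or unconditionally when |current_id| is zero.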
void EntryImpl::SetDirtyFlag(int32 current_id) {
  DCHECK(node_.HasData());
  if (node_.Data()->dirty && current_id != node_.Data()->dirty)
    dirty_ = true;

  if (!current_id)
    dirty_ = true;
}

void EntryImpl::SetPointerForInvalidEntry(int32 new_id) {
  node_.Data()->dirty = new_id;
  node_.Store();
}

bool EntryImpl::LeaveRankingsBehind() {
  return !node_.Data()->contents;
}

// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should have been set when the entry was
// created. Basically, even if there is something wrong with this entry, we
// want to see if it is still possible to load the rankings node and delete
// both of them together.
bool EntryImpl::SanityCheck() {
  if (!entry_.VerifyHash())
    return false;

  EntryStore* stored = entry_.Data();
  if (!stored->rankings_node || stored->key_len <= 0)
    return false;

  if (stored->reuse_count < 0 || stored->refetch_count < 0)
    return false;

  Addr rankings_addr(stored->rankings_node);
  if (!rankings_addr.SanityCheckForRankings())
    return false;

  Addr next_addr(stored->next);
  if (next_addr.is_initialized() && !next_addr.SanityCheckForEntryV2()) {
    STRESS_NOTREACHED();
    return false;
  }
  STRESS_DCHECK(next_addr.value() != entry_.address().value());

  if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
    return false;

  Addr key_addr(stored->long_key);
  if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
      (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
    return false;

  if (!key_addr.SanityCheckV2())
    return false;

  if (key_addr.is_initialized() &&
      ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
       (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
    return false;

  int num_blocks = NumBlocksForEntry(stored->key_len);
  if (entry_.address().num_blocks() != num_blocks)
    return false;

  return true;
}

bool EntryImpl::DataSanityCheck() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // The key must be NULL terminated.
  if (!key_addr.is_initialized() && stored->key[stored->key_len])
    return false;

  if (stored->hash != base::Hash(GetKey()))
    return false;

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_size < 0)
      return false;
    if (!data_size && data_addr.is_initialized())
      return false;
    if (!data_addr.SanityCheckV2())
      return false;
    if (!data_size)
      continue;
    if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
      return false;
    if (data_size > kMaxBlockSize && data_addr.is_block_file())
      return false;
  }
  return true;
}

void EntryImpl::FixForDelete() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  if (!key_addr.is_initialized())
    stored->key[stored->key_len] = '\0';

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_addr.is_initialized()) {
      if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
          (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
          !data_addr.SanityCheckV2()) {
        STRESS_NOTREACHED();
        // The address is weird so don't attempt to delete it.
        stored->data_addr[i] = 0;
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}

void EntryImpl::IncrementIoCount() {
  backend_->IncrementIoCount();
}

void EntryImpl::DecrementIoCount() {
  if (backend_.get())
    backend_->DecrementIoCount();
}

void EntryImpl::OnEntryCreated(BackendImpl* backend) {
  // Just grab a reference to the background queue.
  background_queue_ = backend->GetBackgroundQueue();
}

void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
  node_.Data()->last_used = last_used.ToInternalValue();
  node_.Data()->last_modified = last_modified.ToInternalValue();
  node_.set_modified();
}

void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
  if (!backend_.get())
    return;

  switch (op) {
    case kRead:
      CACHE_UMA(AGE_MS, "ReadTime", 0, start);
      break;
    case kWrite:
      CACHE_UMA(AGE_MS, "WriteTime", 0, start);
      break;
    case kSparseRead:
      CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
      break;
    case kSparseWrite:
      CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
      break;
    case kAsyncIO:
      CACHE_UMA(AGE_MS, "AsyncIOTime", 0, start);
      break;
    case kReadAsync1:
      CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", 0, start);
      break;
    case kWriteAsync1:
      CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", 0, start);
      break;
    default:
      NOTREACHED();
  }
}

void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
  DCHECK(!net_log_.net_log());
  net_log_ = net::BoundNetLog::Make(
      net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
  net_log_.BeginEvent(
      net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
      CreateNetLogEntryCreationCallback(this, created));
}

const net::BoundNetLog& EntryImpl::net_log() const {
  return net_log_;
}

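// Returns the number of entry-table blocks needed to store an entry whose key
// takes |key_size| bytes. Keys longer than kMaxInternalKeyLength are stored
// outside the entry record, so those entries also use a single block.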
// static
int EntryImpl::NumBlocksForEntry(int key_size) {
  // The longest key that can be stored using one block.
  int key1_len =
      static_cast<int>(sizeof(EntryStore) - offsetof(EntryStore, key));

  if (key_size < key1_len || key_size > kMaxInternalKeyLength)
    return 1;

  return ((key_size - key1_len) / 256 + 2);
}

// ------------------------------------------------------------------------

void EntryImpl::Doom() {
  if (background_queue_.get())
    background_queue_->DoomEntryImpl(this);
}

void EntryImpl::Close() {
  if (background_queue_.get())
    background_queue_->CloseEntryImpl(this);
}

std::string EntryImpl::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  if (key_len <= kMaxInternalKeyLength)
    return std::string(entry->Data()->key);

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
                                                                kKeyFileIndex);
  if (!key_file)
    return std::string();

  ++key_len;  // We store a trailing \0 on disk that we read back below.
  if (!offset && key_file->GetLength() != static_cast<size_t>(key_len))
    return std::string();

  if (!key_file->Read(WriteInto(&key_, key_len), key_len, offset))
    key_.clear();
  return key_;
}

Time EntryImpl::GetLastUsed() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_used);
}

Time EntryImpl::GetLastModified() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_modified);
}

int32 EntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;

  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  return entry->Data()->data_size[index];
}

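// For the IO methods below, a null |callback| makes the operation run
// synchronously (calling the *Impl version directly); otherwise the work is
// posted to the background queue and net::ERR_IO_PENDING is returned.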
int EntryImpl::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
                        const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadDataImpl(index, offset, buf, buf_len, callback);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadData(this, index, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                         const CompletionCallback& callback, bool truncate) {
  if (callback.is_null())
    return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
                               callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                              const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                               const CompletionCallback& callback) {
  if (callback.is_null())
    return WriteSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->WriteSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
                                 const CompletionCallback& callback) {
  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->GetAvailableRange(this, offset, len, start, callback);
  return net::ERR_IO_PENDING;
}

bool EntryImpl::CouldBeSparse() const {
  if (sparse_.get())
    return true;

  scoped_ptr<SparseControl> sparse;
  sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
  return sparse->CouldBeSparse();
}

void EntryImpl::CancelSparseIO() {
  if (background_queue_.get())
    background_queue_->CancelSparseIO(this);
}

int EntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  if (!sparse_.get())
    return net::OK;

  if (!background_queue_.get())
    return net::ERR_UNEXPECTED;

  background_queue_->ReadyForSparseIO(this, callback);
  return net::ERR_IO_PENDING;
}

// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that
// any unused block is filled with zeros), and to simplify handling of partial
// reads and writes (we don't have to worry about returning data that belongs
// to a previous cache entry just because a range was never fully written).
EntryImpl::~EntryImpl() {
  if (!backend_.get()) {
    entry_.clear_modified();
    node_.clear_modified();
    return;
  }
  Log("~EntryImpl in");

  // Save the sparse info to disk. This will generate IO for this entry and
  // maybe for a child entry, so it is important to do it before deleting this
  // entry.
  sparse_.reset();

  // Remove this entry from the list of open entries.
  backend_->OnEntryDestroyBegin(entry_.address());

  if (doomed_) {
    DeleteEntryData(true);
  } else {
#if defined(NET_BUILD_STRESS_CACHE)
    SanityCheck();
#endif
    net_log_.AddEvent(net::NetLog::TYPE_ENTRY_CLOSE);
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      if (user_buffers_[index].get()) {
        ret = Flush(index, 0);
        if (!ret)
          LOG(ERROR) << "Failed to save user data";
      }
      if (unreported_size_[index]) {
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL);
  backend_->OnEntryDestroyEnd();
}

// ------------------------------------------------------------------------

int EntryImpl::InternalReadData(int index, int offset,
                                IOBuffer* buf, int buf_len,
                                const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  TimeTicks start = TimeTicks::Now();

  if (offset + buf_len > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  backend_->OnEvent(Stats::READ_DATA);
  backend_->OnRead(buf_len);

  Addr address(entry_.Data()->data_addr[index]);
  int eof = address.is_initialized() ? entry_size : 0;
  if (user_buffers_[index].get() &&
      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
    // Complete the operation locally.
    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
    ReportIOTime(kRead, start);
    return buf_len;
  }

  address.set_value(entry_.Data()->data_addr[index]);
  DCHECK(address.is_initialized());
  if (!address.is_initialized()) {
    DoomImpl();
    return net::ERR_FAILED;
  }

  File* file = GetBackingFile(address, index);
  if (!file) {
    DoomImpl();
    LOG(ERROR) << "No file for " << std::hex << address.value();
    return net::ERR_FILE_NOT_FOUND;
  }

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  }

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_READ_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
    if (io_callback)
      io_callback->Discard();
    DoomImpl();
    return net::ERR_CACHE_READ_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kReadAsync1, start_async);

  ReportIOTime(kRead, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

int EntryImpl::InternalWriteData(int index, int offset,
                                 IOBuffer* buf, int buf_len,
                                 const CompletionCallback& callback,
                                 bool truncate) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_.get())
    return net::ERR_UNEXPECTED;

  int max_file_size = backend_->MaxFileSize();

  // |offset| and |buf_len| are not negative at this point, but their sum could
  // have overflowed to a negative number.
  if (offset > max_file_size || buf_len > max_file_size ||
      offset + buf_len > max_file_size) {
    int size = offset + buf_len;
    if (size <= max_file_size)
      size = kint32max;
    backend_->TooMuchStorageRequested(size);
    return net::ERR_FAILED;
  }

  TimeTicks start = TimeTicks::Now();

  // Read the size at this point (it may change inside prepare).
  int entry_size = entry_.Data()->data_size[index];
  bool extending = entry_size < offset + buf_len;
  truncate = truncate && entry_size > offset + buf_len;
  Trace("To PrepareTarget 0x%x", entry_.address().value());
  if (!PrepareTarget(index, offset, buf_len, truncate))
    return net::ERR_FAILED;

  Trace("From PrepareTarget 0x%x", entry_.address().value());
  if (extending || truncate)
    UpdateSize(index, entry_size, offset + buf_len);

  UpdateRank(true);

  backend_->OnEvent(Stats::WRITE_DATA);
  backend_->OnWrite(buf_len);

  if (user_buffers_[index].get()) {
    // Complete the operation locally.
    user_buffers_[index]->Write(offset, buf, buf_len);
    ReportIOTime(kWrite, start);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
  if (offset + buf_len == 0) {
    if (truncate) {
      DCHECK(!address.is_initialized());
    }
    return 0;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FILE_NOT_FOUND;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  } else if (truncate || (extending && !buf_len)) {
    if (!file->SetLength(offset + buf_len))
      return net::ERR_FAILED;
  }

  if (!buf_len)
    return 0;

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_WRITE_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
                   &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_CACHE_WRITE_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kWriteAsync1, start_async);

  ReportIOTime(kWrite, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

// ------------------------------------------------------------------------

bool EntryImpl::CreateDataBlock(int index, int size) {
  DCHECK(index >= 0 && index < kNumStreams);

  Addr address(entry_.Data()->data_addr[index]);
  if (!CreateBlock(size, &address))
    return false;

  entry_.Data()->data_addr[index] = address.value();
  entry_.Store();
  return true;
}

bool EntryImpl::CreateBlock(int size, Addr* address) {
  DCHECK(!address->is_initialized());
  if (!backend_.get())
    return false;

  FileType file_type = Addr::RequiredFileType(size);
  if (EXTERNAL == file_type) {
    if (size > backend_->MaxFileSize())
      return false;
    if (!backend_->CreateExternalFile(address))
      return false;
  } else {
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!backend_->CreateBlock(file_type, num_blocks, address))
      return false;
  }
  return true;
}

// Note that this method may end up modifying a block file so upon return the
// involved block will be free, and could be reused for something else. If there
// is a crash after that point (and maybe before returning to the caller), the
// entry will be left dirty... and at some point it will be discarded; it is
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImpl::DeleteData(Addr address, int index) {
  DCHECK(backend_.get());
  if (!address.is_initialized())
    return;
  if (address.is_separate_file()) {
    int failure = !DeleteCacheFile(backend_->GetFileName(address));
    CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
    if (failure) {
      LOG(ERROR) << "Failed to delete " <<
          backend_->GetFileName(address).value() << " from the cache.";
    }
    if (files_[index].get())
      files_[index] = NULL;  // Releases the object.
  } else {
    backend_->DeleteBlock(address, true);
  }
}

void EntryImpl::UpdateRank(bool modified) {
  if (!backend_.get())
    return;

  if (!doomed_) {
    // Everything is handled by the backend.
    backend_->UpdateRank(this, modified);
    return;
  }

  Time current = Time::Now();
  node_.Data()->last_used = current.ToInternalValue();

  if (modified)
    node_.Data()->last_modified = current.ToInternalValue();
}

File* EntryImpl::GetBackingFile(Addr address, int index) {
  if (!backend_.get())
    return NULL;

  File* file;
  if (address.is_separate_file())
    file = GetExternalFile(address, index);
  else
    file = backend_->File(address);
  return file;
}

File* EntryImpl::GetExternalFile(Addr address, int index) {
  DCHECK(index >= 0 && index <= kKeyFileIndex);
  if (!files_[index].get()) {
    // For a key file, use mixed mode IO.
    scoped_refptr<File> file(new File(kKeyFileIndex == index));
    if (file->Init(backend_->GetFileName(address)))
      files_[index].swap(file);
  }
  return files_[index].get();
}

// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't know yet the final data size), and for some of the data
// that ends up on external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to
// date. This means that as soon as there is a chance to get confused about what
// is the most recent version of some part of a file, we'll flush the buffer and
// reuse it for the new data. Keep in mind that the normal use pattern is quite
// simple (write sequentially from the beginning), so we optimize for handling
// that case.
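//
// For example, in the code below a write that touches a stream currently
// stored in a block file first moves the stored bytes into a local buffer
// (MoveToLocalBuffer), so later changes in size don't require reallocating
// blocks on disk.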
bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
                              bool truncate) {
  if (truncate)
    return HandleTruncation(index, offset, buf_len);

  if (!offset && !buf_len)
    return true;

  Addr address(entry_.Data()->data_addr[index]);
  if (address.is_initialized()) {
    if (address.is_block_file() && !MoveToLocalBuffer(index))
      return false;

    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
      // We are about to create a buffer for the first 16KB, make sure that we
      // preserve existing data.
      if (!CopyToLocalBuffer(index))
        return false;
    }
  }

  if (!user_buffers_[index].get())
    user_buffers_[index].reset(new UserBuffer(backend_.get()));

  return PrepareBuffer(index, offset, buf_len);
}

// We get to this function with some data already stored. If there is a
// truncation that results in data stored internally, we'll explicitly
// handle the case here.
bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  if (!new_size) {
    // This is by far the most common scenario.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Just truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);
        return true;
      }

      // Just discard our buffer.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

  return ImportSeparateFile(index, offset + buf_len);
}

bool EntryImpl::CopyToLocalBuffer(int index) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
  user_buffers_[index].reset(new UserBuffer(backend_.get()));
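  // A zero-length write at offset |len| just resizes the buffer so that it
  // holds |len| bytes (see UserBuffer::Write).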
  user_buffers_[index]->Write(len, NULL, 0);

  File* file = GetBackingFile(address, index);
  int offset = 0;

  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  if (!file ||
      !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
    user_buffers_[index].reset();
    return false;
  }
  return true;
}

bool EntryImpl::MoveToLocalBuffer(int index) {
  if (!CopyToLocalBuffer(index))
    return false;

  Addr address(entry_.Data()->data_addr[index]);
  entry_.Data()->data_addr[index] = 0;
  entry_.Store();
  DeleteData(address, index);

  // If we lose this entry we'll see it as zero sized.
  int len = entry_.Data()->data_size[index];
  backend_->ModifyStorageSize(len - unreported_size_[index], 0);
  unreported_size_[index] = len;
  return true;
}

bool EntryImpl::ImportSeparateFile(int index, int new_size) {
  if (entry_.Data()->data_size[index] > new_size)
    UpdateSize(index, entry_.Data()->data_size[index], new_size);

  return MoveToLocalBuffer(index);
}

bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
      offset > entry_.Data()->data_size[index]) {
    // We are about to extend the buffer or the file (with zeros), so make sure
    // that we are not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      if (!Flush(index, 0))
        return false;
      // There is an actual file already, and we don't want to keep track of
      // its length so we let this operation go straight to disk.
      // The only case when a buffer is allowed to extend the file (as in fill
      // with zeros before the start) is when there is no file yet to extend.
      user_buffers_[index].reset();
      return true;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    if (!Flush(index, offset + buf_len))
      return false;

    // Let's try again.
    if (offset > user_buffers_[index]->End() ||
        !user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}

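// Writes the contents of the user buffer for stream |index| to disk,
// allocating a data block large enough for at least |min_len| bytes if the
// stream doesn't have one yet.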
bool EntryImpl::Flush(int index, int min_len) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(user_buffers_[index].get());
  DCHECK(!address.is_initialized() || address.is_separate_file());
  DVLOG(3) << "Flush";

  int size = std::max(entry_.Data()->data_size[index], min_len);
  if (size && !address.is_initialized() && !CreateDataBlock(index, size))
    return false;

  if (!entry_.Data()->data_size[index]) {
    DCHECK(!user_buffers_[index]->Size());
    return true;
  }

  address.set_value(entry_.Data()->data_addr[index]);

  int len = user_buffers_[index]->Size();
  int offset = user_buffers_[index]->Start();
  if (!len && !offset)
    return true;

  if (address.is_block_file()) {
    DCHECK_EQ(len, entry_.Data()->data_size[index]);
    DCHECK(!offset);
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return false;

  if (!file->Write(user_buffers_[index]->Data(), len, offset, NULL, NULL))
    return false;
  user_buffers_[index]->Reset();

  return true;
}

void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
  if (entry_.Data()->data_size[index] == new_size)
    return;

  unreported_size_[index] += new_size - old_size;
  entry_.Data()->data_size[index] = new_size;
  entry_.set_modified();
}

int EntryImpl::InitSparseData() {
  if (sparse_.get())
    return net::OK;

  // Use a local variable so that sparse_ never goes from 'valid' to NULL.
  scoped_ptr<SparseControl> sparse(new SparseControl(this));
  int result = sparse->Init();
  if (net::OK == result)
    sparse_.swap(sparse);

  return result;
}

void EntryImpl::SetEntryFlags(uint32 flags) {
  entry_.Data()->flags |= flags;
  entry_.set_modified();
}

uint32 EntryImpl::GetEntryFlags() {
  return entry_.Data()->flags;
}

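// Hands out the data of stream |index|, either as a heap-allocated copy of
// the in-memory buffer (returned in |buffer|) or as the address of the data
// on disk (returned in |address|). In the second case the data is detached
// from this entry, so destroying the entry won't delete it.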
void EntryImpl::GetData(int index, char** buffer, Addr* address) {
  DCHECK(backend_.get());
  if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
      !user_buffers_[index]->Start()) {
    // The data is already in memory, just copy it and we're done.
    int data_len = entry_.Data()->data_size[index];
    if (data_len <= user_buffers_[index]->Size()) {
      DCHECK(!user_buffers_[index]->Start());
      *buffer = new char[data_len];
      memcpy(*buffer, user_buffers_[index]->Data(), data_len);
      return;
    }
  }

  // Bad news: we'd have to read the info from disk so instead we'll just tell
  // the caller where to read from.
  *buffer = NULL;
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                    unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}

void EntryImpl::Log(const char* msg) {
  int dirty = 0;
  if (node_.HasData()) {
    dirty = node_.Data()->dirty;
  }

  Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
        entry_.address().value(), node_.address().value());

  Trace("  data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0],
        entry_.Data()->data_addr[1], entry_.Data()->long_key);

  Trace("  doomed: %d 0x%x", doomed_, dirty);
}

}  // namespace disk_cache