// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/entry_impl.h"

#include "base/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/string_util.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/backend_impl.h"
#include "net/disk_cache/bitmap.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/hash.h"
#include "net/disk_cache/histogram_macros.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/disk_cache/sparse_control.h"

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

// Index for the file used to store the key, if any (files_[kKeyFileIndex]).
const int kKeyFileIndex = 3;
// This class implements FileIOCallback to relay the result of a file IO
// operation to the net::CompletionCallback provided by the caller.
class SyncCallback: public disk_cache::FileIOCallback {
 public:
  // |end_event_type| is the event type to log on completion.  Logs nothing on
  // discard, or when the NetLog is not set to log all events.
  SyncCallback(disk_cache::EntryImpl* entry, net::IOBuffer* buffer,
               net::CompletionCallback* callback,
               net::NetLog::EventType end_event_type)
      : entry_(entry), callback_(callback), buf_(buffer),
        start_(TimeTicks::Now()), end_event_type_(end_event_type) {
    entry->AddRef();
    entry->IncrementIoCount();
  }
  ~SyncCallback() {}

  virtual void OnFileIOComplete(int bytes_copied);
  void Discard();

 private:
  disk_cache::EntryImpl* entry_;
  net::CompletionCallback* callback_;
  scoped_refptr<net::IOBuffer> buf_;
  TimeTicks start_;
  const net::NetLog::EventType end_event_type_;

  DISALLOW_COPY_AND_ASSIGN(SyncCallback);
};

void SyncCallback::OnFileIOComplete(int bytes_copied) {
  entry_->DecrementIoCount();
  if (callback_) {
    if (entry_->net_log().IsLoggingAllEvents()) {
      entry_->net_log().EndEvent(
          end_event_type_,
          make_scoped_refptr(
              new disk_cache::ReadWriteCompleteParameters(bytes_copied)));
    }
    entry_->ReportIOTime(disk_cache::EntryImpl::kAsyncIO, start_);
    callback_->Run(bytes_copied);
  }
  entry_->Release();
  delete this;
}

void SyncCallback::Discard() {
  callback_ = NULL;
  buf_ = NULL;
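  // Run the completion path without a callback, to release the entry
  // reference and delete |this|.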
  OnFileIOComplete(0);
}

const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace

namespace disk_cache {

// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write to
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
// zero. The buffer grows up to a size determined by the backend, to keep the
// total memory used under control.
class EntryImpl::UserBuffer {
 public:
  explicit UserBuffer(BackendImpl* backend)
      : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) {
    buffer_.reserve(kMaxBlockSize);
  }
  ~UserBuffer() {
    if (backend_)
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }

  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, net::IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
  bool PreRead(int eof, int offset, int* len);
  // Reads up to |len| bytes into |buf| starting at the given |offset|.
  // Returns the number of bytes copied.
  int Read(int offset, net::IOBuffer* buf, int len);

  // Prepare this buffer for reuse.
  void Reset();

  char* Data() { return buffer_.size() ? &buffer_[0] : NULL; }
  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }
  int End() { return offset_ + Size(); }

 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImpl> backend_;
  int offset_;
  std::vector<char> buffer_;
  bool grow_allowed_;
  DISALLOW_COPY_AND_ASSIGN(UserBuffer);
};

bool EntryImpl::UserBuffer::PreWrite(int offset, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // We don't want to write before our current start.
  if (offset < offset_)
    return false;

  // Let's get the common case out of the way.
  if (offset + len <= capacity())
    return true;

  // If we are writing to the first 16K (kMaxBlockSize), we want to keep the
  // buffer offset_ at 0.
  if (!Size() && offset > kMaxBlockSize)
    return GrowBuffer(len, kMaxBufferSize);

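  // In the general case the buffer may grow up to 20% beyond kMaxBufferSize.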
  int required = offset - offset_ + len;
  return GrowBuffer(required, kMaxBufferSize * 6 / 5);
}

void EntryImpl::UserBuffer::Truncate(int offset) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;

  offset -= offset_;
  if (Size() >= offset)
    buffer_.resize(offset);
}

void EntryImpl::UserBuffer::Write(int offset, net::IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer write at " << offset << " current " << offset_;

  if (!Size() && offset > kMaxBlockSize)
    offset_ = offset;

  offset -= offset_;

  if (offset > Size())
    buffer_.resize(offset);

  if (!len)
    return;

  char* buffer = buf->data();
  int valid_len = Size() - offset;
  int copy_len = std::min(valid_len, len);
  if (copy_len) {
    memcpy(&buffer_[offset], buffer, copy_len);
    len -= copy_len;
    buffer += copy_len;
  }
  if (!len)
    return;

  buffer_.insert(buffer_.end(), buffer, buffer + len);
}

bool EntryImpl::UserBuffer::PreRead(int eof, int offset, int* len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(*len, 0);

  if (offset < offset_) {
    // We are reading before this buffer.
    if (offset >= eof)
      return true;

    // If the read overlaps with the buffer, change its length so that there is
    // no overlap.
    *len = std::min(*len, offset_ - offset);
    *len = std::min(*len, eof - offset);

    // We should read from disk.
    return false;
  }

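  // An empty buffer cannot satisfy any part of the read.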
  if (!Size())
    return false;

  // See if we can fulfill the first part of the operation.
  return (offset - offset_ < Size());
}

int EntryImpl::UserBuffer::Read(int offset, net::IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(len, 0);
  DCHECK(Size() || offset < offset_);

  int clean_bytes = 0;
  if (offset < offset_) {
    // We don't have a file, so let's fill the first part with zeros.
    clean_bytes = std::min(offset_ - offset, len);
    memset(buf->data(), 0, clean_bytes);
    if (len == clean_bytes)
      return len;
    offset = offset_;
    len -= clean_bytes;
  }

  int start = offset - offset_;
  int available = Size() - start;
  DCHECK_GE(start, 0);
  DCHECK_GE(available, 0);
  len = std::min(len, available);
  memcpy(buf->data() + clean_bytes, &buffer_[start], len);
  return len + clean_bytes;
}

void EntryImpl::UserBuffer::Reset() {
  if (!grow_allowed_) {
    if (backend_)
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
    grow_allowed_ = true;
    std::vector<char> tmp;
    buffer_.swap(tmp);
    buffer_.reserve(kMaxBlockSize);
  }
  offset_ = 0;
  buffer_.clear();
}

bool EntryImpl::UserBuffer::GrowBuffer(int required, int limit) {
  DCHECK_GE(required, 0);
  int current_size = capacity();
  if (required <= current_size)
    return true;

  if (required > limit)
    return false;

  if (!backend_)
    return false;

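  // Grow aggressively: add at least 64 KB (four blocks), and at least double
  // the current capacity, up to |limit|.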
  int to_add = std::max(required - current_size, kMaxBlockSize * 4);
  to_add = std::max(current_size, to_add);
  required = std::min(current_size + to_add, limit);

  grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
  if (!grow_allowed_)
    return false;

  DVLOG(3) << "Buffer grow to " << required;

  buffer_.reserve(required);
  return true;
}

// ------------------------------------------------------------------------

EntryImpl::EntryImpl(BackendImpl* backend, Addr address, bool read_only)
    : entry_(NULL, Addr(0)), node_(NULL, Addr(0)), backend_(backend),
      doomed_(false), read_only_(read_only), dirty_(false) {
  entry_.LazyInit(backend->File(address), address);
  for (int i = 0; i < kNumStreams; i++) {
    unreported_size_[i] = 0;
  }
}

void EntryImpl::DoomImpl() {
  if (doomed_)
    return;

  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
  backend_->InternalDoomEntry(this);
}

int EntryImpl::ReadDataImpl(int index, int offset, net::IOBuffer* buf,
                            int buf_len, CompletionCallback* callback) {
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        make_scoped_refptr(
            new ReadWriteDataParameters(index, offset, buf_len, false)));
  }

  int result = InternalReadData(index, offset, buf, buf_len, callback);

  if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        make_scoped_refptr(new ReadWriteCompleteParameters(result)));
  }
  return result;
}

int EntryImpl::WriteDataImpl(int index, int offset, net::IOBuffer* buf,
                             int buf_len, CompletionCallback* callback,
                             bool truncate) {
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        make_scoped_refptr(
            new ReadWriteDataParameters(index, offset, buf_len, truncate)));
  }

  int result = InternalWriteData(index, offset, buf, buf_len, callback,
                                 truncate);

  if (result != net::ERR_IO_PENDING && net_log_.IsLoggingAllEvents()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        make_scoped_refptr(new ReadWriteCompleteParameters(result)));
  }
  return result;
}

int EntryImpl::ReadSparseDataImpl(int64 offset, net::IOBuffer* buf, int buf_len,
                                  CompletionCallback* callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
                            callback);
  ReportIOTime(kSparseRead, start);
  return result;
}

int EntryImpl::WriteSparseDataImpl(int64 offset, net::IOBuffer* buf,
                                   int buf_len, CompletionCallback* callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
                            buf_len, callback);
  ReportIOTime(kSparseWrite, start);
  return result;
}

int EntryImpl::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  return sparse_->GetAvailableRange(offset, len, start);
}

void EntryImpl::CancelSparseIOImpl() {
  if (!sparse_.get())
    return;

  sparse_->CancelIO();
}

int EntryImpl::ReadyForSparseIOImpl(CompletionCallback* callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(callback);
}

uint32 EntryImpl::GetHash() {
  return entry_.Data()->hash;
}

bool EntryImpl::CreateEntry(Addr node_address, const std::string& key,
                            uint32 hash) {
  Trace("Create entry In");
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
  memset(node, 0, sizeof(RankingsNode));
  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    if (!key_file || !key_file->Write(key.data(), key.size(), offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    memcpy(entry_store->key, key.data(), key.size());
    entry_store->key[key.size()] = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
  CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  Log("Create Entry ");
  return true;
}

bool EntryImpl::IsSameEntry(const std::string& key, uint32 hash) {
  if (entry_.Data()->hash != hash ||
      static_cast<size_t>(entry_.Data()->key_len) != key.size())
    return false;

  std::string my_key = GetKey();
  return key == my_key;
}

void EntryImpl::InternalDoom() {
  net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM, NULL);
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}

void EntryImpl::DeleteEntryData(bool everything) {
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  if (GetDataSize(0))
    CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
  if (GetDataSize(1))
    CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                      unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  // Note that at this point node_ and entry_ are just two blocks of data, and
  // even if they reference each other, nobody should be referencing them.

  Addr address(entry_.Data()->long_key);
  DeleteData(address, kKeyFileIndex);
  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);

  backend_->DeleteBlock(entry_.address(), true);

  if (!LeaveRankingsBehind())
    backend_->DeleteBlock(node_.address(), true);
}

CacheAddr EntryImpl::GetNextAddress() {
  return entry_.Data()->next;
}

void EntryImpl::SetNextAddress(Addr address) {
  DCHECK_NE(address.value(), entry_.address().value());
  entry_.Data()->next = address.value();
  bool success = entry_.Store();
  DCHECK(success);
}

bool EntryImpl::LoadNodeAddress() {
  Addr address(entry_.Data()->rankings_node);
  if (!node_.LazyInit(backend_->File(address), address))
    return false;
  return node_.Load();
}

bool EntryImpl::Update() {
  DCHECK(node_.HasData());

  if (read_only_)
    return true;

  RankingsNode* rankings = node_.Data();
  if (!rankings->dirty) {
    rankings->dirty = backend_->GetCurrentEntryId();
    if (!node_.Store())
      return false;
  }
  return true;
}

void EntryImpl::SetDirtyFlag(int32 current_id) {
  DCHECK(node_.HasData());
  // We are checking if the entry is valid or not. If there is a pointer here,
  // we should not be checking the entry.
  if (node_.Data()->dummy)
    dirty_ = true;

  if (node_.Data()->dirty && current_id != node_.Data()->dirty)
    dirty_ = true;

  if (!current_id)
    dirty_ = true;
}

void EntryImpl::SetPointerForInvalidEntry(int32 new_id) {
  node_.Data()->dirty = new_id;
  node_.Data()->dummy = 0;
  node_.Store();
}

bool EntryImpl::LeaveRankingsBehind() {
  return !node_.Data()->contents;
}

// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and to values that are set at entry creation. Basically,
// even if there is something wrong with this entry, we want to see if it is
// possible to load the rankings node and delete both of them together.
bool EntryImpl::SanityCheck() {
  EntryStore* stored = entry_.Data();
  if (!stored->rankings_node || stored->key_len <= 0)
    return false;

  if (stored->reuse_count < 0 || stored->refetch_count < 0)
    return false;

  Addr rankings_addr(stored->rankings_node);
  if (!rankings_addr.is_initialized() || rankings_addr.is_separate_file() ||
      rankings_addr.file_type() != RANKINGS || rankings_addr.num_blocks() != 1)
    return false;

  Addr next_addr(stored->next);
  if (next_addr.is_initialized() &&
      (next_addr.is_separate_file() || next_addr.file_type() != BLOCK_256))
    return false;

  if (!rankings_addr.SanityCheck() || !next_addr.SanityCheck())
    return false;

  if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
    return false;

  Addr key_addr(stored->long_key);
  if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
      (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
    return false;

  if (!key_addr.SanityCheck())
    return false;

  if (key_addr.is_initialized() &&
      ((stored->key_len <= kMaxBlockSize && key_addr.is_separate_file()) ||
      (stored->key_len > kMaxBlockSize && key_addr.is_block_file())))
    return false;

  int num_blocks = NumBlocksForEntry(stored->key_len);
  if (entry_.address().num_blocks() != num_blocks)
    return false;

  return true;
}

bool EntryImpl::DataSanityCheck() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // The key must be NULL terminated.
  if (!key_addr.is_initialized() && stored->key[stored->key_len])
    return false;

  if (stored->hash != Hash(GetKey()))
    return false;

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_size < 0)
      return false;
    if (!data_size && data_addr.is_initialized())
      return false;
    if (!data_addr.SanityCheck())
      return false;
    if (!data_size)
      continue;
    if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
      return false;
    if (data_size > kMaxBlockSize && data_addr.is_block_file())
      return false;
  }
  return true;
}

void EntryImpl::FixForDelete() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  if (!key_addr.is_initialized())
    stored->key[stored->key_len] = '\0';

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_addr.is_initialized()) {
      if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
          (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
          !data_addr.SanityCheck()) {
        // The address is weird so don't attempt to delete it.
        stored->data_addr[i] = 0;
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}

void EntryImpl::IncrementIoCount() {
  backend_->IncrementIoCount();
}

void EntryImpl::DecrementIoCount() {
  backend_->DecrementIoCount();
}

void EntryImpl::SetTimes(base::Time last_used, base::Time last_modified) {
  node_.Data()->last_used = last_used.ToInternalValue();
  node_.Data()->last_modified = last_modified.ToInternalValue();
  node_.set_modified();
}

void EntryImpl::ReportIOTime(Operation op, const base::TimeTicks& start) {
  int group = backend_->GetSizeGroup();
  switch (op) {
    case kRead:
      CACHE_UMA(AGE_MS, "ReadTime", group, start);
      break;
    case kWrite:
      CACHE_UMA(AGE_MS, "WriteTime", group, start);
      break;
    case kSparseRead:
      CACHE_UMA(AGE_MS, "SparseReadTime", 0, start);
      break;
    case kSparseWrite:
      CACHE_UMA(AGE_MS, "SparseWriteTime", 0, start);
      break;
    case kAsyncIO:
      CACHE_UMA(AGE_MS, "AsyncIOTime", group, start);
      break;
    default:
      NOTREACHED();
  }
}

void EntryImpl::BeginLogging(net::NetLog* net_log, bool created) {
  DCHECK(!net_log_.net_log());
  net_log_ = net::BoundNetLog::Make(
      net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
  net_log_.BeginEvent(
      net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
      make_scoped_refptr(new EntryCreationParameters(GetKey(), created)));
}

const net::BoundNetLog& EntryImpl::net_log() const {
  return net_log_;
}

// static
int EntryImpl::NumBlocksForEntry(int key_size) {
  // The longest key that can be stored using one block.
  int key1_len =
      static_cast<int>(sizeof(EntryStore) - offsetof(EntryStore, key));

  if (key_size < key1_len || key_size > kMaxInternalKeyLength)
    return 1;

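  // Each additional 256-byte block adds 256 characters of key capacity; the
  // +2 accounts for the first block and the terminating null character.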
  return ((key_size - key1_len) / 256 + 2);
}

// ------------------------------------------------------------------------

void EntryImpl::Doom() {
  backend_->background_queue()->DoomEntryImpl(this);
}

void EntryImpl::Close() {
  backend_->background_queue()->CloseEntryImpl(this);
}

std::string EntryImpl::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  if (key_len <= kMaxInternalKeyLength)
    return std::string(entry->Data()->key);

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
  File* key_file = const_cast<EntryImpl*>(this)->GetBackingFile(address,
                                                                kKeyFileIndex);
  if (!key_file)
    return std::string();

  if (!offset && key_file->GetLength() != static_cast<size_t>(key_len + 1))
    return std::string();

  if (!key_file->Read(WriteInto(&key_, key_len + 1), key_len + 1, offset))
    key_.clear();
  return key_;
}

Time EntryImpl::GetLastUsed() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_used);
}

Time EntryImpl::GetLastModified() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_modified);
}

int32 EntryImpl::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;

  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  return entry->Data()->data_size[index];
}

int EntryImpl::ReadData(int index, int offset, net::IOBuffer* buf, int buf_len,
                        net::CompletionCallback* callback) {
  if (!callback)
    return ReadDataImpl(index, offset, buf, buf_len, callback);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  backend_->background_queue()->ReadData(this, index, offset, buf, buf_len,
                                         callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteData(int index, int offset, net::IOBuffer* buf, int buf_len,
                         CompletionCallback* callback, bool truncate) {
  if (!callback)
    return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  backend_->background_queue()->WriteData(this, index, offset, buf, buf_len,
                                          truncate, callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
                              net::CompletionCallback* callback) {
  if (!callback)
    return ReadSparseDataImpl(offset, buf, buf_len, callback);

  backend_->background_queue()->ReadSparseData(this, offset, buf, buf_len,
                                               callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
                               net::CompletionCallback* callback) {
  if (!callback)
    return WriteSparseDataImpl(offset, buf, buf_len, callback);

  backend_->background_queue()->WriteSparseData(this, offset, buf, buf_len,
                                                callback);
  return net::ERR_IO_PENDING;
}

int EntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
                                 CompletionCallback* callback) {
  backend_->background_queue()->GetAvailableRange(this, offset, len, start,
                                                  callback);
  return net::ERR_IO_PENDING;
}

bool EntryImpl::CouldBeSparse() const {
  if (sparse_.get())
    return true;

  scoped_ptr<SparseControl> sparse;
  sparse.reset(new SparseControl(const_cast<EntryImpl*>(this)));
  return sparse->CouldBeSparse();
}

void EntryImpl::CancelSparseIO() {
  backend_->background_queue()->CancelSparseIO(this);
}

int EntryImpl::ReadyForSparseIO(net::CompletionCallback* callback) {
  if (!sparse_.get())
    return net::OK;

  backend_->background_queue()->ReadyForSparseIO(this, callback);
  return net::ERR_IO_PENDING;
}

// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that any
// unused block is filled with zeros), and to simplify the handling of partial
// reads and writes (we don't have to worry about returning data that belongs
// to a previous cache entry just because a range was not fully written).
EntryImpl::~EntryImpl() {
  Log("~EntryImpl in");

  // Save the sparse info to disk. This will generate IO for this entry and
  // maybe for a child entry, so it is important to do it before deleting this
  // entry.
  sparse_.reset();

  // Remove this entry from the list of open entries.
  backend_->OnEntryDestroyBegin(entry_.address());

  if (doomed_) {
    DeleteEntryData(true);
  } else {
    net_log_.AddEvent(net::NetLog::TYPE_ENTRY_CLOSE, NULL);
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      if (user_buffers_[index].get()) {
        if (!(ret = Flush(index, 0)))
          LOG(ERROR) << "Failed to save user data";
      }
      if (unreported_size_[index]) {
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
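      // Use an id that cannot match the current one, so the entry will be
      // seen as dirty if it is ever read back.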
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && !dirty_) {
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL, NULL);
  backend_->OnEntryDestroyEnd();
}

// ------------------------------------------------------------------------

int EntryImpl::InternalReadData(int index, int offset, net::IOBuffer* buf,
                                int buf_len, CompletionCallback* callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  TimeTicks start = TimeTicks::Now();

  if (offset + buf_len > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  backend_->OnEvent(Stats::READ_DATA);
  backend_->OnRead(buf_len);

  Addr address(entry_.Data()->data_addr[index]);
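  // If nothing has been stored on disk for this stream, the user buffer holds
  // all of the data.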
  int eof = address.is_initialized() ? entry_size : 0;
  if (user_buffers_[index].get() &&
      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
    // Complete the operation locally.
    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
    ReportIOTime(kRead, start);
    return buf_len;
  }

  address.set_value(entry_.Data()->data_addr[index]);
  DCHECK(address.is_initialized());
  if (!address.is_initialized())
    return net::ERR_FAILED;

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FAILED;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  }

  SyncCallback* io_callback = NULL;
  if (callback) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_READ_DATA);
  }

  bool completed;
  if (!file->Read(buf->data(), buf_len, file_offset, io_callback, &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_FAILED;
  }

  if (io_callback && completed)
    io_callback->Discard();

  ReportIOTime(kRead, start);
  return (completed || !callback) ? buf_len : net::ERR_IO_PENDING;
}

int EntryImpl::InternalWriteData(int index, int offset, net::IOBuffer* buf,
                                 int buf_len, CompletionCallback* callback,
                                 bool truncate) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  int max_file_size = backend_->MaxFileSize();

  // offset + buf_len could overflow and wrap to a negative number.
  if (offset > max_file_size || buf_len > max_file_size ||
      offset + buf_len > max_file_size) {
    int size = offset + buf_len;
    if (size <= max_file_size)
      size = kint32max;
    backend_->TooMuchStorageRequested(size);
    return net::ERR_FAILED;
  }

  TimeTicks start = TimeTicks::Now();

  // Read the size at this point (it may change inside prepare).
  int entry_size = entry_.Data()->data_size[index];
  bool extending = entry_size < offset + buf_len;
  truncate = truncate && entry_size > offset + buf_len;
  Trace("To PrepareTarget 0x%x", entry_.address().value());
  if (!PrepareTarget(index, offset, buf_len, truncate))
    return net::ERR_FAILED;

  Trace("From PrepareTarget 0x%x", entry_.address().value());
  if (extending || truncate)
    UpdateSize(index, entry_size, offset + buf_len);

  UpdateRank(true);

  backend_->OnEvent(Stats::WRITE_DATA);
  backend_->OnWrite(buf_len);

  if (user_buffers_[index].get()) {
    // Complete the operation locally.
    user_buffers_[index]->Write(offset, buf, buf_len);
    ReportIOTime(kWrite, start);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
  if (offset + buf_len == 0) {
    if (truncate) {
      DCHECK(!address.is_initialized());
    }
    return 0;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FAILED;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  } else if (truncate || (extending && !buf_len)) {
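    // External files need their length adjusted explicitly when truncating,
    // or when extending without any data to write.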
    if (!file->SetLength(offset + buf_len))
      return net::ERR_FAILED;
  }

  if (!buf_len)
    return 0;

  SyncCallback* io_callback = NULL;
  if (callback) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_WRITE_DATA);
  }

  bool completed;
  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
                   &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_FAILED;
  }

  if (io_callback && completed)
    io_callback->Discard();

  ReportIOTime(kWrite, start);
  return (completed || !callback) ? buf_len : net::ERR_IO_PENDING;
}

// ------------------------------------------------------------------------

bool EntryImpl::CreateDataBlock(int index, int size) {
  DCHECK(index >= 0 && index < kNumStreams);

  Addr address(entry_.Data()->data_addr[index]);
  if (!CreateBlock(size, &address))
    return false;

  entry_.Data()->data_addr[index] = address.value();
  entry_.Store();
  return true;
}

bool EntryImpl::CreateBlock(int size, Addr* address) {
  DCHECK(!address->is_initialized());

  FileType file_type = Addr::RequiredFileType(size);
  if (EXTERNAL == file_type) {
    if (size > backend_->MaxFileSize())
      return false;
    if (!backend_->CreateExternalFile(address))
      return false;
  } else {
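    // Round up to a whole number of blocks.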
    int num_blocks = (size + Addr::BlockSizeForFileType(file_type) - 1) /
                     Addr::BlockSizeForFileType(file_type);

    if (!backend_->CreateBlock(file_type, num_blocks, address))
      return false;
  }
  return true;
}

// Note that this method may end up modifying a block file so upon return the
// involved block will be free, and could be reused for something else. If there
// is a crash after that point (and maybe before returning to the caller), the
// entry will be left dirty... and at some point it will be discarded; it is
// important that the entry doesn't keep a reference to this address, or we'll
// end up deleting the contents of |address| once again.
void EntryImpl::DeleteData(Addr address, int index) {
  if (!address.is_initialized())
    return;
  if (address.is_separate_file()) {
    int failure = !DeleteCacheFile(backend_->GetFileName(address));
    CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
    if (failure) {
      LOG(ERROR) << "Failed to delete " <<
          backend_->GetFileName(address).value() << " from the cache.";
    }
    if (files_[index])
      files_[index] = NULL;  // Releases the object.
  } else {
    backend_->DeleteBlock(address, true);
  }
}

void EntryImpl::UpdateRank(bool modified) {
  if (!doomed_) {
    // Everything is handled by the backend.
    backend_->UpdateRank(this, modified);
    return;
  }

  Time current = Time::Now();
  node_.Data()->last_used = current.ToInternalValue();

  if (modified)
    node_.Data()->last_modified = current.ToInternalValue();
}

File* EntryImpl::GetBackingFile(Addr address, int index) {
  File* file;
  if (address.is_separate_file())
    file = GetExternalFile(address, index);
  else
    file = backend_->File(address);
  return file;
}

File* EntryImpl::GetExternalFile(Addr address, int index) {
  DCHECK(index >= 0 && index <= kKeyFileIndex);
  if (!files_[index].get()) {
    // For a key file, use mixed mode IO.
    scoped_refptr<File> file(new File(kKeyFileIndex == index));
    if (file->Init(backend_->GetFileName(address)))
      files_[index].swap(file);
  }
  return files_[index].get();
}

// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't know yet the final data size), and for some of the data
// that ends up on external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to
// date. This means that as soon as there is a chance to get confused about what
// is the most recent version of some part of a file, we'll flush the buffer and
// reuse it for the new data. Keep in mind that the normal use pattern is quite
// simple (write sequentially from the beginning), so we optimize for handling
// that case.
bool EntryImpl::PrepareTarget(int index, int offset, int buf_len,
                              bool truncate) {
  if (truncate)
    return HandleTruncation(index, offset, buf_len);

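  // A zero-length write at the start of the stream requires no preparation.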
  if (!offset && !buf_len)
    return true;

  Addr address(entry_.Data()->data_addr[index]);
  if (address.is_initialized()) {
    if (address.is_block_file() && !MoveToLocalBuffer(index))
      return false;

    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
      // We are about to create a buffer for the first 16KB, make sure that we
      // preserve existing data.
      if (!CopyToLocalBuffer(index))
        return false;
    }
  }

  if (!user_buffers_[index].get())
    user_buffers_[index].reset(new UserBuffer(backend_));

  return PrepareBuffer(index, offset, buf_len);
}

// We get to this function with some data already stored. If there is a
// truncation that results in data that can be stored internally, we'll
// explicitly handle that case here.
bool EntryImpl::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  if (!new_size) {
    // This is by far the most common scenario.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Just truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);
        return true;
      }

      // Just discard our buffer.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

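  // The remaining data fits in a single block, so move it to a local buffer
  // (deleting the external storage).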
  return ImportSeparateFile(index, offset + buf_len);
}

bool EntryImpl::CopyToLocalBuffer(int index) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
  user_buffers_[index].reset(new UserBuffer(backend_));
  user_buffers_[index]->Write(len, NULL, 0);

  File* file = GetBackingFile(address, index);
  int offset = 0;

  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  if (!file ||
      !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
    user_buffers_[index].reset();
    return false;
  }
  return true;
}

bool EntryImpl::MoveToLocalBuffer(int index) {
  if (!CopyToLocalBuffer(index))
    return false;

  Addr address(entry_.Data()->data_addr[index]);
  entry_.Data()->data_addr[index] = 0;
  entry_.Store();
  DeleteData(address, index);

  // If we lose this entry we'll see it as zero sized.
  int len = entry_.Data()->data_size[index];
  backend_->ModifyStorageSize(len - unreported_size_[index], 0);
  unreported_size_[index] = len;
  return true;
}

bool EntryImpl::ImportSeparateFile(int index, int new_size) {
  if (entry_.Data()->data_size[index] > new_size)
    UpdateSize(index, entry_.Data()->data_size[index], new_size);

  return MoveToLocalBuffer(index);
}

bool EntryImpl::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
      offset > entry_.Data()->data_size[index]) {
    // We are about to extend the buffer or the file (with zeros), so make sure
    // that we are not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      if (!Flush(index, 0))
        return false;
      // There is an actual file already, and we don't want to keep track of
      // its length so we let this operation go straight to disk.
      // The only case when a buffer is allowed to extend the file (as in fill
      // with zeros before the start) is when there is no file yet to extend.
      user_buffers_[index].reset();
      return true;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    if (!Flush(index, offset + buf_len))
      return false;

    // Let's try again.
    if (offset > user_buffers_[index]->End() ||
        !user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}

bool EntryImpl::Flush(int index, int min_len) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(user_buffers_[index].get());
  DCHECK(!address.is_initialized() || address.is_separate_file());
  DVLOG(3) << "Flush";

  int size = std::max(entry_.Data()->data_size[index], min_len);
  if (size && !address.is_initialized() && !CreateDataBlock(index, size))
    return false;

  if (!entry_.Data()->data_size[index]) {
    DCHECK(!user_buffers_[index]->Size());
    return true;
  }

  address.set_value(entry_.Data()->data_addr[index]);

  int len = user_buffers_[index]->Size();
  int offset = user_buffers_[index]->Start();
  if (!len && !offset)
    return true;

  if (address.is_block_file()) {
    DCHECK_EQ(len, entry_.Data()->data_size[index]);
    DCHECK(!offset);
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return false;

  if (!file->Write(user_buffers_[index]->Data(), len, offset, NULL, NULL))
    return false;
  user_buffers_[index]->Reset();

  return true;
}

void EntryImpl::UpdateSize(int index, int old_size, int new_size) {
  if (entry_.Data()->data_size[index] == new_size)
    return;

  unreported_size_[index] += new_size - old_size;
  entry_.Data()->data_size[index] = new_size;
  entry_.set_modified();
}

int EntryImpl::InitSparseData() {
  if (sparse_.get())
    return net::OK;

  // Use a local variable so that sparse_ never goes from 'valid' to NULL.
  scoped_ptr<SparseControl> sparse(new SparseControl(this));
  int result = sparse->Init();
  if (net::OK == result)
    sparse_.swap(sparse);

  return result;
}

void EntryImpl::SetEntryFlags(uint32 flags) {
  entry_.Data()->flags |= flags;
  entry_.set_modified();
}

uint32 EntryImpl::GetEntryFlags() {
  return entry_.Data()->flags;
}

void EntryImpl::GetData(int index, char** buffer, Addr* address) {
  if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
      !user_buffers_[index]->Start()) {
    // The data is already in memory, just copy it and we're done.
    int data_len = entry_.Data()->data_size[index];
    if (data_len <= user_buffers_[index]->Size()) {
      DCHECK(!user_buffers_[index]->Start());
      *buffer = new char[data_len];
      memcpy(*buffer, user_buffers_[index]->Data(), data_len);
      return;
    }
  }

  // Bad news: we'd have to read the info from disk so instead we'll just tell
  // the caller where to read from.
  *buffer = NULL;
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                    unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}

void EntryImpl::Log(const char* msg) {
  int dirty = 0;
  if (node_.HasData()) {
    dirty = node_.Data()->dirty;
  }

  Trace("%s 0x%p 0x%x 0x%x", msg, reinterpret_cast<void*>(this),
        entry_.address().value(), node_.address().value());

  Trace("  data: 0x%x 0x%x 0x%x", entry_.Data()->data_addr[0],
        entry_.Data()->data_addr[1], entry_.Data()->long_key);

  Trace("  doomed: %d 0x%x", doomed_, dirty);
}

}  // namespace disk_cache