simple_entry_impl.cc revision 58537e28ecd584eab876aee8be7156509866d23a
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_entry_impl.h"

#include <algorithm>
#include <cstring>
#include <vector>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/task_runner.h"
#include "base/task_runner_util.h"
#include "base/time/time.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_net_log_parameters.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"
#include "third_party/zlib/zlib.h"

namespace {

// Used in histograms, please only add entries at the end.
enum ReadResult {
  READ_RESULT_SUCCESS = 0,
  READ_RESULT_INVALID_ARGUMENT = 1,
  READ_RESULT_NONBLOCK_EMPTY_RETURN = 2,
  READ_RESULT_BAD_STATE = 3,
  READ_RESULT_FAST_EMPTY_RETURN = 4,
  READ_RESULT_SYNC_READ_FAILURE = 5,
  READ_RESULT_SYNC_CHECKSUM_FAILURE = 6,
  READ_RESULT_MAX = 7,
};

// Used in histograms, please only add entries at the end.
enum WriteResult {
  WRITE_RESULT_SUCCESS = 0,
  WRITE_RESULT_INVALID_ARGUMENT = 1,
  WRITE_RESULT_OVER_MAX_SIZE = 2,
  WRITE_RESULT_BAD_STATE = 3,
  WRITE_RESULT_SYNC_WRITE_FAILURE = 4,
  WRITE_RESULT_MAX = 5,
};

// Used in histograms, please only add entries at the end.
enum HeaderSizeChange {
  HEADER_SIZE_CHANGE_INITIAL,
  HEADER_SIZE_CHANGE_SAME,
  HEADER_SIZE_CHANGE_INCREASE,
  HEADER_SIZE_CHANGE_DECREASE,
  HEADER_SIZE_CHANGE_UNEXPECTED_WRITE,
  HEADER_SIZE_CHANGE_MAX
};

void RecordReadResult(net::CacheType cache_type, ReadResult result) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "ReadResult", cache_type, result, READ_RESULT_MAX);
}

void RecordWriteResult(net::CacheType cache_type, WriteResult result) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "WriteResult", cache_type, result, WRITE_RESULT_MAX);
}

// TODO(ttuttle): Consider removing this once we have a good handle on header
// size changes.
void RecordHeaderSizeChange(net::CacheType cache_type,
                            int old_size, int new_size) {
  HeaderSizeChange size_change;

  SIMPLE_CACHE_UMA(COUNTS_10000, "HeaderSize", cache_type, new_size);

  if (old_size == 0) {
    size_change = HEADER_SIZE_CHANGE_INITIAL;
  } else if (new_size == old_size) {
    size_change = HEADER_SIZE_CHANGE_SAME;
  } else if (new_size > old_size) {
    int delta = new_size - old_size;
    SIMPLE_CACHE_UMA(COUNTS_10000,
                     "HeaderSizeIncreaseAbsolute", cache_type, delta);
    SIMPLE_CACHE_UMA(PERCENTAGE,
                     "HeaderSizeIncreasePercentage", cache_type,
                     delta * 100 / old_size);
    size_change = HEADER_SIZE_CHANGE_INCREASE;
  } else {  // new_size < old_size
    int delta = old_size - new_size;
    SIMPLE_CACHE_UMA(COUNTS_10000,
                     "HeaderSizeDecreaseAbsolute", cache_type, delta);
    SIMPLE_CACHE_UMA(PERCENTAGE,
                     "HeaderSizeDecreasePercentage", cache_type,
                     delta * 100 / old_size);
    size_change = HEADER_SIZE_CHANGE_DECREASE;
  }

  SIMPLE_CACHE_UMA(ENUMERATION,
                   "HeaderSizeChange", cache_type,
                   size_change, HEADER_SIZE_CHANGE_MAX);
}

void RecordUnexpectedStream0Write(net::CacheType cache_type) {
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "HeaderSizeChange", cache_type,
                   HEADER_SIZE_CHANGE_UNEXPECTED_WRITE, HEADER_SIZE_CHANGE_MAX);
}

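// Process-wide count of open SimpleCache entries, across all backends. It
// feeds the GlobalOpenEntryCount histogram below and, as far as the callers
// here show, is only adjusted from the IO thread as entries finish opening
// and closing.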
int g_open_entry_count = 0;

void AdjustOpenEntryCountBy(net::CacheType cache_type, int offset) {
  g_open_entry_count += offset;
  SIMPLE_CACHE_UMA(COUNTS_10000,
                   "GlobalOpenEntryCount", cache_type, g_open_entry_count);
}

}  // namespace

namespace disk_cache {

using base::Closure;
using base::FilePath;
using base::MessageLoopProxy;
using base::Time;
using base::TaskRunner;

// A helper class to ensure that RunNextOperationIfNeeded() is called when
// exiting the current stack frame.
class SimpleEntryImpl::ScopedOperationRunner {
 public:
  explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {
  }

  ~ScopedOperationRunner() {
    entry_->RunNextOperationIfNeeded();
  }

 private:
  SimpleEntryImpl* const entry_;
};

SimpleEntryImpl::SimpleEntryImpl(net::CacheType cache_type,
                                 const FilePath& path,
                                 const uint64 entry_hash,
                                 OperationsMode operations_mode,
                                 SimpleBackendImpl* backend,
                                 net::NetLog* net_log)
    : backend_(backend->AsWeakPtr()),
      cache_type_(cache_type),
      worker_pool_(backend->worker_pool()),
      path_(path),
      entry_hash_(entry_hash),
      use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
      last_used_(Time::Now()),
      last_modified_(last_used_),
      open_count_(0),
      doomed_(false),
      state_(STATE_UNINITIALIZED),
      synchronous_entry_(NULL),
      net_log_(net::BoundNetLog::Make(
          net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)) {
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_),
                 arrays_should_be_same_size);
  MakeUninitialized();
  net_log_.BeginEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY,
      CreateNetLogSimpleEntryConstructionCallback(this));
}

int SimpleEntryImpl::OpenEntry(Entry** out_entry,
                               const CompletionCallback& callback) {
  DCHECK(backend_.get());

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_CALL);

  bool have_index = backend_->index()->initialized();
  // This enumeration is used in histograms, add entries only at the end.
  enum OpenEntryIndexEnum {
    INDEX_NOEXIST = 0,
    INDEX_MISS = 1,
    INDEX_HIT = 2,
    INDEX_MAX = 3,
  };
  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
  if (have_index) {
    if (backend_->index()->Has(entry_hash_))
      open_entry_index_enum = INDEX_HIT;
    else
      open_entry_index_enum = INDEX_MISS;
  }
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "OpenEntryIndexState", cache_type_,
                   open_entry_index_enum, INDEX_MAX);

  // If entry is not known to the index, initiate fast failover to the network.
  if (open_entry_index_enum == INDEX_MISS) {
    net_log_.AddEventWithNetErrorCode(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        net::ERR_FAILED);
    return net::ERR_FAILED;
  }

  pending_operations_.push(SimpleEntryOperation::OpenOperation(
      this, have_index, callback, out_entry));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::CreateEntry(Entry** out_entry,
                                 const CompletionCallback& callback) {
  DCHECK(backend_.get());
  DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_CALL);

  bool have_index = backend_->index()->initialized();
  int ret_value = net::ERR_FAILED;
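  // With optimistic operations, a Create on a brand-new entry can hand the
  // entry back to the caller immediately; the files are created asynchronously
  // by the operation queued below.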
  if (use_optimistic_operations_ &&
      state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_OPTIMISTIC);

    ReturnEntryToCaller(out_entry);
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, have_index, CompletionCallback(), static_cast<Entry**>(NULL)));
    ret_value = net::OK;
  } else {
    pending_operations_.push(SimpleEntryOperation::CreateOperation(
        this, have_index, callback, out_entry));
    ret_value = net::ERR_IO_PENDING;
  }

  // We insert the entry in the index before creating the entry files in the
  // SimpleSynchronousEntry, because this way the worst-case scenario is having
  // the entry in the index without the created files; that way we never leak
  // files. CreationOperationComplete will remove the entry from the index if
  // the creation fails.
  backend_->index()->Insert(entry_hash_);

  RunNextOperationIfNeeded();
  return ret_value;
}

int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_CALL);
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_DOOM_BEGIN);

  MarkAsDoomed();
  pending_operations_.push(SimpleEntryOperation::DoomOperation(this, callback));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

void SimpleEntryImpl::SetKey(const std::string& key) {
  key_ = key;
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_SET_KEY,
      net::NetLog::StringCallback("key", &key));
}

void SimpleEntryImpl::Doom() {
  DoomEntry(CompletionCallback());
}

void SimpleEntryImpl::Close() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LT(0, open_count_);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_CALL);

  if (--open_count_ > 0) {
    DCHECK(!HasOneRef());
    Release();  // Balanced in ReturnEntryToCaller().
    return;
  }

  pending_operations_.push(SimpleEntryOperation::CloseOperation(this));
  DCHECK(!HasOneRef());
  Release();  // Balanced in ReturnEntryToCaller().
  RunNextOperationIfNeeded();
}

std::string SimpleEntryImpl::GetKey() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return key_;
}

Time SimpleEntryImpl::GetLastUsed() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_used_;
}

Time SimpleEntryImpl::GetLastModified() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_modified_;
}

int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LE(0, data_size_[stream_index]);
  return data_size_[stream_index];
}

int SimpleEntryImpl::ReadData(int stream_index,
                              int offset,
                              net::IOBuffer* buf,
                              int buf_len,
                              const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_CALL,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          false));
  }

  if (stream_index < 0 || stream_index >= kSimpleEntryFileCount ||
      buf_len < 0) {
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
    }

    RecordReadResult(cache_type_, READ_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
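  // An empty or out-of-range read completes immediately with 0 bytes, but only
  // when no operations are queued ahead of it that could still change the
  // stream size.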
  if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) ||
                                      offset < 0 || !buf_len)) {
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(0));
    }

    RecordReadResult(cache_type_, READ_RESULT_NONBLOCK_EMPTY_RETURN);
    return 0;
  }

  // TODO(felipeg): Optimization: Add support for truly parallel read
  // operations.
  bool alone_in_queue =
      pending_operations_.size() == 0 && state_ == STATE_READY;
  pending_operations_.push(SimpleEntryOperation::ReadOperation(
      this, stream_index, offset, buf_len, buf, callback, alone_in_queue));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::WriteData(int stream_index,
                               int offset,
                               net::IOBuffer* buf,
                               int buf_len,
                               const CompletionCallback& callback,
                               bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_CALL,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          truncate));
  }

  if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || offset < 0 ||
      buf_len < 0) {
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_INVALID_ARGUMENT));
    }
    RecordWriteResult(cache_type_, WRITE_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) {
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    RecordWriteResult(cache_type_, WRITE_RESULT_OVER_MAX_SIZE);
    return net::ERR_FAILED;
  }
  ScopedOperationRunner operation_runner(this);

  // Currently, Simple Cache is only used for HTTP, which stores the headers in
  // stream 0 and always writes them with a single, truncating write.  Detect
  // these writes and record the size and size changes of the headers.  Also,
  // note writes to stream 0 that violate those assumptions.
  if (stream_index == 0) {
    if (offset == 0 && truncate)
      RecordHeaderSizeChange(cache_type_, data_size_[0], buf_len);
    else
      RecordUnexpectedStream0Write(cache_type_);
  }

  // We can only do an optimistic Write if there are no pending operations, so
  // that we are sure the next call to RunNextOperationIfNeeded will actually
  // run the write operation that sets the stream size. This also protects
  // against previously enqueued, possibly conflicting writes stacked in
  // |pending_operations_|. We could optimize this for the case where only
  // read operations are enqueued.
  const bool optimistic =
      (use_optimistic_operations_ && state_ == STATE_READY &&
       pending_operations_.size() == 0);
  CompletionCallback op_callback;
  scoped_refptr<net::IOBuffer> op_buf;
  int ret_value = net::ERR_FAILED;
  if (!optimistic) {
    op_buf = buf;
    op_callback = callback;
    ret_value = net::ERR_IO_PENDING;
  } else {
    // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer
    // here to avoid paying the price of the RefCountedThreadSafe atomic
    // operations.
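    // The copy is needed because an optimistic write returns to the caller
    // before the asynchronous write completes; the caller is then free to
    // reuse or release |buf| immediately.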
    if (buf) {
      op_buf = new IOBuffer(buf_len);
      memcpy(op_buf->data(), buf->data(), buf_len);
    }
    op_callback = CompletionCallback();
    ret_value = buf_len;
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_OPTIMISTIC,
          CreateNetLogReadWriteCompleteCallback(buf_len));
    }
  }

  pending_operations_.push(SimpleEntryOperation::WriteOperation(this,
                                                                stream_index,
                                                                offset,
                                                                buf_len,
                                                                op_buf.get(),
                                                                truncate,
                                                                optimistic,
                                                                op_callback));
  return ret_value;
}

int SimpleEntryImpl::ReadSparseData(int64 offset,
                                    net::IOBuffer* buf,
                                    int buf_len,
                                    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

int SimpleEntryImpl::WriteSparseData(int64 offset,
                                     net::IOBuffer* buf,
                                     int buf_len,
                                     const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

int SimpleEntryImpl::GetAvailableRange(int64 offset,
                                       int len,
                                       int64* start,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

bool SimpleEntryImpl::CouldBeSparse() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  return false;
}

void SimpleEntryImpl::CancelSparseIO() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
}

int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_NOT_IMPLEMENTED;
}

SimpleEntryImpl::~SimpleEntryImpl() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(0U, pending_operations_.size());
  DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_FAILURE);
  DCHECK(!synchronous_entry_);
  RemoveSelfFromBackend();
  net_log_.EndEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY);
}

void SimpleEntryImpl::MakeUninitialized() {
  state_ = STATE_UNINITIALIZED;
  std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
  std::memset(crc32s_, 0, sizeof(crc32s_));
  std::memset(have_written_, 0, sizeof(have_written_));
  std::memset(data_size_, 0, sizeof(data_size_));
  for (size_t i = 0; i < arraysize(crc_check_state_); ++i) {
    crc_check_state_[i] = CRC_CHECK_NEVER_READ_AT_ALL;
  }
}

void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
  DCHECK(out_entry);
  ++open_count_;
  AddRef();  // Balanced in Close()
  *out_entry = this;
}

void SimpleEntryImpl::RemoveSelfFromBackend() {
  if (!backend_.get())
    return;
  backend_->OnDeactivated(this);
}

void SimpleEntryImpl::MarkAsDoomed() {
  if (!backend_.get())
    return;
  doomed_ = true;
  backend_->index()->Remove(entry_hash_);
  RemoveSelfFromBackend();
}

void SimpleEntryImpl::RunNextOperationIfNeeded() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  SIMPLE_CACHE_UMA(CUSTOM_COUNTS,
                   "EntryOperationsPending", cache_type_,
                   pending_operations_.size(), 0, 100, 20);
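  // Operations execute strictly one at a time; while one is in flight
  // (STATE_IO_PENDING), the rest wait in |pending_operations_|.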
  if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
    scoped_ptr<SimpleEntryOperation> operation(
        new SimpleEntryOperation(pending_operations_.front()));
    pending_operations_.pop();
    switch (operation->type()) {
      case SimpleEntryOperation::TYPE_OPEN:
        OpenEntryInternal(operation->have_index(),
                          operation->callback(),
                          operation->out_entry());
        break;
      case SimpleEntryOperation::TYPE_CREATE:
        CreateEntryInternal(operation->have_index(),
                            operation->callback(),
                            operation->out_entry());
        break;
      case SimpleEntryOperation::TYPE_CLOSE:
        CloseInternal();
        break;
      case SimpleEntryOperation::TYPE_READ:
        RecordReadIsParallelizable(*operation);
        ReadDataInternal(operation->index(),
                         operation->offset(),
                         operation->buf(),
                         operation->length(),
                         operation->callback());
        break;
      case SimpleEntryOperation::TYPE_WRITE:
        RecordWriteDependencyType(*operation);
        WriteDataInternal(operation->index(),
                          operation->offset(),
                          operation->buf(),
                          operation->length(),
                          operation->callback(),
                          operation->truncate());
        break;
      case SimpleEntryOperation::TYPE_DOOM:
        DoomEntryInternal(operation->callback());
        break;
      default:
        NOTREACHED();
    }
    // The operation is kept for histograms. Make sure it does not leak
    // resources.
    executing_operation_.swap(operation);
    executing_operation_->ReleaseReferences();
    // |this| may have been deleted.
  }
}

void SimpleEntryImpl::OpenEntryInternal(bool have_index,
                                        const CompletionCallback& callback,
                                        Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_BEGIN);

  if (state_ == STATE_READY) {
    ReturnEntryToCaller(out_entry);
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback,
                                                                net::OK));
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::OK));
    return;
  }
  if (state_ == STATE_FAILURE) {
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));
    return;
  }

  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);
  state_ = STATE_IO_PENDING;
  const base::TimeTicks start_time = base::TimeTicks::Now();
  scoped_ptr<SimpleEntryCreationResults> results(
      new SimpleEntryCreationResults(
          SimpleEntryStat(last_used_, last_modified_, data_size_)));
  Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry,
                            cache_type_,
                            path_,
                            entry_hash_,
                            have_index,
                            results.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
                             this,
                             callback,
                             start_time,
                             base::Passed(&results),
                             out_entry,
                             net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_OPEN_END);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::CreateEntryInternal(bool have_index,
                                          const CompletionCallback& callback,
                                          Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_BEGIN);

  if (state_ != STATE_UNINITIALIZED) {
    // There is already an active normal entry.
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END,
        CreateNetLogSimpleEntryCreationCallback(this, net::ERR_FAILED));

    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);

  state_ = STATE_IO_PENDING;

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  // If creation succeeds, we should mark all streams to be saved on close.
  for (int i = 0; i < kSimpleEntryFileCount; ++i)
    have_written_[i] = true;

  const base::TimeTicks start_time = base::TimeTicks::Now();
  scoped_ptr<SimpleEntryCreationResults> results(
      new SimpleEntryCreationResults(
          SimpleEntryStat(last_used_, last_modified_, data_size_)));
  Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry,
                            cache_type_,
                            path_,
                            key_,
                            entry_hash_,
                            have_index,
                            results.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete,
                             this,
                             callback,
                             start_time,
                             base::Passed(&results),
                             out_entry,
                             net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CREATE_END);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::CloseInternal() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
  scoped_ptr<std::vector<CRCRecord> >
      crc32s_to_write(new std::vector<CRCRecord>());

  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_BEGIN);

  if (state_ == STATE_READY) {
    DCHECK(synchronous_entry_);
    state_ = STATE_IO_PENDING;
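    // For each stream written while the entry was open, persist its CRC if the
    // CRC was computed sequentially up to the end of the stream; otherwise
    // record that no valid CRC is available.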
    for (int i = 0; i < kSimpleEntryFileCount; ++i) {
      if (have_written_[i]) {
        if (GetDataSize(i) == crc32s_end_offset_[i]) {
          int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
          crc32s_to_write->push_back(CRCRecord(i, true, crc));
        } else {
          crc32s_to_write->push_back(CRCRecord(i, false, 0));
        }
      }
    }
  } else {
    DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
  }

  if (synchronous_entry_) {
    Closure task =
        base::Bind(&SimpleSynchronousEntry::Close,
                   base::Unretained(synchronous_entry_),
                   SimpleEntryStat(last_used_, last_modified_, data_size_),
                   base::Passed(&crc32s_to_write));
    Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
    synchronous_entry_ = NULL;
    worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);

    for (int i = 0; i < kSimpleEntryFileCount; ++i) {
      if (!have_written_[i]) {
        SIMPLE_CACHE_UMA(ENUMERATION,
                         "CheckCRCResult", cache_type_,
                         crc_check_state_[i], CRC_CHECK_MAX);
      }
    }
  } else {
    CloseOperationComplete();
  }
}

void SimpleEntryImpl::ReadDataInternal(int stream_index,
                                       int offset,
                                       net::IOBuffer* buf,
                                       int buf_len,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_BEGIN,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          false));
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    if (!callback.is_null()) {
      RecordReadResult(cache_type_, READ_RESULT_BAD_STATE);
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_READY, state_);
  if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
    RecordReadResult(cache_type_, READ_RESULT_FAST_EMPTY_RETURN);
    // If there is nothing to read, we bail out before setting state_ to
    // STATE_IO_PENDING.
    if (!callback.is_null())
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, 0));
    return;
  }

  buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);

  state_ = STATE_IO_PENDING;
  if (!doomed_ && backend_.get())
    backend_->index()->UseIfExists(entry_hash_);

  scoped_ptr<uint32> read_crc32(new uint32());
  scoped_ptr<int> result(new int());
  scoped_ptr<base::Time> last_used(new base::Time());
  Closure task = base::Bind(
      &SimpleSynchronousEntry::ReadData,
      base::Unretained(synchronous_entry_),
      SimpleSynchronousEntry::EntryOperationData(stream_index, offset, buf_len),
      make_scoped_refptr(buf),
      read_crc32.get(),
      last_used.get(),
      result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete,
                             this,
                             stream_index,
                             offset,
                             callback,
                             base::Passed(&read_crc32),
                             base::Passed(&last_used),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::WriteDataInternal(int stream_index,
                                        int offset,
                                        net::IOBuffer* buf,
                                        int buf_len,
                                        const CompletionCallback& callback,
                                        bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_BEGIN,
        CreateNetLogReadWriteDataCallback(stream_index, offset, buf_len,
                                          truncate));
  }

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    RecordWriteResult(cache_type_, WRITE_RESULT_BAD_STATE);
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.AddEvent(
          net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
          CreateNetLogReadWriteCompleteCallback(net::ERR_FAILED));
    }
    if (!callback.is_null()) {
      // Post the callback rather than invoking it directly: calling it
      // synchronously here could re-enter this code and loop.
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    // |this| may be destroyed after return here.
    return;
  }

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;
  if (!doomed_ && backend_.get())
    backend_->index()->UseIfExists(entry_hash_);
  // It is easy to incrementally compute the CRC for [0 .. |offset + buf_len|)
  // if |offset == 0| or we have already computed the CRC for [0 .. offset).
  // We rely on most write operations being sequential, start to end, to
  // compute the CRC of the data. When we write to an entry and close it
  // without having done a sequential write, we don't check the CRC on read.
  if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
    uint32 initial_crc = (offset != 0) ? crc32s_[stream_index]
                                       : crc32(0, Z_NULL, 0);
    if (buf_len > 0) {
      crc32s_[stream_index] = crc32(initial_crc,
                                    reinterpret_cast<const Bytef*>(buf->data()),
                                    buf_len);
    }
    crc32s_end_offset_[stream_index] = offset + buf_len;
  }

  // |entry_stat| needs to be initialized before modifying |data_size_|.
  scoped_ptr<SimpleEntryStat> entry_stat(
      new SimpleEntryStat(last_used_, last_modified_, data_size_));
  if (truncate) {
    data_size_[stream_index] = offset + buf_len;
  } else {
    data_size_[stream_index] = std::max(offset + buf_len,
                                        GetDataSize(stream_index));
  }

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  have_written_[stream_index] = true;

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::WriteData,
                            base::Unretained(synchronous_entry_),
                            SimpleSynchronousEntry::EntryOperationData(
                                stream_index, offset, buf_len, truncate),
                            make_scoped_refptr(buf),
                            entry_stat.get(),
                            result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete,
                             this,
                             stream_index,
                             callback,
                             base::Passed(&entry_stat),
                             base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::DoomEntryInternal(const CompletionCallback& callback) {
  if (backend_)
    backend_->OnDoomStart(entry_hash_);
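  // DoomEntry runs on the worker pool; the |state_| value bound into the
  // reply lets DoomOperationComplete restore it once the doom finishes.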
  PostTaskAndReplyWithResult(
      worker_pool_, FROM_HERE,
      base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, key_, entry_hash_),
      base::Bind(&SimpleEntryImpl::DoomOperationComplete, this, callback,
                 state_));
  state_ = STATE_IO_PENDING;
}

void SimpleEntryImpl::CreationOperationComplete(
    const CompletionCallback& completion_callback,
    const base::TimeTicks& start_time,
    scoped_ptr<SimpleEntryCreationResults> in_results,
    Entry** out_entry,
    net::NetLog::EventType end_event_type) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, STATE_IO_PENDING);
  DCHECK(in_results);
  ScopedOperationRunner operation_runner(this);
  SIMPLE_CACHE_UMA(BOOLEAN,
                   "EntryCreationResult", cache_type_,
                   in_results->result == net::OK);
  if (in_results->result != net::OK) {
    if (in_results->result != net::ERR_FILE_EXISTS)
      MarkAsDoomed();

    net_log_.AddEventWithNetErrorCode(end_event_type, net::ERR_FAILED);

    if (!completion_callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          completion_callback, net::ERR_FAILED));
    }
    MakeUninitialized();
    return;
  }
  // If out_entry is NULL, it means we already called ReturnEntryToCaller from
  // the optimistic Create case.
  if (out_entry)
    ReturnEntryToCaller(out_entry);

  state_ = STATE_READY;
  synchronous_entry_ = in_results->sync_entry;
  if (key_.empty()) {
    SetKey(synchronous_entry_->key());
  } else {
    // This should only be triggered when creating an entry. The key check in
    // the open case is handled in SimpleBackendImpl.
    DCHECK_EQ(key_, synchronous_entry_->key());
  }
  UpdateDataFromEntryStat(in_results->entry_stat);
  SIMPLE_CACHE_UMA(TIMES,
                   "EntryCreationTime", cache_type_,
                   (base::TimeTicks::Now() - start_time));
  AdjustOpenEntryCountBy(cache_type_, 1);

  net_log_.AddEvent(end_event_type);
  if (!completion_callback.is_null()) {
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
        completion_callback, net::OK));
  }
}

void SimpleEntryImpl::EntryOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    const SimpleEntryStat& entry_stat,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);
  state_ = STATE_READY;
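  // A synchronous failure dooms the entry; the running CRC for the stream is
  // also discarded, since it can no longer be trusted.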
  if (*result < 0) {
    MarkAsDoomed();
    state_ = STATE_FAILURE;
    crc32s_end_offset_[stream_index] = 0;
  } else {
    UpdateDataFromEntryStat(entry_stat);
  }

  if (!completion_callback.is_null()) {
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
        completion_callback, *result));
  }
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::ReadOperationComplete(
    int stream_index,
    int offset,
    const CompletionCallback& completion_callback,
    scoped_ptr<uint32> read_crc32,
    scoped_ptr<base::Time> last_used,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(read_crc32);
  DCHECK(result);

  if (*result > 0 &&
      crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_AT_ALL) {
    crc_check_state_[stream_index] = CRC_CHECK_NEVER_READ_TO_END;
  }

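  // If this read extends the contiguous checksummed prefix of the stream, fold
  // the CRC of the newly read bytes into the running CRC with zlib's
  // crc32_combine().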
  if (*result > 0 && crc32s_end_offset_[stream_index] == offset) {
    uint32 current_crc = offset == 0 ? crc32(0, Z_NULL, 0)
                                     : crc32s_[stream_index];
    crc32s_[stream_index] = crc32_combine(current_crc, *read_crc32, *result);
    crc32s_end_offset_[stream_index] += *result;
    if (!have_written_[stream_index] &&
        GetDataSize(stream_index) == crc32s_end_offset_[stream_index]) {
      // We have just read a file from start to finish, and so we have
      // computed a CRC of the entire file. We can check it now. If a cache
      // entry has a single reader, the normal pattern is to read from start
      // to finish.

      // Other cases are possible. In the case of two readers on the same
      // entry, one reader can be behind the other. In this case we compute
      // the CRC as the most advanced reader progresses, and check it for
      // both readers as they read the last byte.

      net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_BEGIN);

      scoped_ptr<int> new_result(new int());
      Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
                                base::Unretained(synchronous_entry_),
                                stream_index,
                                data_size_[stream_index],
                                crc32s_[stream_index],
                                new_result.get());
      Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete,
                                 this, *result, stream_index,
                                 completion_callback,
                                 base::Passed(&new_result));
      worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
      crc_check_state_[stream_index] = CRC_CHECK_DONE;
      return;
    }
  }

  if (*result < 0) {
    RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
    if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END &&
        offset + *result == GetDataSize(stream_index)) {
      crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE;
    }
  }
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
        CreateNetLogReadWriteCompleteCallback(*result));
  }

  EntryOperationComplete(
      stream_index,
      completion_callback,
      SimpleEntryStat(*last_used, last_modified_, data_size_),
      result.Pass());
}

void SimpleEntryImpl::WriteOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<SimpleEntryStat> entry_stat,
    scoped_ptr<int> result) {
  if (*result >= 0)
    RecordWriteResult(cache_type_, WRITE_RESULT_SUCCESS);
  else
    RecordWriteResult(cache_type_, WRITE_RESULT_SYNC_WRITE_FAILURE);
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_WRITE_END,
        CreateNetLogReadWriteCompleteCallback(*result));
  }

  EntryOperationComplete(
      stream_index, completion_callback, *entry_stat, result.Pass());
}

void SimpleEntryImpl::DoomOperationComplete(
    const CompletionCallback& callback,
    State state_to_restore,
    int result) {
  state_ = state_to_restore;
  if (!callback.is_null())
    callback.Run(result);
  RunNextOperationIfNeeded();
  if (backend_)
    backend_->OnDoomComplete(entry_hash_);
}

void SimpleEntryImpl::ChecksumOperationComplete(
    int orig_result,
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEventWithNetErrorCode(
        net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CHECKSUM_END,
        *result);
  }

  if (*result == net::OK) {
    *result = orig_result;
    if (orig_result >= 0)
      RecordReadResult(cache_type_, READ_RESULT_SUCCESS);
    else
      RecordReadResult(cache_type_, READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(cache_type_, READ_RESULT_SYNC_CHECKSUM_FAILURE);
  }
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_READ_END,
        CreateNetLogReadWriteCompleteCallback(*result));
  }

  EntryOperationComplete(
      stream_index,
      completion_callback,
      SimpleEntryStat(last_used_, last_modified_, data_size_),
      result.Pass());
}

void SimpleEntryImpl::CloseOperationComplete() {
  DCHECK(!synchronous_entry_);
  DCHECK_EQ(0, open_count_);
  DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
         STATE_UNINITIALIZED == state_);
  net_log_.AddEvent(net::NetLog::TYPE_SIMPLE_CACHE_ENTRY_CLOSE_END);
  AdjustOpenEntryCountBy(cache_type_, -1);
  MakeUninitialized();
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::UpdateDataFromEntryStat(
    const SimpleEntryStat& entry_stat) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_READY, state_);

  last_used_ = entry_stat.last_used;
  last_modified_ = entry_stat.last_modified;
  for (int i = 0; i < kSimpleEntryFileCount; ++i) {
    data_size_[i] = entry_stat.data_size[i];
  }
  if (!doomed_ && backend_.get())
    backend_->index()->UpdateEntrySize(entry_hash_, GetDiskUsage());
}

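// Disk usage is the sum of the per-stream file sizes; judging by the helper
// used below, each file's size is derived from the key and data sizes, so the
// on-disk footprint includes more than just the stream data.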
int64 SimpleEntryImpl::GetDiskUsage() const {
  int64 file_size = 0;
  for (int i = 0; i < kSimpleEntryFileCount; ++i) {
    file_size +=
        simple_util::GetFileSizeFromKeyAndDataSize(key_, data_size_[i]);
  }
  return file_size;
}

void SimpleEntryImpl::RecordReadIsParallelizable(
    const SimpleEntryOperation& operation) const {
  if (!executing_operation_)
    return;
  // Used in histograms, please only add entries at the end.
  enum ReadDependencyType {
    // READ_STANDALONE = 0, Deprecated.
    READ_FOLLOWS_READ = 1,
    READ_FOLLOWS_CONFLICTING_WRITE = 2,
    READ_FOLLOWS_NON_CONFLICTING_WRITE = 3,
    READ_FOLLOWS_OTHER = 4,
    READ_ALONE_IN_QUEUE = 5,
    READ_DEPENDENCY_TYPE_MAX = 6,
  };

  ReadDependencyType type = READ_FOLLOWS_OTHER;
  if (operation.alone_in_queue()) {
    type = READ_ALONE_IN_QUEUE;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
    type = READ_FOLLOWS_READ;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
    if (executing_operation_->ConflictsWith(operation))
      type = READ_FOLLOWS_CONFLICTING_WRITE;
    else
      type = READ_FOLLOWS_NON_CONFLICTING_WRITE;
  }
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "ReadIsParallelizable", cache_type_,
                   type, READ_DEPENDENCY_TYPE_MAX);
}

void SimpleEntryImpl::RecordWriteDependencyType(
    const SimpleEntryOperation& operation) const {
  if (!executing_operation_)
    return;
  // Used in histograms, please only add entries at the end.
  enum WriteDependencyType {
    WRITE_OPTIMISTIC = 0,
    WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC = 1,
    WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC = 2,
    WRITE_FOLLOWS_CONFLICTING_WRITE = 3,
    WRITE_FOLLOWS_NON_CONFLICTING_WRITE = 4,
    WRITE_FOLLOWS_CONFLICTING_READ = 5,
    WRITE_FOLLOWS_NON_CONFLICTING_READ = 6,
    WRITE_FOLLOWS_OTHER = 7,
    WRITE_DEPENDENCY_TYPE_MAX = 8,
  };

  WriteDependencyType type = WRITE_FOLLOWS_OTHER;
  if (operation.optimistic()) {
    type = WRITE_OPTIMISTIC;
  } else if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ ||
             executing_operation_->type() == SimpleEntryOperation::TYPE_WRITE) {
    bool conflicting = executing_operation_->ConflictsWith(operation);

    if (executing_operation_->type() == SimpleEntryOperation::TYPE_READ) {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_READ
                         : WRITE_FOLLOWS_NON_CONFLICTING_READ;
    } else if (executing_operation_->optimistic()) {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_OPTIMISTIC
                         : WRITE_FOLLOWS_NON_CONFLICTING_OPTIMISTIC;
    } else {
      type = conflicting ? WRITE_FOLLOWS_CONFLICTING_WRITE
                         : WRITE_FOLLOWS_NON_CONFLICTING_WRITE;
    }
  }
  SIMPLE_CACHE_UMA(ENUMERATION,
                   "WriteDependencyType", cache_type_,
                   type, WRITE_DEPENDENCY_TYPE_MAX);
}

}  // namespace disk_cache