simple_entry_impl.cc revision 7d4cd473f85ac64c3747c96c277f9e506a0d2246
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_entry_impl.h"

#include <algorithm>
#include <cstring>
#include <vector>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/histogram.h"
#include "base/task_runner.h"
#include "base/time.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"
#include "third_party/zlib/zlib.h"

namespace {

// Used in histograms, please only add entries at the end.
enum ReadResult {
  READ_RESULT_SUCCESS = 0,
  READ_RESULT_INVALID_ARGUMENT = 1,
  READ_RESULT_NONBLOCK_EMPTY_RETURN = 2,
  READ_RESULT_BAD_STATE = 3,
  READ_RESULT_FAST_EMPTY_RETURN = 4,
  READ_RESULT_SYNC_READ_FAILURE = 5,
  READ_RESULT_SYNC_CHECKSUM_FAILURE = 6,
  READ_RESULT_MAX = 7,
};

// Used in histograms, please only add entries at the end.
enum WriteResult {
  WRITE_RESULT_SUCCESS = 0,
  WRITE_RESULT_INVALID_ARGUMENT = 1,
  WRITE_RESULT_OVER_MAX_SIZE = 2,
  WRITE_RESULT_BAD_STATE = 3,
  WRITE_RESULT_SYNC_WRITE_FAILURE = 4,
  WRITE_RESULT_MAX = 5,
};

void RecordReadResult(ReadResult result) {
  UMA_HISTOGRAM_ENUMERATION("SimpleCache.ReadResult", result, READ_RESULT_MAX);
}

void RecordWriteResult(WriteResult result) {
  UMA_HISTOGRAM_ENUMERATION("SimpleCache.WriteResult",
                            result, WRITE_RESULT_MAX);
}

// Short trampoline to take an owned input parameter and call a net completion
// callback with its value.
void CallCompletionCallback(const net::CompletionCallback& callback,
                            scoped_ptr<int> result) {
  DCHECK(result);
  if (!callback.is_null())
    callback.Run(*result);
}

}  // namespace

namespace disk_cache {

using base::Closure;
using base::FilePath;
using base::MessageLoopProxy;
using base::Time;
using base::TaskRunner;

// A helper class to ensure that RunNextOperationIfNeeded() is called when
// exiting the current stack frame.
class SimpleEntryImpl::ScopedOperationRunner {
 public:
  explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {
  }

  ~ScopedOperationRunner() {
    entry_->RunNextOperationIfNeeded();
  }

 private:
  SimpleEntryImpl* const entry_;
};

SimpleEntryImpl::SimpleEntryImpl(SimpleBackendImpl* backend,
                                 const FilePath& path,
                                 const std::string& key,
                                 const uint64 entry_hash)
    : backend_(backend->AsWeakPtr()),
      worker_pool_(backend->worker_pool()),
      path_(path),
      key_(key),
      entry_hash_(entry_hash),
      last_used_(Time::Now()),
      last_modified_(last_used_),
      open_count_(0),
      state_(STATE_UNINITIALIZED),
      synchronous_entry_(NULL) {
  DCHECK_EQ(entry_hash, simple_util::GetEntryHashKey(key));
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_),
                 arrays_should_be_same_size);
  MakeUninitialized();
}

int SimpleEntryImpl::OpenEntry(Entry** out_entry,
                               const CompletionCallback& callback) {
  DCHECK(backend_.get());
  // This enumeration is used in histograms; add entries only at the end.
  enum OpenEntryIndexEnum {
    INDEX_NOEXIST = 0,
    INDEX_MISS = 1,
    INDEX_HIT = 2,
    INDEX_MAX = 3,
  };
  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
  if (backend_.get()) {
    if (backend_->index()->Has(key_))
      open_entry_index_enum = INDEX_HIT;
    else
      open_entry_index_enum = INDEX_MISS;
  }
  UMA_HISTOGRAM_ENUMERATION("SimpleCache.OpenEntryIndexState",
                            open_entry_index_enum, INDEX_MAX);

  // If the entry is not known to the index, initiate fast failover to the
  // network.
  if (open_entry_index_enum == INDEX_MISS)
    return net::ERR_FAILED;

  pending_operations_.push(base::Bind(&SimpleEntryImpl::OpenEntryInternal,
                                      this, callback, out_entry));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::CreateEntry(Entry** out_entry,
                                 const CompletionCallback& callback) {
  DCHECK(backend_.get());
  int ret_value = net::ERR_FAILED;
  if (state_ == STATE_UNINITIALIZED &&
      pending_operations_.size() == 0) {
    ReturnEntryToCaller(out_entry);
    // We can do an optimistic Create.
    pending_operations_.push(base::Bind(&SimpleEntryImpl::CreateEntryInternal,
                                        this,
                                        CompletionCallback(),
                                        static_cast<Entry**>(NULL)));
    ret_value = net::OK;
  } else {
    pending_operations_.push(base::Bind(&SimpleEntryImpl::CreateEntryInternal,
                                        this,
                                        callback,
                                        out_entry));
    ret_value = net::ERR_IO_PENDING;
  }

  // We insert the entry into the index before creating the entry files in
  // the SimpleSynchronousEntry. This way the worst-case scenario is having
  // the entry in the index without the created files, so we never leak
  // files. CreationOperationComplete will remove the entry from the index
  // if the creation fails.
  if (backend_.get())
    backend_->index()->Insert(key_);

  RunNextOperationIfNeeded();
  return ret_value;
}

int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
  MarkAsDoomed();
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, key_,
                            entry_hash_, result.get());
  Closure reply = base::Bind(&CallCompletionCallback,
                             callback, base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
  return net::ERR_IO_PENDING;
}

void SimpleEntryImpl::Doom() {
  DoomEntry(CompletionCallback());
}

void SimpleEntryImpl::Close() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LT(0, open_count_);

  if (--open_count_ > 0) {
    DCHECK(!HasOneRef());
    Release();  // Balanced in ReturnEntryToCaller().
    return;
  }

  pending_operations_.push(base::Bind(&SimpleEntryImpl::CloseInternal, this));
  DCHECK(!HasOneRef());
  Release();  // Balanced in ReturnEntryToCaller().
  RunNextOperationIfNeeded();
}

std::string SimpleEntryImpl::GetKey() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return key_;
}

Time SimpleEntryImpl::GetLastUsed() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_used_;
}

Time SimpleEntryImpl::GetLastModified() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_modified_;
}

int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LE(0, data_size_[stream_index]);
  return data_size_[stream_index];
}

int SimpleEntryImpl::ReadData(int stream_index,
                              int offset,
                              net::IOBuffer* buf,
                              int buf_len,
                              const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (stream_index < 0 || stream_index >= kSimpleEntryFileCount ||
      buf_len < 0) {
    RecordReadResult(READ_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) ||
                                      offset < 0 || !buf_len)) {
    RecordReadResult(READ_RESULT_NONBLOCK_EMPTY_RETURN);
    return 0;
  }

  // TODO(felipeg): Optimization: Add support for truly parallel read
  // operations.
  pending_operations_.push(
      base::Bind(&SimpleEntryImpl::ReadDataInternal,
                 this,
                 stream_index,
                 offset,
                 make_scoped_refptr(buf),
                 buf_len,
                 callback));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::WriteData(int stream_index,
                               int offset,
                               net::IOBuffer* buf,
                               int buf_len,
                               const CompletionCallback& callback,
                               bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || offset < 0 ||
      buf_len < 0) {
    RecordWriteResult(WRITE_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) {
    RecordWriteResult(WRITE_RESULT_OVER_MAX_SIZE);
    return net::ERR_FAILED;
  }

  int ret_value = net::ERR_FAILED;
  if (state_ == STATE_READY && pending_operations_.size() == 0) {
    // We can only do an optimistic Write if there are no pending operations,
    // so that we are sure the next call to RunNextOperationIfNeeded() will
    // actually run the write operation that sets the stream size. It also
    // protects against previous possibly-conflicting writes that could be
    // stacked in |pending_operations_|. We could optimize this for when we
    // have only read operations enqueued.
    // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer
    // here to avoid paying the price of the RefCountedThreadSafe atomic
    // operations.
    IOBuffer* buf_copy = NULL;
    if (buf) {
      buf_copy = new IOBuffer(buf_len);
      memcpy(buf_copy->data(), buf->data(), buf_len);
    }
    pending_operations_.push(
        base::Bind(&SimpleEntryImpl::WriteDataInternal, this, stream_index,
                   offset, make_scoped_refptr(buf_copy), buf_len,
                   CompletionCallback(), truncate));
    ret_value = buf_len;
  } else {
    pending_operations_.push(
        base::Bind(&SimpleEntryImpl::WriteDataInternal, this, stream_index,
                   offset, make_scoped_refptr(buf), buf_len, callback,
                   truncate));
    ret_value = net::ERR_IO_PENDING;
  }

  RunNextOperationIfNeeded();
  return ret_value;
}

int SimpleEntryImpl::ReadSparseData(int64 offset,
                                    net::IOBuffer* buf,
                                    int buf_len,
                                    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

int SimpleEntryImpl::WriteSparseData(int64 offset,
                                     net::IOBuffer* buf,
                                     int buf_len,
                                     const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

int SimpleEntryImpl::GetAvailableRange(int64 offset,
                                       int len,
                                       int64* start,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

bool SimpleEntryImpl::CouldBeSparse() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  return false;
}

void SimpleEntryImpl::CancelSparseIO() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
}

int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

SimpleEntryImpl::~SimpleEntryImpl() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(0U, pending_operations_.size());
  DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_FAILURE);
  DCHECK(!synchronous_entry_);
  RemoveSelfFromBackend();
}

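// Resets the entry to the pristine STATE_UNINITIALIZED state, clearing the
// per-stream size, CRC and bookkeeping arrays.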
void SimpleEntryImpl::MakeUninitialized() {
  state_ = STATE_UNINITIALIZED;
  std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
  std::memset(crc32s_, 0, sizeof(crc32s_));
  std::memset(have_written_, 0, sizeof(have_written_));
  std::memset(data_size_, 0, sizeof(data_size_));
  std::memset(crc_check_state_, 0, sizeof(crc_check_state_));
}

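// Hands a reference to this entry out through |out_entry|. Each call bumps
// |open_count_| and takes a ref that is balanced by a Release() in Close().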
void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
  DCHECK(out_entry);
  ++open_count_;
  AddRef();  // Balanced in Close()
  *out_entry = this;
}

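// Detaches this entry from the backend: notifies the backend that the entry
// is no longer active and drops the weak pointer to it.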
void SimpleEntryImpl::RemoveSelfFromBackend() {
  if (!backend_.get())
    return;
  backend_->OnDeactivated(this);
  backend_.reset();
}

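// Removes the key from the index and detaches from the backend. This only
// updates in-memory state; the files on disk are not touched here.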
void SimpleEntryImpl::MarkAsDoomed() {
  if (!backend_.get())
    return;
  backend_->index()->Remove(key_);
  RemoveSelfFromBackend();
}

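// Runs the next queued operation unless an I/O operation is already in
// flight. Note that running the operation may delete |this|.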
void SimpleEntryImpl::RunNextOperationIfNeeded() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  UMA_HISTOGRAM_CUSTOM_COUNTS("SimpleCache.EntryOperationsPending",
                              pending_operations_.size(), 0, 100, 20);
  if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
    base::Closure operation = pending_operations_.front();
    pending_operations_.pop();
    operation.Run();
    // |this| may have been deleted.
  }
}

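// Services a queued Open. A STATE_READY entry is returned immediately and a
// failed entry reports net::ERR_FAILED; otherwise the open is posted to the
// worker pool and finishes in CreationOperationComplete().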
void SimpleEntryImpl::OpenEntryInternal(const CompletionCallback& callback,
                                        Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);
  if (state_ == STATE_READY) {
    ReturnEntryToCaller(out_entry);
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback,
                                                                net::OK));
    return;
  } else if (state_ == STATE_FAILURE) {
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);
  state_ = STATE_IO_PENDING;
  const base::TimeTicks start_time = base::TimeTicks::Now();
  typedef SimpleSynchronousEntry* PointerToSimpleSynchronousEntry;
  scoped_ptr<PointerToSimpleSynchronousEntry> sync_entry(
      new PointerToSimpleSynchronousEntry());
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry, path_, key_,
                            entry_hash_, sync_entry.get(), result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete, this,
                             callback, start_time, base::Passed(&sync_entry),
                             base::Passed(&result), out_entry);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

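// Services a queued Create. Fails if the entry is already initialized;
// otherwise the file creation is posted to the worker pool and finishes in
// CreationOperationComplete().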
void SimpleEntryImpl::CreateEntryInternal(const CompletionCallback& callback,
                                          Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);
  if (state_ != STATE_UNINITIALIZED) {
    // There is already an active normal entry.
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);

  state_ = STATE_IO_PENDING;

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  // If creation succeeds, we should mark all streams to be saved on close.
  for (int i = 0; i < kSimpleEntryFileCount; ++i)
    have_written_[i] = true;

  const base::TimeTicks start_time = base::TimeTicks::Now();
  typedef SimpleSynchronousEntry* PointerToSimpleSynchronousEntry;
  scoped_ptr<PointerToSimpleSynchronousEntry> sync_entry(
      new PointerToSimpleSynchronousEntry());
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry, path_, key_,
                            entry_hash_, sync_entry.get(), result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete, this,
                             callback, start_time, base::Passed(&sync_entry),
                             base::Passed(&result), out_entry);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

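// Services the queued Close. For streams that were written sequentially the
// final CRC is recorded so it can be stored alongside the data; the actual
// close of the synchronous entry happens on the worker pool.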
void SimpleEntryImpl::CloseInternal() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
  scoped_ptr<std::vector<CRCRecord> >
      crc32s_to_write(new std::vector<CRCRecord>());

  if (state_ == STATE_READY) {
    DCHECK(synchronous_entry_);
    state_ = STATE_IO_PENDING;
    for (int i = 0; i < kSimpleEntryFileCount; ++i) {
      if (have_written_[i]) {
        if (GetDataSize(i) == crc32s_end_offset_[i]) {
          int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
          crc32s_to_write->push_back(CRCRecord(i, true, crc));
        } else {
          crc32s_to_write->push_back(CRCRecord(i, false, 0));
        }
      }
    }
  } else {
    DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
  }

  if (synchronous_entry_) {
    Closure task = base::Bind(&SimpleSynchronousEntry::Close,
                              base::Unretained(synchronous_entry_),
                              base::Passed(&crc32s_to_write));
    Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
    synchronous_entry_ = NULL;
    worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);

    for (int i = 0; i < kSimpleEntryFileCount; ++i) {
      if (!have_written_[i]) {
        UMA_HISTOGRAM_ENUMERATION("SimpleCache.CheckCRCResult",
                                  crc_check_state_[i], CRC_CHECK_MAX);
      }
    }
  } else {
    synchronous_entry_ = NULL;
    CloseOperationComplete();
  }
}

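// Services a queued Read. Clamps |buf_len| to the stream size, then posts the
// read to the worker pool; the CRC of the returned data is folded in by
// ReadOperationComplete().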
void SimpleEntryImpl::ReadDataInternal(int stream_index,
                                       int offset,
                                       net::IOBuffer* buf,
                                       int buf_len,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    if (!callback.is_null()) {
      RecordReadResult(READ_RESULT_BAD_STATE);
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_READY, state_);
  if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
    RecordReadResult(READ_RESULT_FAST_EMPTY_RETURN);
    // If there is nothing to read, we bail out before setting state_ to
    // STATE_IO_PENDING.
    if (!callback.is_null())
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, 0));
    return;
  }
  buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);

  state_ = STATE_IO_PENDING;
  if (backend_.get())
    backend_->index()->UseIfExists(key_);

  scoped_ptr<uint32> read_crc32(new uint32());
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::ReadData,
                            base::Unretained(synchronous_entry_),
                            stream_index, offset, make_scoped_refptr(buf),
                            buf_len, read_crc32.get(), result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete, this,
                             stream_index, offset, callback,
                             base::Passed(&read_crc32), base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

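// Services a queued Write. Updates the running CRC and the expected stream
// size on the IO thread, then posts the write to the worker pool.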
void SimpleEntryImpl::WriteDataInternal(int stream_index,
                                        int offset,
                                        net::IOBuffer* buf,
                                        int buf_len,
                                        const CompletionCallback& callback,
                                        bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);
  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    RecordWriteResult(WRITE_RESULT_BAD_STATE);
    if (!callback.is_null()) {
      // We need to post a task here so that we don't end up in a loop by
      // running the callback directly.
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    // |this| may be destroyed after return here.
    return;
  }
  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;
  if (backend_.get())
    backend_->index()->UseIfExists(key_);
  // It is easy to incrementally compute the CRC for [0 .. |offset + buf_len|)
  // if |offset == 0| or we have already computed the CRC for [0 .. offset).
  // We rely on most write operations being sequential, start to end, to
  // compute the CRC of the data. When we write to an entry and close it
  // without having done a sequential write, we don't check the CRC on read.
  if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
    uint32 initial_crc = (offset != 0) ? crc32s_[stream_index]
                                       : crc32(0, Z_NULL, 0);
    if (buf_len > 0) {
      crc32s_[stream_index] = crc32(initial_crc,
                                    reinterpret_cast<const Bytef*>(buf->data()),
                                    buf_len);
    }
    crc32s_end_offset_[stream_index] = offset + buf_len;
  }

  if (truncate) {
    data_size_[stream_index] = offset + buf_len;
  } else {
    data_size_[stream_index] = std::max(offset + buf_len,
                                        GetDataSize(stream_index));
  }

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  have_written_[stream_index] = true;

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::WriteData,
                            base::Unretained(synchronous_entry_),
                            stream_index, offset, make_scoped_refptr(buf),
                            buf_len, truncate, result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete, this,
                             stream_index, callback, base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

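// Called on the IO thread once SimpleSynchronousEntry::OpenEntry or
// CreateEntry has finished on the worker pool. On success the entry becomes
// STATE_READY and adopts the synchronous entry; on failure it is doomed
// (unless the files already existed) and reset to STATE_UNINITIALIZED.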
void SimpleEntryImpl::CreationOperationComplete(
    const CompletionCallback& completion_callback,
    const base::TimeTicks& start_time,
    scoped_ptr<SimpleSynchronousEntry*> in_sync_entry,
    scoped_ptr<int> in_result,
    Entry** out_entry) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, STATE_IO_PENDING);
  DCHECK(in_sync_entry);
  DCHECK(in_result);
  ScopedOperationRunner operation_runner(this);
  UMA_HISTOGRAM_BOOLEAN(
      "SimpleCache.EntryCreationResult", *in_result == net::OK);
  if (*in_result != net::OK) {
    if (*in_result != net::ERR_FILE_EXISTS)
      MarkAsDoomed();
    if (!completion_callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          completion_callback, net::ERR_FAILED));
    }
    MakeUninitialized();
    return;
  }
  // If out_entry is NULL, it means we already called ReturnEntryToCaller from
  // the optimistic Create case.
  if (out_entry)
    ReturnEntryToCaller(out_entry);

  state_ = STATE_READY;
  synchronous_entry_ = *in_sync_entry;
  SetSynchronousData();
  UMA_HISTOGRAM_TIMES("SimpleCache.EntryCreationTime",
                      (base::TimeTicks::Now() - start_time));

  if (!completion_callback.is_null()) {
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
        completion_callback, net::OK));
  }
}

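// Common completion path for reads and writes. On success the cached metadata
// is refreshed; on failure the entry is doomed and moved to STATE_FAILURE.
// The user callback is posted and the next queued operation is run.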
void SimpleEntryImpl::EntryOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);
  state_ = STATE_READY;
  if (*result < 0) {
    MarkAsDoomed();
    state_ = STATE_FAILURE;
    crc32s_end_offset_[stream_index] = 0;
  } else {
    SetSynchronousData();
  }

  if (!completion_callback.is_null()) {
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
        completion_callback, *result));
  }
  RunNextOperationIfNeeded();
}

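// Completion of a read on the worker pool. Folds the CRC of the freshly read
// bytes into the running checksum; if this read reaches the end of a stream
// that was not written to, the stored checksum is verified before the result
// is reported.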
void SimpleEntryImpl::ReadOperationComplete(
    int stream_index,
    int offset,
    const CompletionCallback& completion_callback,
    scoped_ptr<uint32> read_crc32,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(read_crc32);
  DCHECK(result);

  if (*result > 0 && crc32s_end_offset_[stream_index] == offset) {
    uint32 current_crc = offset == 0 ? crc32(0, Z_NULL, 0)
                                     : crc32s_[stream_index];
    crc32s_[stream_index] = crc32_combine(current_crc, *read_crc32, *result);
    crc32s_end_offset_[stream_index] += *result;
    if (!have_written_[stream_index] &&
        GetDataSize(stream_index) == crc32s_end_offset_[stream_index]) {
      // We have just read a file from start to finish, and so we have
      // computed a crc of the entire file. We can check it now. If a cache
      // entry has a single reader, the normal pattern is to read from start
      // to finish.

      // Other cases are possible. In the case of two readers on the same
      // entry, one reader can be behind the other. In this case we compute
      // the crc as the most advanced reader progresses, and check it for
      // both readers as they read the last byte.

      scoped_ptr<int> new_result(new int());
      Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
                                base::Unretained(synchronous_entry_),
                                stream_index, crc32s_[stream_index],
                                new_result.get());
      Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete,
                                 this, *result, stream_index,
                                 completion_callback,
                                 base::Passed(&new_result));
      worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
      crc_check_state_[stream_index] = CRC_CHECK_DONE;
      return;
    }
  }
  if (*result < 0) {
    RecordReadResult(READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(READ_RESULT_SUCCESS);
    if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END &&
        offset + *result == GetDataSize(stream_index)) {
      crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE;
    }
  }
  EntryOperationComplete(stream_index, completion_callback, result.Pass());
}

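// Completion of a write on the worker pool: records the result histogram and
// finishes via EntryOperationComplete().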
void SimpleEntryImpl::WriteOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  if (*result >= 0)
    RecordWriteResult(WRITE_RESULT_SUCCESS);
  else
    RecordWriteResult(WRITE_RESULT_SYNC_WRITE_FAILURE);
  EntryOperationComplete(stream_index, completion_callback, result.Pass());
}

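// Completion of the end-of-stream CRC check. If the stored checksum matched,
// the original read result is reported; otherwise the read fails with a
// checksum error via EntryOperationComplete().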
void SimpleEntryImpl::ChecksumOperationComplete(
    int orig_result,
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);
  if (*result == net::OK) {
    *result = orig_result;
    if (orig_result >= 0)
      RecordReadResult(READ_RESULT_SUCCESS);
    else
      RecordReadResult(READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(READ_RESULT_SYNC_CHECKSUM_FAILURE);
  }
  EntryOperationComplete(stream_index, completion_callback, result.Pass());
}

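// Runs after the synchronous entry has been closed (or when there was nothing
// to close); returns the object to the uninitialized state and lets any queued
// operation proceed.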
void SimpleEntryImpl::CloseOperationComplete() {
  DCHECK(!synchronous_entry_);
  DCHECK_EQ(0, open_count_);
  DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
         STATE_UNINITIALIZED == state_);
  MakeUninitialized();
  RunNextOperationIfNeeded();
}

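// Copies the timestamps and stream sizes out of the synchronous entry so they
// can be served from the IO thread, and pushes the new entry size to the
// index.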
void SimpleEntryImpl::SetSynchronousData() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_READY, state_);
  // TODO(felipeg): These copies to avoid data races are not optimal. While
  // adding an IO thread index (for fast misses etc...), we can store this data
  // in that structure. This also solves problems with last_used() on ext4
  // filesystems not being accurate.
  last_used_ = synchronous_entry_->last_used();
  last_modified_ = synchronous_entry_->last_modified();
  for (int i = 0; i < kSimpleEntryFileCount; ++i)
    data_size_[i] = synchronous_entry_->data_size(i);
  if (backend_.get())
    backend_->index()->UpdateEntrySize(key_, synchronous_entry_->GetFileSize());
}

}  // namespace disk_cache