simple_entry_impl.cc revision eb525c5499e34cc9c4b825d6d9e75bb07cc06ace
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_entry_impl.h"

#include <algorithm>
#include <cstring>
#include <vector>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/histogram.h"
#include "base/task_runner.h"
#include "base/time/time.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"
#include "third_party/zlib/zlib.h"

namespace {

// Used in histograms, please only add entries at the end.
enum ReadResult {
  READ_RESULT_SUCCESS = 0,
  READ_RESULT_INVALID_ARGUMENT = 1,
  READ_RESULT_NONBLOCK_EMPTY_RETURN = 2,
  READ_RESULT_BAD_STATE = 3,
  READ_RESULT_FAST_EMPTY_RETURN = 4,
  READ_RESULT_SYNC_READ_FAILURE = 5,
  READ_RESULT_SYNC_CHECKSUM_FAILURE = 6,
  READ_RESULT_MAX = 7,
};

// Used in histograms, please only add entries at the end.
enum WriteResult {
  WRITE_RESULT_SUCCESS = 0,
  WRITE_RESULT_INVALID_ARGUMENT = 1,
  WRITE_RESULT_OVER_MAX_SIZE = 2,
  WRITE_RESULT_BAD_STATE = 3,
  WRITE_RESULT_SYNC_WRITE_FAILURE = 4,
  WRITE_RESULT_MAX = 5,
};

void RecordReadResult(ReadResult result) {
  UMA_HISTOGRAM_ENUMERATION("SimpleCache.ReadResult", result, READ_RESULT_MAX);
}

void RecordWriteResult(WriteResult result) {
  UMA_HISTOGRAM_ENUMERATION("SimpleCache.WriteResult",
                            result, WRITE_RESULT_MAX);
}

// Short trampoline to take an owned input parameter and call a net completion
// callback with its value.
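// For example (a sketch of the pattern used by DoomEntry() below): the
// worker-pool task writes through |result.get()|, and the reply closure
// takes ownership of the int via base::Passed():
//   scoped_ptr<int> result(new int());
//   Closure task = base::Bind(&SomeSyncOperation, result.get());
//   Closure reply = base::Bind(&CallCompletionCallback,
//                              callback, base::Passed(&result));
//   worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
// |SomeSyncOperation| is a placeholder; the real callers bind
// SimpleSynchronousEntry methods.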
void CallCompletionCallback(const net::CompletionCallback& callback,
                            scoped_ptr<int> result) {
  DCHECK(result);
  if (!callback.is_null())
    callback.Run(*result);
}

}  // namespace

namespace disk_cache {

using base::Closure;
using base::FilePath;
using base::MessageLoopProxy;
using base::Time;
using base::TaskRunner;

// A helper class to ensure that RunNextOperationIfNeeded() is called when
// exiting the current stack frame.
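// Typical usage (see e.g. OpenEntryInternal() below): declare a
// ScopedOperationRunner at the top of a method so the next queued operation
// runs on every exit path:
//   void SimpleEntryImpl::SomeOperationInternal() {
//     ScopedOperationRunner operation_runner(this);
//     ...  // Early returns are safe; the destructor runs the next operation.
//   }
// |SomeOperationInternal| is illustrative, not a method of this class.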
class SimpleEntryImpl::ScopedOperationRunner {
 public:
  explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {
  }

  ~ScopedOperationRunner() {
    entry_->RunNextOperationIfNeeded();
  }

 private:
  SimpleEntryImpl* const entry_;
};

SimpleEntryImpl::SimpleEntryImpl(SimpleBackendImpl* backend,
                                 const FilePath& path,
                                 const uint64 entry_hash)
    : backend_(backend->AsWeakPtr()),
      worker_pool_(backend->worker_pool()),
      path_(path),
      entry_hash_(entry_hash),
      last_used_(Time::Now()),
      last_modified_(last_used_),
      open_count_(0),
      state_(STATE_UNINITIALIZED),
      synchronous_entry_(NULL) {
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_),
                 arrays_should_be_same_size);
  MakeUninitialized();
}

int SimpleEntryImpl::OpenEntry(Entry** out_entry,
                               const CompletionCallback& callback) {
  DCHECK(backend_.get());
  // This enumeration is used in histograms, add entries only at the end.
  enum OpenEntryIndexEnum {
    INDEX_NOEXIST = 0,
    INDEX_MISS = 1,
    INDEX_HIT = 2,
    INDEX_MAX = 3,
  };
  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
  if (backend_.get()) {
    if (backend_->index()->Has(entry_hash_))
      open_entry_index_enum = INDEX_HIT;
    else
      open_entry_index_enum = INDEX_MISS;
  }
  UMA_HISTOGRAM_ENUMERATION("SimpleCache.OpenEntryIndexState",
                            open_entry_index_enum, INDEX_MAX);

  // If the entry is not known to the index, initiate fast failover to the
  // network.
  if (open_entry_index_enum == INDEX_MISS)
    return net::ERR_FAILED;

  pending_operations_.push(base::Bind(&SimpleEntryImpl::OpenEntryInternal,
                                      this, callback, out_entry));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::CreateEntry(Entry** out_entry,
                                 const CompletionCallback& callback) {
  DCHECK(backend_.get());
  DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));
  int ret_value = net::ERR_FAILED;
  if (state_ == STATE_UNINITIALIZED &&
      pending_operations_.size() == 0) {
    // We can do an optimistic Create: return the entry to the caller
    // immediately and queue the actual creation with a null callback.
    ReturnEntryToCaller(out_entry);
    pending_operations_.push(base::Bind(&SimpleEntryImpl::CreateEntryInternal,
                                        this,
                                        CompletionCallback(),
                                        static_cast<Entry**>(NULL)));
    ret_value = net::OK;
  } else {
    pending_operations_.push(base::Bind(&SimpleEntryImpl::CreateEntryInternal,
                                        this,
                                        callback,
                                        out_entry));
    ret_value = net::ERR_IO_PENDING;
  }

  // We insert the entry into the index before creating the entry files in the
  // SimpleSynchronousEntry. This way the worst-case scenario is an entry that
  // is present in the index but whose files were never created, so we never
  // leak files. CreationOperationComplete will remove the entry from the
  // index if the creation fails.
  if (backend_.get())
    backend_->index()->Insert(key_);

  RunNextOperationIfNeeded();
  return ret_value;
}

int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
  MarkAsDoomed();
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, key_,
                            entry_hash_, result.get());
  Closure reply = base::Bind(&CallCompletionCallback,
                             callback, base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
  return net::ERR_IO_PENDING;
}

void SimpleEntryImpl::Doom() {
  DoomEntry(CompletionCallback());
}

void SimpleEntryImpl::Close() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LT(0, open_count_);

  if (--open_count_ > 0) {
    DCHECK(!HasOneRef());
    Release();  // Balanced in ReturnEntryToCaller().
    return;
  }

  pending_operations_.push(base::Bind(&SimpleEntryImpl::CloseInternal, this));
  DCHECK(!HasOneRef());
  Release();  // Balanced in ReturnEntryToCaller().
  RunNextOperationIfNeeded();
}

std::string SimpleEntryImpl::GetKey() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return key_;
}

Time SimpleEntryImpl::GetLastUsed() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_used_;
}

Time SimpleEntryImpl::GetLastModified() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_modified_;
}

int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LE(0, data_size_[stream_index]);
  return data_size_[stream_index];
}

int SimpleEntryImpl::ReadData(int stream_index,
                              int offset,
                              net::IOBuffer* buf,
                              int buf_len,
                              const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (stream_index < 0 || stream_index >= kSimpleEntryFileCount ||
      buf_len < 0) {
    RecordReadResult(READ_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
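  // Fast-return an empty read only when no operations are queued: with
  // operations still pending (e.g. a queued write), the stream size may
  // change before this read would run, so the check below cannot be trusted
  // and the read is queued instead.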
  if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) ||
                                      offset < 0 || !buf_len)) {
    RecordReadResult(READ_RESULT_NONBLOCK_EMPTY_RETURN);
    return 0;
  }

  // TODO(felipeg): Optimization: Add support for truly parallel read
  // operations.
  pending_operations_.push(
      base::Bind(&SimpleEntryImpl::ReadDataInternal,
                 this,
                 stream_index,
                 offset,
                 make_scoped_refptr(buf),
                 buf_len,
                 callback));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::WriteData(int stream_index,
                               int offset,
                               net::IOBuffer* buf,
                               int buf_len,
                               const CompletionCallback& callback,
                               bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || offset < 0 ||
      buf_len < 0) {
    RecordWriteResult(WRITE_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) {
    RecordWriteResult(WRITE_RESULT_OVER_MAX_SIZE);
    return net::ERR_FAILED;
  }

  int ret_value = net::ERR_FAILED;
  if (state_ == STATE_READY && pending_operations_.size() == 0) {
    // We can only do an optimistic Write if there are no pending operations,
    // so that we are sure the next call to RunNextOperationIfNeeded() will
    // actually run the write operation that sets the stream size. It also
    // protects us from previous, possibly conflicting, writes that could be
    // stacked in |pending_operations_|. We could optimize this for the case
    // where only read operations are enqueued.
    // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer
    // here to avoid paying the price of the RefCountedThreadSafe atomic
    // operations.
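    // Sketch of the caller-visible behavior on this optimistic path (the
    // caller below is illustrative, not code from this file):
    //   int rv = entry->WriteData(0, 0, buf, len, callback, true);
    //   // rv == len: the write appears to complete synchronously, and
    //   // |callback| will never be invoked; the actual disk write is queued
    //   // with a null callback.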
    IOBuffer* buf_copy = NULL;
    if (buf) {
      buf_copy = new IOBuffer(buf_len);
      memcpy(buf_copy->data(), buf->data(), buf_len);
    }
    pending_operations_.push(
        base::Bind(&SimpleEntryImpl::WriteDataInternal, this, stream_index,
                   offset, make_scoped_refptr(buf_copy), buf_len,
                   CompletionCallback(), truncate));
    ret_value = buf_len;
  } else {
    pending_operations_.push(
        base::Bind(&SimpleEntryImpl::WriteDataInternal, this, stream_index,
                   offset, make_scoped_refptr(buf), buf_len, callback,
                   truncate));
    ret_value = net::ERR_IO_PENDING;
  }

  RunNextOperationIfNeeded();
  return ret_value;
}

int SimpleEntryImpl::ReadSparseData(int64 offset,
                                    net::IOBuffer* buf,
                                    int buf_len,
                                    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

int SimpleEntryImpl::WriteSparseData(int64 offset,
                                     net::IOBuffer* buf,
                                     int buf_len,
                                     const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

int SimpleEntryImpl::GetAvailableRange(int64 offset,
                                       int len,
                                       int64* start,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

bool SimpleEntryImpl::CouldBeSparse() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  return false;
}

void SimpleEntryImpl::CancelSparseIO() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
}

int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

SimpleEntryImpl::~SimpleEntryImpl() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(0U, pending_operations_.size());
  DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_FAILURE);
  DCHECK(!synchronous_entry_);
  RemoveSelfFromBackend();
}

void SimpleEntryImpl::MakeUninitialized() {
  state_ = STATE_UNINITIALIZED;
  std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
  std::memset(crc32s_, 0, sizeof(crc32s_));
  std::memset(have_written_, 0, sizeof(have_written_));
  std::memset(data_size_, 0, sizeof(data_size_));
  std::memset(crc_check_state_, 0, sizeof(crc_check_state_));
}

void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
  DCHECK(out_entry);
  ++open_count_;
  AddRef();  // Balanced in Close().
  *out_entry = this;
}

void SimpleEntryImpl::RemoveSelfFromBackend() {
  if (!backend_.get())
    return;
  backend_->OnDeactivated(this);
  backend_.reset();
}

void SimpleEntryImpl::MarkAsDoomed() {
  if (!backend_.get())
    return;
  backend_->index()->Remove(key_);
  RemoveSelfFromBackend();
}

void SimpleEntryImpl::RunNextOperationIfNeeded() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  UMA_HISTOGRAM_CUSTOM_COUNTS("SimpleCache.EntryOperationsPending",
                              pending_operations_.size(), 0, 100, 20);
  if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
    base::Closure operation = pending_operations_.front();
    pending_operations_.pop();
    operation.Run();
    // |this| may have been deleted.
  }
}

void SimpleEntryImpl::OpenEntryInternal(const CompletionCallback& callback,
                                        Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);
  if (state_ == STATE_READY) {
    ReturnEntryToCaller(out_entry);
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback,
                                                                net::OK));
    return;
  } else if (state_ == STATE_FAILURE) {
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);
  state_ = STATE_IO_PENDING;
  const base::TimeTicks start_time = base::TimeTicks::Now();
  typedef SimpleSynchronousEntry* PointerToSimpleSynchronousEntry;
  scoped_ptr<PointerToSimpleSynchronousEntry> sync_entry(
      new PointerToSimpleSynchronousEntry());
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry, path_,
                            entry_hash_, sync_entry.get(), result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete, this,
                             callback, start_time, base::Passed(&sync_entry),
                             base::Passed(&result), out_entry);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::CreateEntryInternal(const CompletionCallback& callback,
                                          Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);
  if (state_ != STATE_UNINITIALIZED) {
    // There is already an active normal entry.
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);

  state_ = STATE_IO_PENDING;

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  // If creation succeeds, we should mark all streams to be saved on close.
  for (int i = 0; i < kSimpleEntryFileCount; ++i)
    have_written_[i] = true;

  const base::TimeTicks start_time = base::TimeTicks::Now();
  typedef SimpleSynchronousEntry* PointerToSimpleSynchronousEntry;
  scoped_ptr<PointerToSimpleSynchronousEntry> sync_entry(
      new PointerToSimpleSynchronousEntry());
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry, path_, key_,
                            entry_hash_, sync_entry.get(), result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete, this,
                             callback, start_time, base::Passed(&sync_entry),
                             base::Passed(&result), out_entry);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::CloseInternal() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
  scoped_ptr<std::vector<CRCRecord> >
      crc32s_to_write(new std::vector<CRCRecord>());
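  // For every stream written during this open, record whether we hold a CRC
  // covering the whole stream (only true after strictly sequential writes);
  // the synchronous entry persists these records when it closes.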

  if (state_ == STATE_READY) {
    DCHECK(synchronous_entry_);
    state_ = STATE_IO_PENDING;
    for (int i = 0; i < kSimpleEntryFileCount; ++i) {
      if (have_written_[i]) {
        if (GetDataSize(i) == crc32s_end_offset_[i]) {
          int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
          crc32s_to_write->push_back(CRCRecord(i, true, crc));
        } else {
          crc32s_to_write->push_back(CRCRecord(i, false, 0));
        }
      }
    }
  } else {
    DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
  }

  if (synchronous_entry_) {
    Closure task = base::Bind(&SimpleSynchronousEntry::Close,
                              base::Unretained(synchronous_entry_),
                              base::Passed(&crc32s_to_write));
    Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
    synchronous_entry_ = NULL;
    worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);

    for (int i = 0; i < kSimpleEntryFileCount; ++i) {
      if (!have_written_[i]) {
        UMA_HISTOGRAM_ENUMERATION("SimpleCache.CheckCRCResult",
                                  crc_check_state_[i], CRC_CHECK_MAX);
      }
    }
  } else {
    synchronous_entry_ = NULL;
    CloseOperationComplete();
  }
}

void SimpleEntryImpl::ReadDataInternal(int stream_index,
                                       int offset,
                                       net::IOBuffer* buf,
                                       int buf_len,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    if (!callback.is_null()) {
      RecordReadResult(READ_RESULT_BAD_STATE);
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_READY, state_);
  if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
    RecordReadResult(READ_RESULT_FAST_EMPTY_RETURN);
    // If there is nothing to read, we bail out before setting state_ to
    // STATE_IO_PENDING.
    if (!callback.is_null())
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, 0));
    return;
  }
  buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);

  state_ = STATE_IO_PENDING;
  if (backend_.get())
    backend_->index()->UseIfExists(key_);

  scoped_ptr<uint32> read_crc32(new uint32());
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::ReadData,
                            base::Unretained(synchronous_entry_),
                            stream_index, offset, make_scoped_refptr(buf),
                            buf_len, read_crc32.get(), result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete, this,
                             stream_index, offset, callback,
                             base::Passed(&read_crc32), base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::WriteDataInternal(int stream_index,
                                        int offset,
                                        net::IOBuffer* buf,
                                        int buf_len,
                                        const CompletionCallback& callback,
                                        bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);
  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    RecordWriteResult(WRITE_RESULT_BAD_STATE);
    if (!callback.is_null()) {
      // We need to post a task to avoid the re-entrant loop that could occur
      // if we ran the callback directly.
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    // |this| may be destroyed after return here.
    return;
  }
  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;
  if (backend_.get())
    backend_->index()->UseIfExists(key_);
  // It is easy to incrementally compute the CRC of [0 .. |offset + buf_len|)
  // if |offset == 0| or we have already computed the CRC of [0 .. offset).
  // We rely on most write operations being sequential, from start to end, to
  // compute the CRC of the data. When we write to an entry and close without
  // having done a sequential write, we don't check the CRC on read.
  if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
    uint32 initial_crc = (offset != 0) ? crc32s_[stream_index]
                                       : crc32(0, Z_NULL, 0);
    if (buf_len > 0) {
      crc32s_[stream_index] = crc32(initial_crc,
                                    reinterpret_cast<const Bytef*>(buf->data()),
                                    buf_len);
    }
    crc32s_end_offset_[stream_index] = offset + buf_len;
  }
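  // Worked example of the invariant maintained above (illustrative values):
  // after a write of 100 bytes at offset 0 followed by one of 50 bytes at
  // offset 100, crc32s_end_offset_[i] == 150 and crc32s_[i] holds the zlib
  // CRC-32 of bytes [0, 150), chained via crc32(prev_crc, data, len). A write
  // at any other offset leaves the CRC state untouched, so the stream's CRC
  // is later reported as unusable in CloseInternal().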

  if (truncate) {
    data_size_[stream_index] = offset + buf_len;
  } else {
    data_size_[stream_index] = std::max(offset + buf_len,
                                        GetDataSize(stream_index));
  }

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  have_written_[stream_index] = true;

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::WriteData,
                            base::Unretained(synchronous_entry_),
                            stream_index, offset, make_scoped_refptr(buf),
                            buf_len, truncate, result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete, this,
                             stream_index, callback, base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::CreationOperationComplete(
    const CompletionCallback& completion_callback,
    const base::TimeTicks& start_time,
    scoped_ptr<SimpleSynchronousEntry*> in_sync_entry,
    scoped_ptr<int> in_result,
    Entry** out_entry) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, STATE_IO_PENDING);
  DCHECK(in_sync_entry);
  DCHECK(in_result);
  ScopedOperationRunner operation_runner(this);
  UMA_HISTOGRAM_BOOLEAN(
      "SimpleCache.EntryCreationResult", *in_result == net::OK);
  if (*in_result != net::OK) {
    if (*in_result != net::ERR_FILE_EXISTS)
      MarkAsDoomed();
    if (!completion_callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          completion_callback, net::ERR_FAILED));
    }
    MakeUninitialized();
    return;
  }
  // If out_entry is NULL, it means we already called ReturnEntryToCaller from
  // the optimistic Create case.
  if (out_entry)
    ReturnEntryToCaller(out_entry);

  state_ = STATE_READY;
  synchronous_entry_ = *in_sync_entry;
  if (key_.empty()) {
    key_ = synchronous_entry_->key();
  } else {
    // This should only be triggered when creating an entry. The key check in
    // the open case is handled in SimpleBackendImpl.
    DCHECK_EQ(key_, synchronous_entry_->key());
  }
  SetSynchronousData();
  UMA_HISTOGRAM_TIMES("SimpleCache.EntryCreationTime",
                      (base::TimeTicks::Now() - start_time));

  if (!completion_callback.is_null()) {
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
        completion_callback, net::OK));
  }
}

void SimpleEntryImpl::EntryOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);
  state_ = STATE_READY;
  if (*result < 0) {
    MarkAsDoomed();
    state_ = STATE_FAILURE;
    crc32s_end_offset_[stream_index] = 0;
  } else {
    SetSynchronousData();
  }

  if (!completion_callback.is_null()) {
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
        completion_callback, *result));
  }
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::ReadOperationComplete(
    int stream_index,
    int offset,
    const CompletionCallback& completion_callback,
    scoped_ptr<uint32> read_crc32,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(read_crc32);
  DCHECK(result);

  if (*result > 0 && crc32s_end_offset_[stream_index] == offset) {
    uint32 current_crc = offset == 0 ? crc32(0, Z_NULL, 0)
                                     : crc32s_[stream_index];
    crc32s_[stream_index] = crc32_combine(current_crc, *read_crc32, *result);
    crc32s_end_offset_[stream_index] += *result;
    if (!have_written_[stream_index] &&
        GetDataSize(stream_index) == crc32s_end_offset_[stream_index]) {
      // We have just read a file from start to finish, and so we have
      // computed a CRC of the entire file. We can check it now. If a cache
      // entry has a single reader, the normal pattern is to read from start
      // to finish.

      // Other cases are possible. In the case of two readers on the same
      // entry, one reader can be behind the other. In this case we compute
      // the CRC as the most advanced reader progresses, and check it for
      // both readers as they read the last byte.
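
      // A note on crc32_combine() above (a zlib routine): given
      // crc1 = CRC(A) and crc2 = CRC(B), crc32_combine(crc1, crc2, len(B))
      // yields CRC(A || B). That is how the per-read CRC reported by the
      // synchronous entry is folded into the running whole-stream CRC
      // without rereading earlier bytes.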

      scoped_ptr<int> new_result(new int());
      Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
                                base::Unretained(synchronous_entry_),
                                stream_index, crc32s_[stream_index],
                                new_result.get());
      Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete,
                                 this, *result, stream_index,
                                 completion_callback,
                                 base::Passed(&new_result));
      worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
      crc_check_state_[stream_index] = CRC_CHECK_DONE;
      return;
    }
  }
  if (*result < 0) {
    RecordReadResult(READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(READ_RESULT_SUCCESS);
    if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END &&
        offset + *result == GetDataSize(stream_index)) {
      crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE;
    }
  }
  EntryOperationComplete(stream_index, completion_callback, result.Pass());
}

void SimpleEntryImpl::WriteOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  if (*result >= 0)
    RecordWriteResult(WRITE_RESULT_SUCCESS);
  else
    RecordWriteResult(WRITE_RESULT_SYNC_WRITE_FAILURE);
  EntryOperationComplete(stream_index, completion_callback, result.Pass());
}

void SimpleEntryImpl::ChecksumOperationComplete(
    int orig_result,
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);
  if (*result == net::OK) {
    *result = orig_result;
    if (orig_result >= 0)
      RecordReadResult(READ_RESULT_SUCCESS);
    else
      RecordReadResult(READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(READ_RESULT_SYNC_CHECKSUM_FAILURE);
  }
  EntryOperationComplete(stream_index, completion_callback, result.Pass());
}

void SimpleEntryImpl::CloseOperationComplete() {
  DCHECK(!synchronous_entry_);
  DCHECK_EQ(0, open_count_);
  DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
         STATE_UNINITIALIZED == state_);
  MakeUninitialized();
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::SetSynchronousData() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_READY, state_);
  // TODO(felipeg): These copies to avoid data races are not optimal. While
  // adding an IO thread index (for fast misses, etc.), we can store this data
  // in that structure. This also solves problems with last_used() on ext4
  // filesystems not being accurate.
  last_used_ = synchronous_entry_->last_used();
  last_modified_ = synchronous_entry_->last_modified();
  for (int i = 0; i < kSimpleEntryFileCount; ++i)
    data_size_[i] = synchronous_entry_->data_size(i);
  if (backend_.get())
    backend_->index()->UpdateEntrySize(key_, synchronous_entry_->GetFileSize());
}

}  // namespace disk_cache