simple_entry_impl.cc revision a93a17c8d99d686bd4a1511e5504e5e6cc9fcadf
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_entry_impl.h"

#include <algorithm>
#include <cstring>
#include <vector>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop_proxy.h"
#include "base/metrics/histogram.h"
#include "base/threading/worker_pool.h"
#include "base/time.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"
#include "third_party/zlib/zlib.h"

namespace {

// Used in histograms; please add new entries only at the end.
enum ReadResult {
  READ_RESULT_SUCCESS = 0,
  READ_RESULT_INVALID_ARGUMENT = 1,
  READ_RESULT_NONBLOCK_EMPTY_RETURN = 2,
  READ_RESULT_BAD_STATE = 3,
  READ_RESULT_FAST_EMPTY_RETURN = 4,
  READ_RESULT_SYNC_READ_FAILURE = 5,
  READ_RESULT_SYNC_CHECKSUM_FAILURE = 6,
  READ_RESULT_MAX = 7,
};

// Used in histograms; please add new entries only at the end.
enum WriteResult {
  WRITE_RESULT_SUCCESS = 0,
  WRITE_RESULT_INVALID_ARGUMENT = 1,
  WRITE_RESULT_OVER_MAX_SIZE = 2,
  WRITE_RESULT_BAD_STATE = 3,
  WRITE_RESULT_SYNC_WRITE_FAILURE = 4,
  WRITE_RESULT_MAX = 5,
};

void RecordReadResult(ReadResult result) {
  UMA_HISTOGRAM_ENUMERATION("SimpleCache.ReadResult", result, READ_RESULT_MAX);
}

void RecordWriteResult(WriteResult result) {
  UMA_HISTOGRAM_ENUMERATION("SimpleCache.WriteResult",
                            result, WRITE_RESULT_MAX);
}

// Short trampoline that takes an owned |result| and passes its value to a net
// completion callback.
void CallCompletionCallback(const net::CompletionCallback& callback,
                            scoped_ptr<int> result) {
  DCHECK(result);
  if (!callback.is_null())
    callback.Run(*result);
}
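
// A minimal sketch of the pattern this trampoline supports (names here are
// illustrative, not part of this file): a worker task fills *result off the
// IO thread, and a reply on the originating thread delivers the value:
//
//   scoped_ptr<int> result(new int());
//   base::Closure task = base::Bind(&SomeBlockingWork, result.get());
//   base::Closure reply = base::Bind(&CallCompletionCallback,
//                                    callback, base::Passed(&result));
//   base::WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
//
// base::Passed() transfers ownership of |result| into the reply closure, so
// the integer outlives the worker task that writes it.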

}  // namespace

namespace disk_cache {

using base::Closure;
using base::FilePath;
using base::MessageLoopProxy;
using base::Time;
using base::WorkerPool;

// A helper class to ensure that RunNextOperationIfNeeded() is called upon
// exiting the current stack frame.
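//
// Illustrative use (a sketch, not additional API): constructing one at the
// top of an operation guarantees the pending-operation queue is pumped on
// every return path, including early returns:
//
//   void SimpleEntryImpl::SomeOperationInternal() {
//     ScopedOperationRunner operation_runner(this);
//     if (state_ == STATE_FAILURE)
//       return;  // The destructor still runs the next queued operation.
//     ...
//   }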
class SimpleEntryImpl::ScopedOperationRunner {
 public:
  explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {
  }

  ~ScopedOperationRunner() {
    entry_->RunNextOperationIfNeeded();
  }

 private:
  SimpleEntryImpl* const entry_;
};
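
// A sketch of the state transitions observable in this file (assembled from
// the code below, not an authoritative diagram):
//
//   STATE_UNINITIALIZED --(Open/Create)--> STATE_IO_PENDING
//   STATE_IO_PENDING --(creation succeeds)--> STATE_READY
//   STATE_IO_PENDING --(creation fails)--> STATE_FAILURE
//   STATE_READY --(Read/Write/Close)--> STATE_IO_PENDING --> STATE_READY
//
// Any failed I/O moves the entry to STATE_FAILURE, and
// CloseOperationComplete() returns it to STATE_UNINITIALIZED.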

SimpleEntryImpl::SimpleEntryImpl(SimpleBackendImpl* backend,
                                 const FilePath& path,
                                 const std::string& key,
                                 const uint64 entry_hash)
    : backend_(backend->AsWeakPtr()),
      path_(path),
      key_(key),
      entry_hash_(entry_hash),
      open_count_(0),
      state_(STATE_UNINITIALIZED),
      synchronous_entry_(NULL) {
  DCHECK_EQ(entry_hash, simple_util::GetEntryHashKey(key));
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_),
                 arrays_should_be_same_size2);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_),
                 arrays_should_be_same_size3);
  MakeUninitialized();
}

int SimpleEntryImpl::OpenEntry(Entry** out_entry,
                               const CompletionCallback& callback) {
  DCHECK(backend_);
  // This enumeration is used in histograms; add entries only at the end.
  enum OpenEntryIndexEnum {
    INDEX_NOEXIST = 0,
    INDEX_MISS = 1,
    INDEX_HIT = 2,
    INDEX_MAX = 3,
  };
  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
  if (backend_) {
    if (backend_->index()->Has(key_))
      open_entry_index_enum = INDEX_HIT;
    else
      open_entry_index_enum = INDEX_MISS;
  }
  UMA_HISTOGRAM_ENUMERATION("SimpleCache.OpenEntryIndexState",
                            open_entry_index_enum, INDEX_MAX);

  // If the entry is not known to the index, initiate fast failover to the
  // network.
  if (open_entry_index_enum == INDEX_MISS)
    return net::ERR_FAILED;

  pending_operations_.push(base::Bind(&SimpleEntryImpl::OpenEntryInternal,
                                      this, callback, out_entry));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::CreateEntry(Entry** out_entry,
                                 const CompletionCallback& callback) {
  DCHECK(backend_);
  int ret_value = net::ERR_FAILED;
  if (state_ == STATE_UNINITIALIZED &&
      pending_operations_.size() == 0) {
    // We can do an optimistic Create: return the entry to the caller
    // immediately and queue the actual creation.
    ReturnEntryToCaller(out_entry);
    pending_operations_.push(base::Bind(&SimpleEntryImpl::CreateEntryInternal,
                                        this,
                                        CompletionCallback(),
                                        static_cast<Entry**>(NULL)));
    ret_value = net::OK;
  } else {
    pending_operations_.push(base::Bind(&SimpleEntryImpl::CreateEntryInternal,
                                        this,
                                        callback,
                                        out_entry));
    ret_value = net::ERR_IO_PENDING;
  }

  // We insert the entry into the index before creating the entry files in the
  // SimpleSynchronousEntry. The worst case is then an entry that is present in
  // the index but whose files do not exist yet, so we never leak files.
  // CreationOperationComplete() will remove the entry from the index if the
  // creation fails.
  if (backend_)
    backend_->index()->Insert(key_);

  RunNextOperationIfNeeded();
  return ret_value;
}

int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
  MarkAsDoomed();
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, key_,
                            entry_hash_, result.get());
  Closure reply = base::Bind(&CallCompletionCallback,
                             callback, base::Passed(&result));
  WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
  return net::ERR_IO_PENDING;
}

void SimpleEntryImpl::Doom() {
  DoomEntry(CompletionCallback());
}

void SimpleEntryImpl::Close() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LT(0, open_count_);

  if (--open_count_ > 0) {
    DCHECK(!HasOneRef());
    Release();  // Balanced in ReturnEntryToCaller().
    return;
  }

  pending_operations_.push(base::Bind(&SimpleEntryImpl::CloseInternal, this));
  DCHECK(!HasOneRef());
  Release();  // Balanced in ReturnEntryToCaller().
  RunNextOperationIfNeeded();
}

std::string SimpleEntryImpl::GetKey() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return key_;
}

Time SimpleEntryImpl::GetLastUsed() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_used_;
}

Time SimpleEntryImpl::GetLastModified() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_modified_;
}

int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LE(0, data_size_[stream_index]);
  return data_size_[stream_index];
}

int SimpleEntryImpl::ReadData(int stream_index,
                              int offset,
                              net::IOBuffer* buf,
                              int buf_len,
                              const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (stream_index < 0 || stream_index >= kSimpleEntryFileCount ||
      buf_len < 0) {
    RecordReadResult(READ_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (pending_operations_.empty() && (offset >= data_size_[stream_index] ||
                                      offset < 0 || !buf_len)) {
    RecordReadResult(READ_RESULT_NONBLOCK_EMPTY_RETURN);
    return 0;
  }

  // TODO(felipeg): Optimization: Add support for truly parallel read
  // operations.
  pending_operations_.push(
      base::Bind(&SimpleEntryImpl::ReadDataInternal,
                 this,
                 stream_index,
                 offset,
                 make_scoped_refptr(buf),
                 buf_len,
                 callback));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::WriteData(int stream_index,
                               int offset,
                               net::IOBuffer* buf,
                               int buf_len,
                               const CompletionCallback& callback,
                               bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || offset < 0 ||
      buf_len < 0) {
    RecordWriteResult(WRITE_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (backend_ && offset + buf_len > backend_->GetMaxFileSize()) {
    RecordWriteResult(WRITE_RESULT_OVER_MAX_SIZE);
    return net::ERR_FAILED;
  }

  int ret_value = net::ERR_FAILED;
  if (state_ == STATE_READY && pending_operations_.size() == 0) {
    // We can only do an optimistic Write when there are no pending
    // operations. That guarantees the next call to RunNextOperationIfNeeded()
    // runs this write, which sets the stream size, and it protects the write
    // from possibly-conflicting operations already stacked in
    // |pending_operations_|. We could relax this when only read operations
    // are enqueued.
    pending_operations_.push(
        base::Bind(&SimpleEntryImpl::WriteDataInternal, this, stream_index,
                   offset, make_scoped_refptr(buf), buf_len,
                   CompletionCallback(), truncate));
    ret_value = buf_len;
  } else {
    pending_operations_.push(
        base::Bind(&SimpleEntryImpl::WriteDataInternal, this, stream_index,
                   offset, make_scoped_refptr(buf), buf_len, callback,
                   truncate));
    ret_value = net::ERR_IO_PENDING;
  }

  RunNextOperationIfNeeded();
  return ret_value;
}

int SimpleEntryImpl::ReadSparseData(int64 offset,
                                    net::IOBuffer* buf,
                                    int buf_len,
                                    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

int SimpleEntryImpl::WriteSparseData(int64 offset,
                                     net::IOBuffer* buf,
                                     int buf_len,
                                     const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

int SimpleEntryImpl::GetAvailableRange(int64 offset,
                                       int len,
                                       int64* start,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

bool SimpleEntryImpl::CouldBeSparse() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  return false;
}

void SimpleEntryImpl::CancelSparseIO() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
}

int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

SimpleEntryImpl::~SimpleEntryImpl() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(0U, pending_operations_.size());
  DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
  DCHECK(!synchronous_entry_);
  RemoveSelfFromBackend();
}

void SimpleEntryImpl::MakeUninitialized() {
  state_ = STATE_UNINITIALIZED;
  std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
  std::memset(crc32s_, 0, sizeof(crc32s_));
  std::memset(have_written_, 0, sizeof(have_written_));
  std::memset(data_size_, 0, sizeof(data_size_));
}

void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
  DCHECK(out_entry);
  ++open_count_;
  AddRef();  // Balanced in Close().
  *out_entry = this;
}

void SimpleEntryImpl::RemoveSelfFromBackend() {
  if (!backend_)
    return;
  backend_->OnDeactivated(this);
  backend_.reset();
}

void SimpleEntryImpl::MarkAsDoomed() {
  if (!backend_)
    return;
  backend_->index()->Remove(key_);
  RemoveSelfFromBackend();
}

void SimpleEntryImpl::RunNextOperationIfNeeded() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  UMA_HISTOGRAM_CUSTOM_COUNTS("SimpleCache.EntryOperationsPending",
                              pending_operations_.size(), 0, 100, 20);
  if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
    base::Closure operation = pending_operations_.front();
    pending_operations_.pop();
    operation.Run();
    // |this| may have been deleted.
  }
}

void SimpleEntryImpl::OpenEntryInternal(const CompletionCallback& callback,
                                        Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);
  if (state_ == STATE_READY) {
    ReturnEntryToCaller(out_entry);
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback,
                                                                net::OK));
    return;
  } else if (state_ == STATE_FAILURE) {
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  state_ = STATE_IO_PENDING;
  const base::TimeTicks start_time = base::TimeTicks::Now();
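  // The worker task reports back through two heap-allocated out-parameters:
  // a SimpleSynchronousEntry* and the result code. base::Passed() hands their
  // ownership to the reply closure, which consumes them on the IO thread.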
  typedef SimpleSynchronousEntry* PointerToSimpleSynchronousEntry;
  scoped_ptr<PointerToSimpleSynchronousEntry> sync_entry(
      new PointerToSimpleSynchronousEntry());
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry, path_, key_,
                            entry_hash_, sync_entry.get(), result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete, this,
                             callback, start_time, base::Passed(&sync_entry),
                             base::Passed(&result), out_entry);
  WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
}

void SimpleEntryImpl::CreateEntryInternal(const CompletionCallback& callback,
                                          Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);
  if (state_ != STATE_UNINITIALIZED) {
    // There is already an active normal entry.
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);

  state_ = STATE_IO_PENDING;

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  // If creation succeeds, we should mark all streams to be saved on close.
  for (int i = 0; i < kSimpleEntryFileCount; ++i)
    have_written_[i] = true;

  const base::TimeTicks start_time = base::TimeTicks::Now();
  typedef SimpleSynchronousEntry* PointerToSimpleSynchronousEntry;
  scoped_ptr<PointerToSimpleSynchronousEntry> sync_entry(
      new PointerToSimpleSynchronousEntry());
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry, path_, key_,
                            entry_hash_, sync_entry.get(), result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete, this,
                             callback, start_time, base::Passed(&sync_entry),
                             base::Passed(&result), out_entry);
  WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
}

void SimpleEntryImpl::CloseInternal() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
  scoped_ptr<std::vector<CRCRecord> >
      crc32s_to_write(new std::vector<CRCRecord>());

  if (state_ == STATE_READY) {
    DCHECK(synchronous_entry_);
    state_ = STATE_IO_PENDING;
    for (int i = 0; i < kSimpleEntryFileCount; ++i) {
      if (have_written_[i]) {
        if (data_size_[i] == crc32s_end_offset_[i]) {
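          // The running CRC covers the whole stream; record it. For an empty
          // stream there is no running value, so use crc32(0, Z_NULL, 0),
          // zlib's CRC of the empty byte sequence.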
          int32 crc = data_size_[i] == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
          crc32s_to_write->push_back(CRCRecord(i, true, crc));
        } else {
          crc32s_to_write->push_back(CRCRecord(i, false, 0));
        }
      }
    }
  } else {
    DCHECK_EQ(STATE_FAILURE, state_);
  }

  if (synchronous_entry_) {
    Closure task = base::Bind(&SimpleSynchronousEntry::Close,
                              base::Unretained(synchronous_entry_),
                              base::Passed(&crc32s_to_write));
    Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
    synchronous_entry_ = NULL;
    WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
  } else {
    synchronous_entry_ = NULL;
    CloseOperationComplete();
  }
}

void SimpleEntryImpl::ReadDataInternal(int stream_index,
                                       int offset,
                                       net::IOBuffer* buf,
                                       int buf_len,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    if (!callback.is_null()) {
      RecordReadResult(READ_RESULT_BAD_STATE);
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_READY, state_);
  if (offset >= data_size_[stream_index] || offset < 0 || !buf_len) {
    RecordReadResult(READ_RESULT_FAST_EMPTY_RETURN);
    // If there is nothing to read, we bail out before setting state_ to
    // STATE_IO_PENDING.
    if (!callback.is_null())
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, 0));
    return;
  }
  buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);

  state_ = STATE_IO_PENDING;
  if (backend_)
    backend_->index()->UseIfExists(key_);

  scoped_ptr<uint32> read_crc32(new uint32());
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::ReadData,
                            base::Unretained(synchronous_entry_),
                            stream_index, offset, make_scoped_refptr(buf),
                            buf_len, read_crc32.get(), result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete, this,
                             stream_index, offset, callback,
                             base::Passed(&read_crc32), base::Passed(&result));
  WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
}

void SimpleEntryImpl::WriteDataInternal(int stream_index,
                                        int offset,
                                        net::IOBuffer* buf,
                                        int buf_len,
                                        const CompletionCallback& callback,
                                        bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);
  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    RecordWriteResult(WRITE_RESULT_BAD_STATE);
    if (!callback.is_null()) {
      // We post the callback rather than running it directly to avoid
      // re-entering this code in a loop.
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    // |this| may be destroyed after return here.
    return;
  }
  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;
  if (backend_)
    backend_->index()->UseIfExists(key_);
  // The CRC over [0, offset + buf_len) is cheap to compute incrementally when
  // |offset == 0| or when the CRC over [0, offset) is already known. We rely
  // on most writes being sequential, from start to end, to compute the CRC of
  // the data. When an entry is written non-sequentially and then closed, we
  // do not check the CRC on read.
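  //
  // A minimal sketch of the incremental update with zlib (buffer names here
  // are illustrative): CRCing consecutive chunks in order gives the same
  // value as CRCing their concatenation.
  //
  //   uint32 crc = crc32(0, Z_NULL, 0);      // CRC of the empty prefix.
  //   crc = crc32(crc, chunk_a, len_a);      // Now covers [0, len_a).
  //   crc = crc32(crc, chunk_b, len_b);      // Now covers [0, len_a + len_b).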
  if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
    uint32 initial_crc = (offset != 0) ? crc32s_[stream_index]
                                       : crc32(0, Z_NULL, 0);
    if (buf_len > 0) {
      crc32s_[stream_index] = crc32(initial_crc,
                                    reinterpret_cast<const Bytef*>(buf->data()),
                                    buf_len);
    }
    crc32s_end_offset_[stream_index] = offset + buf_len;
  }

  if (truncate) {
    data_size_[stream_index] = offset + buf_len;
  } else {
    data_size_[stream_index] = std::max(offset + buf_len,
                                        data_size_[stream_index]);
  }

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  have_written_[stream_index] = true;

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::WriteData,
                            base::Unretained(synchronous_entry_),
                            stream_index, offset, make_scoped_refptr(buf),
                            buf_len, truncate, result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete, this,
                             stream_index, callback, base::Passed(&result));
  WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
}

void SimpleEntryImpl::CreationOperationComplete(
    const CompletionCallback& completion_callback,
    const base::TimeTicks& start_time,
    scoped_ptr<SimpleSynchronousEntry*> in_sync_entry,
    scoped_ptr<int> in_result,
    Entry** out_entry) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, STATE_IO_PENDING);
  DCHECK(in_sync_entry);
  DCHECK(in_result);
  ScopedOperationRunner operation_runner(this);
  UMA_HISTOGRAM_BOOLEAN(
      "SimpleCache.EntryCreationResult", *in_result == net::OK);
  if (*in_result != net::OK) {
    if (*in_result != net::ERR_FILE_EXISTS)
      MarkAsDoomed();
    if (!completion_callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          completion_callback, net::ERR_FAILED));
    }
    MakeUninitialized();
    state_ = STATE_FAILURE;
    return;
  }
  // If |out_entry| is NULL, ReturnEntryToCaller() was already called in the
  // optimistic Create path.
  if (out_entry)
    ReturnEntryToCaller(out_entry);

  state_ = STATE_READY;
  synchronous_entry_ = *in_sync_entry;
  SetSynchronousData();
  UMA_HISTOGRAM_TIMES("SimpleCache.EntryCreationTime",
                      (base::TimeTicks::Now() - start_time));

  if (!completion_callback.is_null()) {
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
        completion_callback, net::OK));
  }
}

void SimpleEntryImpl::EntryOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);
  state_ = STATE_READY;
  if (*result < 0) {
    MarkAsDoomed();
    state_ = STATE_FAILURE;
    crc32s_end_offset_[stream_index] = 0;
  } else {
    SetSynchronousData();
  }

  if (!completion_callback.is_null()) {
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
        completion_callback, *result));
  }
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::ReadOperationComplete(
    int stream_index,
    int offset,
    const CompletionCallback& completion_callback,
    scoped_ptr<uint32> read_crc32,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(read_crc32);
  DCHECK(result);

  if (*result > 0 && crc32s_end_offset_[stream_index] == offset) {
    uint32 current_crc = offset == 0 ? crc32(0, Z_NULL, 0)
                                     : crc32s_[stream_index];
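    // zlib's crc32_combine(crc_A, crc_B, len_B) yields the CRC of the
    // concatenation A + B. Here A is the already-covered prefix [0, offset)
    // and B is the block just read, so the running CRC advances by |*result|.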
    crc32s_[stream_index] = crc32_combine(current_crc, *read_crc32, *result);
    crc32s_end_offset_[stream_index] += *result;
    if (!have_written_[stream_index] &&
        data_size_[stream_index] == crc32s_end_offset_[stream_index]) {
      // We have just read the file from start to finish, so we have computed
      // a CRC of the entire file, and we can check it now. If a cache entry
      // has a single reader, the normal pattern is to read from start to
      // finish.

      // Other cases are possible. With two readers on the same entry, one
      // reader can be behind the other. In that case we compute the CRC as
      // the most advanced reader progresses, and check it for both readers
      // as each reads the last byte.

      scoped_ptr<int> new_result(new int());
      Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
                                base::Unretained(synchronous_entry_),
                                stream_index, crc32s_[stream_index],
                                new_result.get());
      Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete,
                                 this, *result, stream_index,
                                 completion_callback,
                                 base::Passed(&new_result));
      WorkerPool::PostTaskAndReply(FROM_HERE, task, reply, true);
      return;
    }
  }
  if (*result >= 0)
    RecordReadResult(READ_RESULT_SUCCESS);
  else
    RecordReadResult(READ_RESULT_SYNC_READ_FAILURE);
  EntryOperationComplete(stream_index, completion_callback, result.Pass());
}

void SimpleEntryImpl::WriteOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  if (*result >= 0)
    RecordWriteResult(WRITE_RESULT_SUCCESS);
  else
    RecordWriteResult(WRITE_RESULT_SYNC_WRITE_FAILURE);
  EntryOperationComplete(stream_index, completion_callback, result.Pass());
}

void SimpleEntryImpl::ChecksumOperationComplete(
    int orig_result,
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);
  if (*result == net::OK) {
    *result = orig_result;
    if (orig_result >= 0)
      RecordReadResult(READ_RESULT_SUCCESS);
    else
      RecordReadResult(READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(READ_RESULT_SYNC_CHECKSUM_FAILURE);
  }
  EntryOperationComplete(stream_index, completion_callback, result.Pass());
}

void SimpleEntryImpl::CloseOperationComplete() {
  DCHECK(!synchronous_entry_);
  DCHECK_EQ(0, open_count_);
  DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_);
  MakeUninitialized();
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::SetSynchronousData() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_READY, state_);
  // TODO(felipeg): These copies to avoid data races are not optimal. While
  // adding an IO thread index (for fast misses etc.), we can store this data
  // in that structure. This also solves the problem of last_used() not being
  // accurate on ext4 filesystems.
  last_used_ = synchronous_entry_->last_used();
  last_modified_ = synchronous_entry_->last_modified();
  for (int i = 0; i < kSimpleEntryFileCount; ++i)
    data_size_[i] = synchronous_entry_->data_size(i);
  if (backend_)
    backend_->index()->UpdateEntrySize(key_, synchronous_entry_->GetFileSize());
}

}  // namespace disk_cache