simple_entry_impl.cc revision bbcdd45c55eb7c4641ab97aef9889b0fc828e7d3
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_entry_impl.h"

#include <algorithm>
#include <cstring>
#include <vector>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/histogram.h"
#include "base/task_runner.h"
#include "base/time/time.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/net_log_parameters.h"
#include "net/disk_cache/simple/simple_backend_impl.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_net_log_parameters.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"
#include "third_party/zlib/zlib.h"

namespace {

// Used in histograms, please only add entries at the end.
enum ReadResult {
  READ_RESULT_SUCCESS = 0,
  READ_RESULT_INVALID_ARGUMENT = 1,
  READ_RESULT_NONBLOCK_EMPTY_RETURN = 2,
  READ_RESULT_BAD_STATE = 3,
  READ_RESULT_FAST_EMPTY_RETURN = 4,
  READ_RESULT_SYNC_READ_FAILURE = 5,
  READ_RESULT_SYNC_CHECKSUM_FAILURE = 6,
  READ_RESULT_MAX = 7,
};

// Used in histograms, please only add entries at the end.
enum WriteResult {
  WRITE_RESULT_SUCCESS = 0,
  WRITE_RESULT_INVALID_ARGUMENT = 1,
  WRITE_RESULT_OVER_MAX_SIZE = 2,
  WRITE_RESULT_BAD_STATE = 3,
  WRITE_RESULT_SYNC_WRITE_FAILURE = 4,
  WRITE_RESULT_MAX = 5,
};

void RecordReadResult(ReadResult result) {
  UMA_HISTOGRAM_ENUMERATION("SimpleCache.ReadResult", result, READ_RESULT_MAX);
}

void RecordWriteResult(WriteResult result) {
  UMA_HISTOGRAM_ENUMERATION("SimpleCache.WriteResult",
                            result, WRITE_RESULT_MAX);
}

// Short trampoline to take an owned input parameter and call a net completion
// callback with its value.
void CallCompletionCallback(const net::CompletionCallback& callback,
                            scoped_ptr<int> result) {
  DCHECK(result);
  if (!callback.is_null())
    callback.Run(*result);
}
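
// Sketch of how this trampoline is used throughout this file (DoWorkOnPool is
// a hypothetical worker-pool function; the real tasks are methods of
// SimpleSynchronousEntry):
//
//   scoped_ptr<int> result(new int());
//   Closure task = base::Bind(&DoWorkOnPool, result.get());
//   Closure reply = base::Bind(&CallCompletionCallback,
//                              callback, base::Passed(&result));
//   worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
//
// The raw pointer handed to the task stays valid because the reply, which
// owns the scoped_ptr, cannot run until the task has completed.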

}  // namespace

namespace disk_cache {

using base::Closure;
using base::FilePath;
using base::MessageLoopProxy;
using base::Time;
using base::TaskRunner;

// A helper class to ensure that RunNextOperationIfNeeded() is called when
// exiting the current stack frame.
class SimpleEntryImpl::ScopedOperationRunner {
 public:
  explicit ScopedOperationRunner(SimpleEntryImpl* entry) : entry_(entry) {
  }

  ~ScopedOperationRunner() {
    entry_->RunNextOperationIfNeeded();
  }

 private:
  SimpleEntryImpl* const entry_;
};
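
// A typical method below constructs one ScopedOperationRunner on entry and
// relies on its destructor to drain the queue on every return path. Sketch
// (SomeOperation and the early-return condition are illustrative only):
//
//   void SimpleEntryImpl::SomeOperation() {
//     ScopedOperationRunner operation_runner(this);
//     if (state_ == STATE_FAILURE)
//       return;  // RunNextOperationIfNeeded() still runs here.
//     // ... enqueue or perform work ...
//   }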

SimpleEntryImpl::SimpleEntryImpl(const FilePath& path,
                                 const uint64 entry_hash,
                                 OperationsMode operations_mode,
                                 SimpleBackendImpl* backend,
                                 net::NetLog* net_log)
    : backend_(backend->AsWeakPtr()),
      worker_pool_(backend->worker_pool()),
      path_(path),
      entry_hash_(entry_hash),
      use_optimistic_operations_(operations_mode == OPTIMISTIC_OPERATIONS),
      last_used_(Time::Now()),
      last_modified_(last_used_),
      open_count_(0),
      state_(STATE_UNINITIALIZED),
      synchronous_entry_(NULL),
      net_log_(net::BoundNetLog::Make(
          net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY)),
      last_queued_op_is_read_(false) {
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_end_offset_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc32s_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(have_written_),
                 arrays_should_be_same_size);
  COMPILE_ASSERT(arraysize(data_size_) == arraysize(crc_check_state_),
                 arrays_should_be_same_size);
  MakeUninitialized();
  net_log_.BeginEvent(
      net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
      CreateNetLogSimpleEntryCreationCallback(this));
}

int SimpleEntryImpl::OpenEntry(Entry** out_entry,
                               const CompletionCallback& callback) {
  DCHECK(backend_.get());
  // This enumeration is used in histograms; add entries only at the end.
  enum OpenEntryIndexEnum {
    INDEX_NOEXIST = 0,
    INDEX_MISS = 1,
    INDEX_HIT = 2,
    INDEX_MAX = 3,
  };
  OpenEntryIndexEnum open_entry_index_enum = INDEX_NOEXIST;
  if (backend_.get()) {
    if (backend_->index()->Has(entry_hash_))
      open_entry_index_enum = INDEX_HIT;
    else
      open_entry_index_enum = INDEX_MISS;
  }
  UMA_HISTOGRAM_ENUMERATION("SimpleCache.OpenEntryIndexState",
                            open_entry_index_enum, INDEX_MAX);

  // If the entry is not known to the index, initiate fast failover to the
  // network.
  if (open_entry_index_enum == INDEX_MISS)
    return net::ERR_FAILED;

  EnqueueOperation(base::Bind(&SimpleEntryImpl::OpenEntryInternal,
                              this,
                              callback,
                              out_entry));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}

int SimpleEntryImpl::CreateEntry(Entry** out_entry,
                                 const CompletionCallback& callback) {
  DCHECK(backend_.get());
  DCHECK_EQ(entry_hash_, simple_util::GetEntryHashKey(key_));
  int ret_value = net::ERR_FAILED;
  if (use_optimistic_operations_ &&
      state_ == STATE_UNINITIALIZED && pending_operations_.size() == 0) {
    ReturnEntryToCaller(out_entry);
    EnqueueOperation(base::Bind(&SimpleEntryImpl::CreateEntryInternal,
                                this,
                                CompletionCallback(),
                                static_cast<Entry**>(NULL)));
    ret_value = net::OK;
  } else {
    EnqueueOperation(base::Bind(&SimpleEntryImpl::CreateEntryInternal,
                                this,
                                callback,
                                out_entry));
    ret_value = net::ERR_IO_PENDING;
  }

  // We insert the entry in the index before creating the entry files in the
  // SimpleSynchronousEntry. This way the worst-case scenario is an index
  // entry without its backing files, so we never leak files.
  // CreationOperationComplete() will remove the entry from the index if the
  // creation fails.
  backend_->index()->Insert(key_);

  RunNextOperationIfNeeded();
  return ret_value;
}
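
// A note on the optimistic path in CreateEntry() above: the caller receives a
// usable Entry* synchronously (return value net::OK), while the files are
// created on the worker pool afterwards. If that creation fails,
// CreationOperationComplete() removes the key from the index (unless the
// failure was net::ERR_FILE_EXISTS) and returns the entry to
// STATE_UNINITIALIZED, so subsequent reads and writes on the optimistically
// returned Entry* fail with net::ERR_FAILED.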

int SimpleEntryImpl::DoomEntry(const CompletionCallback& callback) {
  MarkAsDoomed();
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::DoomEntry, path_, key_,
                            entry_hash_, result.get());
  Closure reply = base::Bind(&CallCompletionCallback,
                             callback, base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
  return net::ERR_IO_PENDING;
}
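
// Dooming is two-phase: MarkAsDoomed() synchronously removes the key from the
// index and detaches this entry from the backend, while the backing files are
// erased later on the worker pool. The SimpleEntryImpl itself stays alive
// until its reference count drops to zero.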

void SimpleEntryImpl::Doom() {
  DoomEntry(CompletionCallback());
}

void SimpleEntryImpl::Close() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LT(0, open_count_);

  if (--open_count_ > 0) {
    DCHECK(!HasOneRef());
    Release();  // Balanced in ReturnEntryToCaller().
    return;
  }

  EnqueueOperation(base::Bind(&SimpleEntryImpl::CloseInternal, this));
  DCHECK(!HasOneRef());
  Release();  // Balanced in ReturnEntryToCaller().
  RunNextOperationIfNeeded();
}

std::string SimpleEntryImpl::GetKey() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return key_;
}

Time SimpleEntryImpl::GetLastUsed() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_used_;
}

Time SimpleEntryImpl::GetLastModified() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  return last_modified_;
}

int32 SimpleEntryImpl::GetDataSize(int stream_index) const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_LE(0, data_size_[stream_index]);
  return data_size_[stream_index];
}

int SimpleEntryImpl::ReadData(int stream_index,
                              int offset,
                              net::IOBuffer* buf,
                              int buf_len,
                              const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (stream_index < 0 || stream_index >= kSimpleEntryFileCount ||
      buf_len < 0) {
    RecordReadResult(READ_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (pending_operations_.empty() && (offset >= GetDataSize(stream_index) ||
                                      offset < 0 || !buf_len)) {
    RecordReadResult(READ_RESULT_NONBLOCK_EMPTY_RETURN);
    return 0;
  }

  // TODO(felipeg): Optimization: Add support for truly parallel read
  // operations.
  EnqueueReadOperation(base::Bind(&SimpleEntryImpl::ReadDataInternal,
                                  this,
                                  stream_index,
                                  offset,
                                  make_scoped_refptr(buf),
                                  buf_len,
                                  callback));
  RunNextOperationIfNeeded();
  return net::ERR_IO_PENDING;
}
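
// A note on the zero-length return above: the synchronous empty return is
// only taken when no operations are queued, because a queued write or
// truncate could still change the stream size. ReadDataInternal() repeats the
// same check once the queue has drained and records it as
// READ_RESULT_FAST_EMPTY_RETURN instead.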

int SimpleEntryImpl::WriteData(int stream_index,
                               int offset,
                               net::IOBuffer* buf,
                               int buf_len,
                               const CompletionCallback& callback,
                               bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  if (stream_index < 0 || stream_index >= kSimpleEntryFileCount || offset < 0 ||
      buf_len < 0) {
    RecordWriteResult(WRITE_RESULT_INVALID_ARGUMENT);
    return net::ERR_INVALID_ARGUMENT;
  }
  if (backend_.get() && offset + buf_len > backend_->GetMaxFileSize()) {
    RecordWriteResult(WRITE_RESULT_OVER_MAX_SIZE);
    return net::ERR_FAILED;
  }
  ScopedOperationRunner operation_runner(this);

  const bool do_optimistic_write = use_optimistic_operations_ &&
      state_ == STATE_READY && pending_operations_.size() == 0;
  if (!do_optimistic_write) {
    pending_operations_.push(
        base::Bind(&SimpleEntryImpl::WriteDataInternal, this, stream_index,
                   offset, make_scoped_refptr(buf), buf_len, callback,
                   truncate));
    return net::ERR_IO_PENDING;
  }

  // We can only do an optimistic write if there are no pending operations, so
  // that we are sure the next call to RunNextOperationIfNeeded() will actually
  // run the write operation that sets the stream size. It also protects
  // against previously queued, possibly conflicting writes stacked in
  // |pending_operations_|. We could optimize this for the case where only
  // read operations are enqueued.
  // TODO(gavinp,pasko): For performance, don't use a copy of an IOBuffer here
  // to avoid paying the price of the RefCountedThreadSafe atomic operations.
  IOBuffer* buf_copy = NULL;
  if (buf) {
    buf_copy = new IOBuffer(buf_len);
    memcpy(buf_copy->data(), buf->data(), buf_len);
  }
  EnqueueOperation(
      base::Bind(&SimpleEntryImpl::WriteDataInternal, this, stream_index,
                 offset, make_scoped_refptr(buf_copy), buf_len,
                 CompletionCallback(), truncate));
  return buf_len;
}
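
// A note on the optimistic write above: WriteData() reports |buf_len| bytes
// written before any I/O has happened, and no completion callback runs for
// the deferred operation. If the asynchronous write later fails,
// EntryOperationComplete() dooms the entry and moves it to STATE_FAILURE, so
// the failure surfaces on a subsequent operation instead.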

int SimpleEntryImpl::ReadSparseData(int64 offset,
                                    net::IOBuffer* buf,
                                    int buf_len,
                                    const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

int SimpleEntryImpl::WriteSparseData(int64 offset,
                                     net::IOBuffer* buf,
                                     int buf_len,
                                     const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

int SimpleEntryImpl::GetAvailableRange(int64 offset,
                                       int len,
                                       int64* start,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

bool SimpleEntryImpl::CouldBeSparse() const {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  return false;
}

void SimpleEntryImpl::CancelSparseIO() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
}

int SimpleEntryImpl::ReadyForSparseIO(const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  // TODO(gavinp): Determine if the simple backend should support sparse data.
  NOTIMPLEMENTED();
  return net::ERR_FAILED;
}

SimpleEntryImpl::~SimpleEntryImpl() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(0U, pending_operations_.size());
  DCHECK(state_ == STATE_UNINITIALIZED || state_ == STATE_FAILURE);
  DCHECK(!synchronous_entry_);
  RemoveSelfFromBackend();
  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL);
}

void SimpleEntryImpl::MakeUninitialized() {
  state_ = STATE_UNINITIALIZED;
  std::memset(crc32s_end_offset_, 0, sizeof(crc32s_end_offset_));
  std::memset(crc32s_, 0, sizeof(crc32s_));
  std::memset(have_written_, 0, sizeof(have_written_));
  std::memset(data_size_, 0, sizeof(data_size_));
  std::memset(crc_check_state_, 0, sizeof(crc_check_state_));
}

void SimpleEntryImpl::ReturnEntryToCaller(Entry** out_entry) {
  DCHECK(out_entry);
  ++open_count_;
  AddRef();  // Balanced in Close()
  *out_entry = this;
}

void SimpleEntryImpl::RemoveSelfFromBackend() {
  if (!backend_.get())
    return;
  backend_->OnDeactivated(this);
  backend_.reset();
}

void SimpleEntryImpl::MarkAsDoomed() {
  net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
  if (!backend_.get())
    return;
  backend_->index()->Remove(key_);
  RemoveSelfFromBackend();
}

void SimpleEntryImpl::RunNextOperationIfNeeded() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  UMA_HISTOGRAM_CUSTOM_COUNTS("SimpleCache.EntryOperationsPending",
                              pending_operations_.size(), 0, 100, 20);
  if (!pending_operations_.empty() && state_ != STATE_IO_PENDING) {
    base::Closure operation = pending_operations_.front();
    pending_operations_.pop();
    operation.Run();
    // |this| may have been deleted.
  }
}

void SimpleEntryImpl::EnqueueOperation(const base::Closure& operation) {
  last_queued_op_is_read_ = false;
  pending_operations_.push(operation);
}

void SimpleEntryImpl::EnqueueReadOperation(const base::Closure& operation) {
  bool parallelizable_read = last_queued_op_is_read_ &&
      (!pending_operations_.empty() || state_ == STATE_IO_PENDING);
  UMA_HISTOGRAM_BOOLEAN("SimpleCache.ReadIsParallelizable",
                        parallelizable_read);
  last_queued_op_is_read_ = true;
  pending_operations_.push(operation);
}
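
// The |parallelizable_read| sample above is true when a read lands directly
// behind another read that is still queued or in flight. For example, two
// back-to-back ReadData() calls with no intervening write record one false
// sample (the first read waited on nothing) and one true sample (the second
// read could, in principle, have run in parallel with the first).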

void SimpleEntryImpl::OpenEntryInternal(const CompletionCallback& callback,
                                        Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);
  if (state_ == STATE_READY) {
    ReturnEntryToCaller(out_entry);
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(callback,
                                                                net::OK));
    return;
  } else if (state_ == STATE_FAILURE) {
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);
  state_ = STATE_IO_PENDING;
  const base::TimeTicks start_time = base::TimeTicks::Now();
  typedef SimpleSynchronousEntry* PointerToSimpleSynchronousEntry;
  scoped_ptr<PointerToSimpleSynchronousEntry> sync_entry(
      new PointerToSimpleSynchronousEntry());
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::OpenEntry, path_,
                            entry_hash_, sync_entry.get(), result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete, this,
                             callback, start_time, base::Passed(&sync_entry),
                             base::Passed(&result), out_entry);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::CreateEntryInternal(const CompletionCallback& callback,
                                          Entry** out_entry) {
  ScopedOperationRunner operation_runner(this);
  if (state_ != STATE_UNINITIALIZED) {
    // There is already an active normal entry.
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_UNINITIALIZED, state_);
  DCHECK(!synchronous_entry_);

  state_ = STATE_IO_PENDING;

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  // If creation succeeds, we should mark all streams to be saved on close.
  for (int i = 0; i < kSimpleEntryFileCount; ++i)
    have_written_[i] = true;

  const base::TimeTicks start_time = base::TimeTicks::Now();
  typedef SimpleSynchronousEntry* PointerToSimpleSynchronousEntry;
  scoped_ptr<PointerToSimpleSynchronousEntry> sync_entry(
      new PointerToSimpleSynchronousEntry());
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::CreateEntry, path_, key_,
                            entry_hash_, sync_entry.get(), result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::CreationOperationComplete, this,
                             callback, start_time, base::Passed(&sync_entry),
                             base::Passed(&result), out_entry);
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::CloseInternal() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  typedef SimpleSynchronousEntry::CRCRecord CRCRecord;
  scoped_ptr<std::vector<CRCRecord> >
      crc32s_to_write(new std::vector<CRCRecord>());

  net_log_.BeginEvent(net::NetLog::TYPE_ENTRY_CLOSE);

  if (state_ == STATE_READY) {
    DCHECK(synchronous_entry_);
    state_ = STATE_IO_PENDING;
    for (int i = 0; i < kSimpleEntryFileCount; ++i) {
      if (have_written_[i]) {
        if (GetDataSize(i) == crc32s_end_offset_[i]) {
          int32 crc = GetDataSize(i) == 0 ? crc32(0, Z_NULL, 0) : crc32s_[i];
          crc32s_to_write->push_back(CRCRecord(i, true, crc));
        } else {
          crc32s_to_write->push_back(CRCRecord(i, false, 0));
        }
      }
    }
  } else {
    DCHECK(STATE_UNINITIALIZED == state_ || STATE_FAILURE == state_);
  }

  if (synchronous_entry_) {
    Closure task = base::Bind(&SimpleSynchronousEntry::Close,
                              base::Unretained(synchronous_entry_),
                              base::Passed(&crc32s_to_write));
    Closure reply = base::Bind(&SimpleEntryImpl::CloseOperationComplete, this);
    synchronous_entry_ = NULL;
    worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);

    for (int i = 0; i < kSimpleEntryFileCount; ++i) {
      if (!have_written_[i]) {
        UMA_HISTOGRAM_ENUMERATION("SimpleCache.CheckCRCResult",
                                  crc_check_state_[i], CRC_CHECK_MAX);
      }
    }
  } else {
    synchronous_entry_ = NULL;
    CloseOperationComplete();
  }
}

void SimpleEntryImpl::ReadDataInternal(int stream_index,
                                       int offset,
                                       net::IOBuffer* buf,
                                       int buf_len,
                                       const CompletionCallback& callback) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);

  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    RecordReadResult(READ_RESULT_BAD_STATE);
    if (!callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    return;
  }
  DCHECK_EQ(STATE_READY, state_);
  if (offset >= GetDataSize(stream_index) || offset < 0 || !buf_len) {
    RecordReadResult(READ_RESULT_FAST_EMPTY_RETURN);
    // If there is nothing to read, we bail out before setting state_ to
    // STATE_IO_PENDING.
    if (!callback.is_null())
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, 0));
    return;
  }

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteDataCallback(
            stream_index, offset, buf_len, false));
  }

  buf_len = std::min(buf_len, GetDataSize(stream_index) - offset);

  state_ = STATE_IO_PENDING;
  if (backend_.get())
    backend_->index()->UseIfExists(key_);

  scoped_ptr<uint32> read_crc32(new uint32());
  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::ReadData,
                            base::Unretained(synchronous_entry_),
                            stream_index, offset, make_scoped_refptr(buf),
                            buf_len, read_crc32.get(), result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::ReadOperationComplete, this,
                             stream_index, offset, callback,
                             base::Passed(&read_crc32), base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::WriteDataInternal(int stream_index,
                                        int offset,
                                        net::IOBuffer* buf,
                                        int buf_len,
                                        const CompletionCallback& callback,
                                        bool truncate) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  ScopedOperationRunner operation_runner(this);
  if (state_ == STATE_FAILURE || state_ == STATE_UNINITIALIZED) {
    RecordWriteResult(WRITE_RESULT_BAD_STATE);
    if (!callback.is_null()) {
      // We need to post a task so that we don't go into a loop when we call
      // the callback directly.
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          callback, net::ERR_FAILED));
    }
    // |this| may be destroyed after return here.
    return;
  }

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteDataCallback(
            stream_index, offset, buf_len, truncate));
  }

  DCHECK_EQ(STATE_READY, state_);
  state_ = STATE_IO_PENDING;
  if (backend_.get())
    backend_->index()->UseIfExists(key_);
  // It is easy to incrementally compute the CRC for [0 .. |offset + buf_len|)
  // if |offset == 0| or we have already computed the CRC for [0 .. offset).
  // We rely on most write operations being sequential, start to end, to
  // compute the CRC of the data. When we write to an entry and close without
  // having done a sequential write, we don't check the CRC on read.
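  // For example, three sequential writes of lengths a, b and c leave
  // |crc32s_end_offset_| at a, then a + b, then a + b + c, each time folding
  // the new bytes into the running CRC with zlib's crc32(). A write at any
  // other offset leaves |crc32s_end_offset_| behind the stream size, and
  // CloseInternal() then records a CRCRecord with the has-crc flag cleared
  // for that stream, disabling the CRC check on later reads.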
  if (offset == 0 || crc32s_end_offset_[stream_index] == offset) {
    uint32 initial_crc = (offset != 0) ? crc32s_[stream_index]
                                       : crc32(0, Z_NULL, 0);
    if (buf_len > 0) {
      crc32s_[stream_index] = crc32(initial_crc,
                                    reinterpret_cast<const Bytef*>(buf->data()),
                                    buf_len);
    }
    crc32s_end_offset_[stream_index] = offset + buf_len;
  }

  if (truncate) {
    data_size_[stream_index] = offset + buf_len;
  } else {
    data_size_[stream_index] = std::max(offset + buf_len,
                                        GetDataSize(stream_index));
  }

  // Since we don't know the correct values for |last_used_| and
  // |last_modified_| yet, we make this approximation.
  last_used_ = last_modified_ = base::Time::Now();

  have_written_[stream_index] = true;

  scoped_ptr<int> result(new int());
  Closure task = base::Bind(&SimpleSynchronousEntry::WriteData,
                            base::Unretained(synchronous_entry_),
                            stream_index, offset, make_scoped_refptr(buf),
                            buf_len, truncate, result.get());
  Closure reply = base::Bind(&SimpleEntryImpl::WriteOperationComplete, this,
                             stream_index, callback, base::Passed(&result));
  worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
}

void SimpleEntryImpl::CreationOperationComplete(
    const CompletionCallback& completion_callback,
    const base::TimeTicks& start_time,
    scoped_ptr<SimpleSynchronousEntry*> in_sync_entry,
    scoped_ptr<int> in_result,
    Entry** out_entry) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK_EQ(state_, STATE_IO_PENDING);
  DCHECK(in_sync_entry);
  DCHECK(in_result);
  ScopedOperationRunner operation_runner(this);
  UMA_HISTOGRAM_BOOLEAN(
      "SimpleCache.EntryCreationResult", *in_result == net::OK);
  if (*in_result != net::OK) {
    if (*in_result != net::ERR_FILE_EXISTS)
      MarkAsDoomed();
    if (!completion_callback.is_null()) {
      MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
          completion_callback, net::ERR_FAILED));
    }
    MakeUninitialized();
    return;
  }
  // If |out_entry| is NULL, ReturnEntryToCaller() was already called in the
  // optimistic Create case.
  if (out_entry)
    ReturnEntryToCaller(out_entry);

  state_ = STATE_READY;
  synchronous_entry_ = *in_sync_entry;
  if (key_.empty()) {
    key_ = synchronous_entry_->key();
  } else {
    // This should only be triggered when creating an entry. The key check in
    // the open case is handled in SimpleBackendImpl.
    DCHECK_EQ(key_, synchronous_entry_->key());
  }
  SetSynchronousData();
  UMA_HISTOGRAM_TIMES("SimpleCache.EntryCreationTime",
                      (base::TimeTicks::Now() - start_time));

  if (!completion_callback.is_null()) {
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
        completion_callback, net::OK));
  }
}

void SimpleEntryImpl::EntryOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);
  state_ = STATE_READY;
  if (*result < 0) {
    MarkAsDoomed();
    state_ = STATE_FAILURE;
    crc32s_end_offset_[stream_index] = 0;
  } else {
    SetSynchronousData();
  }

  if (!completion_callback.is_null()) {
    MessageLoopProxy::current()->PostTask(FROM_HERE, base::Bind(
        completion_callback, *result));
  }
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::ReadOperationComplete(
    int stream_index,
    int offset,
    const CompletionCallback& completion_callback,
    scoped_ptr<uint32> read_crc32,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(read_crc32);
  DCHECK(result);

  if (*result > 0 && crc32s_end_offset_[stream_index] == offset) {
    uint32 current_crc = offset == 0 ? crc32(0, Z_NULL, 0)
                                     : crc32s_[stream_index];
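    // zlib's crc32_combine(crc_a, crc_b, len_b) yields the CRC of the
    // concatenation of the two blocks, so the running CRC is extended by the
    // |*result| bytes just read without touching the data again.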
    crc32s_[stream_index] = crc32_combine(current_crc, *read_crc32, *result);
    crc32s_end_offset_[stream_index] += *result;
    if (!have_written_[stream_index] &&
        GetDataSize(stream_index) == crc32s_end_offset_[stream_index]) {
      // We have just read a file from start to finish, and so we have
      // computed a CRC of the entire file. We can check it now. If a cache
      // entry has a single reader, the normal pattern is to read from start
      // to finish.

      // Other cases are possible. In the case of two readers on the same
      // entry, one reader can be behind the other. In this case we compute
      // the CRC as the most advanced reader progresses, and check it for
      // both readers as they read the last byte.

      scoped_ptr<int> new_result(new int());
      Closure task = base::Bind(&SimpleSynchronousEntry::CheckEOFRecord,
                                base::Unretained(synchronous_entry_),
                                stream_index, crc32s_[stream_index],
                                new_result.get());
      Closure reply = base::Bind(&SimpleEntryImpl::ChecksumOperationComplete,
                                 this, *result, stream_index,
                                 completion_callback,
                                 base::Passed(&new_result));
      worker_pool_->PostTaskAndReply(FROM_HERE, task, reply);
      crc_check_state_[stream_index] = CRC_CHECK_DONE;
      return;
    }
  }

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteCompleteCallback(*result));
  }

  if (*result < 0) {
    RecordReadResult(READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(READ_RESULT_SUCCESS);
    if (crc_check_state_[stream_index] == CRC_CHECK_NEVER_READ_TO_END &&
        offset + *result == GetDataSize(stream_index)) {
      crc_check_state_[stream_index] = CRC_CHECK_NOT_DONE;
    }
  }
  EntryOperationComplete(stream_index, completion_callback, result.Pass());
}

void SimpleEntryImpl::WriteOperationComplete(
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  if (net_log_.IsLoggingAllEvents()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteCompleteCallback(*result));
  }

  if (*result >= 0)
    RecordWriteResult(WRITE_RESULT_SUCCESS);
  else
    RecordWriteResult(WRITE_RESULT_SYNC_WRITE_FAILURE);
  EntryOperationComplete(stream_index, completion_callback, result.Pass());
}

void SimpleEntryImpl::ChecksumOperationComplete(
    int orig_result,
    int stream_index,
    const CompletionCallback& completion_callback,
    scoped_ptr<int> result) {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_IO_PENDING, state_);
  DCHECK(result);

  if (net_log_.IsLoggingAllEvents()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteCompleteCallback(*result));
  }

  if (*result == net::OK) {
    *result = orig_result;
    if (orig_result >= 0)
      RecordReadResult(READ_RESULT_SUCCESS);
    else
      RecordReadResult(READ_RESULT_SYNC_READ_FAILURE);
  } else {
    RecordReadResult(READ_RESULT_SYNC_CHECKSUM_FAILURE);
  }
  EntryOperationComplete(stream_index, completion_callback, result.Pass());
}

void SimpleEntryImpl::CloseOperationComplete() {
  DCHECK(!synchronous_entry_);
  DCHECK_EQ(0, open_count_);
  DCHECK(STATE_IO_PENDING == state_ || STATE_FAILURE == state_ ||
         STATE_UNINITIALIZED == state_);
  net_log_.EndEvent(net::NetLog::TYPE_ENTRY_CLOSE);
  MakeUninitialized();
  RunNextOperationIfNeeded();
}

void SimpleEntryImpl::SetSynchronousData() {
  DCHECK(io_thread_checker_.CalledOnValidThread());
  DCHECK(synchronous_entry_);
  DCHECK_EQ(STATE_READY, state_);
  // TODO(felipeg): These copies to avoid data races are not optimal. While
  // adding an IO thread index (for fast misses etc...), we can store this data
  // in that structure. This also solves problems with last_used() on ext4
  // filesystems not being accurate.
  last_used_ = synchronous_entry_->last_used();
  last_modified_ = synchronous_entry_->last_modified();
  for (int i = 0; i < kSimpleEntryFileCount; ++i)
    data_size_[i] = synchronous_entry_->data_size(i);
  if (backend_.get())
    backend_->index()->UpdateEntrySize(key_, synchronous_entry_->GetFileSize());
}

}  // namespace disk_cache
