simple_backend_impl.cc revision 5d1f7b1de12d16ceb2c938c56701a3e8bfa558f7
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/simple/simple_backend_impl.h"

#include <algorithm>
#include <cstdlib>
#include <functional>

#if defined(OS_POSIX)
#include <sys/resource.h>
#endif

#include "base/bind.h"
#include "base/callback.h"
#include "base/file_util.h"
#include "base/location.h"
#include "base/message_loop/message_loop_proxy.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/histogram.h"
#include "base/metrics/sparse_histogram.h"
#include "base/single_thread_task_runner.h"
#include "base/sys_info.h"
#include "base/task_runner_util.h"
#include "base/threading/sequenced_worker_pool.h"
#include "base/time/time.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/simple/simple_entry_format.h"
#include "net/disk_cache/simple/simple_entry_impl.h"
#include "net/disk_cache/simple/simple_histogram_macros.h"
#include "net/disk_cache/simple/simple_index.h"
#include "net/disk_cache/simple/simple_index_file.h"
#include "net/disk_cache/simple/simple_synchronous_entry.h"
#include "net/disk_cache/simple/simple_util.h"
#include "net/disk_cache/simple/simple_version_upgrade.h"

using base::Callback;
using base::Closure;
using base::FilePath;
using base::MessageLoopProxy;
using base::SequencedWorkerPool;
using base::SingleThreadTaskRunner;
using base::Time;
using base::DirectoryExists;
using base::CreateDirectory;

namespace disk_cache {

namespace {

// Maximum number of concurrent worker pool threads, which also is the limit
// on concurrent IO (as we use one thread per IO request).
const int kDefaultMaxWorkerThreads = 50;

const char kThreadNamePrefix[] = "SimpleCache";

// Maximum fraction of the cache that one entry can consume.
const int kMaxFileRatio = 8;

// A global sequenced worker pool to use for launching all tasks.
SequencedWorkerPool* g_sequenced_worker_pool = NULL;

void MaybeCreateSequencedWorkerPool() {
  if (!g_sequenced_worker_pool) {
    int max_worker_threads = kDefaultMaxWorkerThreads;

    const std::string thread_count_field_trial =
        base::FieldTrialList::FindFullName("SimpleCacheMaxThreads");
    if (!thread_count_field_trial.empty()) {
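      // A non-numeric trial value parses to 0 via std::atoi(), so the
      // std::max() below keeps the thread count at a minimum of 1.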
      max_worker_threads =
          std::max(1, std::atoi(thread_count_field_trial.c_str()));
    }

    g_sequenced_worker_pool = new SequencedWorkerPool(max_worker_threads,
                                                      kThreadNamePrefix);
    g_sequenced_worker_pool->AddRef();  // Leak it.
  }
}

bool g_fd_limit_histogram_has_been_populated = false;

void MaybeHistogramFdLimit(net::CacheType cache_type) {
  if (g_fd_limit_histogram_has_been_populated)
    return;

  // Used in histograms; add new entries at end.
  enum FdLimitStatus {
    FD_LIMIT_STATUS_UNSUPPORTED = 0,
    FD_LIMIT_STATUS_FAILED      = 1,
    FD_LIMIT_STATUS_SUCCEEDED   = 2,
    FD_LIMIT_STATUS_MAX         = 3
  };
  FdLimitStatus fd_limit_status = FD_LIMIT_STATUS_UNSUPPORTED;
  int soft_fd_limit = 0;
  int hard_fd_limit = 0;

#if defined(OS_POSIX)
  struct rlimit nofile;
  if (!getrlimit(RLIMIT_NOFILE, &nofile)) {
    soft_fd_limit = nofile.rlim_cur;
    hard_fd_limit = nofile.rlim_max;
    fd_limit_status = FD_LIMIT_STATUS_SUCCEEDED;
  } else {
    fd_limit_status = FD_LIMIT_STATUS_FAILED;
  }
#endif

  SIMPLE_CACHE_UMA(ENUMERATION,
                   "FileDescriptorLimitStatus", cache_type,
                   fd_limit_status, FD_LIMIT_STATUS_MAX);
  if (fd_limit_status == FD_LIMIT_STATUS_SUCCEEDED) {
    SIMPLE_CACHE_UMA(SPARSE_SLOWLY,
                     "FileDescriptorLimitSoft", cache_type, soft_fd_limit);
    SIMPLE_CACHE_UMA(SPARSE_SLOWLY,
                     "FileDescriptorLimitHard", cache_type, hard_fd_limit);
  }

  g_fd_limit_histogram_has_been_populated = true;
}

// Detects whether the files in the cache directory match the current disk
// cache backend type and version. If the directory contains no cache, it is
// initialized with a fresh structure.
bool FileStructureConsistent(const base::FilePath& path) {
  if (!base::PathExists(path) && !base::CreateDirectory(path)) {
    LOG(ERROR) << "Failed to create directory: " << path.LossyDisplayName();
    return false;
  }
  return disk_cache::UpgradeSimpleCacheOnDisk(path);
}

// A context used by a BarrierCompletionCallback to track state.
struct BarrierContext {
  BarrierContext(int expected)
      : expected(expected),
        count(0),
        had_error(false) {}

  const int expected;
  int count;
  bool had_error;
};

void BarrierCompletionCallbackImpl(
    BarrierContext* context,
    const net::CompletionCallback& final_callback,
    int result) {
  DCHECK_GT(context->expected, context->count);
  if (context->had_error)
    return;
  if (result != net::OK) {
    context->had_error = true;
    final_callback.Run(result);
    return;
  }
  ++context->count;
  if (context->count == context->expected)
    final_callback.Run(net::OK);
}

// A barrier completion callback is a net::CompletionCallback that waits for
// |count| successful results before invoking |final_callback|. In the case of
// an error, the first error is passed to |final_callback| and all others
// are ignored.
net::CompletionCallback MakeBarrierCompletionCallback(
    int count,
    const net::CompletionCallback& final_callback) {
  BarrierContext* context = new BarrierContext(count);
  return base::Bind(&BarrierCompletionCallbackImpl,
                    base::Owned(context), final_callback);
}
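
// Illustrative usage sketch (comment only; SomeAsyncOperation and
// AnotherAsyncOperation are hypothetical callers):
//
//   net::CompletionCallback barrier =
//       MakeBarrierCompletionCallback(2, final_callback);
//   SomeAsyncOperation(barrier);     // a success counts toward the total of 2
//   AnotherAsyncOperation(barrier);  // |final_callback| runs once both succeed
//
// DoomEntries() below is the real caller: it waits for each individually
// doomed entry plus the en-masse doom of the remaining files.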
// A short bindable thunk that runs |operation| and ensures its completion
// callback is always called: a synchronous result (anything other than
// net::ERR_IO_PENDING) is forwarded to |operation_callback| directly.
void RunOperationAndCallback(
    const Callback<int(const net::CompletionCallback&)>& operation,
    const net::CompletionCallback& operation_callback) {
  const int operation_result = operation.Run(operation_callback);
  if (operation_result != net::ERR_IO_PENDING)
    operation_callback.Run(operation_result);
}
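
// Illustrative sketch (comment only; mirrors the call sites in OpenEntry(),
// CreateEntry() and DoomEntry() below, which queue an operation behind a
// pending doom of the same entry):
//
//   Callback<int(const net::CompletionCallback&)> operation =
//       base::Bind(&SimpleBackendImpl::OpenEntry,
//                  base::Unretained(this), key, entry);
//   it->second.push_back(base::Bind(&RunOperationAndCallback,
//                                   operation, callback));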

void RecordIndexLoad(net::CacheType cache_type,
                     base::TimeTicks constructed_since,
                     int result) {
  const base::TimeDelta creation_to_index = base::TimeTicks::Now() -
                                            constructed_since;
  if (result == net::OK) {
    SIMPLE_CACHE_UMA(TIMES, "CreationToIndex", cache_type, creation_to_index);
  } else {
    SIMPLE_CACHE_UMA(TIMES,
                     "CreationToIndexFail", cache_type, creation_to_index);
  }
}

}  // namespace

SimpleBackendImpl::SimpleBackendImpl(const FilePath& path,
                                     int max_bytes,
                                     net::CacheType cache_type,
                                     base::SingleThreadTaskRunner* cache_thread,
                                     net::NetLog* net_log)
    : path_(path),
      cache_type_(cache_type),
      cache_thread_(cache_thread),
      orig_max_size_(max_bytes),
      entry_operations_mode_(
          cache_type == net::DISK_CACHE ?
              SimpleEntryImpl::OPTIMISTIC_OPERATIONS :
              SimpleEntryImpl::NON_OPTIMISTIC_OPERATIONS),
      net_log_(net_log) {
  MaybeHistogramFdLimit(cache_type_);
}

SimpleBackendImpl::~SimpleBackendImpl() {
  index_->WriteToDisk();
}

int SimpleBackendImpl::Init(const CompletionCallback& completion_callback) {
  MaybeCreateSequencedWorkerPool();

  worker_pool_ = g_sequenced_worker_pool->GetTaskRunnerWithShutdownBehavior(
      SequencedWorkerPool::CONTINUE_ON_SHUTDOWN);

  index_.reset(new SimpleIndex(MessageLoopProxy::current(), this, cache_type_,
                               make_scoped_ptr(new SimpleIndexFile(
                                   cache_thread_.get(), worker_pool_.get(),
                                   cache_type_, path_))));
  index_->ExecuteWhenReady(
      base::Bind(&RecordIndexLoad, cache_type_, base::TimeTicks::Now()));

  PostTaskAndReplyWithResult(
      cache_thread_,
      FROM_HERE,
      base::Bind(&SimpleBackendImpl::InitCacheStructureOnDisk, path_,
                 orig_max_size_),
      base::Bind(&SimpleBackendImpl::InitializeIndex, AsWeakPtr(),
                 completion_callback));
  return net::ERR_IO_PENDING;
}
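
// Hypothetical caller sketch (comment only; |path|, |cache_thread|, |net_log|
// and |init_callback| are illustrative): Init() always returns
// net::ERR_IO_PENDING and reports the result through the callback once the
// on-disk structure and index have been set up.
//
//   scoped_ptr<SimpleBackendImpl> backend(new SimpleBackendImpl(
//       path, 0 /* max_bytes: pick automatically */, net::DISK_CACHE,
//       cache_thread, net_log));
//   int rv = backend->Init(init_callback);
//   DCHECK_EQ(net::ERR_IO_PENDING, rv);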

bool SimpleBackendImpl::SetMaxSize(int max_bytes) {
  orig_max_size_ = max_bytes;
  return index_->SetMaxSize(max_bytes);
}

int SimpleBackendImpl::GetMaxFileSize() const {
  return index_->max_size() / kMaxFileRatio;
}

void SimpleBackendImpl::OnDeactivated(const SimpleEntryImpl* entry) {
  active_entries_.erase(entry->entry_hash());
}

void SimpleBackendImpl::OnDoomStart(uint64 entry_hash) {
  // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
  CHECK_EQ(0u, entries_pending_doom_.count(entry_hash));
  entries_pending_doom_.insert(
      std::make_pair(entry_hash, std::vector<Closure>()));
}

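// Called when an in-flight doom of |entry_hash| finishes: any operations on
// that entry which were queued while the doom was pending are run now.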
void SimpleBackendImpl::OnDoomComplete(uint64 entry_hash) {
  // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
  CHECK_EQ(1u, entries_pending_doom_.count(entry_hash));
  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  std::vector<Closure> to_run_closures;
  to_run_closures.swap(it->second);
  entries_pending_doom_.erase(it);

  std::for_each(to_run_closures.begin(), to_run_closures.end(),
                std::mem_fun_ref(&Closure::Run));
}

void SimpleBackendImpl::DoomEntries(std::vector<uint64>* entry_hashes,
                                    const net::CompletionCallback& callback) {
  scoped_ptr<std::vector<uint64> >
      mass_doom_entry_hashes(new std::vector<uint64>());
  mass_doom_entry_hashes->swap(*entry_hashes);

  std::vector<uint64> to_doom_individually_hashes;

  // For each of the entry hashes, there are two cases:
  // 1. The entry is either open or pending doom, and so it should be doomed
  //    individually to avoid flakes.
  // 2. The entry is not in use at all, so we can call
  //    SimpleSynchronousEntry::DoomEntrySet and delete the files en masse.
  for (int i = mass_doom_entry_hashes->size() - 1; i >= 0; --i) {
    const uint64 entry_hash = (*mass_doom_entry_hashes)[i];
    // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
    CHECK(active_entries_.count(entry_hash) == 0 ||
          entries_pending_doom_.count(entry_hash) == 0)
        << "The entry 0x" << std::hex << entry_hash
        << " is both active and pending doom.";
    if (!active_entries_.count(entry_hash) &&
        !entries_pending_doom_.count(entry_hash)) {
      continue;
    }

    to_doom_individually_hashes.push_back(entry_hash);

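    // Remove |entry_hash| from the mass doom list by replacing it with the
    // last element; the order of the remaining hashes does not matter.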
    (*mass_doom_entry_hashes)[i] = mass_doom_entry_hashes->back();
    mass_doom_entry_hashes->resize(mass_doom_entry_hashes->size() - 1);
  }

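  // The barrier must fire once for each individually doomed entry plus once
  // for the en-masse SimpleSynchronousEntry::DoomEntrySet posted below, hence
  // the +1.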
  net::CompletionCallback barrier_callback =
      MakeBarrierCompletionCallback(to_doom_individually_hashes.size() + 1,
                                    callback);
  for (std::vector<uint64>::const_iterator
           it = to_doom_individually_hashes.begin(),
           end = to_doom_individually_hashes.end(); it != end; ++it) {
    const int doom_result = DoomEntryFromHash(*it, barrier_callback);
    // TODO(ttuttle): Revert to DCHECK once http://crbug.com/317138 is fixed.
    CHECK_EQ(net::ERR_IO_PENDING, doom_result);
    index_->Remove(*it);
  }

  for (std::vector<uint64>::const_iterator it = mass_doom_entry_hashes->begin(),
                                           end = mass_doom_entry_hashes->end();
       it != end; ++it) {
    index_->Remove(*it);
    OnDoomStart(*it);
  }

  // Take this pointer before building the callbacks: the order in which
  // function arguments are evaluated is unspecified, so base::Passed() could
  // otherwise consume |mass_doom_entry_hashes| before .get() is called.
  std::vector<uint64>* mass_doom_entry_hashes_ptr =
      mass_doom_entry_hashes.get();
  PostTaskAndReplyWithResult(
      worker_pool_, FROM_HERE,
      base::Bind(&SimpleSynchronousEntry::DoomEntrySet,
                 mass_doom_entry_hashes_ptr, path_),
      base::Bind(&SimpleBackendImpl::DoomEntriesComplete,
                 AsWeakPtr(), base::Passed(&mass_doom_entry_hashes),
                 barrier_callback));
}

net::CacheType SimpleBackendImpl::GetCacheType() const {
  return net::DISK_CACHE;
}

int32 SimpleBackendImpl::GetEntryCount() const {
  // TODO(pasko): Use directory file count when index is not ready.
  return index_->GetEntryCount();
}

int SimpleBackendImpl::OpenEntry(const std::string& key,
                                 Entry** entry,
                                 const CompletionCallback& callback) {
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

  // TODO(gavinp): Factor out this (not quite completely) repetitive code
  // block from OpenEntry/CreateEntry/DoomEntry.
  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::OpenEntry,
                   base::Unretained(this), key, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  CompletionCallback backend_callback =
      base::Bind(&SimpleBackendImpl::OnEntryOpenedFromKey,
                 AsWeakPtr(),
                 key,
                 entry,
                 simple_entry,
                 callback);
  return simple_entry->OpenEntry(entry, backend_callback);
}

int SimpleBackendImpl::CreateEntry(const std::string& key,
                                   Entry** entry,
                                   const CompletionCallback& callback) {
  DCHECK_LT(0u, key.size());
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::CreateEntry,
                   base::Unretained(this), key, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  return simple_entry->CreateEntry(entry, callback);
}

int SimpleBackendImpl::DoomEntry(const std::string& key,
                                 const net::CompletionCallback& callback) {
  const uint64 entry_hash = simple_util::GetEntryHashKey(key);

  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::DoomEntry, base::Unretained(this), key);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }
  scoped_refptr<SimpleEntryImpl> simple_entry =
      CreateOrFindActiveEntry(entry_hash, key);
  return simple_entry->DoomEntry(callback);
}

int SimpleBackendImpl::DoomAllEntries(const CompletionCallback& callback) {
  return DoomEntriesBetween(Time(), Time(), callback);
}

void SimpleBackendImpl::IndexReadyForDoom(Time initial_time,
                                          Time end_time,
                                          const CompletionCallback& callback,
                                          int result) {
  if (result != net::OK) {
    callback.Run(result);
    return;
  }
  scoped_ptr<std::vector<uint64> > removed_key_hashes(
      index_->GetEntriesBetween(initial_time, end_time).release());
  DoomEntries(removed_key_hashes.get(), callback);
}

int SimpleBackendImpl::DoomEntriesBetween(
    const Time initial_time,
    const Time end_time,
    const CompletionCallback& callback) {
  return index_->ExecuteWhenReady(
      base::Bind(&SimpleBackendImpl::IndexReadyForDoom, AsWeakPtr(),
                 initial_time, end_time, callback));
}

int SimpleBackendImpl::DoomEntriesSince(
    const Time initial_time,
    const CompletionCallback& callback) {
  return DoomEntriesBetween(initial_time, Time(), callback);
}

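// The enumeration iterator (|*iter|) is a heap-allocated SimpleIndex::HashList
// of entry hashes: it is created lazily by GetNextEntryInIterator() from the
// index and destroyed by EndEnumeration().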
int SimpleBackendImpl::OpenNextEntry(void** iter,
                                     Entry** next_entry,
                                     const CompletionCallback& callback) {
  CompletionCallback get_next_entry =
      base::Bind(&SimpleBackendImpl::GetNextEntryInIterator, AsWeakPtr(), iter,
                 next_entry, callback);
  return index_->ExecuteWhenReady(get_next_entry);
}

void SimpleBackendImpl::EndEnumeration(void** iter) {
  SimpleIndex::HashList* entry_list =
      static_cast<SimpleIndex::HashList*>(*iter);
  delete entry_list;
  *iter = NULL;
}

void SimpleBackendImpl::GetStats(
    std::vector<std::pair<std::string, std::string> >* stats) {
  std::pair<std::string, std::string> item;
  item.first = "Cache type";
  item.second = "Simple Cache";
  stats->push_back(item);
}

void SimpleBackendImpl::OnExternalCacheHit(const std::string& key) {
  index_->UseIfExists(simple_util::GetEntryHashKey(key));
}

void SimpleBackendImpl::InitializeIndex(const CompletionCallback& callback,
                                        const DiskStatResult& result) {
  if (result.net_error == net::OK) {
    index_->SetMaxSize(result.max_size);
    index_->Initialize(result.cache_dir_mtime);
  }
  callback.Run(result.net_error);
}

SimpleBackendImpl::DiskStatResult SimpleBackendImpl::InitCacheStructureOnDisk(
    const base::FilePath& path,
    uint64 suggested_max_size) {
  DiskStatResult result;
  result.max_size = suggested_max_size;
  result.net_error = net::OK;
  if (!FileStructureConsistent(path)) {
    LOG(ERROR) << "Simple Cache Backend: wrong file structure on disk: "
               << path.LossyDisplayName();
    result.net_error = net::ERR_FAILED;
  } else {
    bool mtime_result =
        disk_cache::simple_util::GetMTime(path, &result.cache_dir_mtime);
    DCHECK(mtime_result);
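    // A |suggested_max_size| of zero means no size was specified; pick one
    // based on the amount of free disk space.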
    if (!result.max_size) {
      int64 available = base::SysInfo::AmountOfFreeDiskSpace(path);
      result.max_size = disk_cache::PreferredCacheSize(available);
    }
    DCHECK(result.max_size);
  }
  return result;
}

scoped_refptr<SimpleEntryImpl> SimpleBackendImpl::CreateOrFindActiveEntry(
    const uint64 entry_hash,
    const std::string& key) {
  DCHECK_EQ(entry_hash, simple_util::GetEntryHashKey(key));
  std::pair<EntryMap::iterator, bool> insert_result =
      active_entries_.insert(std::make_pair(entry_hash,
                                            base::WeakPtr<SimpleEntryImpl>()));
  EntryMap::iterator& it = insert_result.first;
  if (insert_result.second)
    DCHECK(!it->second.get());
  if (!it->second.get()) {
    SimpleEntryImpl* entry = new SimpleEntryImpl(
        cache_type_, path_, entry_hash, entry_operations_mode_, this, net_log_);
    entry->SetKey(key);
    it->second = entry->AsWeakPtr();
  }
  DCHECK(it->second.get());
  // It's possible, but unlikely, that we have an entry hash collision with a
  // currently active entry.
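  // Dooming the colliding entry removes it from |active_entries_| (checked by
  // the DCHECK below), so the recursive call creates a fresh entry for |key|.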
  if (key != it->second->key()) {
    it->second->Doom();
    DCHECK_EQ(0U, active_entries_.count(entry_hash));
    return CreateOrFindActiveEntry(entry_hash, key);
  }
  return make_scoped_refptr(it->second.get());
}

int SimpleBackendImpl::OpenEntryFromHash(uint64 entry_hash,
                                         Entry** entry,
                                         const CompletionCallback& callback) {
  base::hash_map<uint64, std::vector<Closure> >::iterator it =
      entries_pending_doom_.find(entry_hash);
  if (it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::OpenEntryFromHash,
                   base::Unretained(this), entry_hash, entry);
    it->second.push_back(base::Bind(&RunOperationAndCallback,
                                    operation, callback));
    return net::ERR_IO_PENDING;
  }

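  // If there is already an active entry for this hash, open it through the
  // normal key-based path so the open is handled by that entry.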
  EntryMap::iterator has_active = active_entries_.find(entry_hash);
  if (has_active != active_entries_.end()) {
    return OpenEntry(has_active->second->key(), entry, callback);
  }

  scoped_refptr<SimpleEntryImpl> simple_entry = new SimpleEntryImpl(
      cache_type_, path_, entry_hash, entry_operations_mode_, this, net_log_);
  CompletionCallback backend_callback =
      base::Bind(&SimpleBackendImpl::OnEntryOpenedFromHash,
                 AsWeakPtr(), entry_hash, entry, simple_entry, callback);
  return simple_entry->OpenEntry(entry, backend_callback);
}

int SimpleBackendImpl::DoomEntryFromHash(uint64 entry_hash,
                                         const CompletionCallback& callback) {
  Entry** entry = new Entry*();
  scoped_ptr<Entry*> scoped_entry(entry);

  base::hash_map<uint64, std::vector<Closure> >::iterator pending_it =
      entries_pending_doom_.find(entry_hash);
  if (pending_it != entries_pending_doom_.end()) {
    Callback<int(const net::CompletionCallback&)> operation =
        base::Bind(&SimpleBackendImpl::DoomEntryFromHash,
                   base::Unretained(this), entry_hash);
    pending_it->second.push_back(base::Bind(&RunOperationAndCallback,
                                            operation, callback));
    return net::ERR_IO_PENDING;
  }

  EntryMap::iterator active_it = active_entries_.find(entry_hash);
  if (active_it != active_entries_.end())
    return active_it->second->DoomEntry(callback);

  // There are no pending dooms and no open entry, so we can make a trivial
  // call to DoomEntries() to delete this entry.
  std::vector<uint64> entry_hash_vector;
  entry_hash_vector.push_back(entry_hash);
  DoomEntries(&entry_hash_vector, callback);
  return net::ERR_IO_PENDING;
}

void SimpleBackendImpl::GetNextEntryInIterator(
    void** iter,
    Entry** next_entry,
    const CompletionCallback& callback,
    int error_code) {
  if (error_code != net::OK) {
    callback.Run(error_code);
    return;
  }
  if (*iter == NULL) {
    *iter = index()->GetAllHashes().release();
  }
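  // Pop hashes off the back of the list until one that is still present in
  // the index can be opened; hashes that have disappeared are skipped.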
  SimpleIndex::HashList* entry_list =
      static_cast<SimpleIndex::HashList*>(*iter);
  while (entry_list->size() > 0) {
    uint64 entry_hash = entry_list->back();
    entry_list->pop_back();
    if (index()->Has(entry_hash)) {
      *next_entry = NULL;
      CompletionCallback continue_iteration = base::Bind(
          &SimpleBackendImpl::CheckIterationReturnValue,
          AsWeakPtr(),
          iter,
          next_entry,
          callback);
      int error_code_open = OpenEntryFromHash(entry_hash,
                                              next_entry,
                                              continue_iteration);
      if (error_code_open == net::ERR_IO_PENDING)
        return;
      if (error_code_open != net::ERR_FAILED) {
        callback.Run(error_code_open);
        return;
      }
    }
  }
  callback.Run(net::ERR_FAILED);
}

void SimpleBackendImpl::OnEntryOpenedFromHash(
    uint64 hash,
    Entry** entry,
    scoped_refptr<SimpleEntryImpl> simple_entry,
    const CompletionCallback& callback,
    int error_code) {
  if (error_code != net::OK) {
    callback.Run(error_code);
    return;
  }
  DCHECK(*entry);
  std::pair<EntryMap::iterator, bool> insert_result =
      active_entries_.insert(std::make_pair(hash,
                                            base::WeakPtr<SimpleEntryImpl>()));
  EntryMap::iterator& it = insert_result.first;
  const bool did_insert = insert_result.second;
  if (did_insert) {
    // There is no active entry corresponding to this hash. The entry created
    // is put in the map of active entries and returned to the caller.
    it->second = simple_entry->AsWeakPtr();
    callback.Run(error_code);
  } else {
    // The entry was made active with the key while the creation from hash
    // occurred. The entry created from hash needs to be closed, and the one
    // coming from the key returned to the caller.
    simple_entry->Close();
    it->second->OpenEntry(entry, callback);
  }
}

void SimpleBackendImpl::OnEntryOpenedFromKey(
    const std::string key,
    Entry** entry,
    scoped_refptr<SimpleEntryImpl> simple_entry,
    const CompletionCallback& callback,
    int error_code) {
  int final_code = error_code;
  if (final_code == net::OK) {
    bool key_matches = key.compare(simple_entry->key()) == 0;
    if (!key_matches) {
      // TODO(clamy): Add a unit test to check this code path.
      DLOG(WARNING) << "Key mismatch on open.";
      simple_entry->Doom();
      simple_entry->Close();
      final_code = net::ERR_FAILED;
    } else {
      DCHECK_EQ(simple_entry->entry_hash(), simple_util::GetEntryHashKey(key));
    }
    SIMPLE_CACHE_UMA(BOOLEAN, "KeyMatchedOnOpen", cache_type_, key_matches);
  }
  callback.Run(final_code);
}

void SimpleBackendImpl::CheckIterationReturnValue(
    void** iter,
    Entry** entry,
    const CompletionCallback& callback,
    int error_code) {
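  // ERR_FAILED means the entry vanished between being listed in the iterator
  // and being opened; move on to the next hash rather than ending iteration.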
  if (error_code == net::ERR_FAILED) {
    OpenNextEntry(iter, entry, callback);
    return;
  }
  callback.Run(error_code);
}

void SimpleBackendImpl::DoomEntriesComplete(
    scoped_ptr<std::vector<uint64> > entry_hashes,
    const net::CompletionCallback& callback,
    int result) {
  std::for_each(
      entry_hashes->begin(), entry_hashes->end(),
      std::bind1st(std::mem_fun(&SimpleBackendImpl::OnDoomComplete),
                   this));
  callback.Run(result);
}

void SimpleBackendImpl::FlushWorkerPoolForTesting() {
  if (g_sequenced_worker_pool)
    g_sequenced_worker_pool->FlushForTesting();
}

}  // namespace disk_cache