// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/http/mock_http_cache.h"

#include "base/bind.h"
#include "base/message_loop/message_loop.h"
#include "net/base/completion_callback.h"
#include "net/base/net_errors.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace {

// We can override the test mode for a given operation by setting this global
// variable.
int g_test_mode = 0;

int GetTestModeForEntry(const std::string& key) {
  // 'key' is prefixed with an identifier if it corresponds to a cached POST.
  // Skip past that to locate the actual URL.
  //
  // TODO(darin): It breaks the abstraction a bit that we assume 'key' is a
  // URL corresponding to a registered MockTransaction.  It would be good to
  // have another way to access the test_mode.
  GURL url;
  if (isdigit(key[0])) {
    size_t slash = key.find('/');
    DCHECK(slash != std::string::npos);
    url = GURL(key.substr(slash + 1));
  } else {
    url = GURL(key);
  }
  const MockTransaction* t = FindMockTransaction(url);
  DCHECK(t);
  return t->test_mode;
}

void CallbackForwarder(const net::CompletionCallback& callback, int result) {
  callback.Run(result);
}

}  // namespace

//-----------------------------------------------------------------------------

struct MockDiskEntry::CallbackInfo {
  scoped_refptr<MockDiskEntry> entry;
  net::CompletionCallback callback;
  int result;
};

MockDiskEntry::MockDiskEntry(const std::string& key)
    : key_(key), doomed_(false), sparse_(false),
      fail_requests_(false), fail_sparse_requests_(false), busy_(false),
      delayed_(false) {
  test_mode_ = GetTestModeForEntry(key);
}

void MockDiskEntry::Doom() {
  doomed_ = true;
}

void MockDiskEntry::Close() {
  Release();
}

std::string MockDiskEntry::GetKey() const {
  return key_;
}

base::Time MockDiskEntry::GetLastUsed() const {
  return base::Time::FromInternalValue(0);
}

base::Time MockDiskEntry::GetLastModified() const {
  return base::Time::FromInternalValue(0);
}

int32 MockDiskEntry::GetDataSize(int index) const {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  return static_cast<int32>(data_[index].size());
}

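// Reads from the in-memory stream |data_[index]|.  As with a real
// disk_cache::Entry, completion is synchronous when TEST_MODE_SYNC_CACHE_READ
// is set for this entry's transaction; otherwise net::ERR_IO_PENDING is
// returned and the result arrives through a task posted to the current
// message loop.  A minimal sketch of how a test might drive it (hypothetical
// test code; |entry| is assumed to be an open disk_cache::Entry*):
//
//   scoped_refptr<net::IOBuffer> buf(new net::IOBuffer(256));
//   net::TestCompletionCallback cb;
//   int rv = entry->ReadData(0, 0, buf.get(), 256, cb.callback());
//   rv = cb.GetResult(rv);  // Handles both sync and async completion.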
int MockDiskEntry::ReadData(
    int index, int offset, net::IOBuffer* buf, int buf_len,
    const net::CompletionCallback& callback) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());

  if (fail_requests_)
    return net::ERR_CACHE_READ_FAILURE;

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return net::ERR_FAILED;
  if (static_cast<size_t>(offset) == data_[index].size())
    return 0;

  int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset);
  memcpy(buf->data(), &data_[index][offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  CallbackLater(callback, num);
  return net::ERR_IO_PENDING;
}

int MockDiskEntry::WriteData(
    int index, int offset, net::IOBuffer* buf, int buf_len,
    const net::CompletionCallback& callback, bool truncate) {
  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
  DCHECK(!callback.is_null());
  DCHECK(truncate);

  if (fail_requests_) {
    CallbackLater(callback, net::ERR_CACHE_READ_FAILURE);
    return net::ERR_IO_PENDING;
  }

  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
    return net::ERR_FAILED;

  data_[index].resize(offset + buf_len);
  if (buf_len)
    memcpy(&data_[index][offset], buf->data(), buf_len);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(callback, buf_len);
  return net::ERR_IO_PENDING;
}

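// Sparse data lives in |data_[1]|.  A successful asynchronous sparse read also
// sets |busy_|, which makes RunCallback() re-post itself once (see the comment
// there), so async sparse operations take two trips through the message loop.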
int MockDiskEntry::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
                                  const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return net::ERR_NOT_IMPLEMENTED;
  if (!sparse_ || busy_)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return net::ERR_FAILED;

  if (fail_requests_)
    return net::ERR_CACHE_READ_FAILURE;

  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);
  if (!buf_len)
    return 0;

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset,
                     buf_len);
  memcpy(buf->data(), &data_[1][real_offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  CallbackLater(callback, num);
  busy_ = true;
  delayed_ = false;
  return net::ERR_IO_PENDING;
}

int MockDiskEntry::WriteSparseData(int64 offset, net::IOBuffer* buf,
                                   int buf_len,
                                   const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return net::ERR_NOT_IMPLEMENTED;
  if (busy_)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (!sparse_) {
    if (data_[1].size())
      return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
    sparse_ = true;
  }
  if (offset < 0)
    return net::ERR_FAILED;
  if (!buf_len)
    return 0;

  if (fail_requests_)
    return net::ERR_CACHE_READ_FAILURE;

  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);

  if (static_cast<int>(data_[1].size()) < real_offset + buf_len)
    data_[1].resize(real_offset + buf_len);

  memcpy(&data_[1][real_offset], buf->data(), buf_len);
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(callback, buf_len);
  return net::ERR_IO_PENDING;
}

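// Scans |data_[1]| starting at |offset| and reports the first contiguous run
// of non-zero bytes within the next |len| bytes: |*start| receives the offset
// where the run begins and the return value (or callback result) is its
// length.  Zero bytes count as holes; the resize() in WriteSparseData()
// zero-fills any range that was never written.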
int MockDiskEntry::GetAvailableRange(int64 offset, int len, int64* start,
                                     const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (!sparse_ || busy_)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return net::ERR_FAILED;

  if (fail_requests_)
    return net::ERR_CACHE_READ_FAILURE;

  *start = offset;
  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);
  if (static_cast<int>(data_[1].size()) < real_offset)
    return 0;

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset, len);
  int count = 0;
  for (; num > 0; num--, real_offset++) {
    if (!count) {
      if (data_[1][real_offset]) {
        count++;
        *start = real_offset;
      }
    } else {
      if (!data_[1][real_offset])
        break;
      count++;
    }
  }
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return count;

  CallbackLater(callback, count);
  return net::ERR_IO_PENDING;
}

bool MockDiskEntry::CouldBeSparse() const {
  if (fail_sparse_requests_)
    return false;
  return sparse_;
}

void MockDiskEntry::CancelSparseIO() {
  cancel_ = true;
}

int MockDiskEntry::ReadyForSparseIO(const net::CompletionCallback& callback) {
  if (fail_sparse_requests_)
    return net::ERR_NOT_IMPLEMENTED;
  if (!cancel_)
    return net::OK;

  cancel_ = false;
  DCHECK(!callback.is_null());
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return net::OK;

  // The pending operation is already in the message loop (and hopefully
  // already in the second pass).  Just notify the caller that it finished.
  CallbackLater(callback, 0);
  return net::ERR_IO_PENDING;
}

// If |value| is true, don't deliver any completion callbacks until called
// again with |value| set to false.  Caution: remember to enable callbacks
// again or all subsequent tests will fail.
// Static.
void MockDiskEntry::IgnoreCallbacks(bool value) {
  if (ignore_callbacks_ == value)
    return;
  ignore_callbacks_ = value;
  if (!value)
    StoreAndDeliverCallbacks(false, NULL, net::CompletionCallback(), 0);
}

MockDiskEntry::~MockDiskEntry() {
}

// Unlike the callbacks for MockHttpTransaction, we want this one to run even
// if the consumer called Close on the MockDiskEntry.  We achieve that by
// leveraging the fact that this class is reference counted.
void MockDiskEntry::CallbackLater(const net::CompletionCallback& callback,
                                  int result) {
  if (ignore_callbacks_)
    return StoreAndDeliverCallbacks(true, this, callback, result);
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&MockDiskEntry::RunCallback, this, callback, result));
}

void MockDiskEntry::RunCallback(
    const net::CompletionCallback& callback, int result) {
  if (busy_) {
    // This is kind of hacky, but controlling the behavior of just this entry
    // from a test is complicated.  What we really want is to delay the
    // delivery of a sparse IO operation a little more, so that the (async)
    // request start operation finishes without seeing the end of this
    // operation (already posted to the message loop)... and without simply
    // sleeping for n ms (which may cause trouble with slow bots).  So we
    // re-post this operation: every async sparse IO operation takes two trips
    // through the message loop instead of one.
    if (!delayed_) {
      delayed_ = true;
      return CallbackLater(callback, result);
    }
  }
  busy_ = false;
  callback.Run(result);
}

// When |store| is true, stores the callback to be delivered later; otherwise
// delivers any callback previously stored.
// Static.
void MockDiskEntry::StoreAndDeliverCallbacks(
    bool store, MockDiskEntry* entry, const net::CompletionCallback& callback,
    int result) {
  static std::vector<CallbackInfo> callback_list;
  if (store) {
    CallbackInfo c = {entry, callback, result};
    callback_list.push_back(c);
  } else {
    for (size_t i = 0; i < callback_list.size(); i++) {
      CallbackInfo& c = callback_list[i];
      c.entry->CallbackLater(c.callback, c.result);
    }
    callback_list.clear();
  }
}

// Statics.
bool MockDiskEntry::cancel_ = false;
bool MockDiskEntry::ignore_callbacks_ = false;

//-----------------------------------------------------------------------------

MockDiskCache::MockDiskCache()
    : open_count_(0), create_count_(0), fail_requests_(false),
      soft_failures_(false), double_create_check_(true),
      fail_sparse_requests_(false) {
}

MockDiskCache::~MockDiskCache() {
  ReleaseAll();
}

net::CacheType MockDiskCache::GetCacheType() const {
  return net::DISK_CACHE;
}

int32 MockDiskCache::GetEntryCount() const {
  return static_cast<int32>(entries_.size());
}

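// Opens an existing entry.  Missing and doomed entries fail with
// net::ERR_CACHE_OPEN_FAILURE (a doomed entry is also dropped from the map),
// and completion is synchronous when TEST_MODE_SYNC_CACHE_START is set for the
// key's transaction.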
int MockDiskCache::OpenEntry(const std::string& key, disk_cache::Entry** entry,
                             const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_requests_)
    return net::ERR_CACHE_OPEN_FAILURE;

  EntryMap::iterator it = entries_.find(key);
  if (it == entries_.end())
    return net::ERR_CACHE_OPEN_FAILURE;

  if (it->second->is_doomed()) {
    it->second->Release();
    entries_.erase(it);
    return net::ERR_CACHE_OPEN_FAILURE;
  }

  open_count_++;

  it->second->AddRef();
  *entry = it->second;

  if (soft_failures_)
    it->second->set_fail_requests();

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return net::OK;

  CallbackLater(callback, net::OK);
  return net::ERR_IO_PENDING;
}

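// Creates a new entry for |key|.  Creating on top of a live (non-doomed) entry
// trips a NOTREACHED() unless |double_create_check_| has been turned off, in
// which case the call fails with net::ERR_CACHE_CREATE_FAILURE instead.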
int MockDiskCache::CreateEntry(const std::string& key,
                               disk_cache::Entry** entry,
                               const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_requests_)
    return net::ERR_CACHE_CREATE_FAILURE;

  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    if (!it->second->is_doomed()) {
      if (double_create_check_)
        NOTREACHED();
      else
        return net::ERR_CACHE_CREATE_FAILURE;
    }
    it->second->Release();
    entries_.erase(it);
  }

  create_count_++;

  MockDiskEntry* new_entry = new MockDiskEntry(key);

  new_entry->AddRef();
  entries_[key] = new_entry;

  new_entry->AddRef();
  *entry = new_entry;

  if (soft_failures_)
    new_entry->set_fail_requests();

  if (fail_sparse_requests_)
    new_entry->set_fail_sparse_requests();

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return net::OK;

  CallbackLater(callback, net::OK);
  return net::ERR_IO_PENDING;
}

int MockDiskCache::DoomEntry(const std::string& key,
                             const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    it->second->Release();
    entries_.erase(it);
  }

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return net::OK;

  CallbackLater(callback, net::OK);
  return net::ERR_IO_PENDING;
}

int MockDiskCache::DoomAllEntries(const net::CompletionCallback& callback) {
  return net::ERR_NOT_IMPLEMENTED;
}

int MockDiskCache::DoomEntriesBetween(const base::Time initial_time,
                                      const base::Time end_time,
                                      const net::CompletionCallback& callback) {
  return net::ERR_NOT_IMPLEMENTED;
}

int MockDiskCache::DoomEntriesSince(const base::Time initial_time,
                                    const net::CompletionCallback& callback) {
  return net::ERR_NOT_IMPLEMENTED;
}

class MockDiskCache::NotImplementedIterator : public Iterator {
 public:
  virtual int OpenNextEntry(disk_cache::Entry** next_entry,
                            const net::CompletionCallback& callback) OVERRIDE {
    return net::ERR_NOT_IMPLEMENTED;
  }
};

scoped_ptr<disk_cache::Backend::Iterator> MockDiskCache::CreateIterator() {
  return scoped_ptr<Iterator>(new NotImplementedIterator());
}

void MockDiskCache::GetStats(
    std::vector<std::pair<std::string, std::string> >* stats) {
}

void MockDiskCache::OnExternalCacheHit(const std::string& key) {
}

void MockDiskCache::ReleaseAll() {
  EntryMap::iterator it = entries_.begin();
  for (; it != entries_.end(); ++it)
    it->second->Release();
  entries_.clear();
}

void MockDiskCache::CallbackLater(const net::CompletionCallback& callback,
                                  int result) {
  base::MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&CallbackForwarder, callback, result));
}

//-----------------------------------------------------------------------------

int MockBackendFactory::CreateBackend(net::NetLog* net_log,
                                      scoped_ptr<disk_cache::Backend>* backend,
                                      const net::CompletionCallback& callback) {
  backend->reset(new MockDiskCache());
  return net::OK;
}

//-----------------------------------------------------------------------------

MockHttpCache::MockHttpCache()
    : http_cache_(new MockNetworkLayer(), NULL, new MockBackendFactory()) {
}

MockHttpCache::MockHttpCache(net::HttpCache::BackendFactory* disk_cache_factory)
    : http_cache_(new MockNetworkLayer(), NULL, disk_cache_factory) {
}

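// Returns the backend as a MockDiskCache*, waiting for backend creation to
// finish if it is still pending.  Returns NULL if creation failed.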
MockDiskCache* MockHttpCache::disk_cache() {
  net::TestCompletionCallback cb;
  disk_cache::Backend* backend;
  int rv = http_cache_.GetBackend(&backend, cb.callback());
  rv = cb.GetResult(rv);
  return (rv == net::OK) ? static_cast<MockDiskCache*>(backend) : NULL;
}

int MockHttpCache::CreateTransaction(scoped_ptr<net::HttpTransaction>* trans) {
  return http_cache_.CreateTransaction(net::DEFAULT_PRIORITY, trans);
}

void MockHttpCache::BypassCacheLock() {
  http_cache_.BypassLockForTest();
}

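// Reads stream 0 of |disk_entry| and parses it into |response_info|.  Together
// with WriteResponseInfo() below, this lets tests seed and inspect the
// serialized net::HttpResponseInfo directly.  Rough sketch of use from a test
// (hypothetical code; |cache| is a MockHttpCache, |response| a
// net::HttpResponseInfo, and the key must map to a registered MockTransaction,
// see GetTestModeForEntry()):
//
//   disk_cache::Entry* entry;
//   ASSERT_TRUE(cache.CreateBackendEntry(kUrl, &entry, NULL));
//   EXPECT_TRUE(cache.WriteResponseInfo(entry, &response, false, false));
//   bool truncated = false;
//   EXPECT_TRUE(cache.ReadResponseInfo(entry, &response, &truncated));
//   entry->Close();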
bool MockHttpCache::ReadResponseInfo(disk_cache::Entry* disk_entry,
                                     net::HttpResponseInfo* response_info,
                                     bool* response_truncated) {
  int size = disk_entry->GetDataSize(0);

  net::TestCompletionCallback cb;
  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size));
  int rv = disk_entry->ReadData(0, 0, buffer.get(), size, cb.callback());
  rv = cb.GetResult(rv);
  EXPECT_EQ(size, rv);

  return net::HttpCache::ParseResponseInfo(buffer->data(), size,
                                           response_info,
                                           response_truncated);
}

bool MockHttpCache::WriteResponseInfo(
    disk_cache::Entry* disk_entry, const net::HttpResponseInfo* response_info,
    bool skip_transient_headers, bool response_truncated) {
  Pickle pickle;
  response_info->Persist(
      &pickle, skip_transient_headers, response_truncated);

  net::TestCompletionCallback cb;
  scoped_refptr<net::WrappedIOBuffer> data(new net::WrappedIOBuffer(
      reinterpret_cast<const char*>(pickle.data())));
  int len = static_cast<int>(pickle.size());

  int rv = disk_entry->WriteData(0, 0, data.get(), len, cb.callback(), true);
  rv = cb.GetResult(rv);
  return (rv == len);
}

bool MockHttpCache::OpenBackendEntry(const std::string& key,
                                     disk_cache::Entry** entry) {
  net::TestCompletionCallback cb;
  int rv = disk_cache()->OpenEntry(key, entry, cb.callback());
  return (cb.GetResult(rv) == net::OK);
}

bool MockHttpCache::CreateBackendEntry(const std::string& key,
                                       disk_cache::Entry** entry,
                                       net::NetLog* net_log) {
  net::TestCompletionCallback cb;
  int rv = disk_cache()->CreateEntry(key, entry, cb.callback());
  return (cb.GetResult(rv) == net::OK);
}

// Static.
int MockHttpCache::GetTestMode(int test_mode) {
  if (!g_test_mode)
    return test_mode;

  return g_test_mode;
}

// Static.
void MockHttpCache::SetTestMode(int test_mode) {
  g_test_mode = test_mode;
}

//-----------------------------------------------------------------------------

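// A create request that never completes: the callback is never invoked, so
// tests can exercise paths where the backend is stuck creating an entry.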
int MockDiskCacheNoCB::CreateEntry(const std::string& key,
                                   disk_cache::Entry** entry,
                                   const net::CompletionCallback& callback) {
  return net::ERR_IO_PENDING;
}

//-----------------------------------------------------------------------------

int MockBackendNoCbFactory::CreateBackend(
    net::NetLog* net_log, scoped_ptr<disk_cache::Backend>* backend,
    const net::CompletionCallback& callback) {
  backend->reset(new MockDiskCacheNoCB());
  return net::OK;
}

//-----------------------------------------------------------------------------

MockBlockingBackendFactory::MockBlockingBackendFactory()
    : backend_(NULL),
      block_(true),
      fail_(false) {
}

MockBlockingBackendFactory::~MockBlockingBackendFactory() {
}

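// While |block_| is true the factory parks the request: it stashes |backend|
// and |callback| and returns net::ERR_IO_PENDING until FinishCreation() is
// called.  A hedged sketch of typical use from a test (hypothetical code):
//
//   MockBlockingBackendFactory* factory = new MockBlockingBackendFactory();
//   MockHttpCache cache(factory);  // The cache owns the factory.
//   ... start a transaction; it stalls waiting for the backend ...
//   factory->FinishCreation();     // Unblocks backend creation.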
int MockBlockingBackendFactory::CreateBackend(
    net::NetLog* net_log, scoped_ptr<disk_cache::Backend>* backend,
    const net::CompletionCallback& callback) {
  if (!block_) {
    if (!fail_)
      backend->reset(new MockDiskCache());
    return Result();
  }

  backend_ = backend;
  callback_ = callback;
  return net::ERR_IO_PENDING;
}

void MockBlockingBackendFactory::FinishCreation() {
  block_ = false;
  if (!callback_.is_null()) {
    if (!fail_)
      backend_->reset(new MockDiskCache());
    net::CompletionCallback cb = callback_;
    callback_.Reset();
    cb.Run(Result());  // This object can be deleted here.
  }
}