1// Copyright (c) 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "net/http/mock_http_cache.h"
6
7#include "base/bind.h"
8#include "base/message_loop/message_loop.h"
9#include "net/base/completion_callback.h"
10#include "net/base/net_errors.h"
11#include "testing/gtest/include/gtest/gtest.h"
12
13namespace {
14
15// We can override the test mode for a given operation by setting this global
16// variable.
17int g_test_mode = 0;
18
19int GetTestModeForEntry(const std::string& key) {
20  // 'key' is prefixed with an identifier if it corresponds to a cached POST.
21  // Skip past that to locate the actual URL.
22  //
23  // TODO(darin): It breaks the abstraction a bit that we assume 'key' is an
24  // URL corresponding to a registered MockTransaction.  It would be good to
25  // have another way to access the test_mode.
26  GURL url;
27  if (isdigit(key[0])) {
28    size_t slash = key.find('/');
29    DCHECK(slash != std::string::npos);
30    url = GURL(key.substr(slash + 1));
31  } else {
32    url = GURL(key);
33  }
34  const MockTransaction* t = FindMockTransaction(url);
35  DCHECK(t);
36  return t->test_mode;
37}
38
39void CallbackForwader(const net::CompletionCallback& callback, int result) {
40  callback.Run(result);
41}
42
43}  // namespace
44
45//-----------------------------------------------------------------------------
46
// Everything needed to replay a deferred completion callback.  |entry| is a
// scoped_refptr so the entry stays alive until delivery.
struct MockDiskEntry::CallbackInfo {
  scoped_refptr<MockDiskEntry> entry;
  net::CompletionCallback callback;
  int result;
};
52
// Captures the test mode for |key| once at construction so that individual
// operations can honor the TEST_MODE_SYNC_* flags.
MockDiskEntry::MockDiskEntry(const std::string& key)
    : key_(key), doomed_(false), sparse_(false),
      fail_requests_(false), fail_sparse_requests_(false), busy_(false),
      delayed_(false) {
  test_mode_ = GetTestModeForEntry(key);
}
59
// Marks the entry as doomed; MockDiskCache evicts doomed entries the next
// time the same key is opened, created, or doomed.
void MockDiskEntry::Doom() {
  doomed_ = true;
}
63
// Closing just drops one reference; the entry is reference counted, so it
// can outlive Close() while callbacks are still pending.
void MockDiskEntry::Close() {
  Release();
}
67
// Returns the cache key this entry was created with.
std::string MockDiskEntry::GetKey() const {
  return key_;
}
71
72base::Time MockDiskEntry::GetLastUsed() const {
73  return base::Time::FromInternalValue(0);
74}
75
76base::Time MockDiskEntry::GetLastModified() const {
77  return base::Time::FromInternalValue(0);
78}
79
80int32 MockDiskEntry::GetDataSize(int index) const {
81  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
82  return static_cast<int32>(data_[index].size());
83}
84
85int MockDiskEntry::ReadData(
86    int index, int offset, net::IOBuffer* buf, int buf_len,
87    const net::CompletionCallback& callback) {
88  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
89  DCHECK(!callback.is_null());
90
91  if (fail_requests_)
92    return net::ERR_CACHE_READ_FAILURE;
93
94  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
95    return net::ERR_FAILED;
96  if (static_cast<size_t>(offset) == data_[index].size())
97    return 0;
98
99  int num = std::min(buf_len, static_cast<int>(data_[index].size()) - offset);
100  memcpy(buf->data(), &data_[index][offset], num);
101
102  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
103    return num;
104
105  CallbackLater(callback, num);
106  return net::ERR_IO_PENDING;
107}
108
109int MockDiskEntry::WriteData(
110    int index, int offset, net::IOBuffer* buf, int buf_len,
111    const net::CompletionCallback& callback, bool truncate) {
112  DCHECK(index >= 0 && index < kNumCacheEntryDataIndices);
113  DCHECK(!callback.is_null());
114  DCHECK(truncate);
115
116  if (fail_requests_) {
117    CallbackLater(callback, net::ERR_CACHE_READ_FAILURE);
118    return net::ERR_IO_PENDING;
119  }
120
121  if (offset < 0 || offset > static_cast<int>(data_[index].size()))
122    return net::ERR_FAILED;
123
124  data_[index].resize(offset + buf_len);
125  if (buf_len)
126    memcpy(&data_[index][offset], buf->data(), buf_len);
127
128  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
129    return buf_len;
130
131  CallbackLater(callback, buf_len);
132  return net::ERR_IO_PENDING;
133}
134
// Reads |buf_len| bytes of sparse data (kept in data_[1]) at |offset|.
// Async sparse reads mark the entry busy until the (re-posted) callback
// runs; see RunCallback for the two-pass delivery trick.
int MockDiskEntry::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
                                  const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return net::ERR_NOT_IMPLEMENTED;
  if (!sparse_ || busy_)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return net::ERR_FAILED;

  if (fail_requests_)
    return net::ERR_CACHE_READ_FAILURE;

  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);
  if (!buf_len)
    return 0;

  // NOTE(review): an |offset| beyond the end of data_[1] would make |num|
  // negative; the tests driving this mock never read past the end.
  int num = std::min(static_cast<int>(data_[1].size()) - real_offset,
                     buf_len);
  memcpy(buf->data(), &data_[1][real_offset], num);

  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return num;

  // Deliver asynchronously and stay busy until RunCallback's second pass.
  CallbackLater(callback, num);
  busy_ = true;
  delayed_ = false;
  return net::ERR_IO_PENDING;
}
165
// Writes |buf_len| bytes of sparse data at |offset|.  The first sparse write
// switches the entry into sparse mode, which is only allowed while stream 1
// is still empty.
int MockDiskEntry::WriteSparseData(int64 offset, net::IOBuffer* buf,
                                   int buf_len,
                                   const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_sparse_requests_)
    return net::ERR_NOT_IMPLEMENTED;
  if (busy_)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (!sparse_) {
    // Refuse to mix sparse and regular data in stream 1.
    if (data_[1].size())
      return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
    sparse_ = true;
  }
  if (offset < 0)
    return net::ERR_FAILED;
  if (!buf_len)
    return 0;

  if (fail_requests_)
    return net::ERR_CACHE_READ_FAILURE;

  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);

  // Grow the backing store as needed; the gap (if any) is zero-filled.
  if (static_cast<int>(data_[1].size()) < real_offset + buf_len)
    data_[1].resize(real_offset + buf_len);

  memcpy(&data_[1][real_offset], buf->data(), buf_len);
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return buf_len;

  CallbackLater(callback, buf_len);
  return net::ERR_IO_PENDING;
}
200
// Reports the first contiguous run of non-zero bytes inside
// [offset, offset + len) of the sparse stream.  |*start| receives the run's
// starting offset; the return value (or callback result) is its length.
int MockDiskEntry::GetAvailableRange(int64 offset, int len, int64* start,
                                     const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (!sparse_ || busy_)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
  if (offset < 0)
    return net::ERR_FAILED;

  if (fail_requests_)
    return net::ERR_CACHE_READ_FAILURE;

  *start = offset;
  DCHECK(offset < kint32max);
  int real_offset = static_cast<int>(offset);
  if (static_cast<int>(data_[1].size()) < real_offset)
    return 0;

  int num = std::min(static_cast<int>(data_[1].size()) - real_offset, len);
  int count = 0;  // Length of the run found so far (0 => still searching).
  for (; num > 0; num--, real_offset++) {
    if (!count) {
      // Searching for the start of a run; zero bytes mean "not stored".
      if (data_[1][real_offset]) {
        count++;
        *start = real_offset;
      }
    } else {
      // Extending the run; the first zero byte ends it.
      if (!data_[1][real_offset])
        break;
      count++;
    }
  }
  // NOTE(review): this checks the WRITE sync flag even though the operation
  // is read-like; confirm whether TEST_MODE_SYNC_CACHE_READ was intended.
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_WRITE)
    return count;

  CallbackLater(callback, count);
  return net::ERR_IO_PENDING;
}
238
239bool MockDiskEntry::CouldBeSparse() const {
240  if (fail_sparse_requests_)
241    return false;
242  return sparse_;
243}
244
// Routes the next ReadyForSparseIO() call through the cancel path.  Note
// that cancel_ is a static member, so this affects every MockDiskEntry.
void MockDiskEntry::CancelSparseIO() {
  cancel_ = true;
}
248
// Returns OK immediately unless sparse I/O was cancelled, in which case the
// completion is signalled through |callback| (except in synchronous mode).
int MockDiskEntry::ReadyForSparseIO(const net::CompletionCallback& callback) {
  if (fail_sparse_requests_)
    return net::ERR_NOT_IMPLEMENTED;
  if (!cancel_)
    return net::OK;

  cancel_ = false;  // Consume the (static) cancel flag.
  DCHECK(!callback.is_null());
  if (MockHttpCache::GetTestMode(test_mode_) & TEST_MODE_SYNC_CACHE_READ)
    return net::OK;

  // The pending operation is already in the message loop (and hopefully
  // already in the second pass).  Just notify the caller that it finished.
  CallbackLater(callback, 0);
  return net::ERR_IO_PENDING;
}
265
266// If |value| is true, don't deliver any completion callbacks until called
267// again with |value| set to false.  Caution: remember to enable callbacks
268// again or all subsequent tests will fail.
269// Static.
void MockDiskEntry::IgnoreCallbacks(bool value) {
  if (ignore_callbacks_ == value)
    return;  // No state change; nothing to do.
  ignore_callbacks_ = value;
  // Re-enabling callbacks flushes everything stored while they were ignored.
  if (!value)
    StoreAndDeliverCallbacks(false, NULL, net::CompletionCallback(), 0);
}
277
// Instances are reference counted (see Close/CallbackLater) and are deleted
// when the last reference is released.
MockDiskEntry::~MockDiskEntry() {
}
280
281// Unlike the callbacks for MockHttpTransaction, we want this one to run even
282// if the consumer called Close on the MockDiskEntry.  We achieve that by
283// leveraging the fact that this class is reference counted.
void MockDiskEntry::CallbackLater(const net::CompletionCallback& callback,
                                  int result) {
  // While callbacks are globally ignored, park this one for later delivery.
  if (ignore_callbacks_)
    return StoreAndDeliverCallbacks(true, this, callback, result);
  // Binding |this| (refcounted) keeps the entry alive until RunCallback.
  base::MessageLoop::current()->PostTask(
      FROM_HERE,
      base::Bind(&MockDiskEntry::RunCallback, this, callback, result));
}
292
// Delivers |callback|.  While a sparse operation is in flight (busy_), the
// delivery is re-posted once so it takes two message-loop trips; see below.
void MockDiskEntry::RunCallback(
    const net::CompletionCallback& callback, int result) {
  if (busy_) {
    // This is kind of hacky, but controlling the behavior of just this entry
    // from a test is sort of complicated.  What we really want to do is
    // delay the delivery of a sparse IO operation a little more so that the
    // request start operation (async) will finish without seeing the end of
    // this operation (already posted to the message loop)... and without
    // just delaying for n mS (which may cause trouble with slow bots).  So
    // we re-post this operation (all async sparse IO operations will take two
    // trips through the message loop instead of one).
    if (!delayed_) {
      delayed_ = true;
      return CallbackLater(callback, result);
    }
  }
  busy_ = false;
  callback.Run(result);
}
312
313// When |store| is true, stores the callback to be delivered later; otherwise
314// delivers any callback previously stored.
315// Static.
316void MockDiskEntry::StoreAndDeliverCallbacks(
317    bool store, MockDiskEntry* entry, const net::CompletionCallback& callback,
318    int result) {
319  static std::vector<CallbackInfo> callback_list;
320  if (store) {
321    CallbackInfo c = {entry, callback, result};
322    callback_list.push_back(c);
323  } else {
324    for (size_t i = 0; i < callback_list.size(); i++) {
325      CallbackInfo& c = callback_list[i];
326      c.entry->CallbackLater(c.callback, c.result);
327    }
328    callback_list.clear();
329  }
330}
331
// Statics.
bool MockDiskEntry::cancel_ = false;  // Set by CancelSparseIO().
bool MockDiskEntry::ignore_callbacks_ = false;  // Set by IgnoreCallbacks().
335
336//-----------------------------------------------------------------------------
337
// All counters start at zero and all failure modes off; double-create
// checking is enabled by default.
MockDiskCache::MockDiskCache()
    : open_count_(0), create_count_(0), fail_requests_(false),
      soft_failures_(false), double_create_check_(true),
      fail_sparse_requests_(false) {
}
343
MockDiskCache::~MockDiskCache() {
  // Drop the cache's reference on every entry still in the map.
  ReleaseAll();
}
347
// Always reports itself as an on-disk cache.
net::CacheType MockDiskCache::GetCacheType() const {
  return net::DISK_CACHE;
}
351
// Returns the number of entries in the map (doomed entries included until
// they are evicted by a later Open/Create/Doom).
int32 MockDiskCache::GetEntryCount() const {
  return static_cast<int32>(entries_.size());
}
355
356int MockDiskCache::OpenEntry(const std::string& key, disk_cache::Entry** entry,
357                             const net::CompletionCallback& callback) {
358  DCHECK(!callback.is_null());
359  if (fail_requests_)
360    return net::ERR_CACHE_OPEN_FAILURE;
361
362  EntryMap::iterator it = entries_.find(key);
363  if (it == entries_.end())
364    return net::ERR_CACHE_OPEN_FAILURE;
365
366  if (it->second->is_doomed()) {
367    it->second->Release();
368    entries_.erase(it);
369    return net::ERR_CACHE_OPEN_FAILURE;
370  }
371
372  open_count_++;
373
374  it->second->AddRef();
375  *entry = it->second;
376
377  if (soft_failures_)
378    it->second->set_fail_requests();
379
380  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
381    return net::OK;
382
383  CallbackLater(callback, net::OK);
384  return net::ERR_IO_PENDING;
385}
386
// Creates a new entry for |key|.  Creating over an existing non-doomed entry
// is a test bug unless double-create checking has been disabled.
int MockDiskCache::CreateEntry(const std::string& key,
                               disk_cache::Entry** entry,
                               const net::CompletionCallback& callback) {
  DCHECK(!callback.is_null());
  if (fail_requests_)
    return net::ERR_CACHE_CREATE_FAILURE;

  EntryMap::iterator it = entries_.find(key);
  if (it != entries_.end()) {
    if (!it->second->is_doomed()) {
      if (double_create_check_)
        NOTREACHED();
      else
        return net::ERR_CACHE_CREATE_FAILURE;
    }
    // Evict the doomed entry so the new one can take its place.
    it->second->Release();
    entries_.erase(it);
  }

  create_count_++;

  MockDiskEntry* new_entry = new MockDiskEntry(key);

  // One reference is held by the map...
  new_entry->AddRef();
  entries_[key] = new_entry;

  // ...and one is handed to the caller (dropped by Entry::Close()).
  new_entry->AddRef();
  *entry = new_entry;

  if (soft_failures_)
    new_entry->set_fail_requests();

  if (fail_sparse_requests_)
    new_entry->set_fail_sparse_requests();

  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
    return net::OK;

  CallbackLater(callback, net::OK);
  return net::ERR_IO_PENDING;
}
428
429int MockDiskCache::DoomEntry(const std::string& key,
430                             const net::CompletionCallback& callback) {
431  DCHECK(!callback.is_null());
432  EntryMap::iterator it = entries_.find(key);
433  if (it != entries_.end()) {
434    it->second->Release();
435    entries_.erase(it);
436  }
437
438  if (GetTestModeForEntry(key) & TEST_MODE_SYNC_CACHE_START)
439    return net::OK;
440
441  CallbackLater(callback, net::OK);
442  return net::ERR_IO_PENDING;
443}
444
// Not implemented by this mock.
int MockDiskCache::DoomAllEntries(const net::CompletionCallback& callback) {
  return net::ERR_NOT_IMPLEMENTED;
}
448
// Not implemented by this mock.
int MockDiskCache::DoomEntriesBetween(const base::Time initial_time,
                                      const base::Time end_time,
                                      const net::CompletionCallback& callback) {
  return net::ERR_NOT_IMPLEMENTED;
}
454
// Not implemented by this mock.
int MockDiskCache::DoomEntriesSince(const base::Time initial_time,
                                    const net::CompletionCallback& callback) {
  return net::ERR_NOT_IMPLEMENTED;
}
459
// Enumeration is not supported by this mock.
int MockDiskCache::OpenNextEntry(void** iter, disk_cache::Entry** next_entry,
                                 const net::CompletionCallback& callback) {
  return net::ERR_NOT_IMPLEMENTED;
}
464
// Enumeration is not supported (see OpenNextEntry); nothing to clean up.
void MockDiskCache::EndEnumeration(void** iter) {
}
467
// This mock collects no statistics; |stats| is left untouched.
void MockDiskCache::GetStats(
    std::vector<std::pair<std::string, std::string> >* stats) {
}
471
// External cache hits are ignored by this mock.
void MockDiskCache::OnExternalCacheHit(const std::string& key) {
}
474
475void MockDiskCache::ReleaseAll() {
476  EntryMap::iterator it = entries_.begin();
477  for (; it != entries_.end(); ++it)
478    it->second->Release();
479  entries_.clear();
480}
481
// Posts |callback| with |result| to the current message loop.
void MockDiskCache::CallbackLater(const net::CompletionCallback& callback,
                                  int result) {
  base::MessageLoop::current()->PostTask(
      FROM_HERE, base::Bind(&CallbackForwader, callback, result));
}
487
488//-----------------------------------------------------------------------------
489
// Synchronously hands back a fresh MockDiskCache; |callback| is never run.
int MockBackendFactory::CreateBackend(net::NetLog* net_log,
                                      scoped_ptr<disk_cache::Backend>* backend,
                                      const net::CompletionCallback& callback) {
  backend->reset(new MockDiskCache());
  return net::OK;
}
496
497//-----------------------------------------------------------------------------
498
// Builds a cache whose network layer and backend factory are both mocks.
MockHttpCache::MockHttpCache()
    : http_cache_(new MockNetworkLayer(), NULL, new MockBackendFactory()) {
}
502
// Builds a cache with a mock network layer and a caller-supplied backend
// factory.
MockHttpCache::MockHttpCache(net::HttpCache::BackendFactory* disk_cache_factory)
    : http_cache_(new MockNetworkLayer(), NULL, disk_cache_factory) {
}
506
507MockDiskCache* MockHttpCache::disk_cache() {
508  net::TestCompletionCallback cb;
509  disk_cache::Backend* backend;
510  int rv = http_cache_.GetBackend(&backend, cb.callback());
511  rv = cb.GetResult(rv);
512  return (rv == net::OK) ? static_cast<MockDiskCache*>(backend) : NULL;
513}
514
515bool MockHttpCache::ReadResponseInfo(disk_cache::Entry* disk_entry,
516                                     net::HttpResponseInfo* response_info,
517                                     bool* response_truncated) {
518  int size = disk_entry->GetDataSize(0);
519
520  net::TestCompletionCallback cb;
521  scoped_refptr<net::IOBuffer> buffer(new net::IOBuffer(size));
522  int rv = disk_entry->ReadData(0, 0, buffer.get(), size, cb.callback());
523  rv = cb.GetResult(rv);
524  EXPECT_EQ(size, rv);
525
526  return net::HttpCache::ParseResponseInfo(buffer->data(), size,
527                                           response_info,
528                                           response_truncated);
529}
530
531bool MockHttpCache::WriteResponseInfo(
532    disk_cache::Entry* disk_entry, const net::HttpResponseInfo* response_info,
533    bool skip_transient_headers, bool response_truncated) {
534  Pickle pickle;
535  response_info->Persist(
536      &pickle, skip_transient_headers, response_truncated);
537
538  net::TestCompletionCallback cb;
539  scoped_refptr<net::WrappedIOBuffer> data(new net::WrappedIOBuffer(
540      reinterpret_cast<const char*>(pickle.data())));
541  int len = static_cast<int>(pickle.size());
542
543  int rv = disk_entry->WriteData(0, 0, data.get(), len, cb.callback(), true);
544  rv = cb.GetResult(rv);
545  return (rv == len);
546}
547
548bool MockHttpCache::OpenBackendEntry(const std::string& key,
549                                     disk_cache::Entry** entry) {
550  net::TestCompletionCallback cb;
551  int rv = disk_cache()->OpenEntry(key, entry, cb.callback());
552  return (cb.GetResult(rv) == net::OK);
553}
554
555bool MockHttpCache::CreateBackendEntry(const std::string& key,
556                                       disk_cache::Entry** entry,
557                                       net::NetLog* net_log) {
558  net::TestCompletionCallback cb;
559  int rv = disk_cache()->CreateEntry(key, entry, cb.callback());
560  return (cb.GetResult(rv) == net::OK);
561}
562
563// Static.
564int MockHttpCache::GetTestMode(int test_mode) {
565  if (!g_test_mode)
566    return test_mode;
567
568  return g_test_mode;
569}
570
// Static.
void MockHttpCache::SetTestMode(int test_mode) {
  // A value of 0 removes the override (see GetTestMode).
  g_test_mode = test_mode;
}
575
576//-----------------------------------------------------------------------------
577
// Pretends entry creation is in flight but never runs |callback|, so the
// operation never completes.
int MockDiskCacheNoCB::CreateEntry(const std::string& key,
                                   disk_cache::Entry** entry,
                                   const net::CompletionCallback& callback) {
  return net::ERR_IO_PENDING;
}
583
584//-----------------------------------------------------------------------------
585
// Synchronously produces a MockDiskCacheNoCB backend (whose entry creation
// never completes); |callback| is never run.
int MockBackendNoCbFactory::CreateBackend(
    net::NetLog* net_log, scoped_ptr<disk_cache::Backend>* backend,
    const net::CompletionCallback& callback) {
  backend->reset(new MockDiskCacheNoCB());
  return net::OK;
}
592
593//-----------------------------------------------------------------------------
594
// Blocks backend creation by default; call FinishCreation() to let a pending
// CreateBackend() complete.
MockBlockingBackendFactory::MockBlockingBackendFactory()
    : backend_(NULL),
      block_(true),
      fail_(false) {
}
600
MockBlockingBackendFactory::~MockBlockingBackendFactory() {
}
603
604int MockBlockingBackendFactory::CreateBackend(
605    net::NetLog* net_log, scoped_ptr<disk_cache::Backend>* backend,
606    const net::CompletionCallback& callback) {
607  if (!block_) {
608    if (!fail_)
609      backend->reset(new MockDiskCache());
610    return Result();
611  }
612
613  backend_ =  backend;
614  callback_ = callback;
615  return net::ERR_IO_PENDING;
616}
617
// Unblocks a pending CreateBackend() call: creates the backend (unless
// failure was requested) and runs the stored callback.
void MockBlockingBackendFactory::FinishCreation() {
  block_ = false;
  if (!callback_.is_null()) {
    if (!fail_)
      backend_->reset(new MockDiskCache());
    // Copy and reset the callback before running it, since running it may
    // destroy this factory.
    net::CompletionCallback cb = callback_;
    callback_.Reset();
    cb.Run(Result());  // This object can be deleted here.
  }
}
628