1// Copyright (c) 2009-2010 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "net/disk_cache/sparse_control.h"
6
7#include "base/format_macros.h"
8#include "base/logging.h"
9#include "base/message_loop.h"
10#include "base/string_util.h"
11#include "base/stringprintf.h"
12#include "base/time.h"
13#include "net/base/io_buffer.h"
14#include "net/base/net_errors.h"
15#include "net/disk_cache/backend_impl.h"
16#include "net/disk_cache/entry_impl.h"
17#include "net/disk_cache/file.h"
18#include "net/disk_cache/net_log_parameters.h"
19
20using base::Time;
21
22namespace {
23
// Stream of the sparse data index.
const int kSparseIndex = 2;

// Stream of the sparse data.
const int kSparseData = 1;

// We can have up to 64k children.
const int kMaxMapSize = 8 * 1024;  // Map bytes: 8 KB * 8 bits = 64K children.

// The maximum number of bytes that a child can store.
const int kMaxEntrySize = 0x100000;  // 1 MB per child entry.

// The size of each data block (tracked by the child allocation bitmap).
const int kBlockSize = 1024;
38
39// Returns the name of a child entry given the base_name and signature of the
40// parent and the child_id.
41// If the entry is called entry_name, child entries will be named something
42// like Range_entry_name:XXX:YYY where XXX is the entry signature and YYY is the
43// number of the particular child.
44std::string GenerateChildName(const std::string& base_name, int64 signature,
45                              int64 child_id) {
46  return base::StringPrintf("Range_%s:%" PRIx64 ":%" PRIx64, base_name.c_str(),
47                            signature, child_id);
48}
49
// This class deletes the children of a sparse entry.
// It is self-owned: the creator must AddRef() it once, and the object calls
// Release() on itself when the deletion (which may span several message loop
// tasks) finishes or cannot proceed.
class ChildrenDeleter
    : public base::RefCounted<ChildrenDeleter>,
      public disk_cache::FileIOCallback {
 public:
  ChildrenDeleter(disk_cache::BackendImpl* backend, const std::string& name)
      : backend_(backend->GetWeakPtr()), name_(name), signature_(0) {}

  // disk_cache::FileIOCallback implementation; invoked when the asynchronous
  // read started by ReadData() completes.
  virtual void OnFileIOComplete(int bytes_copied);

  // Two ways of deleting the children: if we have the children map, use Start()
  // directly, otherwise pass the data address to ReadData().
  void Start(char* buffer, int len);
  void ReadData(disk_cache::Addr address, int len);

 private:
  friend class base::RefCounted<ChildrenDeleter>;
  ~ChildrenDeleter() {}

  // Dooms the next child marked in |children_map_| and posts a task to
  // process the following one.
  void DeleteChildren();

  base::WeakPtr<disk_cache::BackendImpl> backend_;  // The backend can go away.
  std::string name_;  // Key of the parent entry.
  disk_cache::Bitmap children_map_;  // One bit per possibly existing child.
  int64 signature_;  // Signature shared by the parent and its children.
  scoped_array<char> buffer_;  // Holds the serialized SparseData being read.
  DISALLOW_COPY_AND_ASSIGN(ChildrenDeleter);
};
78
79// This is the callback of the file operation.
80void ChildrenDeleter::OnFileIOComplete(int bytes_copied) {
81  char* buffer = buffer_.release();
82  Start(buffer, bytes_copied);
83}
84
85void ChildrenDeleter::Start(char* buffer, int len) {
86  buffer_.reset(buffer);
87  if (len < static_cast<int>(sizeof(disk_cache::SparseData)))
88    return Release();
89
90  // Just copy the information from |buffer|, delete |buffer| and start deleting
91  // the child entries.
92  disk_cache::SparseData* data =
93      reinterpret_cast<disk_cache::SparseData*>(buffer);
94  signature_ = data->header.signature;
95
96  int num_bits = (len - sizeof(disk_cache::SparseHeader)) * 8;
97  children_map_.Resize(num_bits, false);
98  children_map_.SetMap(data->bitmap, num_bits / 32);
99  buffer_.reset();
100
101  DeleteChildren();
102}
103
104void ChildrenDeleter::ReadData(disk_cache::Addr address, int len) {
105  DCHECK(address.is_block_file());
106  if (!backend_)
107    return Release();
108
109  disk_cache::File* file(backend_->File(address));
110  if (!file)
111    return Release();
112
113  size_t file_offset = address.start_block() * address.BlockSize() +
114                       disk_cache::kBlockHeaderSize;
115
116  buffer_.reset(new char[len]);
117  bool completed;
118  if (!file->Read(buffer_.get(), len, file_offset, this, &completed))
119    return Release();
120
121  if (completed)
122    OnFileIOComplete(len);
123
124  // And wait until OnFileIOComplete gets called.
125}
126
// Dooms one child per invocation, rescheduling itself until the map is empty
// (or the backend goes away), at which point it self destructs.
void ChildrenDeleter::DeleteChildren() {
  int child_id = 0;
  if (!children_map_.FindNextSetBit(&child_id) || !backend_) {
    // We are done. Just delete this object.
    return Release();
  }
  backend_->SyncDoomEntry(GenerateChildName(name_, signature_, child_id));
  children_map_.Set(child_id, false);

  // Delete the next child from a fresh task so we don't hog the loop.
  MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
      this, &ChildrenDeleter::DeleteChildren));
}
141
142// Returns the NetLog event type corresponding to a SparseOperation.
143net::NetLog::EventType GetSparseEventType(
144    disk_cache::SparseControl::SparseOperation operation) {
145  switch (operation) {
146    case disk_cache::SparseControl::kReadOperation:
147      return net::NetLog::TYPE_SPARSE_READ;
148    case disk_cache::SparseControl::kWriteOperation:
149      return net::NetLog::TYPE_SPARSE_WRITE;
150    case disk_cache::SparseControl::kGetRangeOperation:
151      return net::NetLog::TYPE_SPARSE_GET_RANGE;
152    default:
153      NOTREACHED();
154      return net::NetLog::TYPE_CANCELLED;
155  }
156}
157
158// Logs the end event for |operation| on a child entry.  Range operations log
159// no events for each child they search through.
160void LogChildOperationEnd(const net::BoundNetLog& net_log,
161                          disk_cache::SparseControl::SparseOperation operation,
162                          int result) {
163  if (net_log.IsLoggingAllEvents()) {
164    net::NetLog::EventType event_type;
165    switch (operation) {
166      case disk_cache::SparseControl::kReadOperation:
167        event_type = net::NetLog::TYPE_SPARSE_READ_CHILD_DATA;
168        break;
169      case disk_cache::SparseControl::kWriteOperation:
170        event_type = net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA;
171        break;
172      case disk_cache::SparseControl::kGetRangeOperation:
173        return;
174      default:
175        NOTREACHED();
176        return;
177    }
178    net_log.EndEventWithNetErrorCode(event_type, result);
179  }
180}
181
182}  // namespace.
183
184namespace disk_cache {
185
// Builds a controller for |entry|; Init() must succeed before any IO is
// issued. Note that |child_map_| is constructed on top of
// |child_data_.bitmap|, so updating the map mutates the serialized child
// control data in place.
SparseControl::SparseControl(EntryImpl* entry)
    : entry_(entry),
      child_(NULL),
      operation_(kNoOperation),
      init_(false),
      child_map_(child_data_.bitmap, kNumSparseBits, kNumSparseBits / 32),
      ALLOW_THIS_IN_INITIALIZER_LIST(
          child_callback_(this, &SparseControl::OnChildIOCompleted)),
      user_callback_(NULL) {
}
196
// Flushes state on the way out: closing the child persists its allocation
// bitmap (CloseChild), and a fully initialized parent saves its children map
// (WriteSparseData).
SparseControl::~SparseControl() {
  if (child_)
    CloseChild();
  if (init_)
    WriteSparseData();
}
203
204int SparseControl::Init() {
205  DCHECK(!init_);
206
207  // We should not have sparse data for the exposed entry.
208  if (entry_->GetDataSize(kSparseData))
209    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
210
211  // Now see if there is something where we store our data.
212  int rv = net::OK;
213  int data_len = entry_->GetDataSize(kSparseIndex);
214  if (!data_len) {
215    rv = CreateSparseEntry();
216  } else {
217    rv = OpenSparseEntry(data_len);
218  }
219
220  if (rv == net::OK)
221    init_ = true;
222  return rv;
223}
224
225bool SparseControl::CouldBeSparse() const {
226  DCHECK(!init_);
227
228  if (entry_->GetDataSize(kSparseData))
229    return false;
230
231  // We don't verify the data, just see if it could be there.
232  return (entry_->GetDataSize(kSparseIndex) != 0);
233}
234
// Starts an actual sparse read/write/get-range operation. Returns the number
// of bytes processed when everything completes synchronously, a net error
// code on failure, or net::ERR_IO_PENDING when |callback| will be run later
// with the final result.
int SparseControl::StartIO(SparseOperation op, int64 offset, net::IOBuffer* buf,
                           int buf_len, net::CompletionCallback* callback) {
  DCHECK(init_);
  // We don't support simultaneous IO for sparse data.
  if (operation_ != kNoOperation)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  // We only support up to 64 GB. The second clause catches int64 overflow of
  // offset + buf_len.
  if (offset + buf_len >= 0x1000000000LL || offset + buf_len < 0)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  DCHECK(!user_buf_);
  DCHECK(!user_callback_);

  // Reads and writes need a buffer; treat its absence as a zero-byte no-op.
  if (!buf && (op == kReadOperation || op == kWriteOperation))
    return 0;

  // Copy the operation parameters.
  operation_ = op;
  offset_ = offset;
  user_buf_ = buf ? new net::DrainableIOBuffer(buf, buf_len) : NULL;
  buf_len_ = buf_len;
  user_callback_ = callback;

  result_ = 0;
  pending_ = false;
  finished_ = false;
  abort_ = false;

  if (entry_->net_log().IsLoggingAllEvents()) {
    entry_->net_log().BeginEvent(
        GetSparseEventType(operation_),
        make_scoped_refptr(new SparseOperationParameters(offset_, buf_len_)));
  }
  // Walk over every child covered by the requested range.
  DoChildrenIO();

  if (!pending_) {
    // Everything was done synchronously.
    operation_ = kNoOperation;
    user_buf_ = NULL;
    user_callback_ = NULL;
    return result_;
  }

  return net::ERR_IO_PENDING;
}
284
// Returns how many contiguous bytes are stored starting at or after |offset|,
// writing the position where that data begins to |*start|. Implemented as a
// kGetRangeOperation driven through the regular StartIO() machinery.
int SparseControl::GetAvailableRange(int64 offset, int len, int64* start) {
  DCHECK(init_);
  // We don't support simultaneous IO for sparse data.
  if (operation_ != kNoOperation)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  DCHECK(start);

  range_found_ = false;
  int result = StartIO(kGetRangeOperation, offset, NULL, len, NULL);
  if (range_found_) {
    // DoGetAvailableRange() advanced offset_ to the start of the stored data.
    *start = offset_;
    return result;
  }

  // This is a failure. We want to return a valid start value in any case.
  *start = offset;
  return result < 0 ? result : 0;  // Don't mask error codes to the caller.
}
304
305void SparseControl::CancelIO() {
306  if (operation_ == kNoOperation)
307    return;
308  abort_ = true;
309}
310
// Called after CancelIO(). Completes immediately with net::OK when no abort
// is in flight; otherwise queues |completion_callback| to run once the
// aborted operation finishes (see DoAbortCallbacks).
int SparseControl::ReadyToUse(net::CompletionCallback* completion_callback) {
  if (!abort_)
    return net::OK;

  // We'll grab another reference to keep this object alive because we just have
  // one extra reference due to the pending IO operation itself, but we'll
  // release that one before invoking user_callback_.
  entry_->AddRef();  // Balanced in DoAbortCallbacks.
  abort_callbacks_.push_back(completion_callback);
  return net::ERR_IO_PENDING;
}
322
// Static
// Asynchronously dooms every child of |entry|, a sparse parent that is being
// deleted. The work is delegated to a self-owned ChildrenDeleter driven from
// the message loop.
void SparseControl::DeleteChildren(EntryImpl* entry) {
  DCHECK(entry->GetEntryFlags() & PARENT_ENTRY);
  int data_len = entry->GetDataSize(kSparseIndex);
  // Bail out if the control record is malformed or the entry also has data on
  // the (supposedly unused) sparse stream.
  if (data_len < static_cast<int>(sizeof(SparseData)) ||
      entry->GetDataSize(kSparseData))
    return;

  // The map must be a whole number of 32-bit words, within the allowed size.
  int map_len = data_len - sizeof(SparseHeader);
  if (map_len > kMaxMapSize || map_len % 4)
    return;

  char* buffer;
  Addr address;
  entry->GetData(kSparseIndex, &buffer, &address);
  if (!buffer && !address.is_initialized())
    return;

  entry->net_log().AddEvent(net::NetLog::TYPE_SPARSE_DELETE_CHILDREN, NULL);

  ChildrenDeleter* deleter = new ChildrenDeleter(entry->backend_,
                                                 entry->GetKey());
  // The object will self destruct when finished.
  deleter->AddRef();

  // Use the in-memory copy of the map if we have one; otherwise read it from
  // the block file first.
  if (buffer) {
    MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
        deleter, &ChildrenDeleter::Start, buffer, data_len));
  } else {
    MessageLoop::current()->PostTask(FROM_HERE, NewRunnableMethod(
        deleter, &ChildrenDeleter::ReadData, address, data_len));
  }
}
356
357// We are going to start using this entry to store sparse data, so we have to
358// initialize our control info.
359int SparseControl::CreateSparseEntry() {
360  if (CHILD_ENTRY & entry_->GetEntryFlags())
361    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
362
363  memset(&sparse_header_, 0, sizeof(sparse_header_));
364  sparse_header_.signature = Time::Now().ToInternalValue();
365  sparse_header_.magic = kIndexMagic;
366  sparse_header_.parent_key_len = entry_->GetKey().size();
367  children_map_.Resize(kNumSparseBits, true);
368
369  // Save the header. The bitmap is saved in the destructor.
370  scoped_refptr<net::IOBuffer> buf(
371      new net::WrappedIOBuffer(reinterpret_cast<char*>(&sparse_header_)));
372
373  int rv = entry_->WriteData(kSparseIndex, 0, buf, sizeof(sparse_header_), NULL,
374                             false);
375  if (rv != sizeof(sparse_header_)) {
376    DLOG(ERROR) << "Unable to save sparse_header_";
377    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;
378  }
379
380  entry_->SetEntryFlags(PARENT_ENTRY);
381  return net::OK;
382}
383
// We are opening an entry from disk. Make sure that our control data is there.
int SparseControl::OpenSparseEntry(int data_len) {
  if (data_len < static_cast<int>(sizeof(SparseData)))
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  // A valid parent keeps the exposed sparse stream empty.
  if (entry_->GetDataSize(kSparseData))
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (!(PARENT_ENTRY & entry_->GetEntryFlags()))
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  // Don't go overboard with the bitmap. 8 KB gives us offsets up to 64 GB.
  int map_len = data_len - sizeof(sparse_header_);
  if (map_len > kMaxMapSize || map_len % 4)
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  scoped_refptr<net::IOBuffer> buf(
      new net::WrappedIOBuffer(reinterpret_cast<char*>(&sparse_header_)));

  // Read header.
  int rv = entry_->ReadData(kSparseIndex, 0, buf, sizeof(sparse_header_), NULL);
  if (rv != static_cast<int>(sizeof(sparse_header_)))
    return net::ERR_CACHE_READ_FAILURE;

  // The real validation should be performed by the caller. This is just to
  // double check.
  if (sparse_header_.magic != kIndexMagic ||
      sparse_header_.parent_key_len !=
          static_cast<int>(entry_->GetKey().size()))
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  // Read the actual bitmap.
  buf = new net::IOBuffer(map_len);
  rv = entry_->ReadData(kSparseIndex, sizeof(sparse_header_), buf, map_len,
                        NULL);
  if (rv != map_len)
    return net::ERR_CACHE_READ_FAILURE;

  // Grow the bitmap to the current size and copy the bits.
  // NOTE(review): SetMap is called with a word count elsewhere (see
  // ChildrenDeleter::Start passing num_bits / 32) but with a byte count here;
  // presumably Bitmap clamps the copy to its own array size — confirm.
  children_map_.Resize(map_len * 8, false);
  children_map_.SetMap(reinterpret_cast<uint32*>(buf->data()), map_len);
  return net::OK;
}
427
// Opens (or, via ContinueWithoutChild, creates for writes) the child entry
// covering offset_ and loads its control data into child_data_. Returns true
// when the operation can proceed against the current child.
bool SparseControl::OpenChild() {
  DCHECK_GE(result_, 0);

  std::string key = GenerateChildKey();
  if (child_) {
    // Keep using the same child or open another one?.
    if (key == child_->GetKey())
      return true;
    CloseChild();
  }

  // See if we are tracking this child.
  if (!ChildPresent())
    return ContinueWithoutChild(key);

  child_ = entry_->backend_->OpenEntryImpl(key);
  if (!child_)
    return ContinueWithoutChild(key);

  EntryImpl* child = static_cast<EntryImpl*>(child_);
  // A real child must be flagged as such and hold at least the control data.
  if (!(CHILD_ENTRY & child->GetEntryFlags()) ||
      child->GetDataSize(kSparseIndex) <
          static_cast<int>(sizeof(child_data_)))
    return KillChildAndContinue(key, false);

  scoped_refptr<net::WrappedIOBuffer> buf(
      new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));

  // Read signature.
  int rv = child_->ReadData(kSparseIndex, 0, buf, sizeof(child_data_), NULL);
  if (rv != sizeof(child_data_))
    return KillChildAndContinue(key, true);  // This is a fatal failure.

  // The child must belong to this parent (matching creation signature).
  if (child_data_.header.signature != sparse_header_.signature ||
      child_data_.header.magic != kIndexMagic)
    return KillChildAndContinue(key, false);

  if (child_data_.header.last_block_len < 0 ||
      child_data_.header.last_block_len > kBlockSize) {
    // Make sure these values are always within range.
    child_data_.header.last_block_len = 0;
    child_data_.header.last_block = -1;
  }

  return true;
}
474
475void SparseControl::CloseChild() {
476  scoped_refptr<net::WrappedIOBuffer> buf(
477      new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));
478
479  // Save the allocation bitmap before closing the child entry.
480  int rv = child_->WriteData(kSparseIndex, 0, buf, sizeof(child_data_),
481                             NULL, false);
482  if (rv != sizeof(child_data_))
483    DLOG(ERROR) << "Failed to save child data";
484  child_->Release();
485  child_ = NULL;
486}
487
488std::string SparseControl::GenerateChildKey() {
489  return GenerateChildName(entry_->GetKey(), sparse_header_.signature,
490                           offset_ >> 20);
491}
492
// We are deleting the child because something went wrong.
// Dooms the current child, removes it from the children map and, unless the
// failure is fatal, carries on as if the child never existed. Fatal failures
// abort the whole operation with ERR_CACHE_READ_FAILURE.
bool SparseControl::KillChildAndContinue(const std::string& key, bool fatal) {
  SetChildBit(false);
  child_->DoomImpl();
  child_->Release();
  child_ = NULL;
  if (fatal) {
    result_ = net::ERR_CACHE_READ_FAILURE;
    return false;
  }
  return ContinueWithoutChild(key);
}
505
506// We were not able to open this child; see what we can do.
507bool SparseControl::ContinueWithoutChild(const std::string& key) {
508  if (kReadOperation == operation_)
509    return false;
510  if (kGetRangeOperation == operation_)
511    return true;
512
513  child_ = entry_->backend_->CreateEntryImpl(key);
514  if (!child_) {
515    child_ = NULL;
516    result_ = net::ERR_CACHE_READ_FAILURE;
517    return false;
518  }
519  // Write signature.
520  InitChildData();
521  return true;
522}
523
524bool SparseControl::ChildPresent() {
525  int child_bit = static_cast<int>(offset_ >> 20);
526  if (children_map_.Size() <= child_bit)
527    return false;
528
529  return children_map_.Get(child_bit);
530}
531
532void SparseControl::SetChildBit(bool value) {
533  int child_bit = static_cast<int>(offset_ >> 20);
534
535  // We may have to increase the bitmap of child entries.
536  if (children_map_.Size() <= child_bit)
537    children_map_.Resize(Bitmap::RequiredArraySize(child_bit + 1) * 32, true);
538
539  children_map_.Set(child_bit, value);
540}
541
542void SparseControl::WriteSparseData() {
543  scoped_refptr<net::IOBuffer> buf(new net::WrappedIOBuffer(
544      reinterpret_cast<const char*>(children_map_.GetMap())));
545
546  int len = children_map_.ArraySize() * 4;
547  int rv = entry_->WriteData(kSparseIndex, sizeof(sparse_header_), buf, len,
548                             NULL, false);
549  if (rv != len) {
550    DLOG(ERROR) << "Unable to save sparse map";
551  }
552}
553
// Clips the operation to the portion that falls inside the current child and,
// for reads, shrinks it further so that it only covers data that is actually
// stored. Returns false when there is nothing at all to read here.
bool SparseControl::VerifyRange() {
  DCHECK_GE(result_, 0);

  // Position of this operation within the (1 MB) child.
  child_offset_ = static_cast<int>(offset_) & (kMaxEntrySize - 1);
  child_len_ = std::min(buf_len_, kMaxEntrySize - child_offset_);

  // We can write to (or get info from) anywhere in this child.
  if (operation_ != kReadOperation)
    return true;

  // Check that there are no holes in this range.
  int last_bit = (child_offset_ + child_len_ + 1023) >> 10;
  int start = child_offset_ >> 10;
  if (child_map_.FindNextBit(&start, last_bit, false)) {
    // Something is not here.
    DCHECK_GE(child_data_.header.last_block_len, 0);
    DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize);
    int partial_block_len = PartialBlockLength(start);
    if (start == child_offset_ >> 10) {
      // It looks like we don't have anything.
      if (partial_block_len <= (child_offset_ & (kBlockSize - 1)))
        return false;
    }

    // We have the first part.
    child_len_ = (start << 10) - child_offset_;
    if (partial_block_len) {
      // We may have a few extra bytes.
      child_len_ = std::min(child_len_ + partial_block_len, buf_len_);
    }
    // There is no need to read more after this one.
    buf_len_ = child_len_;
  }
  return true;
}
589
// After a write of |result| bytes, marks the fully covered 1 KB blocks in the
// child's allocation bitmap and remembers a trailing partially-filled block
// (if any) in the child header.
void SparseControl::UpdateRange(int result) {
  if (result <= 0 || operation_ != kWriteOperation)
    return;

  DCHECK_GE(child_data_.header.last_block_len, 0);
  DCHECK_LT(child_data_.header.last_block_len, kMaxEntrySize);

  // Write the bitmap.
  int first_bit = child_offset_ >> 10;
  int block_offset = child_offset_ & (kBlockSize - 1);
  if (block_offset && (child_data_.header.last_block != first_bit ||
                       child_data_.header.last_block_len < block_offset)) {
    // The first block is not completely filled; ignore it.
    first_bit++;
  }

  int last_bit = (child_offset_ + result) >> 10;
  block_offset = (child_offset_ + result) & (kBlockSize - 1);

  // This condition will hit with the following criteria:
  // 1. The first byte doesn't follow the last write.
  // 2. The first byte is in the middle of a block.
  // 3. The first byte and the last byte are in the same block.
  if (first_bit > last_bit)
    return;

  if (block_offset && !child_map_.Get(last_bit)) {
    // The last block is not completely filled; save it for later.
    child_data_.header.last_block = last_bit;
    child_data_.header.last_block_len = block_offset;
  } else {
    child_data_.header.last_block = -1;
  }

  child_map_.SetRange(first_bit, last_bit, true);
}
626
627int SparseControl::PartialBlockLength(int block_index) const {
628  if (block_index == child_data_.header.last_block)
629    return child_data_.header.last_block_len;
630
631  // This may be the last stored index.
632  int entry_len = child_->GetDataSize(kSparseData);
633  if (block_index == entry_len >> 10)
634    return entry_len & (kBlockSize - 1);
635
636  // This is really empty.
637  return 0;
638}
639
640void SparseControl::InitChildData() {
641  // We know the real type of child_.
642  EntryImpl* child = static_cast<EntryImpl*>(child_);
643  child->SetEntryFlags(CHILD_ENTRY);
644
645  memset(&child_data_, 0, sizeof(child_data_));
646  child_data_.header = sparse_header_;
647
648  scoped_refptr<net::WrappedIOBuffer> buf(
649      new net::WrappedIOBuffer(reinterpret_cast<char*>(&child_data_)));
650
651  int rv = child_->WriteData(kSparseIndex, 0, buf, sizeof(child_data_),
652                             NULL, false);
653  if (rv != sizeof(child_data_))
654    DLOG(ERROR) << "Failed to save child data";
655  SetChildBit(true);
656}
657
// Drives the main loop: performs child IO chunks until the operation is
// complete, fails, or goes asynchronous, then emits the closing NetLog events
// and (for async completions) runs the user callback.
void SparseControl::DoChildrenIO() {
  while (DoChildIO()) continue;

  // Range operations are finished synchronously, often without setting
  // |finished_| to true.
  if (kGetRangeOperation == operation_ &&
      entry_->net_log().IsLoggingAllEvents()) {
    entry_->net_log().EndEvent(
        net::NetLog::TYPE_SPARSE_GET_RANGE,
        make_scoped_refptr(
            new GetAvailableRangeResultParameters(offset_, result_)));
  }
  if (finished_) {
    if (kGetRangeOperation != operation_ &&
        entry_->net_log().IsLoggingAllEvents()) {
      entry_->net_log().EndEvent(GetSparseEventType(operation_), NULL);
    }
    // |pending_| means at least one chunk went async, so the caller got
    // ERR_IO_PENDING and is waiting for the callback.
    if (pending_)
      DoUserCallback();
  }
}
679
// Performs one chunk of IO against the current child. Returns true when the
// caller's loop should continue with the next chunk; false when the operation
// is finished, failed, or went asynchronous.
bool SparseControl::DoChildIO() {
  finished_ = true;
  if (!buf_len_ || result_ < 0)
    return false;

  if (!OpenChild())
    return false;

  if (!VerifyRange())
    return false;

  // We have more work to do. Let's not trigger a callback to the caller.
  finished_ = false;
  // Only hook up our completion callback if the user supplied one; otherwise
  // the child operation must complete synchronously.
  net::CompletionCallback* callback = user_callback_ ? &child_callback_ : NULL;

  int rv = 0;
  switch (operation_) {
    case kReadOperation:
      if (entry_->net_log().IsLoggingAllEvents()) {
        entry_->net_log().BeginEvent(
            net::NetLog::TYPE_SPARSE_READ_CHILD_DATA,
            make_scoped_refptr(new SparseReadWriteParameters(
                child_->net_log().source(),
                child_len_)));
      }
      rv = child_->ReadDataImpl(kSparseData, child_offset_, user_buf_,
                                child_len_, callback);
      break;
    case kWriteOperation:
      if (entry_->net_log().IsLoggingAllEvents()) {
        entry_->net_log().BeginEvent(
            net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA,
            make_scoped_refptr(new SparseReadWriteParameters(
                child_->net_log().source(),
                child_len_)));
      }
      rv = child_->WriteDataImpl(kSparseData, child_offset_, user_buf_,
                                 child_len_, callback, false);
      break;
    case kGetRangeOperation:
      rv = DoGetAvailableRange();
      break;
    default:
      NOTREACHED();
  }

  if (rv == net::ERR_IO_PENDING) {
    if (!pending_) {
      pending_ = true;
      // The child will protect himself against closing the entry while IO is in
      // progress. However, this entry can still be closed, and that would not
      // be a good thing for us, so we increase the refcount until we're
      // finished doing sparse stuff.
      entry_->AddRef();  // Balanced in DoUserCallback.
    }
    return false;
  }
  // A zero-byte result means nothing more can be done for this operation.
  if (!rv)
    return false;

  DoChildIOCompleted(rv);
  return true;
}
743
// Scans the current child's allocation bitmap for stored data within
// [child_offset_, child_offset_ + child_len_). Returns the number of bytes to
// skip to keep scanning the next child, or 0 to stop the loop once a range
// has been found (result_ and offset_ then describe it).
int SparseControl::DoGetAvailableRange() {
  if (!child_)
    return child_len_;  // Move on to the next child.

  // Check that there are no holes in this range.
  int last_bit = (child_offset_ + child_len_ + 1023) >> 10;
  int start = child_offset_ >> 10;
  int partial_start_bytes = PartialBlockLength(start);
  int found = start;
  int bits_found = child_map_.FindBits(&found, last_bit, true);

  // We don't care if there is a partial block in the middle of the range.
  int block_offset = child_offset_ & (kBlockSize - 1);
  if (!bits_found && partial_start_bytes <= block_offset)
    return child_len_;

  // We are done. Just break the loop and reset result_ to our real result.
  range_found_ = true;

  // found now points to the first 1. Lets see if we have zeros before it.
  int empty_start = std::max((found << 10) - child_offset_, 0);

  int bytes_found = bits_found << 10;
  bytes_found += PartialBlockLength(found + bits_found);

  // Don't count the portion of the first block that precedes child_offset_.
  if (start == found)
    bytes_found -= block_offset;

  // If the user is searching past the end of this child, bits_found is the
  // right result; otherwise, we have some empty space at the start of this
  // query that we have to subtract from the range that we searched.
  result_ = std::min(bytes_found, child_len_ - empty_start);

  if (!bits_found) {
    // Only a partially filled first block: the data is whatever the partial
    // block holds past the requested offset.
    result_ = std::min(partial_start_bytes - block_offset, child_len_);
    empty_start = 0;
  }

  // Only update offset_ when this query found zeros at the start.
  if (empty_start)
    offset_ += empty_start;

  // This will actually break the loop.
  buf_len_ = 0;
  return 0;
}
790
// Bookkeeping shared by the synchronous and asynchronous completion paths:
// accounts for |result| bytes of child IO and advances the operation state.
void SparseControl::DoChildIOCompleted(int result) {
  LogChildOperationEnd(entry_->net_log(), operation_, result);
  if (result < 0) {
    // We fail the whole operation if we encounter an error.
    result_ = result;
    return;
  }

  UpdateRange(result);

  result_ += result;
  offset_ += result;
  buf_len_ -= result;

  // We'll be reusing the user provided buffer for the next chunk.
  if (buf_len_ && user_buf_)
    user_buf_->DidConsume(result);
}
809
// Callback for asynchronous child IO. Performs the completion bookkeeping and
// either winds the operation down (when CancelIO() was called) or resumes the
// children loop.
void SparseControl::OnChildIOCompleted(int result) {
  DCHECK_NE(net::ERR_IO_PENDING, result);
  DoChildIOCompleted(result);

  if (abort_) {
    // We'll return the current result of the operation, which may be less than
    // the bytes to read or write, but the user cancelled the operation.
    abort_ = false;
    if (entry_->net_log().IsLoggingAllEvents()) {
      entry_->net_log().AddEvent(net::NetLog::TYPE_CANCELLED, NULL);
      entry_->net_log().EndEvent(GetSparseEventType(operation_), NULL);
    }
    DoUserCallback();
    return DoAbortCallbacks();
  }

  // We are running a callback from the message loop. It's time to restart what
  // we were doing before.
  DoChildrenIO();
}
830
// Reports the final result to the user callback, first clearing the operation
// state and dropping the extra entry reference taken in DoChildIO(). The
// callback runs last because releasing the entry may destroy this object.
void SparseControl::DoUserCallback() {
  DCHECK(user_callback_);
  net::CompletionCallback* c = user_callback_;
  user_callback_ = NULL;
  user_buf_ = NULL;
  pending_ = false;
  operation_ = kNoOperation;
  entry_->Release();  // Don't touch object after this line.
  c->Run(result_);
}
841
// Runs every callback queued by ReadyToUse(), releasing one entry reference
// per callback (taken in ReadyToUse).
void SparseControl::DoAbortCallbacks() {
  for (size_t i = 0; i < abort_callbacks_.size(); i++) {
    // Releasing all references to entry_ may result in the destruction of this
    // object so we should not be touching it after the last Release().
    net::CompletionCallback* c = abort_callbacks_[i];
    if (i == abort_callbacks_.size() - 1)
      abort_callbacks_.clear();

    entry_->Release();  // Don't touch object after this line.
    c->Run(net::OK);
  }
}
854
855}  // namespace disk_cache
856