mem_entry_impl.cc revision c7f5f8508d98d5952d42ed7648c2a8f30a4da156
1// Copyright (c) 2006-2009 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "net/disk_cache/mem_entry_impl.h"
6
7#include "base/logging.h"
8#include "net/base/io_buffer.h"
9#include "net/base/net_errors.h"
10#include "net/disk_cache/mem_backend_impl.h"
11
12using base::Time;
13
namespace {

// Index of the stream used to store sparse data inside a child entry.
const int kSparseData = 1;

// Maximum size of a sparse entry is 2 to the power of this number.
const int kMaxSparseEntryBits = 12;

// Sparse entry has maximum size of 4KB.
const int kMaxSparseEntrySize = 1 << kMaxSparseEntryBits;

// Convert global offset to child index.
inline int ToChildIndex(int64 offset) {
  return static_cast<int>(offset >> kMaxSparseEntryBits);
}

// Convert global offset to offset in child entry.
inline int ToChildOffset(int64 offset) {
  return static_cast<int>(offset & (kMaxSparseEntrySize - 1));
}

}  // namespace
35
36namespace disk_cache {
37
38MemEntryImpl::MemEntryImpl(MemBackendImpl* backend) {
39  doomed_ = false;
40  backend_ = backend;
41  ref_count_ = 0;
42  parent_ = NULL;
43  child_id_ = 0;
44  child_first_pos_ = 0;
45  next_ = NULL;
46  prev_ = NULL;
47  for (int i = 0; i < NUM_STREAMS; i++)
48    data_size_[i] = 0;
49}
50
51MemEntryImpl::~MemEntryImpl() {
52  for (int i = 0; i < NUM_STREAMS; i++)
53    backend_->ModifyStorageSize(data_size_[i], 0);
54  backend_->ModifyStorageSize(static_cast<int32>(key_.size()), 0);
55}
56
57void MemEntryImpl::Doom() {
58  if (doomed_)
59    return;
60  if (type() == kParentEntry) {
61    // Perform internal doom from the backend if this is a parent entry.
62    backend_->InternalDoomEntry(this);
63  } else {
64    // Manually detach from the backend and perform internal doom.
65    backend_->RemoveFromRankingList(this);
66    InternalDoom();
67  }
68}
69
70void MemEntryImpl::Close() {
71  // Only a parent entry can be closed.
72  DCHECK(type() == kParentEntry);
73  ref_count_--;
74  DCHECK(ref_count_ >= 0);
75  if (!ref_count_ && doomed_)
76    InternalDoom();
77}
78
// Returns the entry's key. A child entry doesn't have a key, so this method
// should not be called on one.
std::string MemEntryImpl::GetKey() const {
  // A child entry doesn't have key so this method should not be called.
  DCHECK(type() == kParentEntry);
  return key_;
}
84
// Time of the most recent access (read or write); refreshed by UpdateRank().
Time MemEntryImpl::GetLastUsed() const {
  return last_used_;
}
88
// Time of the most recent modification (write or creation).
Time MemEntryImpl::GetLastModified() const {
  return last_modified_;
}
92
93int32 MemEntryImpl::GetDataSize(int index) const {
94  if (index < 0 || index >= NUM_STREAMS)
95    return 0;
96  return data_size_[index];
97}
98
// Reads up to |buf_len| bytes from stream |index| starting at |offset| into
// |buf|. Returns the number of bytes copied, 0 for out-of-range offsets or
// empty reads, or a net error code for invalid arguments. Always completes
// synchronously, so |completion_callback| is never used.
int MemEntryImpl::ReadData(int index, int offset, net::IOBuffer* buf,
    int buf_len, net::CompletionCallback* completion_callback) {
  // Child entries may only be accessed through the sparse-data stream.
  DCHECK(type() == kParentEntry || index == kSparseData);

  if (index < 0 || index >= NUM_STREAMS)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = GetDataSize(index);
  // Reads past the end, negative offsets and zero-length reads all report
  // 0 bytes rather than an error.
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  // Clamp the read so it does not run past the end of the stored data.
  if (offset + buf_len > entry_size)
    buf_len = entry_size - offset;

  // Reads refresh only the last-used time, not the modification time.
  UpdateRank(false);

  memcpy(buf->data() , &(data_[index])[offset], buf_len);
  return buf_len;
}
121
// Writes |buf_len| bytes from |buf| to stream |index| at |offset|, growing
// the stream (and zero-filling any hole) as needed. If |truncate| is true,
// the stream size is cut back to |offset + buf_len| when that is smaller
// than the current size. Returns the number of bytes written or a net error
// code. Always synchronous; |completion_callback| is never used.
int MemEntryImpl::WriteData(int index, int offset, net::IOBuffer* buf,
    int buf_len, net::CompletionCallback* completion_callback, bool truncate) {
  // Child entries may only be written through the sparse-data stream.
  DCHECK(type() == kParentEntry || index == kSparseData);

  if (index < 0 || index >= NUM_STREAMS)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  int max_file_size = backend_->MaxFileSize();

  // Reject writes that would extend past the backend's per-file limit.
  // |offset| and |buf_len| are known to be non-negative at this point.
  if (offset > max_file_size || buf_len > max_file_size ||
      offset + buf_len > max_file_size) {
    return net::ERR_FAILED;
  }

  // Read the size at this point, before PrepareTarget() can grow the buffer.
  int entry_size = GetDataSize(index);

  // Grow the backing buffer if necessary and zero-fill any gap between the
  // current end of data and |offset|.
  PrepareTarget(index, offset, buf_len);

  // Update the stream size and the backend's storage accounting: the stream
  // grows when writing past the old end and shrinks only on |truncate|.
  if (entry_size < offset + buf_len) {
    backend_->ModifyStorageSize(entry_size, offset + buf_len);
    data_size_[index] = offset + buf_len;
  } else if (truncate) {
    if (entry_size > offset + buf_len) {
      backend_->ModifyStorageSize(entry_size, offset + buf_len);
      data_size_[index] = offset + buf_len;
    }
  }

  // Writes refresh both the last-used and last-modified times.
  UpdateRank(true);

  if (!buf_len)
    return 0;

  memcpy(&(data_[index])[offset], buf->data(), buf_len);
  return buf_len;
}
163
// Reads up to |buf_len| bytes of sparse data starting at virtual offset
// |offset|, pulling bytes out of consecutive child entries. Stops early at
// the first gap (missing child or unwritten region within a child). Returns
// the number of bytes read or a net error code; always synchronous.
int MemEntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
                                 net::CompletionCallback* completion_callback) {
  DCHECK(type() == kParentEntry);

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  // We will keep using this buffer and adjust the offset in this buffer.
  scoped_refptr<net::DrainableIOBuffer> io_buf =
      new net::DrainableIOBuffer(buf, buf_len);

  // Iterate until we have read enough.
  while (io_buf->BytesRemaining()) {
    MemEntryImpl* child = OpenChild(offset + io_buf->BytesConsumed(), false);

    // No child present for that offset; this is a gap, so stop here.
    if (!child)
      break;

    // We then need to prepare the child offset and len.
    int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());

    // If we are trying to read from a position where the child entry has no
    // data we should stop. |child_first_pos_| marks the first valid byte in
    // a child that was written starting mid-block.
    if (child_offset < child->child_first_pos_)
      break;
    int ret = child->ReadData(kSparseData, child_offset, io_buf,
                              io_buf->BytesRemaining(), NULL);

    // If we encounter an error in one entry, return immediately.
    if (ret < 0)
      return ret;
    else if (ret == 0)
      break;

    // Increment the counter by number of bytes read in the child entry.
    io_buf->DidConsume(ret);
  }

  // Reads refresh only the last-used time.
  UpdateRank(false);

  return io_buf->BytesConsumed();
}
210
// Writes |buf_len| bytes at virtual offset |offset| into the sparse stream,
// splitting the data across fixed-size child entries and creating children
// on demand. Returns the number of bytes written or a net error code;
// always synchronous, so |completion_callback| is never used.
int MemEntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
    net::CompletionCallback* completion_callback) {
  DCHECK(type() == kParentEntry);

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  scoped_refptr<net::DrainableIOBuffer> io_buf =
      new net::DrainableIOBuffer(buf, buf_len);

  // This loop walks through child entries continuously starting from |offset|
  // and writes blocks of data (of maximum size kMaxSparseEntrySize) into each
  // child entry until all |buf_len| bytes are written. The write operation can
  // start in the middle of an entry.
  while (io_buf->BytesRemaining()) {
    MemEntryImpl* child = OpenChild(offset + io_buf->BytesConsumed(), true);
    int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());

    // Find the right amount to write, this evaluates the remaining bytes to
    // write and remaining capacity of this child entry.
    int write_len = std::min(static_cast<int>(io_buf->BytesRemaining()),
                             kMaxSparseEntrySize - child_offset);

    // Keep a record of the last byte position (exclusive) in the child.
    int data_size = child->GetDataSize(kSparseData);

    // Always writes to the child entry. This operation may overwrite data
    // previously written.
    // TODO(hclam): if there is data in the entry and this write is not
    // continuous we may want to discard this write.
    int ret = child->WriteData(kSparseData, child_offset, io_buf, write_len,
                               NULL, true);
    if (ret < 0)
      return ret;
    else if (ret == 0)
      break;

    // Keep a record of the first byte position in the child if the write was
    // not aligned nor continuous. This is to enable writing to the middle
    // of an entry and still keep track of data off the aligned edge.
    if (data_size != child_offset)
      child->child_first_pos_ = child_offset;

    // Adjust the offset in the IO buffer.
    io_buf->DidConsume(ret);
  }

  // Writes refresh both the last-used and last-modified times.
  UpdateRank(true);

  return io_buf->BytesConsumed();
}
265
// Finds the first contiguous stretch of stored sparse data inside
// [offset, offset + len). On success, |*start| receives the position where
// data begins and the return value is its length in bytes (0 when the range
// contains no data, with |*start| set to |offset|). Returns a net error
// code for bad arguments.
int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
  DCHECK(type() == kParentEntry);
  DCHECK(start);

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (offset < 0 || len < 0 || !start)
    return net::ERR_INVALID_ARGUMENT;

  MemEntryImpl* current_child = NULL;

  // Find the first child and record the number of empty bytes before it.
  int empty = FindNextChild(offset, len, &current_child);
  if (current_child) {
    *start = offset + empty;
    len -= empty;

    // Counts the number of continuous bytes.
    int continuous = 0;

    // This loop scans for continuous bytes.
    while (len && current_child) {
      // Number of bytes available in this child from the current position.
      int data_size = current_child->GetDataSize(kSparseData) -
                      ToChildOffset(*start + continuous);
      if (data_size > len)
        data_size = len;

      // We have found more continuous bytes so increment the count. Also
      // decrement the length we should scan.
      continuous += data_size;
      len -= data_size;

      // If the next child is discontinuous (FindNextChild reports skipped
      // bytes), break the loop.
      if (FindNextChild(*start + continuous, len, &current_child))
        break;
    }
    return continuous;
  }
  *start = offset;
  return 0;
}
309
// Asynchronous-style overload: the in-memory implementation always completes
// synchronously, so |callback| is ignored and the result returned directly.
int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
                                    CompletionCallback* callback) {
  return GetAvailableRange(offset, len, start);
}
314
// Memory-only entries are always ready for sparse I/O; never blocks.
int MemEntryImpl::ReadyForSparseIO(
    net::CompletionCallback* completion_callback) {
  return net::OK;
}
319
320// ------------------------------------------------------------------------
321
322bool MemEntryImpl::CreateEntry(const std::string& key) {
323  key_ = key;
324  last_modified_ = Time::Now();
325  last_used_ = Time::Now();
326  Open();
327  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
328  return true;
329}
330
// Marks the entry as doomed and, once no references remain, destroys it.
// A parent entry dooms all of its children first; a child entry detaches
// itself from its parent. WARNING: may |delete this|.
void MemEntryImpl::InternalDoom() {
  doomed_ = true;
  if (!ref_count_) {
    if (type() == kParentEntry) {
      // If this is a parent entry, we need to doom all the child entries.
      if (children_.get()) {
        // Swap the map into a local so that children erasing themselves via
        // DetachChild() do not mutate the map while we iterate over it.
        EntryMap children;
        children.swap(*children_);
        for (EntryMap::iterator i = children.begin();
             i != children.end(); ++i) {
          // Since a pointer to this object is also saved in the map, avoid
          // dooming it.
          if (i->second != this)
            i->second->Doom();
        }
        DCHECK(children_->size() == 0);
      }
    } else {
      // If this is a child entry, detach it from the parent.
      parent_->DetachChild(child_id_);
    }
    delete this;
  }
}
355
356void MemEntryImpl::Open() {
357  // Only a parent entry can be opened.
358  // TODO(hclam): make sure it's correct to not apply the concept of ref
359  // counting to child entry.
360  DCHECK(type() == kParentEntry);
361  ref_count_++;
362  DCHECK(ref_count_ >= 0);
363  DCHECK(!doomed_);
364}
365
366bool MemEntryImpl::InUse() {
367  if (type() == kParentEntry) {
368    return ref_count_ > 0;
369  } else {
370    // A child entry is always not in use. The consequence is that a child entry
371    // can always be evicted while the associated parent entry is currently in
372    // used (i.e. opened).
373    return false;
374  }
375}
376
377// ------------------------------------------------------------------------
378
// Ensures stream |index| can hold a write of |buf_len| bytes at |offset|:
// grows the backing buffer when needed and zero-fills any hole between the
// current end of data and |offset| so stale memory is never exposed to
// later reads.
void MemEntryImpl::PrepareTarget(int index, int offset, int buf_len) {
  int entry_size = GetDataSize(index);

  if (entry_size >= offset + buf_len)
    return;  // Not growing the stored data.

  if (static_cast<int>(data_[index].size()) < offset + buf_len)
    data_[index].resize(offset + buf_len);

  if (offset <= entry_size)
    return;  // There is no "hole" on the stored data.

  // Cleanup the hole not written by the user. The point is to avoid returning
  // random stuff later on.
  memset(&(data_[index])[entry_size], 0, offset - entry_size);
}
395
396void MemEntryImpl::UpdateRank(bool modified) {
397  Time current = Time::Now();
398  last_used_ = current;
399
400  if (modified)
401    last_modified_ = current;
402
403  if (!doomed_)
404    backend_->UpdateRank(this);
405}
406
// Lazily switches this parent entry into sparse mode. Returns false if the
// sparse stream already contains data written through the regular API. On
// first use, allocates the child map and registers this object itself as
// the child covering block 0.
bool MemEntryImpl::InitSparseInfo() {
  DCHECK(type() == kParentEntry);

  if (!children_.get()) {
    // If we already have some data in sparse stream but we are being
    // initialized as a sparse entry, we should fail.
    if (GetDataSize(kSparseData))
      return false;
    children_.reset(new EntryMap());

    // The parent entry stores data for the first block, so save this object to
    // index 0.
    (*children_)[0] = this;
  }
  return true;
}
423
424bool MemEntryImpl::InitChildEntry(MemEntryImpl* parent, int child_id) {
425  DCHECK(!parent_);
426  DCHECK(!child_id_);
427  parent_ = parent;
428  child_id_ = child_id;
429  last_modified_ = Time::Now();
430  last_used_ = Time::Now();
431  // Insert this to the backend's ranking list.
432  backend_->InsertIntoRankingList(this);
433  return true;
434}
435
436MemEntryImpl* MemEntryImpl::OpenChild(int64 offset, bool create) {
437  DCHECK(type() == kParentEntry);
438  int index = ToChildIndex(offset);
439  EntryMap::iterator i = children_->find(index);
440  if (i != children_->end()) {
441    return i->second;
442  } else if (create) {
443    MemEntryImpl* child = new MemEntryImpl(backend_);
444    child->InitChildEntry(this, index);
445    (*children_)[index] = child;
446    return child;
447  }
448  return NULL;
449}
450
// Scans forward from global |offset| for at most |len| bytes, looking for
// the first child that actually holds data at the scanned position. On
// return, |*child| is that child (or NULL if none within |len|) and the
// return value is the number of empty bytes skipped before its data starts.
int MemEntryImpl::FindNextChild(int64 offset, int len, MemEntryImpl** child) {
  DCHECK(child);
  *child = NULL;
  int scanned_len = 0;

  // This loop tries to find the first existing child.
  while (scanned_len < len) {
    // This points to the current offset in the child.
    int current_child_offset = ToChildOffset(offset + scanned_len);
    MemEntryImpl* current_child = OpenChild(offset + scanned_len, false);
    if (current_child) {
      int child_first_pos = current_child->child_first_pos_;

      // This points to the first byte that we should be reading from, we need
      // to take care of the filled region and the current offset in the child.
      int first_pos =  std::max(current_child_offset, child_first_pos);

      // If the first byte position we should read from doesn't exceed the
      // filled region, we have found the first child.
      if (first_pos < current_child->GetDataSize(kSparseData)) {
         *child = current_child;

         // We need to advance the scanned length.
         scanned_len += first_pos - current_child_offset;
         break;
      }
    }
    // Nothing usable here; skip to the start of the next block.
    scanned_len += kMaxSparseEntrySize - current_child_offset;
  }
  return scanned_len;
}
482
// Removes the child with |child_id| from this parent's child map. Called by
// a child entry on itself being doomed (see InternalDoom()).
void MemEntryImpl::DetachChild(int child_id) {
  children_->erase(child_id);
}
486
487}  // namespace disk_cache
488