1// Copyright (c) 2006-2010 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "net/disk_cache/mem_entry_impl.h"
6
7#include "base/logging.h"
8#include "base/stringprintf.h"
9#include "net/base/io_buffer.h"
10#include "net/base/net_errors.h"
11#include "net/disk_cache/mem_backend_impl.h"
12#include "net/disk_cache/net_log_parameters.h"
13
14using base::Time;
15
16namespace {
17
18const int kSparseData = 1;
19
20// Maximum size of a sparse entry is 2 to the power of this number.
21const int kMaxSparseEntryBits = 12;
22
23// Sparse entry has maximum size of 4KB.
24const int kMaxSparseEntrySize = 1 << kMaxSparseEntryBits;
25
26// Convert global offset to child index.
27inline int ToChildIndex(int64 offset) {
28  return static_cast<int>(offset >> kMaxSparseEntryBits);
29}
30
31// Convert global offset to offset in child entry.
32inline int ToChildOffset(int64 offset) {
33  return static_cast<int>(offset & (kMaxSparseEntrySize - 1));
34}
35
36// Returns a name for a child entry given the base_name of the parent and the
37// child_id.  This name is only used for logging purposes.
38// If the entry is called entry_name, child entries will be named something
39// like Range_entry_name:YYY where YYY is the number of the particular child.
40std::string GenerateChildName(const std::string& base_name, int child_id) {
41  return base::StringPrintf("Range_%s:%i", base_name.c_str(), child_id);
42}
43
44}  // namespace
45
46namespace disk_cache {
47
48MemEntryImpl::MemEntryImpl(MemBackendImpl* backend) {
49  doomed_ = false;
50  backend_ = backend;
51  ref_count_ = 0;
52  parent_ = NULL;
53  child_id_ = 0;
54  child_first_pos_ = 0;
55  next_ = NULL;
56  prev_ = NULL;
57  for (int i = 0; i < NUM_STREAMS; i++)
58    data_size_[i] = 0;
59}
60
61// ------------------------------------------------------------------------
62
63bool MemEntryImpl::CreateEntry(const std::string& key, net::NetLog* net_log) {
64  net_log_ = net::BoundNetLog::Make(net_log,
65                                    net::NetLog::SOURCE_MEMORY_CACHE_ENTRY);
66  net_log_.BeginEvent(
67      net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL,
68      make_scoped_refptr(new EntryCreationParameters(key, true)));
69  key_ = key;
70  Time current = Time::Now();
71  last_modified_ = current;
72  last_used_ = current;
73  Open();
74  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
75  return true;
76}
77
// Marks this entry as doomed and, if no references remain, dooms any child
// entries and deletes |this|.  If references remain, deletion is deferred to
// the final Close() (see Close()).
void MemEntryImpl::InternalDoom() {
  net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM, NULL);
  doomed_ = true;
  if (!ref_count_) {
    if (type() == kParentEntry) {
      // If this is a parent entry, we need to doom all the child entries.
      if (children_.get()) {
        // Swap the map into a local so that DetachChild() calls triggered by
        // each child's Doom() mutate the (now empty) member map, not the map
        // we are iterating over.
        EntryMap children;
        children.swap(*children_);
        for (EntryMap::iterator i = children.begin();
             i != children.end(); ++i) {
          // Since a pointer to this object is also saved in the map, avoid
          // dooming it.
          if (i->second != this)
            i->second->Doom();
        }
        DCHECK(children_->empty());
      }
    } else {
      // If this is a child entry, detach it from the parent.
      parent_->DetachChild(child_id_);
    }
    delete this;
  }
}
103
104void MemEntryImpl::Open() {
105  // Only a parent entry can be opened.
106  // TODO(hclam): make sure it's correct to not apply the concept of ref
107  // counting to child entry.
108  DCHECK(type() == kParentEntry);
109  ref_count_++;
110  DCHECK(ref_count_ >= 0);
111  DCHECK(!doomed_);
112}
113
114bool MemEntryImpl::InUse() {
115  if (type() == kParentEntry) {
116    return ref_count_ > 0;
117  } else {
118    // A child entry is always not in use. The consequence is that a child entry
119    // can always be evicted while the associated parent entry is currently in
120    // used (i.e. opened).
121    return false;
122  }
123}
124
125// ------------------------------------------------------------------------
126
127void MemEntryImpl::Doom() {
128  if (doomed_)
129    return;
130  if (type() == kParentEntry) {
131    // Perform internal doom from the backend if this is a parent entry.
132    backend_->InternalDoomEntry(this);
133  } else {
134    // Manually detach from the backend and perform internal doom.
135    backend_->RemoveFromRankingList(this);
136    InternalDoom();
137  }
138}
139
140void MemEntryImpl::Close() {
141  // Only a parent entry can be closed.
142  DCHECK(type() == kParentEntry);
143  ref_count_--;
144  DCHECK(ref_count_ >= 0);
145  if (!ref_count_ && doomed_)
146    InternalDoom();
147}
148
// Returns the entry's key.  A child entry doesn't have a key of its own, so
// this method must not be called on one.
std::string MemEntryImpl::GetKey() const {
  DCHECK(type() == kParentEntry);
  return key_;
}
154
// Returns the last time the entry was read or written (see UpdateRank()).
Time MemEntryImpl::GetLastUsed() const {
  return last_used_;
}
158
// Returns the last time the entry was written (see UpdateRank()).
Time MemEntryImpl::GetLastModified() const {
  return last_modified_;
}
162
163int32 MemEntryImpl::GetDataSize(int index) const {
164  if (index < 0 || index >= NUM_STREAMS)
165    return 0;
166  return data_size_[index];
167}
168
169int MemEntryImpl::ReadData(int index, int offset, net::IOBuffer* buf,
170    int buf_len, net::CompletionCallback* completion_callback) {
171  if (net_log_.IsLoggingAllEvents()) {
172    net_log_.BeginEvent(
173        net::NetLog::TYPE_ENTRY_READ_DATA,
174        make_scoped_refptr(
175            new ReadWriteDataParameters(index, offset, buf_len, false)));
176  }
177
178  int result = InternalReadData(index, offset, buf, buf_len);
179
180  if (net_log_.IsLoggingAllEvents()) {
181    net_log_.EndEvent(
182        net::NetLog::TYPE_ENTRY_READ_DATA,
183        make_scoped_refptr(new ReadWriteCompleteParameters(result)));
184  }
185  return result;
186}
187
188int MemEntryImpl::WriteData(int index, int offset, net::IOBuffer* buf,
189    int buf_len, net::CompletionCallback* completion_callback, bool truncate) {
190  if (net_log_.IsLoggingAllEvents()) {
191    net_log_.BeginEvent(
192        net::NetLog::TYPE_ENTRY_WRITE_DATA,
193        make_scoped_refptr(
194            new ReadWriteDataParameters(index, offset, buf_len, truncate)));
195  }
196
197  int result = InternalWriteData(index, offset, buf, buf_len, truncate);
198
199  if (net_log_.IsLoggingAllEvents()) {
200    net_log_.EndEvent(
201        net::NetLog::TYPE_ENTRY_WRITE_DATA,
202        make_scoped_refptr(new ReadWriteCompleteParameters(result)));
203  }
204  return result;
205}
206
207int MemEntryImpl::ReadSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
208                                 net::CompletionCallback* completion_callback) {
209  if (net_log_.IsLoggingAllEvents()) {
210    net_log_.BeginEvent(
211        net::NetLog::TYPE_SPARSE_READ,
212        make_scoped_refptr(
213            new SparseOperationParameters(offset, buf_len)));
214  }
215  int result = InternalReadSparseData(offset, buf, buf_len);
216  if (net_log_.IsLoggingAllEvents())
217    net_log_.EndEvent(net::NetLog::TYPE_SPARSE_READ, NULL);
218  return result;
219}
220
221int MemEntryImpl::WriteSparseData(int64 offset, net::IOBuffer* buf, int buf_len,
222    net::CompletionCallback* completion_callback) {
223  if (net_log_.IsLoggingAllEvents()) {
224    net_log_.BeginEvent(net::NetLog::TYPE_SPARSE_WRITE,
225        make_scoped_refptr(
226            new SparseOperationParameters(offset, buf_len)));
227  }
228  int result = InternalWriteSparseData(offset, buf, buf_len);
229  if (net_log_.IsLoggingAllEvents())
230    net_log_.EndEvent(net::NetLog::TYPE_SPARSE_WRITE, NULL);
231  return result;
232}
233
234int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start,
235                                    CompletionCallback* callback) {
236  if (net_log_.IsLoggingAllEvents()) {
237    net_log_.BeginEvent(
238        net::NetLog::TYPE_SPARSE_GET_RANGE,
239        make_scoped_refptr(
240            new SparseOperationParameters(offset, len)));
241  }
242  int result = GetAvailableRange(offset, len, start);
243  if (net_log_.IsLoggingAllEvents()) {
244    net_log_.EndEvent(
245        net::NetLog::TYPE_SPARSE_GET_RANGE,
246        make_scoped_refptr(
247            new GetAvailableRangeResultParameters(*start, result)));
248  }
249  return result;
250}
251
// Returns true if this entry has been initialized for sparse I/O, i.e. its
// child map has been allocated by InitSparseInfo().
bool MemEntryImpl::CouldBeSparse() const {
  DCHECK_EQ(kParentEntry, type());
  return (children_.get() != NULL);
}
256
// Memory-backed entries are always immediately ready for sparse operations,
// so this unconditionally reports success without using the callback.
int MemEntryImpl::ReadyForSparseIO(
    net::CompletionCallback* completion_callback) {
  return net::OK;
}
261
262// ------------------------------------------------------------------------
263
// Returns to the backend the storage charged for every data stream and for
// the key itself, mirroring the charges made at creation/write time, then
// closes the net-log event opened in CreateEntry()/InitChildEntry().
MemEntryImpl::~MemEntryImpl() {
  for (int i = 0; i < NUM_STREAMS; i++)
    backend_->ModifyStorageSize(data_size_[i], 0);
  backend_->ModifyStorageSize(static_cast<int32>(key_.size()), 0);
  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL, NULL);
}
270
// Synchronously copies up to |buf_len| bytes of stream |index| starting at
// |offset| into |buf|.  Returns the number of bytes copied, 0 for reads at
// or past EOF (or of zero length), or a net error code for bad arguments.
int MemEntryImpl::InternalReadData(int index, int offset, net::IOBuffer* buf,
                                   int buf_len) {
  // Child entries only ever carry sparse payload in stream kSparseData.
  DCHECK(type() == kParentEntry || index == kSparseData);

  if (index < 0 || index >= NUM_STREAMS)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = GetDataSize(index);
  // Reads past the end, before the start, or of zero length yield 0 bytes.
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  // Clamp the request to the bytes actually stored.
  if (offset + buf_len > entry_size)
    buf_len = entry_size - offset;

  // A read counts as a use (not a modification) for eviction ranking.
  UpdateRank(false);

  memcpy(buf->data(), &(data_[index])[offset], buf_len);
  return buf_len;
}
293
294int MemEntryImpl::InternalWriteData(int index, int offset, net::IOBuffer* buf,
295                                    int buf_len, bool truncate) {
296  DCHECK(type() == kParentEntry || index == kSparseData);
297
298  if (index < 0 || index >= NUM_STREAMS)
299    return net::ERR_INVALID_ARGUMENT;
300
301  if (offset < 0 || buf_len < 0)
302    return net::ERR_INVALID_ARGUMENT;
303
304  int max_file_size = backend_->MaxFileSize();
305
306  // offset of buf_len could be negative numbers.
307  if (offset > max_file_size || buf_len > max_file_size ||
308      offset + buf_len > max_file_size) {
309    return net::ERR_FAILED;
310  }
311
312  // Read the size at this point.
313  int entry_size = GetDataSize(index);
314
315  PrepareTarget(index, offset, buf_len);
316
317  if (entry_size < offset + buf_len) {
318    backend_->ModifyStorageSize(entry_size, offset + buf_len);
319    data_size_[index] = offset + buf_len;
320  } else if (truncate) {
321    if (entry_size > offset + buf_len) {
322      backend_->ModifyStorageSize(entry_size, offset + buf_len);
323      data_size_[index] = offset + buf_len;
324    }
325  }
326
327  UpdateRank(true);
328
329  if (!buf_len)
330    return 0;
331
332  memcpy(&(data_[index])[offset], buf->data(), buf_len);
333  return buf_len;
334}
335
// Reads up to |buf_len| bytes of sparse data starting at global |offset| by
// walking consecutive child entries.  Stops at the first gap (missing child,
// unwritten region, or short read) and returns the bytes read so far, or a
// net error code.
int MemEntryImpl::InternalReadSparseData(int64 offset, net::IOBuffer* buf,
                                         int buf_len) {
  DCHECK(type() == kParentEntry);

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  // We will keep using this buffer and adjust the offset in this buffer.
  scoped_refptr<net::DrainableIOBuffer> io_buf(
      new net::DrainableIOBuffer(buf, buf_len));

  // Iterate until we have read enough.
  while (io_buf->BytesRemaining()) {
    MemEntryImpl* child = OpenChild(offset + io_buf->BytesConsumed(), false);

    // No child present for that offset.
    if (!child)
      break;

    // We then need to prepare the child offset and len.
    int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());

    // If we are trying to read from a position that the child entry has no
    // data, we should stop.
    if (child_offset < child->child_first_pos_)
      break;
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.BeginEvent(
          net::NetLog::TYPE_SPARSE_READ_CHILD_DATA,
          make_scoped_refptr(new SparseReadWriteParameters(
              child->net_log().source(),
              io_buf->BytesRemaining())));
    }
    // The DrainableIOBuffer's data pointer already accounts for the bytes
    // consumed so far, so the child reads land at the right position.
    int ret = child->ReadData(kSparseData, child_offset, io_buf,
                              io_buf->BytesRemaining(), NULL);
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.EndEventWithNetErrorCode(
          net::NetLog::TYPE_SPARSE_READ_CHILD_DATA, ret);
    }

    // If we encounter an error in one entry, return immediately.
    if (ret < 0)
      return ret;
    else if (ret == 0)
      break;

    // Increment the counter by number of bytes read in the child entry.
    io_buf->DidConsume(ret);
  }

  UpdateRank(false);

  return io_buf->BytesConsumed();
}
393
// Writes |buf_len| bytes of sparse data starting at global |offset| by
// walking (and creating, as needed) consecutive child entries.  Returns the
// number of bytes written or a net error code.
int MemEntryImpl::InternalWriteSparseData(int64 offset, net::IOBuffer* buf,
                                          int buf_len) {
  DCHECK(type() == kParentEntry);

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  // We will keep using this buffer and adjust the offset in this buffer.
  scoped_refptr<net::DrainableIOBuffer> io_buf(
      new net::DrainableIOBuffer(buf, buf_len));

  // This loop walks through child entries continuously starting from |offset|
  // and writes blocks of data (of maximum size kMaxSparseEntrySize) into each
  // child entry until all |buf_len| bytes are written. The write operation can
  // start in the middle of an entry.
  while (io_buf->BytesRemaining()) {
    MemEntryImpl* child = OpenChild(offset + io_buf->BytesConsumed(), true);
    int child_offset = ToChildOffset(offset + io_buf->BytesConsumed());

    // Find the right amount to write, this evaluates the remaining bytes to
    // write and remaining capacity of this child entry.
    int write_len = std::min(static_cast<int>(io_buf->BytesRemaining()),
                             kMaxSparseEntrySize - child_offset);

    // Keep a record of the last byte position (exclusive) in the child.
    int data_size = child->GetDataSize(kSparseData);

    if (net_log_.IsLoggingAllEvents()) {
      net_log_.BeginEvent(
          net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA,
          make_scoped_refptr(new SparseReadWriteParameters(
              child->net_log().source(),
              write_len)));
    }

    // Always writes to the child entry. This operation may overwrite data
    // previously written.
    // TODO(hclam): if there is data in the entry and this write is not
    // continuous we may want to discard this write.
    int ret = child->WriteData(kSparseData, child_offset, io_buf, write_len,
                               NULL, true);
    if (net_log_.IsLoggingAllEvents()) {
      net_log_.EndEventWithNetErrorCode(
          net::NetLog::TYPE_SPARSE_WRITE_CHILD_DATA, ret);
    }
    if (ret < 0)
      return ret;
    else if (ret == 0)
      break;

    // Keep a record of the first byte position in the child if the write was
    // not aligned nor continuous. This is to enable writing to the middle
    // of an entry and still keep track of data off the aligned edge.
    if (data_size != child_offset)
      child->child_first_pos_ = child_offset;

    // Adjust the offset in the IO buffer.
    io_buf->DidConsume(ret);
  }

  UpdateRank(true);

  return io_buf->BytesConsumed();
}
460
// Finds the first contiguous run of stored sparse bytes inside
// [offset, offset + len).  On success sets |*start| to the first stored byte
// and returns the run's length; returns 0 (with |*start| = offset) when no
// data exists in the range, or a net error code.
int MemEntryImpl::GetAvailableRange(int64 offset, int len, int64* start) {
  DCHECK(type() == kParentEntry);
  DCHECK(start);

  if (!InitSparseInfo())
    return net::ERR_CACHE_OPERATION_NOT_SUPPORTED;

  if (offset < 0 || len < 0 || !start)
    return net::ERR_INVALID_ARGUMENT;

  MemEntryImpl* current_child = NULL;

  // Find the first child and record the number of empty bytes.
  int empty = FindNextChild(offset, len, &current_child);
  if (current_child) {
    *start = offset + empty;
    len -= empty;

    // Counts the number of continuous bytes.
    int continuous = 0;

    // This loop scans for continuous bytes.
    while (len && current_child) {
      // Number of bytes available in this child.
      int data_size = current_child->GetDataSize(kSparseData) -
                      ToChildOffset(*start + continuous);
      if (data_size > len)
        data_size = len;

      // We have found more continuous bytes so increment the count. Also
      // decrement the length we should scan.
      continuous += data_size;
      len -= data_size;

      // If the next child is discontinuous (FindNextChild reports skipped
      // empty bytes), break the loop.
      if (FindNextChild(*start + continuous, len, &current_child))
        break;
    }
    return continuous;
  }
  *start = offset;
  return 0;
}
504
505void MemEntryImpl::PrepareTarget(int index, int offset, int buf_len) {
506  int entry_size = GetDataSize(index);
507
508  if (entry_size >= offset + buf_len)
509    return;  // Not growing the stored data.
510
511  if (static_cast<int>(data_[index].size()) < offset + buf_len)
512    data_[index].resize(offset + buf_len);
513
514  if (offset <= entry_size)
515    return;  // There is no "hole" on the stored data.
516
517  // Cleanup the hole not written by the user. The point is to avoid returning
518  // random stuff later on.
519  memset(&(data_[index])[entry_size], 0, offset - entry_size);
520}
521
522void MemEntryImpl::UpdateRank(bool modified) {
523  Time current = Time::Now();
524  last_used_ = current;
525
526  if (modified)
527    last_modified_ = current;
528
529  if (!doomed_)
530    backend_->UpdateRank(this);
531}
532
533bool MemEntryImpl::InitSparseInfo() {
534  DCHECK(type() == kParentEntry);
535
536  if (!children_.get()) {
537    // If we already have some data in sparse stream but we are being
538    // initialized as a sparse entry, we should fail.
539    if (GetDataSize(kSparseData))
540      return false;
541    children_.reset(new EntryMap());
542
543    // The parent entry stores data for the first block, so save this object to
544    // index 0.
545    (*children_)[0] = this;
546  }
547  return true;
548}
549
550bool MemEntryImpl::InitChildEntry(MemEntryImpl* parent, int child_id,
551                                  net::NetLog* net_log) {
552  DCHECK(!parent_);
553  DCHECK(!child_id_);
554
555  net_log_ = net::BoundNetLog::Make(net_log,
556                                    net::NetLog::SOURCE_MEMORY_CACHE_ENTRY);
557  net_log_.BeginEvent(
558      net::NetLog::TYPE_DISK_CACHE_MEM_ENTRY_IMPL,
559      make_scoped_refptr(new EntryCreationParameters(
560          GenerateChildName(parent->key(), child_id_),
561          true)));
562
563  parent_ = parent;
564  child_id_ = child_id;
565  Time current = Time::Now();
566  last_modified_ = current;
567  last_used_ = current;
568  // Insert this to the backend's ranking list.
569  backend_->InsertIntoRankingList(this);
570  return true;
571}
572
573MemEntryImpl* MemEntryImpl::OpenChild(int64 offset, bool create) {
574  DCHECK(type() == kParentEntry);
575  int index = ToChildIndex(offset);
576  EntryMap::iterator i = children_->find(index);
577  if (i != children_->end()) {
578    return i->second;
579  } else if (create) {
580    MemEntryImpl* child = new MemEntryImpl(backend_);
581    child->InitChildEntry(this, index, net_log_.net_log());
582    (*children_)[index] = child;
583    return child;
584  }
585  return NULL;
586}
587
// Scans forward from global |offset| for up to |len| bytes looking for the
// first child that actually stores data.  On success sets |*child| and
// returns the number of empty bytes skipped before the data; if nothing is
// found, |*child| stays NULL and the return value is >= |len|.
int MemEntryImpl::FindNextChild(int64 offset, int len, MemEntryImpl** child) {
  DCHECK(child);
  *child = NULL;
  int scanned_len = 0;

  // This loop tries to find the first existing child.
  while (scanned_len < len) {
    // This points to the current offset in the child.
    int current_child_offset = ToChildOffset(offset + scanned_len);
    MemEntryImpl* current_child = OpenChild(offset + scanned_len, false);
    if (current_child) {
      int child_first_pos = current_child->child_first_pos_;

      // This points to the first byte that we should be reading from, we need
      // to take care of the filled region and the current offset in the child.
      int first_pos =  std::max(current_child_offset, child_first_pos);

      // If the first byte position we should read from doesn't exceed the
      // filled region, we have found the first child.
      if (first_pos < current_child->GetDataSize(kSparseData)) {
         *child = current_child;

         // We need to advance the scanned length.
         scanned_len += first_pos - current_child_offset;
         break;
      }
    }
    // Nothing stored here: skip to the start of the next child block.
    scanned_len += kMaxSparseEntrySize - current_child_offset;
  }
  return scanned_len;
}
619
// Removes the slot for |child_id| from the child map.  Called by a child
// entry on its parent while the child dooms itself (see InternalDoom()).
void MemEntryImpl::DetachChild(int child_id) {
  children_->erase(child_id);
}
623
624}  // namespace disk_cache
625