// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "net/disk_cache/blockfile/entry_impl_v3.h"

#include "base/hash.h"
#include "base/message_loop/message_loop.h"
#include "base/metrics/histogram.h"
#include "base/strings/string_util.h"
#include "net/base/io_buffer.h"
#include "net/base/net_errors.h"
#include "net/disk_cache/blockfile/backend_impl_v3.h"
#include "net/disk_cache/blockfile/bitmap.h"
#include "net/disk_cache/blockfile/disk_format_v3.h"
#include "net/disk_cache/blockfile/histogram_macros_v3.h"
#include "net/disk_cache/cache_util.h"
#include "net/disk_cache/net_log_parameters.h"
// #include "net/disk_cache/blockfile/sparse_control_v3.h"

// Provide a BackendImpl object to macros from histogram_macros.h.
#define CACHE_UMA_BACKEND_IMPL_OBJ backend_

using base::Time;
using base::TimeDelta;
using base::TimeTicks;

namespace {

const int kMaxBufferSize = 1024 * 1024;  // 1 MB.

}  // namespace

namespace disk_cache {

typedef StorageBlock<EntryRecord> CacheEntryBlockV3;
typedef StorageBlock<ShortEntryRecord> CacheShortEntryBlock;

// This class handles individual memory buffers that store data before it is
// sent to disk. The buffer can start at any offset, but if we try to write
// anywhere in the first 16KB of the file (kMaxBlockSize), we set the offset to
// zero. The buffer grows up to a size determined by the backend, to keep the
// total memory used under control.
class EntryImplV3::UserBuffer {
 public:
  explicit UserBuffer(BackendImplV3* backend)
      : backend_(backend->GetWeakPtr()), offset_(0), grow_allowed_(true) {
    buffer_.reserve(kMaxBlockSize);
  }
  ~UserBuffer() {
    if (backend_)
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
  }

  // Returns true if we can handle writing |len| bytes to |offset|.
  bool PreWrite(int offset, int len);

  // Truncates the buffer to |offset| bytes.
  void Truncate(int offset);

  // Writes |len| bytes from |buf| at the given |offset|.
  void Write(int offset, IOBuffer* buf, int len);

  // Returns true if we can read |len| bytes from |offset|, given that the
  // actual file has |eof| bytes stored. Note that the number of bytes to read
  // may be modified by this method even though it returns false: that means we
  // should do a smaller read from disk.
  bool PreRead(int eof, int offset, int* len);

  // Reads |len| bytes at |offset| into |buf|.
  int Read(int offset, IOBuffer* buf, int len);

  // Prepares this buffer for reuse.
  void Reset();

  char* Data() { return buffer_.size() ? &buffer_[0] : NULL; }
  int Size() { return static_cast<int>(buffer_.size()); }
  int Start() { return offset_; }
  int End() { return offset_ + Size(); }

 private:
  int capacity() { return static_cast<int>(buffer_.capacity()); }
  bool GrowBuffer(int required, int limit);

  base::WeakPtr<BackendImplV3> backend_;
  int offset_;
  std::vector<char> buffer_;
  bool grow_allowed_;
  DISALLOW_COPY_AND_ASSIGN(UserBuffer);
};

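// How a UserBuffer maps onto the logical stream (an illustration; the only
// fixed constants are kMaxBlockSize == 16 KB and kMaxBufferSize == 1 MB, per
// the comments and definitions above):
//
//   stream:  [0 ............ offset_) [offset_ ....... offset_ + Size())
//             on disk (or absent)      held in buffer_
//
// A buffer created by writes within the first 16KB keeps offset_ == 0; a
// buffer created by a first write past 16KB starts at that write's offset.
// Bytes before offset_ either already live on disk or read back as zeros
// (see Read() below).
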
bool EntryImplV3::UserBuffer::PreWrite(int offset, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);

  // We don't want to write before our current start.
  if (offset < offset_)
    return false;

  // Let's get the common case out of the way.
  if (offset + len <= capacity())
    return true;

  // Writes to the first 16K (kMaxBlockSize) keep the buffer offset_ at 0 and
  // are handled by the general case below. Only a write past 16K into an
  // empty buffer relocates the buffer (see Write()), so it only needs room
  // for |len| bytes.
  if (!Size() && offset > kMaxBlockSize)
    return GrowBuffer(len, kMaxBufferSize);

  int required = offset - offset_ + len;
  return GrowBuffer(required, kMaxBufferSize * 6 / 5);
}

void EntryImplV3::UserBuffer::Truncate(int offset) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer truncate at " << offset << " current " << offset_;

  offset -= offset_;
  if (Size() >= offset)
    buffer_.resize(offset);
}

void EntryImplV3::UserBuffer::Write(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GE(len, 0);
  DCHECK_GE(offset + len, 0);
  DCHECK_GE(offset, offset_);
  DVLOG(3) << "Buffer write at " << offset << " current " << offset_;

  if (!Size() && offset > kMaxBlockSize)
    offset_ = offset;

  offset -= offset_;

  if (offset > Size())
    buffer_.resize(offset);

  if (!len)
    return;

  char* buffer = buf->data();
  int valid_len = Size() - offset;
  int copy_len = std::min(valid_len, len);
  if (copy_len) {
    memcpy(&buffer_[offset], buffer, copy_len);
    len -= copy_len;
    buffer += copy_len;
  }
  if (!len)
    return;

  buffer_.insert(buffer_.end(), buffer, buffer + len);
}

bool EntryImplV3::UserBuffer::PreRead(int eof, int offset, int* len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(*len, 0);

  if (offset < offset_) {
    // We are reading before this buffer.
    if (offset >= eof)
      return true;

    // If the read overlaps with the buffer, change its length so that there is
    // no overlap.
    *len = std::min(*len, offset_ - offset);
    *len = std::min(*len, eof - offset);

    // We should read from disk.
    return false;
  }

  if (!Size())
    return false;

  // See if we can fulfill the first part of the operation.
  return (offset - offset_ < Size());
}

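// A worked example for Read() below (hypothetical values): with
// offset_ == 20000 and eof == 0, PreRead() approves a Read(18000, buf, 4000),
// which then, assuming the buffer holds at least 2000 bytes, fills the first
// 2000 bytes of |buf| with zeros (the file has nothing there) and copies the
// next 2000 bytes from the start of buffer_.
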
int EntryImplV3::UserBuffer::Read(int offset, IOBuffer* buf, int len) {
  DCHECK_GE(offset, 0);
  DCHECK_GT(len, 0);
  DCHECK(Size() || offset < offset_);

  int clean_bytes = 0;
  if (offset < offset_) {
    // We don't have data for this range, so let's fill the first part with
    // zeros.
    clean_bytes = std::min(offset_ - offset, len);
    memset(buf->data(), 0, clean_bytes);
    if (len == clean_bytes)
      return len;
    offset = offset_;
    len -= clean_bytes;
  }

  int start = offset - offset_;
  int available = Size() - start;
  DCHECK_GE(start, 0);
  DCHECK_GE(available, 0);
  len = std::min(len, available);
  memcpy(buf->data() + clean_bytes, &buffer_[start], len);
  return len + clean_bytes;
}

void EntryImplV3::UserBuffer::Reset() {
  if (!grow_allowed_) {
    if (backend_)
      backend_->BufferDeleted(capacity() - kMaxBlockSize);
    grow_allowed_ = true;
    std::vector<char> tmp;
    buffer_.swap(tmp);
    buffer_.reserve(kMaxBlockSize);
  }
  offset_ = 0;
  buffer_.clear();
}

bool EntryImplV3::UserBuffer::GrowBuffer(int required, int limit) {
  DCHECK_GE(required, 0);
  int current_size = capacity();
  if (required <= current_size)
    return true;

  if (required > limit)
    return false;

  if (!backend_)
    return false;

  // Grow by at least 64 KB (kMaxBlockSize * 4) and by at least the current
  // capacity (i.e. double it), without exceeding |limit|.
  int to_add = std::max(required - current_size, kMaxBlockSize * 4);
  to_add = std::max(current_size, to_add);
  required = std::min(current_size + to_add, limit);

  grow_allowed_ = backend_->IsAllocAllowed(current_size, required);
  if (!grow_allowed_)
    return false;

  DVLOG(3) << "Buffer grow to " << required;

  buffer_.reserve(required);
  return true;
}

// ------------------------------------------------------------------------

EntryImplV3::EntryImplV3(BackendImplV3* backend, Addr address, bool read_only)
    : backend_(backend->GetWeakPtr()),
      address_(address),
      doomed_(false),
      read_only_(read_only),
      dirty_(true),
      modified_(false) {
  for (int i = 0; i < kNumStreams; i++) {
    unreported_size_[i] = 0;
  }
}

#if defined(V3_NOT_JUST_YET_READY)

bool EntryImplV3::CreateEntry(Addr node_address, const std::string& key,
                              uint32 hash) {
  Trace("Create entry In");
  EntryStore* entry_store = entry_.Data();
  RankingsNode* node = node_.Data();
  memset(entry_store, 0, sizeof(EntryStore) * entry_.address().num_blocks());
  memset(node, 0, sizeof(RankingsNode));
  if (!node_.LazyInit(backend_->File(node_address), node_address))
    return false;

  entry_store->rankings_node = node_address.value();
  node->contents = entry_.address().value();

  entry_store->hash = hash;
  entry_store->creation_time = Time::Now().ToInternalValue();
  entry_store->key_len = static_cast<int32>(key.size());
  if (entry_store->key_len > kMaxInternalKeyLength) {
    Addr address(0);
    if (!CreateBlock(entry_store->key_len + 1, &address))
      return false;

    entry_store->long_key = address.value();
    File* key_file = GetBackingFile(address, kKeyFileIndex);
    key_ = key;

    size_t offset = 0;
    if (address.is_block_file())
      offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

    if (!key_file || !key_file->Write(key.data(), key.size(), offset)) {
      DeleteData(address, kKeyFileIndex);
      return false;
    }

    if (address.is_separate_file())
      key_file->SetLength(key.size() + 1);
  } else {
    memcpy(entry_store->key, key.data(), key.size());
    entry_store->key[key.size()] = '\0';
  }
  backend_->ModifyStorageSize(0, static_cast<int32>(key.size()));
  CACHE_UMA(COUNTS, "KeySize", 0, static_cast<int32>(key.size()));
  node->dirty = backend_->GetCurrentEntryId();
  Log("Create Entry ");
  return true;
}

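// Key storage recap (see CreateEntry() above): keys up to
// kMaxInternalKeyLength bytes are stored inline in EntryStore::key with a
// trailing '\0'; longer keys are written to a separate block or external
// file (allocated with room for the trailing '\0') whose address is kept in
// EntryStore::long_key, and GetKey() reads them back on demand.
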
uint32 EntryImplV3::GetHash() {
  return entry_.Data()->hash;
}

bool EntryImplV3::IsSameEntry(const std::string& key, uint32 hash) {
  if (entry_.Data()->hash != hash ||
      static_cast<size_t>(entry_.Data()->key_len) != key.size())
    return false;

  return (key.compare(GetKey()) == 0);
}

void EntryImplV3::InternalDoom() {
  net_log_.AddEvent(net::NetLog::TYPE_ENTRY_DOOM);
  DCHECK(node_.HasData());
  if (!node_.Data()->dirty) {
    node_.Data()->dirty = backend_->GetCurrentEntryId();
    node_.Store();
  }
  doomed_ = true;
}

// This only includes checks that relate to the first block of the entry (the
// first 256 bytes), and values that should have been set at entry creation.
// Basically, even if there is something wrong with this entry, we want to see
// if it is possible to load the rankings node and delete them together.
bool EntryImplV3::SanityCheck() {
  if (!entry_.VerifyHash())
    return false;

  EntryStore* stored = entry_.Data();
  if (!stored->rankings_node || stored->key_len <= 0)
    return false;

  if (stored->reuse_count < 0 || stored->refetch_count < 0)
    return false;

  Addr rankings_addr(stored->rankings_node);
  if (!rankings_addr.SanityCheckForRankings())
    return false;

  Addr next_addr(stored->next);
  if (next_addr.is_initialized() && !next_addr.SanityCheckForEntry()) {
    STRESS_NOTREACHED();
    return false;
  }
  STRESS_DCHECK(next_addr.value() != entry_.address().value());

  if (stored->state > ENTRY_DOOMED || stored->state < ENTRY_NORMAL)
    return false;

  Addr key_addr(stored->long_key);
  if ((stored->key_len <= kMaxInternalKeyLength && key_addr.is_initialized()) ||
      (stored->key_len > kMaxInternalKeyLength && !key_addr.is_initialized()))
    return false;

  if (!key_addr.SanityCheck())
    return false;

  if (key_addr.is_initialized() &&
      ((stored->key_len < kMaxBlockSize && key_addr.is_separate_file()) ||
       (stored->key_len >= kMaxBlockSize && key_addr.is_block_file())))
    return false;

  int num_blocks = NumBlocksForEntry(stored->key_len);
  if (entry_.address().num_blocks() != num_blocks)
    return false;

  return true;
}

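// The consistency checks build on each other: SanityCheck() above validates
// only the first block of the entry, enough to locate the rankings node;
// DataSanityCheck() below also validates the data-stream sizes and
// addresses; and FixForDelete() clears whatever DataSanityCheck() would
// reject, so that a broken entry can still be deleted safely.
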
bool EntryImplV3::DataSanityCheck() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  // The key must be NULL terminated.
  if (!key_addr.is_initialized() && stored->key[stored->key_len])
    return false;

  if (stored->hash != base::Hash(GetKey()))
    return false;

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_size < 0)
      return false;
    if (!data_size && data_addr.is_initialized())
      return false;
    if (!data_addr.SanityCheck())
      return false;
    if (!data_size)
      continue;
    if (data_size <= kMaxBlockSize && data_addr.is_separate_file())
      return false;
    if (data_size > kMaxBlockSize && data_addr.is_block_file())
      return false;
  }
  return true;
}

void EntryImplV3::FixForDelete() {
  EntryStore* stored = entry_.Data();
  Addr key_addr(stored->long_key);

  if (!key_addr.is_initialized())
    stored->key[stored->key_len] = '\0';

  for (int i = 0; i < kNumStreams; i++) {
    Addr data_addr(stored->data_addr[i]);
    int data_size = stored->data_size[i];
    if (data_addr.is_initialized()) {
      if ((data_size <= kMaxBlockSize && data_addr.is_separate_file()) ||
          (data_size > kMaxBlockSize && data_addr.is_block_file()) ||
          !data_addr.SanityCheck()) {
        STRESS_NOTREACHED();
        // The address is weird so don't attempt to delete it.
        stored->data_addr[i] = 0;
        // In general, trust the stored size as it should be in sync with the
        // total size tracked by the backend.
      }
    }
    if (data_size < 0)
      stored->data_size[i] = 0;
  }
  entry_.Store();
}

void EntryImplV3::SetTimes(base::Time last_used, base::Time last_modified) {
  node_.Data()->last_used = last_used.ToInternalValue();
  node_.Data()->last_modified = last_modified.ToInternalValue();
  node_.set_modified();
}

void EntryImplV3::BeginLogging(net::NetLog* net_log, bool created) {
  DCHECK(!net_log_.net_log());
  net_log_ = net::BoundNetLog::Make(
      net_log, net::NetLog::SOURCE_DISK_CACHE_ENTRY);
  net_log_.BeginEvent(
      net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL,
      CreateNetLogEntryCreationCallback(this, created));
}

const net::BoundNetLog& EntryImplV3::net_log() const {
  return net_log_;
}

// ------------------------------------------------------------------------

void EntryImplV3::Doom() {
  if (background_queue_)
    background_queue_->DoomEntryImpl(this);
}

void EntryImplV3::DoomImpl() {
  if (doomed_ || !backend_)
    return;

  SetPointerForInvalidEntry(backend_->GetCurrentEntryId());
  backend_->InternalDoomEntry(this);
}

void EntryImplV3::Close() {
  if (background_queue_)
    background_queue_->CloseEntryImpl(this);
}

std::string EntryImplV3::GetKey() const {
  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  int key_len = entry->Data()->key_len;
  if (key_len <= kMaxInternalKeyLength)
    return std::string(entry->Data()->key);

  // We keep a copy of the key so that we can always return it, even if the
  // backend is disabled.
  if (!key_.empty())
    return key_;

  Addr address(entry->Data()->long_key);
  DCHECK(address.is_initialized());
  size_t offset = 0;
  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  COMPILE_ASSERT(kNumStreams == kKeyFileIndex, invalid_key_index);
  File* key_file = const_cast<EntryImplV3*>(this)->GetBackingFile(
      address, kKeyFileIndex);
  if (!key_file)
    return std::string();

  ++key_len;  // We store a trailing \0 on disk that we read back below.
  if (!offset && key_file->GetLength() != static_cast<size_t>(key_len))
    return std::string();

  if (!key_file->Read(WriteInto(&key_, key_len), key_len, offset))
    key_.clear();
  return key_;
}

Time EntryImplV3::GetLastUsed() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_used);
}

Time EntryImplV3::GetLastModified() const {
  CacheRankingsBlock* node = const_cast<CacheRankingsBlock*>(&node_);
  return Time::FromInternalValue(node->Data()->last_modified);
}

int32 EntryImplV3::GetDataSize(int index) const {
  if (index < 0 || index >= kNumStreams)
    return 0;

  CacheEntryBlock* entry = const_cast<CacheEntryBlock*>(&entry_);
  return entry->Data()->data_size[index];
}

int EntryImplV3::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
                          const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadDataImpl(index, offset, buf, buf_len, callback);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadData(this, index, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImplV3::ReadDataImpl(int index, int offset, IOBuffer* buf,
                              int buf_len, const CompletionCallback& callback) {
  if (net_log_.IsLogging()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, false));
  }

  int result = InternalReadData(index, offset, buf, buf_len, callback);

  if (result != net::ERR_IO_PENDING && net_log_.IsLogging()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_READ_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImplV3::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                           const CompletionCallback& callback, bool truncate) {
  if (callback.is_null())
    return WriteDataImpl(index, offset, buf, buf_len, callback, truncate);

  DCHECK(node_.Data()->dirty || read_only_);
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->WriteData(this, index, offset, buf, buf_len, truncate,
                               callback);
  return net::ERR_IO_PENDING;
}

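// Dispatch pattern for the public I/O entry points above and below: a null
// |callback| means the caller is already on the cache thread, so the
// operation runs synchronously through the corresponding *Impl() method;
// otherwise the work is posted to |background_queue_| and the method returns
// net::ERR_IO_PENDING, with the result delivered through |callback|.
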
int EntryImplV3::WriteDataImpl(int index, int offset, IOBuffer* buf,
                               int buf_len, const CompletionCallback& callback,
                               bool truncate) {
  if (net_log_.IsLogging()) {
    net_log_.BeginEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteDataCallback(index, offset, buf_len, truncate));
  }

  int result = InternalWriteData(index, offset, buf, buf_len, callback,
                                 truncate);

  if (result != net::ERR_IO_PENDING && net_log_.IsLogging()) {
    net_log_.EndEvent(
        net::NetLog::TYPE_ENTRY_WRITE_DATA,
        CreateNetLogReadWriteCompleteCallback(result));
  }
  return result;
}

int EntryImplV3::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                                const CompletionCallback& callback) {
  if (callback.is_null())
    return ReadSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImplV3::ReadSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                    const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kReadOperation, offset, buf, buf_len,
                            callback);
  ReportIOTime(kSparseRead, start);
  return result;
}

int EntryImplV3::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                                 const CompletionCallback& callback) {
  if (callback.is_null())
    return WriteSparseDataImpl(offset, buf, buf_len, callback);

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->WriteSparseData(this, offset, buf, buf_len, callback);
  return net::ERR_IO_PENDING;
}

int EntryImplV3::WriteSparseDataImpl(int64 offset, IOBuffer* buf, int buf_len,
                                     const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  TimeTicks start = TimeTicks::Now();
  result = sparse_->StartIO(SparseControl::kWriteOperation, offset, buf,
                            buf_len, callback);
  ReportIOTime(kSparseWrite, start);
  return result;
}

int EntryImplV3::GetAvailableRange(int64 offset, int len, int64* start,
                                   const CompletionCallback& callback) {
  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->GetAvailableRange(this, offset, len, start, callback);
  return net::ERR_IO_PENDING;
}

int EntryImplV3::GetAvailableRangeImpl(int64 offset, int len, int64* start) {
  int result = InitSparseData();
  if (net::OK != result)
    return result;

  return sparse_->GetAvailableRange(offset, len, start);
}

bool EntryImplV3::CouldBeSparse() const {
  if (sparse_.get())
    return true;

  scoped_ptr<SparseControl> sparse;
  sparse.reset(new SparseControl(const_cast<EntryImplV3*>(this)));
  return sparse->CouldBeSparse();
}

void EntryImplV3::CancelSparseIO() {
  if (background_queue_)
    background_queue_->CancelSparseIO(this);
}

void EntryImplV3::CancelSparseIOImpl() {
  if (!sparse_.get())
    return;

  sparse_->CancelIO();
}

int EntryImplV3::ReadyForSparseIO(const CompletionCallback& callback) {
  if (!sparse_.get())
    return net::OK;

  if (!background_queue_)
    return net::ERR_UNEXPECTED;

  background_queue_->ReadyForSparseIO(this, callback);
  return net::ERR_IO_PENDING;
}

int EntryImplV3::ReadyForSparseIOImpl(const CompletionCallback& callback) {
  DCHECK(sparse_.get());
  return sparse_->ReadyToUse(callback);
}

// ------------------------------------------------------------------------

// When an entry is deleted from the cache, we clean up all the data associated
// with it for two reasons: to simplify the reuse of the block (we know that
// any unused block is filled with zeros), and to simplify handling of partial
// reads and writes (we don't have to worry about returning data that belonged
// to a previous cache entry because the range was never fully written).
EntryImplV3::~EntryImplV3() {
  if (!backend_) {
    entry_.clear_modified();
    node_.clear_modified();
    return;
  }
  Log("~EntryImpl in");

  // Save the sparse info to disk. This will generate IO for this entry and
  // maybe for a child entry, so it is important to do it before deleting this
  // entry.
  sparse_.reset();

  // Remove this entry from the list of open entries.
  backend_->OnEntryDestroyBegin(entry_.address());

  if (doomed_) {
    DeleteEntryData(true);
  } else {
#if defined(NET_BUILD_STRESS_CACHE)
    SanityCheck();
#endif
    net_log_.AddEvent(net::NetLog::TYPE_ENTRY_CLOSE);
    bool ret = true;
    for (int index = 0; index < kNumStreams; index++) {
      if (user_buffers_[index].get()) {
        if (!(ret = Flush(index, 0)))
          LOG(ERROR) << "Failed to save user data";
      }
      if (unreported_size_[index]) {
        backend_->ModifyStorageSize(
            entry_.Data()->data_size[index] - unreported_size_[index],
            entry_.Data()->data_size[index]);
      }
    }

    if (!ret) {
      // There was a failure writing the actual data. Mark the entry as dirty.
      int current_id = backend_->GetCurrentEntryId();
      node_.Data()->dirty = current_id == 1 ? -1 : current_id - 1;
      node_.Store();
    } else if (node_.HasData() && !dirty_ && node_.Data()->dirty) {
      node_.Data()->dirty = 0;
      node_.Store();
    }
  }

  Trace("~EntryImpl out 0x%p", reinterpret_cast<void*>(this));
  net_log_.EndEvent(net::NetLog::TYPE_DISK_CACHE_ENTRY_IMPL);
  backend_->OnEntryDestroyEnd();
}

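// InternalReadData() below takes one of two paths: if the stream's user
// buffer can satisfy the request (zero-filling any gap before the buffer
// start), the read completes in memory; otherwise it goes to the backing
// file, offsetting into the block file when needed and wrapping |callback|
// in a SyncCallback for asynchronous completion.
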
int EntryImplV3::InternalReadData(int index, int offset,
                                  IOBuffer* buf, int buf_len,
                                  const CompletionCallback& callback) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Read from " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  int entry_size = entry_.Data()->data_size[index];
  if (offset >= entry_size || offset < 0 || !buf_len)
    return 0;

  if (buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_)
    return net::ERR_UNEXPECTED;

  TimeTicks start = TimeTicks::Now();

  if (offset + buf_len > entry_size)
    buf_len = entry_size - offset;

  UpdateRank(false);

  backend_->OnEvent(Stats::READ_DATA);
  backend_->OnRead(buf_len);

  Addr address(entry_.Data()->data_addr[index]);
  int eof = address.is_initialized() ? entry_size : 0;
  if (user_buffers_[index].get() &&
      user_buffers_[index]->PreRead(eof, offset, &buf_len)) {
    // Complete the operation locally.
    buf_len = user_buffers_[index]->Read(offset, buf, buf_len);
    ReportIOTime(kRead, start);
    return buf_len;
  }

  address.set_value(entry_.Data()->data_addr[index]);
  DCHECK(address.is_initialized());
  if (!address.is_initialized()) {
    DoomImpl();
    return net::ERR_FAILED;
  }

  File* file = GetBackingFile(address, index);
  if (!file) {
    DoomImpl();
    LOG(ERROR) << "No file for " << std::hex << address.value();
    return net::ERR_FILE_NOT_FOUND;
  }

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  }

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_READ_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Read(buf->data(), buf_len, file_offset, io_callback,
                  &completed)) {
    if (io_callback)
      io_callback->Discard();
    DoomImpl();
    return net::ERR_CACHE_READ_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kReadAsync1, start_async);

  ReportIOTime(kRead, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

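// Ownership note for the asynchronous paths above and below (assuming the
// same self-deleting SyncCallback helper used by the v2 entry code): the
// callback object is allocated with new and destroys itself, either when the
// asynchronous operation completes or through Discard() when the operation
// failed or already completed synchronously.
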
int EntryImplV3::InternalWriteData(int index, int offset,
                                   IOBuffer* buf, int buf_len,
                                   const CompletionCallback& callback,
                                   bool truncate) {
  DCHECK(node_.Data()->dirty || read_only_);
  DVLOG(2) << "Write to " << index << " at " << offset << " : " << buf_len;
  if (index < 0 || index >= kNumStreams)
    return net::ERR_INVALID_ARGUMENT;

  if (offset < 0 || buf_len < 0)
    return net::ERR_INVALID_ARGUMENT;

  if (!backend_)
    return net::ERR_UNEXPECTED;

  int max_file_size = backend_->MaxFileSize();

  // offset + buf_len could overflow and become negative, so check the
  // individual values as well as the sum.
  if (offset > max_file_size || buf_len > max_file_size ||
      offset + buf_len > max_file_size) {
    int size = offset + buf_len;
    if (size <= max_file_size)
      size = kint32max;
    backend_->TooMuchStorageRequested(size);
    return net::ERR_FAILED;
  }

  TimeTicks start = TimeTicks::Now();

  // Read the size at this point (it may change inside prepare).
  int entry_size = entry_.Data()->data_size[index];
  bool extending = entry_size < offset + buf_len;
  truncate = truncate && entry_size > offset + buf_len;
  Trace("To PrepareTarget 0x%x", entry_.address().value());
  if (!PrepareTarget(index, offset, buf_len, truncate))
    return net::ERR_FAILED;

  Trace("From PrepareTarget 0x%x", entry_.address().value());
  if (extending || truncate)
    UpdateSize(index, entry_size, offset + buf_len);

  UpdateRank(true);

  backend_->OnEvent(Stats::WRITE_DATA);
  backend_->OnWrite(buf_len);

  if (user_buffers_[index].get()) {
    // Complete the operation locally.
    user_buffers_[index]->Write(offset, buf, buf_len);
    ReportIOTime(kWrite, start);
    return buf_len;
  }

  Addr address(entry_.Data()->data_addr[index]);
  if (offset + buf_len == 0) {
    if (truncate) {
      DCHECK(!address.is_initialized());
    }
    return 0;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return net::ERR_FILE_NOT_FOUND;

  size_t file_offset = offset;
  if (address.is_block_file()) {
    DCHECK_LE(offset + buf_len, kMaxBlockSize);
    file_offset += address.start_block() * address.BlockSize() +
                   kBlockHeaderSize;
  } else if (truncate || (extending && !buf_len)) {
    if (!file->SetLength(offset + buf_len))
      return net::ERR_FAILED;
  }

  if (!buf_len)
    return 0;

  SyncCallback* io_callback = NULL;
  if (!callback.is_null()) {
    io_callback = new SyncCallback(this, buf, callback,
                                   net::NetLog::TYPE_ENTRY_WRITE_DATA);
  }

  TimeTicks start_async = TimeTicks::Now();

  bool completed;
  if (!file->Write(buf->data(), buf_len, file_offset, io_callback,
                   &completed)) {
    if (io_callback)
      io_callback->Discard();
    return net::ERR_CACHE_WRITE_FAILURE;
  }

  if (io_callback && completed)
    io_callback->Discard();

  if (io_callback)
    ReportIOTime(kWriteAsync1, start_async);

  ReportIOTime(kWrite, start);
  return (completed || callback.is_null()) ? buf_len : net::ERR_IO_PENDING;
}

// ------------------------------------------------------------------------

bool EntryImplV3::CreateDataBlock(int index, int size) {
  DCHECK(index >= 0 && index < kNumStreams);

  Addr address(entry_.Data()->data_addr[index]);
  if (!CreateBlock(size, &address))
    return false;

  entry_.Data()->data_addr[index] = address.value();
  entry_.Store();
  return true;
}

bool EntryImplV3::CreateBlock(int size, Addr* address) {
  DCHECK(!address->is_initialized());
  if (!backend_)
    return false;

  FileType file_type = Addr::RequiredFileType(size);
  if (EXTERNAL == file_type) {
    if (size > backend_->MaxFileSize())
      return false;
    if (!backend_->CreateExternalFile(address))
      return false;
  } else {
    int num_blocks = Addr::RequiredBlocks(size, file_type);

    if (!backend_->CreateBlock(file_type, num_blocks, address))
      return false;
  }
  return true;
}

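// Allocation summary: Addr::RequiredFileType() sends small streams to shared
// block files (runs of fixed-size blocks) and anything too large for a block
// file to its own external file, capped at backend_->MaxFileSize(). The
// kMaxBlockSize comparisons in the sanity-check code above rely on the same
// split.
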
// Note that this method may end up modifying a block file so upon return the
// involved block will be free, and could be reused for something else. If
// there is a crash after that point (and maybe before returning to the
// caller), the entry will be left dirty... and at some point it will be
// discarded; it is important that the entry doesn't keep a reference to this
// address, or we'll end up deleting the contents of |address| once again.
void EntryImplV3::DeleteData(Addr address, int index) {
  DCHECK(backend_);
  if (!address.is_initialized())
    return;
  if (address.is_separate_file()) {
    int failure = !DeleteCacheFile(backend_->GetFileName(address));
    CACHE_UMA(COUNTS, "DeleteFailed", 0, failure);
    if (failure) {
      LOG(ERROR) << "Failed to delete " <<
          backend_->GetFileName(address).value() << " from the cache.";
    }
    if (files_[index])
      files_[index] = NULL;  // Releases the object.
  } else {
    backend_->DeleteBlock(address, true);
  }
}

void EntryImplV3::UpdateRank(bool modified) {
  if (!backend_)
    return;

  if (!doomed_) {
    // Everything is handled by the backend.
    backend_->UpdateRank(this, modified);
    return;
  }

  Time current = Time::Now();
  node_.Data()->last_used = current.ToInternalValue();

  if (modified)
    node_.Data()->last_modified = current.ToInternalValue();
}

void EntryImplV3::DeleteEntryData(bool everything) {
  DCHECK(doomed_ || !everything);

  if (GetEntryFlags() & PARENT_ENTRY) {
    // We have some child entries that must go away.
    SparseControl::DeleteChildren(this);
  }

  if (GetDataSize(0))
    CACHE_UMA(COUNTS, "DeleteHeader", 0, GetDataSize(0));
  if (GetDataSize(1))
    CACHE_UMA(COUNTS, "DeleteData", 0, GetDataSize(1));
  for (int index = 0; index < kNumStreams; index++) {
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized()) {
      backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                      unreported_size_[index], 0);
      entry_.Data()->data_addr[index] = 0;
      entry_.Data()->data_size[index] = 0;
      entry_.Store();
      DeleteData(address, index);
    }
  }

  if (!everything)
    return;

  // Remove all traces of this entry.
  backend_->RemoveEntry(this);

  // Note that at this point node_ and entry_ are just two blocks of data, and
  // even if they reference each other, nobody should be referencing them.

  Addr address(entry_.Data()->long_key);
  DeleteData(address, kKeyFileIndex);
  backend_->ModifyStorageSize(entry_.Data()->key_len, 0);

  backend_->DeleteBlock(entry_.address(), true);
  entry_.Discard();

  if (!LeaveRankingsBehind()) {
    backend_->DeleteBlock(node_.address(), true);
    node_.Discard();
  }
}

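// Bookkeeping note: unreported_size_[i] tracks bytes that are counted in
// entry_.Data()->data_size[i] but not yet reported to the backend's global
// storage accounting. ModifyStorageSize(old, new) charges the delta between
// its arguments, so passing data_size - unreported_size_ as |old| settles
// the outstanding amount (see the destructor above and MoveToLocalBuffer()
// below).
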
// We keep a memory buffer for everything that ends up stored on a block file
// (because we don't know yet the final data size), and for some of the data
// that ends up on external files. This function will initialize that memory
// buffer and / or the files needed to store the data.
//
// In general, a buffer may overlap data already stored on disk, and in that
// case, the contents of the buffer are the most accurate. It may also extend
// the file, but we don't want to read from disk just to keep the buffer up to
// date. This means that as soon as there is a chance to get confused about
// what is the most recent version of some part of a file, we'll flush the
// buffer and reuse it for the new data. Keep in mind that the normal use
// pattern is quite simple (write sequentially from the beginning), so we
// optimize for handling that case.
bool EntryImplV3::PrepareTarget(int index, int offset, int buf_len,
                                bool truncate) {
  if (truncate)
    return HandleTruncation(index, offset, buf_len);

  if (!offset && !buf_len)
    return true;

  Addr address(entry_.Data()->data_addr[index]);
  if (address.is_initialized()) {
    if (address.is_block_file() && !MoveToLocalBuffer(index))
      return false;

    if (!user_buffers_[index].get() && offset < kMaxBlockSize) {
      // We are about to create a buffer for the first 16KB, make sure that we
      // preserve existing data.
      if (!CopyToLocalBuffer(index))
        return false;
    }
  }

  if (!user_buffers_[index].get())
    user_buffers_[index].reset(new UserBuffer(backend_.get()));

  return PrepareBuffer(index, offset, buf_len);
}

// We get to this function with some data already stored. If there is a
// truncation that results in data stored internally, we'll explicitly handle
// the case here.
bool EntryImplV3::HandleTruncation(int index, int offset, int buf_len) {
  Addr address(entry_.Data()->data_addr[index]);

  int current_size = entry_.Data()->data_size[index];
  int new_size = offset + buf_len;

  if (!new_size) {
    // This is by far the most common scenario.
    backend_->ModifyStorageSize(current_size - unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
    unreported_size_[index] = 0;
    entry_.Store();
    DeleteData(address, index);

    user_buffers_[index].reset();
    return true;
  }

  // We never postpone truncating a file, if there is one, but we may postpone
  // telling the backend about the size reduction.
  if (user_buffers_[index].get()) {
    DCHECK_GE(current_size, user_buffers_[index]->Start());
    if (!address.is_initialized()) {
      // There is no overlap between the buffer and disk.
      if (new_size > user_buffers_[index]->Start()) {
        // Just truncate our buffer.
        DCHECK_LT(new_size, user_buffers_[index]->End());
        user_buffers_[index]->Truncate(new_size);
        return true;
      }

      // Just discard our buffer.
      user_buffers_[index]->Reset();
      return PrepareBuffer(index, offset, buf_len);
    }

    // There is some overlap or we need to extend the file before the
    // truncation.
    if (offset > user_buffers_[index]->Start())
      user_buffers_[index]->Truncate(new_size);
    UpdateSize(index, current_size, new_size);
    if (!Flush(index, 0))
      return false;
    user_buffers_[index].reset();
  }

  // We have data somewhere, and it is not in a buffer.
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  if (new_size > kMaxBlockSize)
    return true;  // Let the operation go directly to disk.

  return ImportSeparateFile(index, offset + buf_len);
}

bool EntryImplV3::CopyToLocalBuffer(int index) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(!user_buffers_[index].get());
  DCHECK(address.is_initialized());

  int len = std::min(entry_.Data()->data_size[index], kMaxBlockSize);
  user_buffers_[index].reset(new UserBuffer(backend_.get()));

  // A zero-length write at |len| just grows the buffer to |len| bytes, so we
  // can read directly into it below.
  user_buffers_[index]->Write(len, NULL, 0);

  File* file = GetBackingFile(address, index);
  int offset = 0;

  if (address.is_block_file())
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;

  if (!file ||
      !file->Read(user_buffers_[index]->Data(), len, offset, NULL, NULL)) {
    user_buffers_[index].reset();
    return false;
  }
  return true;
}

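// CopyToLocalBuffer() leaves the on-disk data in place, while
// MoveToLocalBuffer() below also releases the disk storage: until the next
// Flush() the buffer holds the only copy, which is why the moved size is
// parked in unreported_size_ and a crash at that point makes the stream look
// zero sized.
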
bool EntryImplV3::MoveToLocalBuffer(int index) {
  if (!CopyToLocalBuffer(index))
    return false;

  Addr address(entry_.Data()->data_addr[index]);
  entry_.Data()->data_addr[index] = 0;
  entry_.Store();
  DeleteData(address, index);

  // If we lose this entry we'll see it as zero sized.
  int len = entry_.Data()->data_size[index];
  backend_->ModifyStorageSize(len - unreported_size_[index], 0);
  unreported_size_[index] = len;
  return true;
}

bool EntryImplV3::ImportSeparateFile(int index, int new_size) {
  if (entry_.Data()->data_size[index] > new_size)
    UpdateSize(index, entry_.Data()->data_size[index], new_size);

  return MoveToLocalBuffer(index);
}

bool EntryImplV3::PrepareBuffer(int index, int offset, int buf_len) {
  DCHECK(user_buffers_[index].get());
  if ((user_buffers_[index]->End() && offset > user_buffers_[index]->End()) ||
      offset > entry_.Data()->data_size[index]) {
    // We are about to extend the buffer or the file (with zeros), so make sure
    // that we are not overwriting anything.
    Addr address(entry_.Data()->data_addr[index]);
    if (address.is_initialized() && address.is_separate_file()) {
      if (!Flush(index, 0))
        return false;
      // There is an actual file already, and we don't want to keep track of
      // its length so we let this operation go straight to disk.
      // The only case when a buffer is allowed to extend the file (as in fill
      // with zeros before the start) is when there is no file yet to extend.
      user_buffers_[index].reset();
      return true;
    }
  }

  if (!user_buffers_[index]->PreWrite(offset, buf_len)) {
    if (!Flush(index, offset + buf_len))
      return false;

    // Let's try again.
    if (offset > user_buffers_[index]->End() ||
        !user_buffers_[index]->PreWrite(offset, buf_len)) {
      // We cannot complete the operation with a buffer.
      DCHECK(!user_buffers_[index]->Size());
      DCHECK(!user_buffers_[index]->Start());
      user_buffers_[index].reset();
    }
  }
  return true;
}

bool EntryImplV3::Flush(int index, int min_len) {
  Addr address(entry_.Data()->data_addr[index]);
  DCHECK(user_buffers_[index].get());
  DCHECK(!address.is_initialized() || address.is_separate_file());
  DVLOG(3) << "Flush";

  int size = std::max(entry_.Data()->data_size[index], min_len);
  if (size && !address.is_initialized() && !CreateDataBlock(index, size))
    return false;

  if (!entry_.Data()->data_size[index]) {
    DCHECK(!user_buffers_[index]->Size());
    return true;
  }

  address.set_value(entry_.Data()->data_addr[index]);

  int len = user_buffers_[index]->Size();
  int offset = user_buffers_[index]->Start();
  if (!len && !offset)
    return true;

  if (address.is_block_file()) {
    DCHECK_EQ(len, entry_.Data()->data_size[index]);
    DCHECK(!offset);
    offset = address.start_block() * address.BlockSize() + kBlockHeaderSize;
  }

  File* file = GetBackingFile(address, index);
  if (!file)
    return false;

  if (!file->Write(user_buffers_[index]->Data(), len, offset, NULL, NULL))
    return false;
  user_buffers_[index]->Reset();

  return true;
}

void EntryImplV3::UpdateSize(int index, int old_size, int new_size) {
  if (entry_.Data()->data_size[index] == new_size)
    return;

  unreported_size_[index] += new_size - old_size;
  entry_.Data()->data_size[index] = new_size;
  entry_.set_modified();
}

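// Sparse support is initialized lazily: the first sparse operation creates a
// SparseControl through InitSparseData() below, and the object lives until
// the entry is destroyed (the destructor resets sparse_ first, flushing the
// sparse metadata before the entry itself goes away).
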
int EntryImplV3::InitSparseData() {
  if (sparse_.get())
    return net::OK;

  // Use a local variable so that sparse_ never goes from 'valid' to NULL.
  scoped_ptr<SparseControl> sparse(new SparseControl(this));
  int result = sparse->Init();
  if (net::OK == result)
    sparse_.swap(sparse);

  return result;
}

void EntryImplV3::SetEntryFlags(uint32 flags) {
  entry_.Data()->flags |= flags;
  entry_.set_modified();
}

uint32 EntryImplV3::GetEntryFlags() {
  return entry_.Data()->flags;
}

void EntryImplV3::GetData(int index, char** buffer, Addr* address) {
  DCHECK(backend_);
  if (user_buffers_[index].get() && user_buffers_[index]->Size() &&
      !user_buffers_[index]->Start()) {
    // The data is already in memory, just copy it and we're done.
    int data_len = entry_.Data()->data_size[index];
    if (data_len <= user_buffers_[index]->Size()) {
      DCHECK(!user_buffers_[index]->Start());
      *buffer = new char[data_len];
      memcpy(*buffer, user_buffers_[index]->Data(), data_len);
      return;
    }
  }

  // Bad news: we'd have to read the info from disk so instead we'll just tell
  // the caller where to read from.
  *buffer = NULL;
  address->set_value(entry_.Data()->data_addr[index]);
  if (address->is_initialized()) {
    // Prevent us from deleting the block from the backing store.
    backend_->ModifyStorageSize(entry_.Data()->data_size[index] -
                                    unreported_size_[index], 0);
    entry_.Data()->data_addr[index] = 0;
    entry_.Data()->data_size[index] = 0;
  }
}

#endif  // defined(V3_NOT_JUST_YET_READY).

void EntryImplV3::ReportIOTime(Operation op, const base::TimeTicks& start) {
  if (!backend_)
    return;

  switch (op) {
    case kRead:
      CACHE_UMA(AGE_MS, "ReadTime", start);
      break;
    case kWrite:
      CACHE_UMA(AGE_MS, "WriteTime", start);
      break;
    case kSparseRead:
      CACHE_UMA(AGE_MS, "SparseReadTime", start);
      break;
    case kSparseWrite:
      CACHE_UMA(AGE_MS, "SparseWriteTime", start);
      break;
    case kAsyncIO:
      CACHE_UMA(AGE_MS, "AsyncIOTime", start);
      break;
    case kReadAsync1:
      CACHE_UMA(AGE_MS, "AsyncReadDispatchTime", start);
      break;
    case kWriteAsync1:
      CACHE_UMA(AGE_MS, "AsyncWriteDispatchTime", start);
      break;
    default:
      NOTREACHED();
  }
}

void EntryImplV3::Log(const char* msg) {
  Trace("%s 0x%p 0x%x", msg, reinterpret_cast<void*>(this), address_);
  Trace("  data: 0x%x 0x%x", entry_->data_addr[0], entry_->data_addr[1]);
  Trace("  doomed: %d", doomed_);
}

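// Until the implementation guarded by V3_NOT_JUST_YET_READY above is
// enabled, the Entry interface is backed by the placeholder stubs below:
// state-changing calls hit NOTIMPLEMENTED() and I/O calls fail with
// net::ERR_FAILED.
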
void EntryImplV3::Doom() {
  NOTIMPLEMENTED();
}

void EntryImplV3::Close() {
  NOTIMPLEMENTED();
}

std::string EntryImplV3::GetKey() const {
  return std::string();
}

Time EntryImplV3::GetLastUsed() const {
  return Time();
}

Time EntryImplV3::GetLastModified() const {
  return Time();
}

int32 EntryImplV3::GetDataSize(int index) const {
  return 0;
}

int EntryImplV3::ReadData(int index, int offset, IOBuffer* buf, int buf_len,
                          const CompletionCallback& callback) {
  return net::ERR_FAILED;
}

int EntryImplV3::WriteData(int index, int offset, IOBuffer* buf, int buf_len,
                           const CompletionCallback& callback, bool truncate) {
  return net::ERR_FAILED;
}

int EntryImplV3::ReadSparseData(int64 offset, IOBuffer* buf, int buf_len,
                                const CompletionCallback& callback) {
  return net::ERR_FAILED;
}

int EntryImplV3::WriteSparseData(int64 offset, IOBuffer* buf, int buf_len,
                                 const CompletionCallback& callback) {
  return net::ERR_FAILED;
}

int EntryImplV3::GetAvailableRange(int64 offset, int len, int64* start,
                                   const CompletionCallback& callback) {
  return net::ERR_FAILED;
}

bool EntryImplV3::CouldBeSparse() const {
  return false;
}

void EntryImplV3::CancelSparseIO() {
  NOTIMPLEMENTED();
}

int EntryImplV3::ReadyForSparseIO(const CompletionCallback& callback) {
  return net::ERR_FAILED;
}

EntryImplV3::~EntryImplV3() {
  NOTIMPLEMENTED();
}

}  // namespace disk_cache