// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/history/expire_history_backend.h"

#include <algorithm>
#include <functional>
#include <limits>

#include "base/bind.h"
#include "base/compiler_specific.h"
#include "base/file_util.h"
#include "base/files/file_enumerator.h"
#include "base/logging.h"
#include "base/message_loop/message_loop.h"
#include "chrome/browser/bookmarks/bookmark_service.h"
#include "chrome/browser/chrome_notification_types.h"
#include "chrome/browser/history/archived_database.h"
#include "chrome/browser/history/history_database.h"
#include "chrome/browser/history/history_notifications.h"
#include "chrome/browser/history/thumbnail_database.h"

using base::Time;
using base::TimeDelta;

namespace history {

namespace {

// The number of days by which the expiration threshold is advanced for items
// that we want to expire early, such as those of AUTO_SUBFRAME transition type.
//
// Early expiration stuff is kept around only for edge cases, as subframes
// don't appear in history and the vast majority of them are ads anyway. The
// main use case for these is if you're on a site with links to different
// frames, you'll be able to see those links as visited, and we'll also be
// able to get redirect information for those URLs.
//
// But since these uses are most valuable when you're actually on the site,
// and because these can take up the bulk of your history, we get a lot of
// space savings by deleting them quickly.
const int kEarlyExpirationAdvanceDays = 3;

// Reads all types of visits starting from beginning of time to the given end
// time. This is the most general reader.
class AllVisitsReader : public ExpiringVisitsReader {
 public:
  // Fills |visits| with up to |max_visits| visits older than |end_time|.
  // Returns true if the cap was hit, i.e. there may be more left to expire.
  virtual bool Read(Time end_time, HistoryDatabase* db,
                    VisitVector* visits, int max_visits) const OVERRIDE {
    DCHECK(db) << "must have a database to operate upon";
    DCHECK(visits) << "visit vector has to exist in order to populate it";

    db->GetAllVisitsInRange(Time(), end_time, max_visits, visits);
    // When we got the maximum number of visits we asked for, we say there could
    // be additional things to expire now.
    return static_cast<int>(visits->size()) == max_visits;
  }
};

// Reads only AUTO_SUBFRAME visits, within a computed range. The range is
// computed as follows:
// * |begin_time| is read from the meta table. This value is updated whenever
//   there are no more additional visits to expire by this reader.
// * |end_time| is advanced forward by a constant (kEarlyExpirationAdvanceDays),
//   but not past the current time.
class AutoSubframeVisitsReader : public ExpiringVisitsReader {
 public:
  virtual bool Read(Time end_time, HistoryDatabase* db,
                    VisitVector* visits, int max_visits) const OVERRIDE {
    DCHECK(db) << "must have a database to operate upon";
    DCHECK(visits) << "visit vector has to exist in order to populate it";

    Time begin_time = db->GetEarlyExpirationThreshold();
    // Advance |end_time| to expire early.
    Time early_end_time = end_time +
        TimeDelta::FromDays(kEarlyExpirationAdvanceDays);

    // We don't want to set the early expiration threshold to a time in the
    // future.
    Time now = Time::Now();
    if (early_end_time > now)
      early_end_time = now;

    db->GetVisitsInRangeForTransition(begin_time, early_end_time,
                                      max_visits,
                                      content::PAGE_TRANSITION_AUTO_SUBFRAME,
                                      visits);
    bool more = static_cast<int>(visits->size()) == max_visits;
    // Only bump the stored threshold once this window is fully drained, so a
    // partial read resumes from the same |begin_time| next iteration.
    if (!more)
      db->UpdateEarlyExpirationThreshold(early_end_time);

    return more;
  }
};

// Returns true if this visit is worth archiving. Otherwise, this visit is not
// worth saving (for example, subframe navigations and redirects) and we can
// just delete it when it gets old.
bool ShouldArchiveVisit(const VisitRow& visit) {
  int no_qualifier = content::PageTransitionStripQualifier(visit.transition);

  // These types of transitions are always "important" and the user will want
  // to see them.
  if (no_qualifier == content::PAGE_TRANSITION_TYPED ||
      no_qualifier == content::PAGE_TRANSITION_AUTO_BOOKMARK ||
      no_qualifier == content::PAGE_TRANSITION_AUTO_TOPLEVEL)
    return true;

  // Only archive these "less important" transitions when they were the final
  // navigation and not part of a redirect chain.
  if ((no_qualifier == content::PAGE_TRANSITION_LINK ||
       no_qualifier == content::PAGE_TRANSITION_FORM_SUBMIT ||
       no_qualifier == content::PAGE_TRANSITION_KEYWORD ||
       no_qualifier == content::PAGE_TRANSITION_GENERATED) &&
      visit.transition & content::PAGE_TRANSITION_CHAIN_END)
    return true;

  // The transition types we ignore are AUTO_SUBFRAME and MANUAL_SUBFRAME.
  return false;
}

// The number of visits we will expire every time we check for old items. This
// prevents us from doing too much work at any given time.
const int kNumExpirePerIteration = 32;

// The number of seconds between checking for items that should be expired when
// we think there might be more items to expire. This timeout is used when the
// last expiration found at least kNumExpirePerIteration and we want to check
// again "soon."
const int kExpirationDelaySec = 30;

// The number of minutes between checking, as with kExpirationDelaySec, but
// when we didn't find enough things to expire last time. If there was no
// history to expire last iteration, it's likely there is nothing next
// iteration, so we want to wait longer before checking to avoid wasting CPU.
const int kExpirationEmptyDelayMin = 5;

// The number of minutes that we wait for before scheduling a task to
// delete old history index files.
const int kIndexExpirationDelayMin = 2;

// The number of the most recent months for which we do not want to delete
// the history index files.
const int kStoreHistoryIndexesForMonths = 3;

}  // namespace

// Scratch state accumulated across the individual delete/archive helpers so
// that favicon cleanup and notifications can be done once at the end.
struct ExpireHistoryBackend::DeleteDependencies {
  // The time range affected. These can be is_null() to be unbounded in one
  // or both directions.
  base::Time begin_time, end_time;

  // ----- Filled by DeleteVisitRelatedInfo or manually if a function doesn't
  //       call that function. -----

  // The unique URL rows affected by this delete.
  std::map<URLID, URLRow> affected_urls;

  // ----- Filled by DeleteOneURL -----

  // The URLs deleted during this operation.
  URLRows deleted_urls;

  // The list of all favicon IDs that the affected URLs had. Favicons will be
  // shared between all URLs with the same favicon, so this is the set of IDs
  // that we will need to check when the delete operations are complete.
  std::set<chrome::FaviconID> affected_favicons;

  // The list of all favicon urls that were actually deleted from the thumbnail
  // db.
  std::set<GURL> expired_favicons;
};

ExpireHistoryBackend::ExpireHistoryBackend(
    BroadcastNotificationDelegate* delegate,
    BookmarkService* bookmark_service)
    : delegate_(delegate),
      main_db_(NULL),
      archived_db_(NULL),
      thumb_db_(NULL),
      weak_factory_(this),
      bookmark_service_(bookmark_service) {
}

ExpireHistoryBackend::~ExpireHistoryBackend() {
}

// Sets the databases this backend operates on. Other methods guard on
// main_db_/thumb_db_/archived_db_ being non-NULL, so NULLs are tolerated.
void ExpireHistoryBackend::SetDatabases(HistoryDatabase* main_db,
                                        ArchivedDatabase* archived_db,
                                        ThumbnailDatabase* thumb_db) {
  main_db_ = main_db;
  archived_db_ = archived_db;
  thumb_db_ = thumb_db;
}

// Convenience wrapper: deletes a single URL via the multi-URL path.
void ExpireHistoryBackend::DeleteURL(const GURL& url) {
  DeleteURLs(std::vector<GURL>(1, url));
}

// Deletes each given URL's row and all of its visits, then cleans up any
// favicons orphaned by the deletions and broadcasts one notification.
void ExpireHistoryBackend::DeleteURLs(const std::vector<GURL>& urls) {
  if (!main_db_)
    return;

  DeleteDependencies dependencies;
  for (std::vector<GURL>::const_iterator url = urls.begin(); url != urls.end();
       ++url) {
    URLRow url_row;
    if (!main_db_->GetRowForURL(*url, &url_row))
      continue;  // Nothing to delete.

    // Collect all the visits and delete them. Note that we don't give
    // up if there are no visits, since the URL could still have an
    // entry that we should delete. TODO(brettw): bug 1171148: We
    // should also delete from the archived DB.
    VisitVector visits;
    main_db_->GetVisitsForURL(url_row.id(), &visits);

    DeleteVisitRelatedInfo(visits, &dependencies);

    // We skip ExpireURLsForVisits (since we are deleting from the
    // URL, and not starting with visits in a given time range). We
    // therefore need to call the deletion and favicon update
    // functions manually.

    BookmarkService* bookmark_service = GetBookmarkService();
    bool is_bookmarked =
        (bookmark_service && bookmark_service->IsBookmarked(*url));

    DeleteOneURL(url_row, is_bookmarked, &dependencies);
  }

  DeleteFaviconsIfPossible(dependencies.affected_favicons,
                           &dependencies.expired_favicons);

  BroadcastDeleteNotifications(&dependencies, DELETION_USER_INITIATED);
}

// Expires all visits in [begin_time, end_time), optionally restricted to the
// given set of URLs.
void ExpireHistoryBackend::ExpireHistoryBetween(
    const std::set<GURL>& restrict_urls, Time begin_time, Time end_time) {
  if (!main_db_)
    return;

  // Find the affected visits and delete them.
  // TODO(brettw): bug 1171164: We should query the archived database here, too.
  VisitVector visits;
  main_db_->GetAllVisitsInRange(begin_time, end_time, 0, &visits);
  if (!restrict_urls.empty()) {
    std::set<URLID> url_ids;
    for (std::set<GURL>::const_iterator url = restrict_urls.begin();
         url != restrict_urls.end(); ++url)
      // NOTE(review): the return value is inserted even when the lookup fails
      // for an unknown URL — presumably the failure sentinel never matches a
      // real visit's url_id, so it filters out harmlessly; confirm.
      url_ids.insert(main_db_->GetRowForURL(*url, NULL));
    // Keep only the visits belonging to the restricted URL set.
    VisitVector all_visits;
    all_visits.swap(visits);
    for (VisitVector::iterator visit = all_visits.begin();
         visit != all_visits.end(); ++visit) {
      if (url_ids.find(visit->url_id) != url_ids.end())
        visits.push_back(*visit);
    }
  }
  ExpireVisits(visits);
}

// Expires exactly the visits that occurred at the given times.
void ExpireHistoryBackend::ExpireHistoryForTimes(
    const std::vector<base::Time>& times) {
  // |times| must be in reverse chronological order and have no
  // duplicates, i.e. each member must be earlier than the one before
  // it.
  DCHECK(
      std::adjacent_find(
          times.begin(), times.end(), std::less_equal<base::Time>()) ==
      times.end());

  if (!main_db_)
    return;

  // Find the affected visits and delete them.
  // TODO(brettw): bug 1171164: We should query the archived database here, too.
  VisitVector visits;
  main_db_->GetVisitsForTimes(times, &visits);
  ExpireVisits(visits);
}

// Shared tail of the user-initiated expiration paths: deletes the visits,
// updates or deletes the affected URL rows, cleans up favicons, notifies.
void ExpireHistoryBackend::ExpireVisits(const VisitVector& visits) {
  if (visits.empty())
    return;

  DeleteDependencies dependencies;
  DeleteVisitRelatedInfo(visits, &dependencies);

  // Delete or update the URLs affected. We want to update the visit counts
  // since this is called by the user who wants to delete their recent history,
  // and we don't want to leave any evidence.
  ExpireURLsForVisits(visits, &dependencies);
  DeleteFaviconsIfPossible(dependencies.affected_favicons,
                           &dependencies.expired_favicons);

  // An is_null begin time means that all history should be deleted.
  BroadcastDeleteNotifications(&dependencies, DELETION_USER_INITIATED);

  // Pick up any bits possibly left over.
  ParanoidExpireHistory();
}

// Archives (or deletes, per ShouldArchiveVisit) everything older than
// |end_time| in one shot, with no per-iteration cap.
void ExpireHistoryBackend::ArchiveHistoryBefore(Time end_time) {
  if (!main_db_)
    return;

  // Archive as much history as possible before the given date.
  ArchiveSomeOldHistory(end_time, GetAllVisitsReader(),
                        std::numeric_limits<int>::max());
  ParanoidExpireHistory();
}

// Loads the work queue with one task per registered reader.
void ExpireHistoryBackend::InitWorkQueue() {
  DCHECK(work_queue_.empty()) << "queue has to be empty prior to init";

  for (size_t i = 0; i < readers_.size(); i++)
    work_queue_.push(readers_[i]);
}

// Lazily creates and returns the singleton AllVisitsReader.
const ExpiringVisitsReader* ExpireHistoryBackend::GetAllVisitsReader() {
  if (!all_visits_reader_)
    all_visits_reader_.reset(new AllVisitsReader());
  return all_visits_reader_.get();
}

// Lazily creates and returns the singleton AutoSubframeVisitsReader.
const ExpiringVisitsReader*
    ExpireHistoryBackend::GetAutoSubframeVisitsReader() {
  if (!auto_subframe_visits_reader_)
    auto_subframe_visits_reader_.reset(new AutoSubframeVisitsReader());
  return auto_subframe_visits_reader_.get();
}

// Kicks off the periodic background archiving loop for items older than
// |expiration_threshold|.
void ExpireHistoryBackend::StartArchivingOldStuff(
    TimeDelta expiration_threshold) {
  expiration_threshold_ = expiration_threshold;

  // Remove all readers, just in case this method was called before.
  readers_.clear();
  // For now, we explicitly add all known readers. If we come up with more
  // reader types (in case we want to expire different types of visits in
  // different ways), we can make it be populated by creator/owner of
  // ExpireHistoryBackend.
  readers_.push_back(GetAllVisitsReader());
  readers_.push_back(GetAutoSubframeVisitsReader());

  // Initialize the queue with all tasks for the first set of iterations.
  InitWorkQueue();
  ScheduleArchive();
}

// Deletes every favicon in |favicon_set| that no longer has any page mapping,
// recording the icon URLs actually removed into |expired_favicons|.
void ExpireHistoryBackend::DeleteFaviconsIfPossible(
    const std::set<chrome::FaviconID>& favicon_set,
    std::set<GURL>* expired_favicons) {
  if (!thumb_db_)
    return;

  for (std::set<chrome::FaviconID>::const_iterator i = favicon_set.begin();
       i != favicon_set.end(); ++i) {
    // Only unreferenced favicons may be deleted; the icon can be shared by
    // other, still-live URLs.
    if (!thumb_db_->HasMappingFor(*i)) {
      GURL icon_url;
      chrome::IconType icon_type;
      if (thumb_db_->GetFaviconHeader(*i,
                                      &icon_url,
                                      &icon_type) &&
          thumb_db_->DeleteFavicon(*i)) {
        expired_favicons->insert(icon_url);
      }
    }
  }
}

// Broadcasts the URLS_DELETED notification (and notifies sync) if any URL
// rows were deleted during this operation.
void ExpireHistoryBackend::BroadcastDeleteNotifications(
    DeleteDependencies* dependencies, DeletionType type) {
  if (!dependencies->deleted_urls.empty()) {
    // Broadcast the URL deleted notification. Note that we also broadcast when
    // we were requested to delete everything even if that was a NOP, since
    // some components care to know when history is deleted (it's up to them to
    // determine if they care whether anything was deleted).
    URLsDeletedDetails* deleted_details = new URLsDeletedDetails;
    deleted_details->all_history = false;
    deleted_details->archived = (type == DELETION_ARCHIVED);
    deleted_details->rows = dependencies->deleted_urls;
    deleted_details->favicon_urls = dependencies->expired_favicons;
    delegate_->NotifySyncURLsDeleted(false,
                                     deleted_details->archived,
                                     &deleted_details->rows);
    // The notification service takes ownership of |deleted_details|.
    delegate_->BroadcastNotifications(
        chrome::NOTIFICATION_HISTORY_URLS_DELETED, deleted_details);
  }
}

// Deletes each visit row and records the owning URL row (once per unique URL)
// into |dependencies->affected_urls| for later cleanup.
void ExpireHistoryBackend::DeleteVisitRelatedInfo(
    const VisitVector& visits,
    DeleteDependencies* dependencies) {
  for (size_t i = 0; i < visits.size(); i++) {
    // Delete the visit itself.
    main_db_->DeleteVisit(visits[i]);

    // Add the URL row to the affected URL list.
    std::map<URLID, URLRow>::const_iterator found =
        dependencies->affected_urls.find(visits[i].url_id);
    if (found == dependencies->affected_urls.end()) {
      URLRow row;
      if (!main_db_->GetURLRow(visits[i].url_id, &row))
        continue;
      dependencies->affected_urls[visits[i].url_id] = row;
    }
  }
}

// Deletes one URL's segment data, and — unless it is bookmarked — its
// thumbnail, icon mappings, and finally the URL row itself. Bookmarked URLs
// keep their row so the bookmark stays functional.
void ExpireHistoryBackend::DeleteOneURL(
    const URLRow& url_row,
    bool is_bookmarked,
    DeleteDependencies* dependencies) {
  main_db_->DeleteSegmentForURL(url_row.id());

  if (!is_bookmarked) {
    dependencies->deleted_urls.push_back(url_row);

    // Delete stuff that references this URL.
    if (thumb_db_) {
      thumb_db_->DeleteThumbnail(url_row.id());

      // Collect shared information.
      std::vector<IconMapping> icon_mappings;
      if (thumb_db_->GetIconMappingsForPageURL(url_row.url(), &icon_mappings)) {
        for (std::vector<IconMapping>::iterator m = icon_mappings.begin();
             m != icon_mappings.end(); ++m) {
          dependencies->affected_favicons.insert(m->icon_id);
        }
        // Delete the mapping entries for the url.
        thumb_db_->DeleteIconMappings(url_row.url());
      }
    }
    // Last, delete the URL entry.
    main_db_->DeleteURLRow(url_row.id());
  }
}

// Copies or refreshes |url_row| in the archived database; returns its ID in
// the archived DB, or 0 on failure (no archived DB).
URLID ExpireHistoryBackend::ArchiveOneURL(const URLRow& url_row) {
  if (!archived_db_)
    return 0;

  // See if this URL is present in the archived database already. Note that
  // we must look up by ID since the URL ID will be different.
  URLRow archived_row;
  if (archived_db_->GetRowForURL(url_row.url(), &archived_row)) {
    // TODO(sky): bug 1168470, need to archive past search terms.
    // TODO(brettw): should we copy the visit counts over? This will mean that
    // the main DB's visit counts are only for the last 3 months rather than
    // accumulative.
    archived_row.set_last_visit(url_row.last_visit());
    archived_db_->UpdateURLRow(archived_row.id(), archived_row);
    return archived_row.id();
  }

  // This row is not in the archived DB, add it.
  return archived_db_->AddURL(url_row);
}

namespace {

// Per-URL tally of how many (visit, typed) counts are being removed.
struct ChangedURL {
  ChangedURL() : visit_count(0), typed_count(0) {}
  int visit_count;
  int typed_count;
};

}  // namespace

// For each URL touched by the deleted |visits|: decrements its visit/typed
// counts, refreshes its last-visit time, and deletes the row entirely when it
// has no remaining visits and is not bookmarked.
void ExpireHistoryBackend::ExpireURLsForVisits(
    const VisitVector& visits,
    DeleteDependencies* dependencies) {
  // First find all unique URLs and the number of visits we're deleting for
  // each one.
  std::map<URLID, ChangedURL> changed_urls;
  for (size_t i = 0; i < visits.size(); i++) {
    ChangedURL& cur = changed_urls[visits[i].url_id];
    // NOTE: This code must stay in sync with HistoryBackend::AddPageVisit().
    // TODO(pkasting): http://b/1148304 We shouldn't be marking so many URLs as
    // typed, which would help eliminate the need for this code (we still would
    // need to handle RELOAD transitions specially, though).
    content::PageTransition transition =
        content::PageTransitionStripQualifier(visits[i].transition);
    if (transition != content::PAGE_TRANSITION_RELOAD)
      cur.visit_count++;
    if ((transition == content::PAGE_TRANSITION_TYPED &&
         !content::PageTransitionIsRedirect(visits[i].transition)) ||
        transition == content::PAGE_TRANSITION_KEYWORD_GENERATED)
      cur.typed_count++;
  }

  // Check each unique URL with deleted visits.
  BookmarkService* bookmark_service = GetBookmarkService();
  for (std::map<URLID, ChangedURL>::const_iterator i = changed_urls.begin();
       i != changed_urls.end(); ++i) {
    // The unique URL rows should already be filled into the dependencies.
    URLRow& url_row = dependencies->affected_urls[i->first];
    if (!url_row.id())
      continue;  // URL row doesn't exist in the database.

    // Check if there are any other visits for this URL and update the time
    // (the time change may not actually be synced to disk below when we're
    // archiving).
    VisitRow last_visit;
    if (main_db_->GetMostRecentVisitForURL(url_row.id(), &last_visit))
      url_row.set_last_visit(last_visit.visit_time);
    else
      url_row.set_last_visit(Time());

    // Don't delete URLs with visits still in the DB, or bookmarked.
    bool is_bookmarked =
        (bookmark_service && bookmark_service->IsBookmarked(url_row.url()));
    if (!is_bookmarked && url_row.last_visit().is_null()) {
      // Not bookmarked and no more visits. Nuke the url.
      DeleteOneURL(url_row, is_bookmarked, dependencies);
    } else {
      // NOTE: The calls to std::max() below are a backstop, but they should
      // never actually be needed unless the database is corrupt (I think).
      url_row.set_visit_count(
          std::max(0, url_row.visit_count() - i->second.visit_count));
      url_row.set_typed_count(
          std::max(0, url_row.typed_count() - i->second.typed_count));

      // Update the db with the new details.
      main_db_->UpdateURLRow(url_row.id(), url_row);
    }
  }
}

// Copies the given visits (and their URL rows) into the archived database,
// remapping each visit's url_id to the archived DB's ID space.
void ExpireHistoryBackend::ArchiveURLsAndVisits(
    const VisitVector& visits,
    DeleteDependencies* dependencies) {
  if (!archived_db_ || !main_db_)
    return;

  // Make sure all unique URL rows are added to the dependency list and the
  // archived database. We will also keep the mapping between the main DB URLID
  // and the archived one.
  std::map<URLID, URLID> main_id_to_archived_id;
  for (size_t i = 0; i < visits.size(); i++) {
    std::map<URLID, URLRow>::const_iterator found =
        dependencies->affected_urls.find(visits[i].url_id);
    if (found == dependencies->affected_urls.end()) {
      // Unique URL encountered, archive it.
      URLRow row;      // Row in the main DB.
      URLID archived_id;  // ID in the archived DB.
      if (!main_db_->GetURLRow(visits[i].url_id, &row) ||
          !(archived_id = ArchiveOneURL(row))) {
        // Failure archiving, skip this one.
        continue;
      }

      // Only add URL to the dependency list once we know we successfully
      // archived it.
      main_id_to_archived_id[row.id()] = archived_id;
      dependencies->affected_urls[row.id()] = row;
    }
  }

  // Retrieve the sources for all the archived visits before archiving.
  // The returned visit_sources vector should contain the source for each visit
  // from visits at the same index.
  VisitSourceMap visit_sources;
  main_db_->GetVisitsSource(visits, &visit_sources);

  // Now archive the visits since we know the URL ID to make them reference.
  // The source visit list should still reference the visits in the main DB, but
  // we will update it to reflect only the visits that were successfully
  // archived.
  for (size_t i = 0; i < visits.size(); i++) {
    // Construct the visit that we will add to the archived database. We do
    // not store referring visits since we delete many of the visits when
    // archiving.
    VisitRow cur_visit(visits[i]);
    cur_visit.url_id = main_id_to_archived_id[cur_visit.url_id];
    cur_visit.referring_visit = 0;
    VisitSourceMap::iterator iter = visit_sources.find(visits[i].visit_id);
    archived_db_->AddVisit(
        &cur_visit,
        iter == visit_sources.end() ? SOURCE_BROWSED : iter->second);
    // Ignore failures, we will delete it from the main DB no matter what.
  }
}

// Posts the next archive iteration: a short delay while work remains, a long
// one after the queue drains (refilling it for the next sweep).
void ExpireHistoryBackend::ScheduleArchive() {
  TimeDelta delay;
  if (work_queue_.empty()) {
    // If work queue is empty, reset the work queue to contain all tasks and
    // schedule next iteration after a longer delay.
    InitWorkQueue();
    delay = TimeDelta::FromMinutes(kExpirationEmptyDelayMin);
  } else {
    delay = TimeDelta::FromSeconds(kExpirationDelaySec);
  }

  base::MessageLoop::current()->PostDelayedTask(
      FROM_HERE,
      base::Bind(&ExpireHistoryBackend::DoArchiveIteration,
                 weak_factory_.GetWeakPtr()),
      delay);
}

// Runs one bounded archive pass for the reader at the front of the queue,
// re-queueing that reader if it reported more work, then reschedules.
void ExpireHistoryBackend::DoArchiveIteration() {
  DCHECK(!work_queue_.empty()) << "queue has to be non-empty";

  const ExpiringVisitsReader* reader = work_queue_.front();
  bool more_to_expire = ArchiveSomeOldHistory(GetCurrentArchiveTime(), reader,
                                              kNumExpirePerIteration);

  work_queue_.pop();
  // If there are more items to expire, add the reader back to the queue, thus
  // creating a new task for future iterations.
  if (more_to_expire)
    work_queue_.push(reader);

  ScheduleArchive();
}

// Reads up to |max_visits| visits older than |end_time| via |reader|, archives
// the "important" ones and deletes the rest, then cleans up URL rows, favicons,
// and notifications. Returns true if the reader may have more work left.
bool ExpireHistoryBackend::ArchiveSomeOldHistory(
    base::Time end_time,
    const ExpiringVisitsReader* reader,
    int max_visits) {
  if (!main_db_)
    return false;

  // Add an extra time unit to given end time, because
  // GetAllVisitsInRange, et al. queries' end value is non-inclusive.
  Time effective_end_time =
      Time::FromInternalValue(end_time.ToInternalValue() + 1);

  VisitVector affected_visits;
  bool more_to_expire = reader->Read(effective_end_time, main_db_,
                                     &affected_visits, max_visits);

  // Some visits we'll delete while others we'll archive.
  VisitVector deleted_visits, archived_visits;
  for (size_t i = 0; i < affected_visits.size(); i++) {
    if (ShouldArchiveVisit(affected_visits[i]))
      archived_visits.push_back(affected_visits[i]);
    else
      deleted_visits.push_back(affected_visits[i]);
  }

  // Do the actual archiving.
  DeleteDependencies archived_dependencies;
  ArchiveURLsAndVisits(archived_visits, &archived_dependencies);
  DeleteVisitRelatedInfo(archived_visits, &archived_dependencies);

  DeleteDependencies deleted_dependencies;
  DeleteVisitRelatedInfo(deleted_visits, &deleted_dependencies);

  // This will remove or archive all the affected URLs. Must do the deleting
  // cleanup before archiving so the delete dependencies structure references
  // only those URLs that were actually deleted instead of having some visits
  // archived and then the rest deleted.
  ExpireURLsForVisits(deleted_visits, &deleted_dependencies);
  ExpireURLsForVisits(archived_visits, &archived_dependencies);

  // Create a union of all affected favicons (we don't store favicons for
  // archived URLs) and delete them.
  std::set<chrome::FaviconID> affected_favicons(
      archived_dependencies.affected_favicons);
  for (std::set<chrome::FaviconID>::const_iterator i =
           deleted_dependencies.affected_favicons.begin();
       i != deleted_dependencies.affected_favicons.end(); ++i) {
    affected_favicons.insert(*i);
  }
  DeleteFaviconsIfPossible(affected_favicons,
                           &deleted_dependencies.expired_favicons);

  // Send notifications for the stuff that was deleted. These won't normally be
  // in history views since they were subframes, but they will be in the visited
  // link system, which needs to be updated now. This function is smart enough
  // to not do anything if nothing was deleted.
  BroadcastDeleteNotifications(&deleted_dependencies, DELETION_ARCHIVED);

  return more_to_expire;
}

void ExpireHistoryBackend::ParanoidExpireHistory() {
  // TODO(brettw): Bug 1067331: write this to clean up any errors.
}

// Returns the (possibly NULL) bookmark service, blocking until it has loaded.
BookmarkService* ExpireHistoryBackend::GetBookmarkService() {
  // We use the bookmark service to determine if a URL is bookmarked. The
  // bookmark service is loaded on a separate thread and may not be done by the
  // time we get here. We therefore block until the bookmarks have finished
  // loading.
  if (bookmark_service_)
    bookmark_service_->BlockTillLoaded();
  return bookmark_service_;
}

}  // namespace history