// syncer_util.cc, revision 5821806d5e7f356e8fa4b058a389a808ea183019
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/engine/syncer_util.h"

#include <algorithm>
#include <set>
#include <string>
#include <vector>

#include "base/location.h"
#include "base/metrics/histogram.h"
#include "sync/engine/conflict_resolver.h"
#include "sync/engine/syncer_proto_util.h"
#include "sync/engine/syncer_types.h"
#include "sync/internal_api/public/base/model_type.h"
#include "sync/protocol/bookmark_specifics.pb.h"
#include "sync/protocol/password_specifics.pb.h"
#include "sync/protocol/sync.pb.h"
#include "sync/syncable/directory.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/mutable_entry.h"
#include "sync/syncable/read_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_proto_util.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/write_transaction.h"
#include "sync/util/cryptographer.h"
#include "sync/util/time.h"

// TODO(vishwath): Remove this include after node positions have
// shifted to completely using Ordinals.
// See http://crbug.com/145412 .
#include "sync/internal_api/public/base/node_ordinal.h"

namespace syncer {

using syncable::BASE_VERSION;
using syncable::CHANGES_VERSION;
using syncable::CREATE_NEW_UPDATE_ITEM;
using syncable::CTIME;
using syncable::Directory;
using syncable::Entry;
using syncable::GET_BY_HANDLE;
using syncable::GET_BY_ID;
using syncable::ID;
using syncable::IS_DEL;
using syncable::IS_DIR;
using syncable::IS_UNAPPLIED_UPDATE;
using syncable::IS_UNSYNCED;
using syncable::Id;
using syncable::META_HANDLE;
using syncable::MTIME;
using syncable::MutableEntry;
using syncable::NON_UNIQUE_NAME;
using syncable::BASE_SERVER_SPECIFICS;
using syncable::PARENT_ID;
using syncable::PREV_ID;
using syncable::SERVER_CTIME;
using syncable::SERVER_IS_DEL;
using syncable::SERVER_IS_DIR;
using syncable::SERVER_MTIME;
using syncable::SERVER_NON_UNIQUE_NAME;
using syncable::SERVER_PARENT_ID;
using syncable::SERVER_ORDINAL_IN_PARENT;
using syncable::SERVER_SPECIFICS;
using syncable::SERVER_VERSION;
using syncable::UNIQUE_CLIENT_TAG;
using syncable::UNIQUE_SERVER_TAG;
using syncable::SPECIFICS;
using syncable::SYNCER;
using syncable::WriteTransaction;

// Returns the ID of the local entry that |update| should be applied to,
// resolving client-tag matches and lost-commit-response cases.  Falls back
// to the update's own (server) ID when no local entry matches, in which
// case the caller is expected to create an entry with that ID.
syncable::Id FindLocalIdToUpdate(
    syncable::BaseTransaction* trans,
    const sync_pb::SyncEntity& update) {
  // Expected entry points of this function:
  // SyncEntity has NOT been applied to SERVER fields.
  // SyncEntity has NOT been applied to LOCAL fields.
  // DB has not yet been modified, no entries created for this update.

  const std::string& client_id = trans->directory()->cache_guid();
  const syncable::Id& update_id = SyncableIdFromProto(update.id_string());

  if (update.has_client_defined_unique_tag() &&
      !update.client_defined_unique_tag().empty()) {
    // When a server sends down a client tag, the following cases can occur:
    // 1) Client has entry for tag already, ID is server style, matches
    // 2) Client has entry for tag already, ID is server, doesn't match.
    // 3) Client has entry for tag already, ID is local, (never matches)
    // 4) Client has no entry for tag

    // Case 1, we don't have to do anything since the update will
    // work just fine. Update will end up in the proper entry, via ID lookup.
    // Case 2 - Happens very rarely due to lax enforcement of client tags
    // on the server, if two clients commit the same tag at the same time.
    // When this happens, we pick the lexically-least ID and ignore all other
    // items.
    // Case 3 - We need to replace the local ID with the server ID so that
    // this update gets targeted at the correct local entry; we expect conflict
    // resolution to occur.
    // Case 4 - Perfect. Same as case 1.

    syncable::Entry local_entry(trans, syncable::GET_BY_CLIENT_TAG,
                                update.client_defined_unique_tag());

    // The SyncAPI equivalent of this function will return !good if IS_DEL.
    // The syncable version will return good even if IS_DEL.
    // TODO(chron): Unit test the case with IS_DEL and make sure.
    if (local_entry.good()) {
      if (local_entry.Get(ID).ServerKnows()) {
        if (local_entry.Get(ID) != update_id) {
          // Case 2.
          LOG(WARNING) << "Duplicated client tag.";
          if (local_entry.Get(ID) < update_id) {
            // Signal an error; drop this update on the floor.  Note that
            // we don't server delete the item, because we don't allow it to
            // exist locally at all.  So the item will remain orphaned on
            // the server, and we won't pay attention to it.
            return syncable::GetNullId();
          }
        }
        // Target this change to the existing local entry; later,
        // we'll change the ID of the local entry to update_id
        // if needed.
        return local_entry.Get(ID);
      } else {
        // Case 3: We have a local entry with the same client tag.
        // We should change the ID of the local entry to the server entry.
        // This will result in a server ID with base version == 0, but that's
        // a legal state for an item with a client tag.  By changing the ID,
        // update will now be applied to local_entry.
        DCHECK(0 == local_entry.Get(BASE_VERSION) ||
               CHANGES_VERSION == local_entry.Get(BASE_VERSION));
        return local_entry.Get(ID);
      }
    }
  } else if (update.has_originator_cache_guid() &&
             update.originator_cache_guid() == client_id) {
    // If a commit succeeds, but the response does not come back fast enough
    // then the syncer might assume that it was never committed.
    // The server will track the client that sent up the original commit and
    // return this in a get updates response. When this matches a local
    // uncommitted item, we must mutate our local item and version to pick up
    // the committed version of the same item whose commit response was lost.
    // There is however still a race condition if the server has not
    // completed the commit by the time the syncer tries to get updates
    // again. To mitigate this, we need to have the server time out in
    // a reasonable span, our commit batches have to be small enough
    // to process within our HTTP response "assumed alive" time.

    // We need to check if we have an entry that didn't get its server
    // id updated correctly. The server sends down a client ID
    // and a local (negative) id. If we have an entry by that
    // description, we should update the ID and version to the
    // server side ones to avoid multiple copies of the same thing.

    syncable::Id client_item_id = syncable::Id::CreateFromClientString(
        update.originator_client_item_id());
    DCHECK(!client_item_id.ServerKnows());
    syncable::Entry local_entry(trans, GET_BY_ID, client_item_id);

    // If it exists, then our local client lost a commit response.  Use
    // the local entry.
    if (local_entry.good() && !local_entry.Get(IS_DEL)) {
      int64 old_version = local_entry.Get(BASE_VERSION);
      int64 new_version = update.version();
      DCHECK_LE(old_version, 0);
      DCHECK_GT(new_version, 0);
      // Otherwise setting the base version could cause a consistency failure.
      // An entry should never be version 0 and SYNCED.
      DCHECK(local_entry.Get(IS_UNSYNCED));

      // Just a quick sanity check.
      DCHECK(!local_entry.Get(ID).ServerKnows());

      DVLOG(1) << "Reuniting lost commit response IDs. server id: "
               << update_id << " local id: " << local_entry.Get(ID)
               << " new version: " << new_version;

      return local_entry.Get(ID);
    }
  }
  // Fallback: target an entry having the server ID, creating one if needed.
  return update_id;
}

// Attempts to apply the pending server update on |entry| to the local
// (user-visible) fields.  Returns SUCCESS when the update was applied (or
// there was nothing to apply), CONFLICT_ENCRYPTION when the update cannot be
// decrypted yet, CONFLICT_HIERARCHY for tree-consistency problems, and
// CONFLICT_SIMPLE when the entry also has unsynced local changes.
UpdateAttemptResponse AttemptToUpdateEntry(
    syncable::WriteTransaction* const trans,
    syncable::MutableEntry* const entry,
    Cryptographer* cryptographer) {
  CHECK(entry->good());
  if (!entry->Get(IS_UNAPPLIED_UPDATE))
    return SUCCESS;  // No work to do.
  syncable::Id id = entry->Get(ID);
  const sync_pb::EntitySpecifics& specifics = entry->Get(SERVER_SPECIFICS);

  // Only apply updates that we can decrypt. If we can't decrypt the update, it
  // is likely because the passphrase has not arrived yet. Because the
  // passphrase may not arrive within this GetUpdates, we can't just return
  // conflict, else we try to perform normal conflict resolution prematurely or
  // the syncer may get stuck. As such, we return CONFLICT_ENCRYPTION, which is
  // treated as an unresolvable conflict. See the description in
  // syncer_types.h.
  // This prevents any unsynced changes from committing and postpones conflict
  // resolution until all data can be decrypted.
  if (specifics.has_encrypted() &&
      !cryptographer->CanDecrypt(specifics.encrypted())) {
    // We can't decrypt this node yet.
    DVLOG(1) << "Received an undecryptable "
             << ModelTypeToString(entry->GetServerModelType())
             << " update, returning encryption_conflict.";
    return CONFLICT_ENCRYPTION;
  } else if (specifics.has_password() &&
             entry->Get(UNIQUE_SERVER_TAG).empty()) {
    // Passwords use their own legacy encryption scheme.
    const sync_pb::PasswordSpecifics& password = specifics.password();
    if (!cryptographer->CanDecrypt(password.encrypted())) {
      DVLOG(1) << "Received an undecryptable password update, returning "
               << "encryption_conflict.";
      return CONFLICT_ENCRYPTION;
    }
  }

  if (!entry->Get(SERVER_IS_DEL)) {
    syncable::Id new_parent = entry->Get(SERVER_PARENT_ID);
    Entry parent(trans, GET_BY_ID, new_parent);
    // A note on non-directory parents:
    // We catch most unfixable tree invariant errors at update receipt time,
    // however we deal with this case here because we may receive the child
    // first then the illegal parent. Instead of dealing with it twice in
    // different ways we deal with it once here to reduce the amount of code
    // and potential errors.
    if (!parent.good() || parent.Get(IS_DEL) || !parent.Get(IS_DIR)) {
      return CONFLICT_HIERARCHY;
    }
    if (entry->Get(PARENT_ID) != new_parent) {
      // Reparenting must not introduce a cycle in the tree.
      if (!entry->Get(IS_DEL) && !IsLegalNewParent(trans, id, new_parent)) {
        DVLOG(1) << "Not updating item " << id
                 << ", illegal new parent (would cause loop).";
        return CONFLICT_HIERARCHY;
      }
    }
  } else if (entry->Get(IS_DIR)) {
    Directory::ChildHandles handles;
    trans->directory()->GetChildHandlesById(trans, id, &handles);
    if (!handles.empty()) {
      // If we have still-existing children, then we need to deal with
      // them before we can process this change.
      DVLOG(1) << "Not deleting directory; it's not empty " << *entry;
      return CONFLICT_HIERARCHY;
    }
  }

  if (entry->Get(IS_UNSYNCED)) {
    DVLOG(1) << "Skipping update, returning conflict for: " << id
             << " ; it's unsynced.";
    return CONFLICT_SIMPLE;
  }

  if (specifics.has_encrypted()) {
    DVLOG(2) << "Received a decryptable "
             << ModelTypeToString(entry->GetServerModelType())
             << " update, applying normally.";
  } else {
    DVLOG(2) << "Received an unencrypted "
             << ModelTypeToString(entry->GetServerModelType())
             << " update, applying normally.";
  }

  UpdateLocalDataFromServerData(trans, entry);

  return SUCCESS;
}

namespace {
// Helper to synthesize a new-style sync_pb::EntitySpecifics for use locally,
// when the server speaks only the old sync_pb::SyncEntity_BookmarkData-based
// protocol.
void UpdateBookmarkSpecifics(const std::string& singleton_tag,
                             const std::string& url,
                             const std::string& favicon_bytes,
                             MutableEntry* local_entry) {
  // In the new-style protocol, the server no longer sends bookmark info for
  // the "google_chrome" folder.  Mimic that here.
  if (singleton_tag == "google_chrome")
    return;
  sync_pb::EntitySpecifics pb;
  sync_pb::BookmarkSpecifics* bookmark = pb.mutable_bookmark();
  if (!url.empty())
    bookmark->set_url(url);
  if (!favicon_bytes.empty())
    bookmark->set_favicon(favicon_bytes);
  local_entry->Put(SERVER_SPECIFICS, pb);
}

}  // namespace

// Copies the server-side fields of |update| onto |target|'s SERVER_* fields
// and flags the entry as an unapplied update when the server version is newer
// than the local base version.
// Pass in name and checksum because of UTF8 conversion.
void UpdateServerFieldsFromUpdate(
    MutableEntry* target,
    const sync_pb::SyncEntity& update,
    const std::string& name) {
  if (update.deleted()) {
    if (target->Get(SERVER_IS_DEL)) {
      // If we already think the item is server-deleted, we're done.
      // Skipping these cases prevents our committed deletions from coming
      // back and overriding subsequent undeletions.  For non-deleted items,
      // the version number check has a similar effect.
      return;
    }
    // The server returns very lightweight replies for deletions, so we don't
    // clobber a bunch of fields on delete.
    target->Put(SERVER_IS_DEL, true);
    if (!target->Get(UNIQUE_CLIENT_TAG).empty()) {
      // Items identified by the client unique tag are undeletable; when
      // they're deleted, they go back to version 0.
      target->Put(SERVER_VERSION, 0);
    } else {
      // Otherwise, fake a server version by bumping the local number.
      target->Put(SERVER_VERSION,
          std::max(target->Get(SERVER_VERSION),
                   target->Get(BASE_VERSION)) + 1);
    }
    target->Put(IS_UNAPPLIED_UPDATE, true);
    return;
  }

  DCHECK_EQ(target->Get(ID), SyncableIdFromProto(update.id_string()))
      << "ID Changing not supported here";
  target->Put(SERVER_PARENT_ID, SyncableIdFromProto(update.parent_id_string()));
  target->Put(SERVER_NON_UNIQUE_NAME, name);
  target->Put(SERVER_VERSION, update.version());
  target->Put(SERVER_CTIME, ProtoTimeToTime(update.ctime()));
  target->Put(SERVER_MTIME, ProtoTimeToTime(update.mtime()));
  target->Put(SERVER_IS_DIR, IsFolder(update));
  if (update.has_server_defined_unique_tag()) {
    const std::string& tag = update.server_defined_unique_tag();
    target->Put(UNIQUE_SERVER_TAG, tag);
  }
  if (update.has_client_defined_unique_tag()) {
    const std::string& tag = update.client_defined_unique_tag();
    target->Put(UNIQUE_CLIENT_TAG, tag);
  }
  // Store the datatype-specific part as a protobuf.
  if (update.has_specifics()) {
    DCHECK_NE(GetModelType(update), UNSPECIFIED)
        << "Storing unrecognized datatype in sync database.";
    target->Put(SERVER_SPECIFICS, update.specifics());
  } else if (update.has_bookmarkdata()) {
    // Legacy protocol response for bookmark data.
    const sync_pb::SyncEntity::BookmarkData& bookmark = update.bookmarkdata();
    UpdateBookmarkSpecifics(update.server_defined_unique_tag(),
                            bookmark.bookmark_url(),
                            bookmark.bookmark_favicon(),
                            target);
  }
  if (update.has_position_in_parent())
    target->Put(SERVER_ORDINAL_IN_PARENT,
                Int64ToNodeOrdinal(update.position_in_parent()));

  target->Put(SERVER_IS_DEL, update.deleted());
  // We only mark the entry as unapplied if its version is greater than the
  // local data. If we're processing the update that corresponds to one of our
  // commit we don't apply it as time differences may occur.
  if (update.version() > target->Get(BASE_VERSION)) {
    target->Put(IS_UNAPPLIED_UPDATE, true);
  }
}

// Creates a new Entry iff no Entry exists with the given id.
void CreateNewEntry(syncable::WriteTransaction *trans,
                    const syncable::Id& id) {
  syncable::MutableEntry entry(trans, GET_BY_ID, id);
  if (!entry.good()) {
    syncable::MutableEntry new_entry(trans, syncable::CREATE_NEW_UPDATE_ITEM,
                                     id);
  }
}

// Moves |entry|'s local data onto a fresh client-side ID (base version 0) and
// re-creates an entry under the original server ID that carries only the
// server-side fields, which are then cleared from |entry|.
void SplitServerInformationIntoNewEntry(
    syncable::WriteTransaction* trans,
    syncable::MutableEntry* entry) {
  syncable::Id id = entry->Get(ID);
  ChangeEntryIDAndUpdateChildren(trans, entry, trans->directory()->NextId());
  entry->Put(BASE_VERSION, 0);

  MutableEntry new_entry(trans, CREATE_NEW_UPDATE_ITEM, id);
  CopyServerFields(entry, &new_entry);
  ClearServerData(entry);

  DVLOG(1) << "Splitting server information, local entry: " << *entry
           << " server entry: " << new_entry;
}

// This function is called on an entry when we can update the user-facing data
// from the server data.
void UpdateLocalDataFromServerData(
    syncable::WriteTransaction* trans,
    syncable::MutableEntry* entry) {
  DCHECK(!entry->Get(IS_UNSYNCED));
  DCHECK(entry->Get(IS_UNAPPLIED_UPDATE));

  DVLOG(2) << "Updating entry : " << *entry;
  // Start by setting the properties that determine the model_type.
  entry->Put(SPECIFICS, entry->Get(SERVER_SPECIFICS));
  // Clear the previous server specifics now that we're applying successfully.
  entry->Put(BASE_SERVER_SPECIFICS, sync_pb::EntitySpecifics());
  entry->Put(IS_DIR, entry->Get(SERVER_IS_DIR));
  // This strange dance around the IS_DEL flag avoids problems when setting
  // the name.
  // TODO(chron): Is this still an issue? Unit test this codepath.
  if (entry->Get(SERVER_IS_DEL)) {
    entry->Put(IS_DEL, true);
  } else {
    entry->Put(NON_UNIQUE_NAME, entry->Get(SERVER_NON_UNIQUE_NAME));
    entry->Put(PARENT_ID, entry->Get(SERVER_PARENT_ID));
    CHECK(entry->Put(IS_DEL, false));
    Id new_predecessor =
        entry->ComputePrevIdFromServerPosition(entry->Get(SERVER_PARENT_ID));
    CHECK(entry->PutPredecessor(new_predecessor))
        << " Illegal predecessor after converting from server position.";
  }

  entry->Put(CTIME, entry->Get(SERVER_CTIME));
  entry->Put(MTIME, entry->Get(SERVER_MTIME));
  entry->Put(BASE_VERSION, entry->Get(SERVER_VERSION));
  entry->Put(IS_DEL, entry->Get(SERVER_IS_DEL));
  entry->Put(IS_UNAPPLIED_UPDATE, false);
}

// Checks an unsynced entry for conditions that make it unsafe or pointless
// to commit: self-parenting non-root items, unsynced permanent items, and
// deletions of items the server never knew about.
VerifyCommitResult ValidateCommitEntry(syncable::Entry* entry) {
  syncable::Id id = entry->Get(ID);
  if (id == entry->Get(PARENT_ID)) {
    CHECK(id.IsRoot()) << "Non-root item is self parenting." << *entry;
    // If the root becomes unsynced it can cause us problems.
    LOG(ERROR) << "Root item became unsynced " << *entry;
    return VERIFY_UNSYNCABLE;
  }
  if (entry->IsRoot()) {
    LOG(ERROR) << "Permanent item became unsynced " << *entry;
    return VERIFY_UNSYNCABLE;
  }
  if (entry->Get(IS_DEL) && !entry->Get(ID).ServerKnows()) {
    // Drop deleted uncommitted entries.
    return VERIFY_UNSYNCABLE;
  }
  return VERIFY_OK;
}

// Appends |item|'s ID to |commit_ids|, followed by the IDs of its
// predecessors (nearest first) that pass |inclusion_filter| and have not
// already been recorded in |inserted_items|.  Returns false if |item| itself
// was already inserted.
bool AddItemThenPredecessors(
    syncable::BaseTransaction* trans,
    syncable::Entry* item,
    syncable::IndexedBitField inclusion_filter,
    syncable::MetahandleSet* inserted_items,
    std::vector<syncable::Id>* commit_ids) {

  if (!inserted_items->insert(item->Get(META_HANDLE)).second)
    return false;
  commit_ids->push_back(item->Get(ID));
  if (item->Get(IS_DEL))
    return true;  // Deleted items have no predecessors.

  Id prev_id = item->Get(PREV_ID);
  while (!prev_id.IsRoot()) {
    Entry prev(trans, GET_BY_ID, prev_id);
    CHECK(prev.good()) << "Bad id when walking predecessors.";
    if (!prev.Get(inclusion_filter))
      break;
    if (!inserted_items->insert(prev.Get(META_HANDLE)).second)
      break;
    commit_ids->push_back(prev_id);
    prev_id = prev.Get(PREV_ID);
  }
  return true;
}

// Same items as AddItemThenPredecessors, but appended to |commit_ids| in
// the opposite (predecessors-first) order.
void AddPredecessorsThenItem(
    syncable::BaseTransaction* trans,
    syncable::Entry* item,
    syncable::IndexedBitField inclusion_filter,
    syncable::MetahandleSet* inserted_items,
    std::vector<syncable::Id>* commit_ids) {
  size_t initial_size = commit_ids->size();
  if (!AddItemThenPredecessors(trans, item, inclusion_filter, inserted_items,
                               commit_ids))
    return;
  // Reverse what we added to get the correct order.
  std::reverse(commit_ids->begin() + initial_size, commit_ids->end());
}

// Clears IS_UNSYNCED on deleted, unsynced entries whose chain of deleted
// ancestors reaches one of |deleted_folders| — their deletion is implied by
// the already-synced deletion of that folder.
void MarkDeletedChildrenSynced(
    syncable::Directory* dir,
    std::set<syncable::Id>* deleted_folders) {
  // There's two options here.
  // 1. Scan deleted unsynced entries looking up their pre-delete tree for any
  //    of the deleted folders.
  // 2. Take each folder and do a tree walk of all entries underneath it.
  // #2 has a lower big O cost, but writing code to limit the time spent inside
  // the transaction during each step is simpler with 1. Changing this decision
  // may be sensible if this code shows up in profiling.
  if (deleted_folders->empty())
    return;
  Directory::UnsyncedMetaHandles handles;
  {
    syncable::ReadTransaction trans(FROM_HERE, dir);
    dir->GetUnsyncedMetaHandles(&trans, &handles);
  }
  if (handles.empty())
    return;
  Directory::UnsyncedMetaHandles::iterator it;
  for (it = handles.begin() ; it != handles.end() ; ++it) {
    // Single transaction / entry we deal with.
    WriteTransaction trans(FROM_HERE, SYNCER, dir);
    MutableEntry entry(&trans, GET_BY_HANDLE, *it);
    if (!entry.Get(IS_UNSYNCED) || !entry.Get(IS_DEL))
      continue;
    syncable::Id id = entry.Get(PARENT_ID);
    // Walk up the chain of deleted ancestors looking for a folder whose
    // deletion has already been committed.
    while (id != trans.root_id()) {
      if (deleted_folders->find(id) != deleted_folders->end()) {
        // We've synced the deletion of this deleted entry's parent.
        entry.Put(IS_UNSYNCED, false);
        break;
      }
      Entry parent(&trans, GET_BY_ID, id);
      if (!parent.good() || !parent.Get(IS_DEL))
        break;
      id = parent.Get(PARENT_ID);
    }
  }
}

// Verifies an update for which no local entry exists: deletions of unknown
// items are skipped, anything else is accepted; an existing entry defers
// the decision to later checks.
VerifyResult VerifyNewEntry(
    const sync_pb::SyncEntity& update,
    syncable::Entry* target,
    const bool deleted) {
  if (target->good()) {
    // Not a new update.
    return VERIFY_UNDECIDED;
  }
  if (deleted) {
    // Deletion of an item we've never seen can be ignored.
    return VERIFY_SKIP;
  }

  return VERIFY_SUCCESS;
}

// Assumes we have an existing entry; check here for updates that break
// consistency rules.
VerifyResult VerifyUpdateConsistency(
    syncable::WriteTransaction* trans,
    const sync_pb::SyncEntity& update,
    syncable::MutableEntry* target,
    const bool deleted,
    const bool is_directory,
    ModelType model_type) {

  CHECK(target->good());
  const syncable::Id& update_id = SyncableIdFromProto(update.id_string());

  // If the update is a delete, we don't really need to worry at this stage.
  if (deleted)
    return VERIFY_SUCCESS;

  if (model_type == UNSPECIFIED) {
    // This update is to an item of a datatype we don't recognize. The server
    // shouldn't have sent it to us.  Throw it on the ground.
    return VERIFY_SKIP;
  }

  if (target->Get(SERVER_VERSION) > 0) {
    // Then we've had an update for this entry before.
    if (is_directory != target->Get(SERVER_IS_DIR) ||
        model_type != target->GetServerModelType()) {
      if (target->Get(IS_DEL)) {  // If we've deleted the item, we don't care.
        return VERIFY_SKIP;
      } else {
        LOG(ERROR) << "Server update doesn't agree with previous updates. ";
        LOG(ERROR) << " Entry: " << *target;
        LOG(ERROR) << " Update: "
                   << SyncerProtoUtil::SyncEntityDebugString(update);
        return VERIFY_FAIL;
      }
    }

    if (!deleted && (target->Get(ID) == update_id) &&
        (target->Get(SERVER_IS_DEL) ||
         (!target->Get(IS_UNSYNCED) && target->Get(IS_DEL) &&
          target->Get(BASE_VERSION) > 0))) {
      // An undelete. The latter case in the above condition is for
      // when the server does not give us an update following the
      // commit of a delete, before undeleting.
      // Undeletion is common for items that reuse the client-unique tag.
      VerifyResult result = VerifyUndelete(trans, update, target);
      if (VERIFY_UNDECIDED != result)
        return result;
    }
  }
  if (target->Get(BASE_VERSION) > 0) {
    // We've committed this update in the past.
    if (is_directory != target->Get(IS_DIR) ||
        model_type != target->GetModelType()) {
      LOG(ERROR) << "Server update doesn't agree with committed item. ";
      LOG(ERROR) << " Entry: " << *target;
      LOG(ERROR) << " Update: "
                 << SyncerProtoUtil::SyncEntityDebugString(update);
      return VERIFY_FAIL;
    }
    if (target->Get(ID) == update_id) {
      if (target->Get(SERVER_VERSION) > update.version()) {
        LOG(WARNING) << "We've already seen a more recent version.";
        LOG(WARNING) << " Entry: " << *target;
        LOG(WARNING) << " Update: "
                     << SyncerProtoUtil::SyncEntityDebugString(update);
        return VERIFY_SKIP;
      }
    }
  }
  return VERIFY_SUCCESS;
}

// Assumes we have an existing entry; verify an update that seems to be
// expressing an 'undelete'
VerifyResult VerifyUndelete(syncable::WriteTransaction* trans,
                            const sync_pb::SyncEntity& update,
                            syncable::MutableEntry* target) {
  // TODO(nick): We hit this path for items deleted items that the server
  // tells us to re-create; only deleted items with positive base versions
  // will hit this path.  However, it's not clear how such an undeletion
  // would actually succeed on the server; in the protocol, a base
  // version of 0 is required to undelete an object.  This codepath
  // should be deprecated in favor of client-tag style undeletion
  // (where items go to version 0 when they're deleted), or else
  // removed entirely (if this type of undeletion is indeed impossible).
  CHECK(target->good());
  DVLOG(1) << "Server update is attempting undelete. " << *target
           << "Update:" << SyncerProtoUtil::SyncEntityDebugString(update);
  // Move the old one aside and start over.  It's too tricky to get the old
  // one back into a state that would pass CheckTreeInvariants().
  if (target->Get(IS_DEL)) {
    DCHECK(target->Get(UNIQUE_CLIENT_TAG).empty())
        << "Doing move-aside undeletion on client-tagged item.";
    target->Put(ID, trans->directory()->NextId());
    target->Put(UNIQUE_CLIENT_TAG, "");
    target->Put(BASE_VERSION, CHANGES_VERSION);
    target->Put(SERVER_VERSION, 0);
    return VERIFY_SUCCESS;
  }
  if (update.version() < target->Get(SERVER_VERSION)) {
    LOG(WARNING) << "Update older than current server version for "
                 << *target << " Update:"
                 << SyncerProtoUtil::SyncEntityDebugString(update);
    return VERIFY_SUCCESS;  // Expected in new sync protocol.
  }
  return VERIFY_UNDECIDED;
}

}  // namespace syncer