// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/sync/engine/build_and_process_conflict_sets_command.h"

#include <set>
#include <sstream>
#include <string>
#include <vector>

#include "base/basictypes.h"
#include "base/format_macros.h"
#include "chrome/browser/sync/engine/syncer_util.h"
#include "chrome/browser/sync/engine/update_applicator.h"
#include "chrome/browser/sync/sessions/sync_session.h"
#include "chrome/browser/sync/syncable/directory_manager.h"

namespace browser_sync {

using sessions::ConflictProgress;
using sessions::StatusController;
using sessions::SyncSession;
using sessions::UpdateProgress;
using std::set;
using std::string;
using std::vector;

BuildAndProcessConflictSetsCommand::BuildAndProcessConflictSetsCommand() {}
BuildAndProcessConflictSetsCommand::~BuildAndProcessConflictSetsCommand() {}

void BuildAndProcessConflictSetsCommand::ModelChangingExecuteImpl(
    SyncSession* session) {
  session->status_controller()->update_conflict_sets_built(
      BuildAndProcessConflictSets(session));
}

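// Builds the conflict sets for this session and then tries to apply any
// "single direction" sets transactionally.  Returns true if such a set was
// applied, so the syncer can try again from the new state.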
bool BuildAndProcessConflictSetsCommand::BuildAndProcessConflictSets(
    SyncSession* session) {
  syncable::ScopedDirLookup dir(session->context()->directory_manager(),
                                session->context()->account_name());
  if (!dir.good())
    return false;
  bool had_single_direction_sets = false;
  {  // Scope for transaction.
    syncable::WriteTransaction trans(dir, syncable::SYNCER, __FILE__, __LINE__);
    BuildConflictSets(&trans,
        session->status_controller()->mutable_conflict_progress());
    had_single_direction_sets = ProcessSingleDirectionConflictSets(&trans,
        session->context()->resolver(),
        session->context()->directory_manager()->GetCryptographer(&trans),
        session->status_controller(), session->routing_info());
    // We applied some updates transactionally; let's try syncing again.
    if (had_single_direction_sets)
      return true;
  }
  return false;
}

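// Examines each conflict set and handles the "single direction" cases: a set
// made up entirely of unsynced local changes is only logged and skipped,
// while a set made up entirely of unapplied server updates is applied
// transactionally via ApplyUpdatesTransactionally().  Returns true if at
// least one set was applied that way.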
bool BuildAndProcessConflictSetsCommand::ProcessSingleDirectionConflictSets(
    syncable::WriteTransaction* trans, ConflictResolver* resolver,
    Cryptographer* cryptographer, StatusController* status,
    const ModelSafeRoutingInfo& routes) {
  bool rv = false;
  set<ConflictSet*>::const_iterator all_sets_iterator;
  for (all_sets_iterator = status->conflict_progress().ConflictSetsBegin();
       all_sets_iterator != status->conflict_progress().ConflictSetsEnd();) {
    const ConflictSet* conflict_set = *all_sets_iterator;
    CHECK_GE(conflict_set->size(), 2U);
    // We scan the set to see if it consists of changes of only one type.
    ConflictSet::const_iterator i;
    size_t unsynced_count = 0, unapplied_count = 0;
    for (i = conflict_set->begin(); i != conflict_set->end(); ++i) {
      syncable::Entry entry(trans, syncable::GET_BY_ID, *i);
      CHECK(entry.good());
      if (entry.Get(syncable::IS_UNSYNCED))
        unsynced_count++;
      if (entry.Get(syncable::IS_UNAPPLIED_UPDATE))
        unapplied_count++;
    }
    if (conflict_set->size() == unsynced_count && 0 == unapplied_count) {
      VLOG(1) << "Skipped transactional commit attempt.";
    } else if (conflict_set->size() == unapplied_count && 0 == unsynced_count &&
               ApplyUpdatesTransactionally(trans, conflict_set, resolver,
                                           cryptographer, routes, status)) {
      rv = true;
    }
    ++all_sets_iterator;
  }
  return rv;
}

namespace {

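// Snapshots |entry|'s kernel into |backup| so the entry can be restored if a
// transactional apply has to be rolled back.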
void StoreLocalDataForUpdateRollback(syncable::Entry* entry,
                                     syncable::EntryKernel* backup) {
  CHECK(!entry->Get(syncable::IS_UNSYNCED)) << " Storing Rollback data for "
      "entry that's unsynced." << *entry;
  CHECK(entry->Get(syncable::IS_UNAPPLIED_UPDATE)) << " Storing Rollback data "
      "for entry that's not an unapplied update." << *entry;
  *backup = entry->GetKernelCopy();
}

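// Restores an entry to the state captured in |backup|.  Returns false if the
// original state could not be reproduced (the deletion flag or the
// predecessor could not be restored).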
bool RollbackEntry(syncable::WriteTransaction* trans,
                   syncable::EntryKernel* backup) {
  syncable::MutableEntry entry(trans, syncable::GET_BY_HANDLE,
                               backup->ref(syncable::META_HANDLE));
  CHECK(entry.good());

  if (!entry.Put(syncable::IS_DEL, backup->ref(syncable::IS_DEL)))
    return false;

  entry.Put(syncable::NON_UNIQUE_NAME, backup->ref(syncable::NON_UNIQUE_NAME));
  entry.Put(syncable::PARENT_ID, backup->ref(syncable::PARENT_ID));

  if (!backup->ref(syncable::IS_DEL)) {
    if (!entry.PutPredecessor(backup->ref(syncable::PREV_ID)))
      return false;
  }

  if (backup->ref(syncable::PREV_ID) != entry.Get(syncable::PREV_ID))
    return false;

  entry.Put(syncable::CTIME, backup->ref(syncable::CTIME));
  entry.Put(syncable::MTIME, backup->ref(syncable::MTIME));
  entry.Put(syncable::BASE_VERSION, backup->ref(syncable::BASE_VERSION));
  entry.Put(syncable::IS_DIR, backup->ref(syncable::IS_DIR));
  entry.Put(syncable::IS_DEL, backup->ref(syncable::IS_DEL));
  entry.Put(syncable::ID, backup->ref(syncable::ID));
  entry.Put(syncable::IS_UNAPPLIED_UPDATE,
            backup->ref(syncable::IS_UNAPPLIED_UPDATE));
  return true;
}

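// Reparents every entry in |ids| to the root, so that no member of the set is
// a child of any other member while updates are (re)applied.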
void PlaceEntriesAtRoot(syncable::WriteTransaction* trans,
                        const vector<syncable::Id>* ids) {
  vector<syncable::Id>::const_iterator it;
  for (it = ids->begin(); it != ids->end(); ++it) {
    syncable::MutableEntry entry(trans, syncable::GET_BY_ID, *it);
    entry.Put(syncable::PARENT_ID, trans->root_id());
  }
}

}  // namespace

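// Applies every update in |update_set| as one all-or-nothing operation: local
// state is snapshotted first, the entries are reparented to the root, and the
// updates are applied; if any application fails, each entry is rolled back to
// its snapshot and no progress is saved.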
bool BuildAndProcessConflictSetsCommand::ApplyUpdatesTransactionally(
    syncable::WriteTransaction* trans,
    const vector<syncable::Id>* const update_set,
    ConflictResolver* resolver,
    Cryptographer* cryptographer,
    const ModelSafeRoutingInfo& routes,
    StatusController* status) {
  // The metahandles of the entries in |update_set|, in the same order.
  vector<int64> handles;

  // Holds the same Ids as update_set, but sorted so that runs of adjacent
  // nodes appear in order.
  vector<syncable::Id> rollback_ids;
  rollback_ids.reserve(update_set->size());

  // Tracks what's added to |rollback_ids|.
  syncable::MetahandleSet rollback_ids_inserted_items;
  vector<syncable::Id>::const_iterator it;

  // 1. Build |rollback_ids| in the order required for successful rollback.
  //    Specifically, for positions to come out right, restoring an item
  //    requires that its predecessor in the sibling order is properly
  //    restored first.
  // 2. Build |handles|, the list of handles for ApplyUpdates.
  for (it = update_set->begin(); it != update_set->end(); ++it) {
    syncable::Entry entry(trans, syncable::GET_BY_ID, *it);
    SyncerUtil::AddPredecessorsThenItem(trans, &entry,
        syncable::IS_UNAPPLIED_UPDATE, &rollback_ids_inserted_items,
        &rollback_ids);
    handles.push_back(entry.Get(syncable::META_HANDLE));
  }
  DCHECK_EQ(rollback_ids.size(), update_set->size());
  DCHECK_EQ(rollback_ids_inserted_items.size(), update_set->size());

  // 3. Store the information needed to roll back if the transaction fails.
  // Do this before modifying anything to keep the next/prev values intact.
  vector<syncable::EntryKernel> rollback_data(rollback_ids.size());
  for (size_t i = 0; i < rollback_ids.size(); ++i) {
    syncable::Entry entry(trans, syncable::GET_BY_ID, rollback_ids[i]);
    StoreLocalDataForUpdateRollback(&entry, &rollback_data[i]);
  }

  // 4. Reparent everything in the set to the root, so that nothing in the
  // set is a child of anything else.  If we've correctly calculated the set,
  // the server tree is valid, and no changes have occurred locally, we should
  // be able to apply the updates from this state.
  PlaceEntriesAtRoot(trans, update_set);

  // 5. Run the usual update application from the special start state we've
  // just prepared.
  UpdateApplicator applicator(resolver, cryptographer,
                              handles.begin(), handles.end(),
                              routes, status->group_restriction());
  while (applicator.AttemptOneApplication(trans)) {
    // Keep going until all updates are applied.
  }
  if (!applicator.AllUpdatesApplied()) {
    LOG(ERROR) << "Transactional Apply Failed, Rolling back.";
    // We have to reparent the entries to the root again.  For example, if a
    // swap was in a set with other failing updates, the swap may have gone
    // through, meaning the rollback needs to be transactional.  But as we're
    // going back to a known-good state we should always succeed.
    PlaceEntriesAtRoot(trans, update_set);

    // Roll back all entries.
    for (size_t i = 0; i < rollback_data.size(); ++i) {
      CHECK(RollbackEntry(trans, &rollback_data[i]));
    }
    return false;  // Don't save progress -- we just undid it.
  }
  applicator.SaveProgressIntoSessionState(status->mutable_conflict_progress(),
                                          status->mutable_update_progress());
  return true;
}

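// Groups the session's conflicting items into conflict sets.  Items that are
// no longer in conflict are dropped, items that are deleted on the client or
// the server are skipped (they can't be in a complex set), and the remaining
// items are merged into larger sets when a changed parent introduces a loop
// or when they sit under a deleted directory.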
void BuildAndProcessConflictSetsCommand::BuildConflictSets(
    syncable::BaseTransaction* trans,
    ConflictProgress* conflict_progress) {
  conflict_progress->CleanupSets();
  set<syncable::Id>::iterator i = conflict_progress->ConflictingItemsBegin();
  while (i != conflict_progress->ConflictingItemsEnd()) {
    syncable::Entry entry(trans, syncable::GET_BY_ID, *i);
    if (!entry.good() ||
        (!entry.Get(syncable::IS_UNSYNCED) &&
         !entry.Get(syncable::IS_UNAPPLIED_UPDATE))) {
      // This can happen very rarely.  It means we had an item in a simple
      // conflict whose commit happened to go through anyway; its ID could
      // have changed during the commit.  We drop the entry as it's no longer
      // conflicting.
      conflict_progress->EraseConflictingItemById(*(i++));
      continue;
    }
    if (entry.ExistsOnClientBecauseNameIsNonEmpty() &&
        (entry.Get(syncable::IS_DEL) || entry.Get(syncable::SERVER_IS_DEL))) {
      // If we're deleted on client or server we can't be in a complex set.
      ++i;
      continue;
    }
    bool new_parent =
        entry.Get(syncable::PARENT_ID) != entry.Get(syncable::SERVER_PARENT_ID);
    if (new_parent)
      MergeSetsForIntroducedLoops(trans, &entry, conflict_progress);
    MergeSetsForNonEmptyDirectories(trans, &entry, conflict_progress);
    ++i;
  }
}

void BuildAndProcessConflictSetsCommand::MergeSetsForIntroducedLoops(
    syncable::BaseTransaction* trans, syncable::Entry* entry,
    ConflictProgress* conflict_progress) {
  // This code crawls up from the item in question until it gets to the root
  // or back to itself.  If it reaches the root it does nothing.  If it finds
  // a loop, every moved, unsynced entry it crawled through has its set merged
  // with this entry's set.
  // TODO(sync): Build test cases to cover this function when the argument
  // list has settled.
  syncable::Id parent_id = entry->Get(syncable::SERVER_PARENT_ID);
  syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id);
  if (!parent.good()) {
    return;
  }
  // Don't check for loop if the server parent is deleted.
  if (parent.Get(syncable::IS_DEL))
    return;
  vector<syncable::Id> conflicting_entries;
  while (!parent_id.IsRoot()) {
    syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id);
    if (!parent.good()) {
      VLOG(1) << "Bad parent in loop check, skipping. Bad parent id: "
              << parent_id << " entry: " << *entry;
      return;
    }
    if (parent.Get(syncable::IS_UNSYNCED) &&
        entry->Get(syncable::PARENT_ID) !=
            entry->Get(syncable::SERVER_PARENT_ID))
      conflicting_entries.push_back(parent_id);
    parent_id = parent.Get(syncable::PARENT_ID);
    if (parent_id == entry->Get(syncable::ID))
      break;
  }
  if (parent_id.IsRoot())
    return;
  for (size_t i = 0; i < conflicting_entries.size(); i++) {
    conflict_progress->MergeSets(entry->Get(syncable::ID),
                                 conflicting_entries[i]);
  }
}

namespace {

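// Checker used when crawling up from a live local entry whose ancestors the
// server wants to delete: an ancestor is part of the conflict when it carries
// an unapplied update that deletes it on the server.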
class ServerDeletedPathChecker {
 public:
  static bool CausingConflict(const syncable::Entry& e,
                              const syncable::Entry& log_entry) {
    CHECK(e.good()) << "Missing parent in path of: " << log_entry;
    if (e.Get(syncable::IS_UNAPPLIED_UPDATE) &&
        e.Get(syncable::SERVER_IS_DEL)) {
      CHECK(!e.Get(syncable::IS_DEL)) << " Inconsistency in local tree. "
                            "syncable::Entry: " << e << " Leaf: " << log_entry;
      return true;
    } else {
      CHECK(!e.Get(syncable::IS_DEL)) << " Deleted entry has children. "
                            "syncable::Entry: " << e << " Leaf: " << log_entry;
      return false;
    }
  }

  // Returns the id of |id|'s parent.  CHECK-fails if the entry for |id| is
  // missing or if its parent is |check_id|, which would indicate a loop.
  static syncable::Id GetAndExamineParent(syncable::BaseTransaction* trans,
                                          const syncable::Id& id,
                                          const syncable::Id& check_id,
                                          const syncable::Entry& log_entry) {
    syncable::Entry parent(trans, syncable::GET_BY_ID, id);
    CHECK(parent.good()) << "Tree inconsistency, missing id " << id << " "
        << log_entry;
    syncable::Id parent_id = parent.Get(syncable::PARENT_ID);
    CHECK(parent_id != check_id) << "Loop in dir tree! "
      << log_entry << " " << parent;
    return parent_id;
  }
};

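// The mirror-image checker: an ancestor is part of the conflict when it has
// been deleted locally but the deletion is still unsynced.  Unlike the server
// checker, it tolerates missing parents and loops instead of CHECK-failing.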
class LocallyDeletedPathChecker {
 public:
  static bool CausingConflict(const syncable::Entry& e,
                              const syncable::Entry& log_entry) {
    return e.good() && e.Get(syncable::IS_DEL) && e.Get(syncable::IS_UNSYNCED);
  }

  // Returns the null ID if we should stop investigating the path.
  static syncable::Id GetAndExamineParent(syncable::BaseTransaction* trans,
                                          const syncable::Id& id,
                                          const syncable::Id& check_id,
                                          const syncable::Entry& log_entry) {
    syncable::Entry parent(trans, syncable::GET_BY_ID, id);
    if (!parent.good())
      return syncable::kNullId;
    syncable::Id parent_id = parent.Get(syncable::PARENT_ID);
    if (parent_id == check_id)
      return syncable::kNullId;
    return parent_id;
  }
};

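// Walks up the tree from |entry|, merging |entry|'s conflict set with each
// ancestor for which |checker| reports a conflict, and stopping at the first
// ancestor that does not.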
template <typename Checker>
void CrawlDeletedTreeMergingSets(syncable::BaseTransaction* trans,
                                 const syncable::Entry& entry,
                                 ConflictProgress* conflict_progress,
                                 Checker checker) {
  syncable::Id parent_id = entry.Get(syncable::PARENT_ID);
  syncable::Id double_step_parent_id = parent_id;
  // This block builds sets where we've got an entry in a directory the server
  // wants to delete.
  //
  // Here we're walking up the tree to find every ancestor that the checker
  // treats as deleted.  We can be extremely strict here, as anything
  // unexpected means invariants in the local hierarchy have been broken.
  while (!parent_id.IsRoot()) {
    if (!double_step_parent_id.IsRoot()) {
      // Advance |double_step_parent_id| two links per iteration (versus one
      // for |parent_id|) so that a cycle in the parent chain is detected
      // rather than looping forever.
      double_step_parent_id = checker.GetAndExamineParent(
          trans, double_step_parent_id, parent_id, entry);
      double_step_parent_id = checker.GetAndExamineParent(
          trans, double_step_parent_id, parent_id, entry);
    }
    syncable::Entry parent(trans, syncable::GET_BY_ID, parent_id);
    if (checker.CausingConflict(parent, entry)) {
      conflict_progress->MergeSets(entry.Get(syncable::ID),
                                   parent.Get(syncable::ID));
    } else {
      break;
    }
    parent_id = parent.Get(syncable::PARENT_ID);
  }
}

}  // namespace

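// Handles "non-empty directory" conflicts in both directions: an unsynced,
// undeleted local entry sitting under a path the server wants to delete, and
// an unapplied update whose server-side parent path has been deleted locally.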
void BuildAndProcessConflictSetsCommand::MergeSetsForNonEmptyDirectories(
    syncable::BaseTransaction* trans, syncable::Entry* entry,
    ConflictProgress* conflict_progress) {
  if (entry->Get(syncable::IS_UNSYNCED) && !entry->Get(syncable::IS_DEL)) {
    ServerDeletedPathChecker checker;
    CrawlDeletedTreeMergingSets(trans, *entry, conflict_progress, checker);
  }
  if (entry->Get(syncable::IS_UNAPPLIED_UPDATE) &&
      !entry->Get(syncable::SERVER_IS_DEL)) {
    syncable::Entry parent(trans, syncable::GET_BY_ID,
                           entry->Get(syncable::SERVER_PARENT_ID));
    if (!parent.good())
      return;
    LocallyDeletedPathChecker checker;
    if (!checker.CausingConflict(parent, *entry))
      return;
    conflict_progress->MergeSets(entry->Get(syncable::ID),
                                 parent.Get(syncable::ID));
    CrawlDeletedTreeMergingSets(trans, parent, conflict_progress, checker);
  }
}

}  // namespace browser_sync