directory.cc revision 868fa2fe829687343ffae624259930155e16dbd8
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sync/syncable/directory.h"

#include "base/base64.h"
#include "base/debug/trace_event.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "sync/internal_api/public/base/unique_position.h"
#include "sync/internal_api/public/util/unrecoverable_error_handler.h"
#include "sync/syncable/entry.h"
#include "sync/syncable/entry_kernel.h"
#include "sync/syncable/in_memory_directory_backing_store.h"
#include "sync/syncable/on_disk_directory_backing_store.h"
#include "sync/syncable/scoped_kernel_lock.h"
#include "sync/syncable/scoped_parent_child_index_updater.h"
#include "sync/syncable/syncable-inl.h"
#include "sync/syncable/syncable_base_transaction.h"
#include "sync/syncable/syncable_changes_version.h"
#include "sync/syncable/syncable_read_transaction.h"
#include "sync/syncable/syncable_util.h"
#include "sync/syncable/syncable_write_transaction.h"

using std::string;

namespace syncer {
namespace syncable {

// static
const base::FilePath::CharType Directory::kSyncDatabaseFilename[] =
    FILE_PATH_LITERAL("SyncData.sqlite3");

Directory::PersistedKernelInfo::PersistedKernelInfo()
    : next_id(0) {
  ModelTypeSet protocol_types = ProtocolTypes();
  for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
       iter.Inc()) {
    reset_download_progress(iter.Get());
    transaction_version[iter.Get()] = 0;
  }
}

Directory::PersistedKernelInfo::~PersistedKernelInfo() {}

void Directory::PersistedKernelInfo::reset_download_progress(
    ModelType model_type) {
  download_progress[model_type].set_data_type_id(
      GetSpecificsFieldNumberFromModelType(model_type));
  // An empty-string token indicates no prior knowledge.
  download_progress[model_type].set_token(std::string());
}

Directory::SaveChangesSnapshot::SaveChangesSnapshot()
    : kernel_info_status(KERNEL_SHARE_INFO_INVALID) {
}

Directory::SaveChangesSnapshot::~SaveChangesSnapshot() {
  STLDeleteElements(&dirty_metas);
  STLDeleteElements(&delete_journals);
}

Directory::Kernel::Kernel(
    const std::string& name,
    const KernelLoadInfo& info, DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer)
    : next_write_transaction_id(0),
      name(name),
      info_status(Directory::KERNEL_SHARE_INFO_VALID),
      persisted_info(info.kernel_info),
      cache_guid(info.cache_guid),
      next_metahandle(info.max_metahandle + 1),
      delegate(delegate),
      transaction_observer(transaction_observer) {
  DCHECK(delegate);
  DCHECK(transaction_observer.IsInitialized());
}

Directory::Kernel::~Kernel() {
  STLDeleteContainerPairSecondPointers(metahandles_map.begin(),
                                       metahandles_map.end());
}

Directory::Directory(
    DirectoryBackingStore* store,
    UnrecoverableErrorHandler* unrecoverable_error_handler,
    ReportUnrecoverableErrorFunction report_unrecoverable_error_function,
    NigoriHandler* nigori_handler,
    Cryptographer* cryptographer)
    : kernel_(NULL),
      store_(store),
      unrecoverable_error_handler_(unrecoverable_error_handler),
      report_unrecoverable_error_function_(
          report_unrecoverable_error_function),
      unrecoverable_error_set_(false),
      nigori_handler_(nigori_handler),
      cryptographer_(cryptographer),
      invariant_check_level_(VERIFY_CHANGES) {
}

Directory::~Directory() {
  Close();
}

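// Typical lifecycle (an illustrative sketch only; the names below are
// hypothetical and not taken from any real call site):
//
//   Directory dir(store, error_handler, report_fn, nigori, cryptographer);
//   if (dir.Open("user_share", delegate, observer) == OPENED) {
//     // ... perform transactions against the directory ...
//     dir.SaveChanges();  // Persist dirty entries via the backing store.
//   }
//   // ~Directory() calls Close(), which frees the kernel and the store.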
DirOpenResult Directory::Open(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>& transaction_observer) {
  TRACE_EVENT0("sync", "SyncDatabaseOpen");

  const DirOpenResult result =
      OpenImpl(name, delegate, transaction_observer);

  if (OPENED != result)
    Close();
  return result;
}

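// Rebuilds every in-memory index (parent-child, unsynced, unapplied-update,
// server tag, client tag, and ID maps) from the freshly loaded
// |handles_map|, taking ownership of its entries via swap.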
void Directory::InitializeIndices(MetahandlesMap* handles_map) {
  kernel_->metahandles_map.swap(*handles_map);
  for (MetahandlesMap::const_iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    if (ParentChildIndex::ShouldInclude(entry))
      kernel_->parent_child_index.Insert(entry);
    const int64 metahandle = entry->ref(META_HANDLE);
    if (entry->ref(IS_UNSYNCED))
      kernel_->unsynced_metahandles.insert(metahandle);
    if (entry->ref(IS_UNAPPLIED_UPDATE)) {
      const ModelType type = entry->GetServerModelType();
      kernel_->unapplied_update_metahandles[type].insert(metahandle);
    }
    if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
      DCHECK(kernel_->server_tags_map.find(entry->ref(UNIQUE_SERVER_TAG)) ==
             kernel_->server_tags_map.end())
          << "Unexpected duplicate use of server tag";
      kernel_->server_tags_map[entry->ref(UNIQUE_SERVER_TAG)] = entry;
    }
    if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
      DCHECK(kernel_->client_tags_map.find(entry->ref(UNIQUE_CLIENT_TAG)) ==
             kernel_->client_tags_map.end())
          << "Unexpected duplicate use of client tag";
      kernel_->client_tags_map[entry->ref(UNIQUE_CLIENT_TAG)] = entry;
    }
    DCHECK(kernel_->ids_map.find(entry->ref(ID).value()) ==
           kernel_->ids_map.end()) << "Unexpected duplicate use of ID";
    kernel_->ids_map[entry->ref(ID).value()] = entry;
    DCHECK(!entry->is_dirty());
  }
}

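// Loads the backing store into temporary structures so that a failed load
// leaves kernel_ untouched.  On success, builds the kernel and indices,
// then performs an initial SaveChanges() to reserve space in next_id
// (see crbug.com/142987).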
DirOpenResult Directory::OpenImpl(
    const string& name,
    DirectoryChangeDelegate* delegate,
    const WeakHandle<TransactionObserver>&
        transaction_observer) {
  KernelLoadInfo info;
  // Temporary indices, used before kernel_ is initialized in case Load
  // fails.  We swap these into place later in O(1).
  Directory::MetahandlesMap tmp_handles_map;
  JournalIndex delete_journals;

  DirOpenResult result =
      store_->Load(&tmp_handles_map, &delete_journals, &info);
  if (OPENED != result)
    return result;

  kernel_ = new Kernel(name, info, delegate, transaction_observer);
  delete_journal_.reset(new DeleteJournal(&delete_journals));
  InitializeIndices(&tmp_handles_map);

  // Write back the share info to reserve some space in 'next_id'.  This will
  // prevent local ID reuse in the case of an early crash.  See the comments in
  // TakeSnapshotForSaveChanges() or crbug.com/142987 for more information.
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  if (!SaveChanges())
    return FAILED_INITIAL_WRITE;

  return OPENED;
}

DeleteJournal* Directory::delete_journal() {
  DCHECK(delete_journal_.get());
  return delete_journal_.get();
}

void Directory::Close() {
  store_.reset();
  if (kernel_) {
    delete kernel_;
    kernel_ = NULL;
  }
}

void Directory::OnUnrecoverableError(const BaseTransaction* trans,
                                     const tracked_objects::Location& location,
                                     const std::string& message) {
  DCHECK(trans != NULL);
  unrecoverable_error_set_ = true;
  unrecoverable_error_handler_->OnUnrecoverableError(location,
                                                     message);
}

EntryKernel* Directory::GetEntryById(const Id& id) {
  ScopedKernelLock lock(this);
  return GetEntryById(id, &lock);
}

EntryKernel* Directory::GetEntryById(const Id& id,
                                     ScopedKernelLock* const lock) {
  DCHECK(kernel_);
  // Find it in the in-memory ID index.
  IdsMap::iterator id_found = kernel_->ids_map.find(id.value());
  if (id_found != kernel_->ids_map.end()) {
    return id_found->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByClientTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);

  TagsMap::iterator it = kernel_->client_tags_map.find(tag);
  if (it != kernel_->client_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByServerTag(const string& tag) {
  ScopedKernelLock lock(this);
  DCHECK(kernel_);
  TagsMap::iterator it = kernel_->server_tags_map.find(tag);
  if (it != kernel_->server_tags_map.end()) {
    return it->second;
  }
  return NULL;
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle) {
  ScopedKernelLock lock(this);
  return GetEntryByHandle(metahandle, &lock);
}

EntryKernel* Directory::GetEntryByHandle(int64 metahandle,
                                         ScopedKernelLock* lock) {
  // Look up in memory.
  MetahandlesMap::iterator found =
      kernel_->metahandles_map.find(metahandle);
  if (found != kernel_->metahandles_map.end()) {
    // Found it in memory.  Easy.
    return found->second;
  }
  return NULL;
}

bool Directory::GetChildHandlesById(
    BaseTransaction* trans, const Id& parent_id,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;
  result->clear();

  ScopedKernelLock lock(this);
  AppendChildHandles(lock, parent_id, result);
  return true;
}

bool Directory::GetChildHandlesByHandle(
    BaseTransaction* trans, int64 handle,
    Directory::Metahandles* result) {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return false;

  result->clear();

  ScopedKernelLock lock(this);
  EntryKernel* kernel = GetEntryByHandle(handle, &lock);
  if (!kernel)
    return true;

  AppendChildHandles(lock, kernel->ref(ID), result);
  return true;
}

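// Counts |kernel| plus all of its descendants via breadth-first traversal
// of the parent-child index.  The count includes the node itself, so a
// leaf yields 1.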
int Directory::GetTotalNodeCount(
    BaseTransaction* trans,
    EntryKernel* kernel) const {
  if (!SyncAssert(this == trans->directory(), FROM_HERE,
                  "Directories don't match", trans))
    return 0;

  int count = 1;
  std::deque<const OrderedChildSet*> child_sets;

  GetChildSetForKernel(trans, kernel, &child_sets);
  while (!child_sets.empty()) {
    const OrderedChildSet* set = child_sets.front();
    child_sets.pop_front();
    for (OrderedChildSet::const_iterator it = set->begin();
         it != set->end(); ++it) {
      count++;
      GetChildSetForKernel(trans, *it, &child_sets);
    }
  }

  return count;
}

void Directory::GetChildSetForKernel(
    BaseTransaction* trans,
    EntryKernel* kernel,
    std::deque<const OrderedChildSet*>* child_sets) const {
  if (!kernel->ref(IS_DIR))
    return;  // Not a directory => no children.

  const OrderedChildSet* descendants =
      kernel_->parent_child_index.GetChildren(kernel->ref(ID));
  if (!descendants)
    return;  // This directory has no children.

  // Add our children to the list of items to be traversed.
  child_sets->push_back(descendants);
}

EntryKernel* Directory::GetRootEntry() {
  return GetEntryById(Id());
}

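// Registers a freshly created |entry| in every in-memory index.  The entry
// must not yet carry a client or server tag.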
bool Directory::InsertEntry(WriteTransaction* trans, EntryKernel* entry) {
  ScopedKernelLock lock(this);
  return InsertEntry(trans, entry, &lock);
}

bool Directory::InsertEntry(WriteTransaction* trans,
                            EntryKernel* entry,
                            ScopedKernelLock* lock) {
  DCHECK(NULL != lock);
  if (!SyncAssert(NULL != entry, FROM_HERE, "Entry is null", trans))
    return false;

  static const char error[] = "Entry already in memory index.";

  if (!SyncAssert(
          kernel_->metahandles_map.insert(
              std::make_pair(entry->ref(META_HANDLE), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (!SyncAssert(
          kernel_->ids_map.insert(
              std::make_pair(entry->ref(ID).value(), entry)).second,
          FROM_HERE,
          error,
          trans)) {
    return false;
  }
  if (ParentChildIndex::ShouldInclude(entry)) {
    if (!SyncAssert(kernel_->parent_child_index.Insert(entry),
                    FROM_HERE,
                    error,
                    trans)) {
      return false;
    }
  }

  // Should NEVER be created with a client tag or server tag.
  if (!SyncAssert(entry->ref(UNIQUE_SERVER_TAG).empty(), FROM_HERE,
                  "Server tag should be empty", trans)) {
    return false;
  }
  if (!SyncAssert(entry->ref(UNIQUE_CLIENT_TAG).empty(), FROM_HERE,
                  "Client tag should be empty", trans))
    return false;

  return true;
}

bool Directory::ReindexId(WriteTransaction* trans,
                          EntryKernel* const entry,
                          const Id& new_id) {
  ScopedKernelLock lock(this);
  if (NULL != GetEntryById(new_id, &lock))
    return false;

  {
    // Update the indices that depend on the ID field.
    ScopedParentChildIndexUpdater updater(lock, entry,
        &kernel_->parent_child_index);
    size_t num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
    DCHECK_EQ(1U, num_erased);
    entry->put(ID, new_id);
    kernel_->ids_map[entry->ref(ID).value()] = entry;
  }
  return true;
}

bool Directory::ReindexParentId(WriteTransaction* trans,
                                EntryKernel* const entry,
                                const Id& new_parent_id) {
  ScopedKernelLock lock(this);

  {
    // Update the indices that depend on the PARENT_ID field.
    ScopedParentChildIndexUpdater index_updater(lock, entry,
        &kernel_->parent_child_index);
    entry->put(PARENT_ID, new_parent_id);
  }
  return true;
}

bool Directory::unrecoverable_error_set(const BaseTransaction* trans) const {
  DCHECK(trans != NULL);
  return unrecoverable_error_set_;
}

void Directory::ClearDirtyMetahandles() {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->dirty_metahandles.clear();
}

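// An entry may be dropped from memory only once it is deleted, clean, and
// fully synced (neither an unsynced local change nor an unapplied server
// update).  The asserts double-check that no index still references it.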
bool Directory::SafeToPurgeFromMemory(WriteTransaction* trans,
                                      const EntryKernel* const entry) const {
  bool safe = entry->ref(IS_DEL) && !entry->is_dirty() &&
      !entry->ref(SYNCING) && !entry->ref(IS_UNAPPLIED_UPDATE) &&
      !entry->ref(IS_UNSYNCED);

  if (safe) {
    int64 handle = entry->ref(META_HANDLE);
    const ModelType type = entry->GetServerModelType();
    if (!SyncAssert(kernel_->dirty_metahandles.count(handle) == 0U,
                    FROM_HERE,
                    "Dirty metahandles should be empty", trans))
      return false;
    // TODO(tim): Bug 49278.
    if (!SyncAssert(!kernel_->unsynced_metahandles.count(handle),
                    FROM_HERE,
                    "Unsynced handles should be empty",
                    trans))
      return false;
    if (!SyncAssert(!kernel_->unapplied_update_metahandles[type].count(handle),
                    FROM_HERE,
                    "Unapplied metahandles should be empty",
                    trans))
      return false;
  }

  return safe;
}

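// Copies everything that needs to be written to disk into |snapshot|:
// deep copies of the dirty entries (whose dirty bits are optimistically
// cleared here and restored by HandleSaveChangesFailure() on error), the
// handles to purge, the persisted share info, and the delete journals.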
void Directory::TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot) {
  ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  // If there is an unrecoverable error then just bail out.
  if (unrecoverable_error_set(&trans))
    return;

  // Deep copy dirty entries from kernel_->metahandles_map into snapshot and
  // clear dirty flags.
  for (MetahandleSet::const_iterator i = kernel_->dirty_metahandles.begin();
       i != kernel_->dirty_metahandles.end(); ++i) {
    EntryKernel* entry = GetEntryByHandle(*i, &lock);
    if (!entry)
      continue;
    // Skip over false positives; it happens relatively infrequently.
    if (!entry->is_dirty())
      continue;
    snapshot->dirty_metas.insert(snapshot->dirty_metas.end(),
                                 new EntryKernel(*entry));
    DCHECK_EQ(1U, kernel_->dirty_metahandles.count(*i));
    // We don't bother removing from the index here as we blow the entire thing
    // in a moment, and it unnecessarily complicates iteration.
    entry->clear_dirty(NULL);
  }
  ClearDirtyMetahandles();

  // Set purged handles.
  DCHECK(snapshot->metahandles_to_purge.empty());
  snapshot->metahandles_to_purge.swap(kernel_->metahandles_to_purge);

  // Fill kernel_info_status and kernel_info.
  snapshot->kernel_info = kernel_->persisted_info;
  // To avoid duplicates when the process crashes, we record a next_id of
  // greater magnitude than could possibly be reached before the next save
  // changes.  In other words, it's effectively impossible for the user to
  // generate 65536 new bookmarks in 3 seconds.
  snapshot->kernel_info.next_id -= 65536;
  snapshot->kernel_info_status = kernel_->info_status;
  // This one we reset on failure.
  kernel_->info_status = KERNEL_SHARE_INFO_VALID;

  delete_journal_->TakeSnapshotAndClear(
      &trans, &snapshot->delete_journals, &snapshot->delete_journals_to_purge);
}

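// Persists all dirty state in two phases: snapshot under the kernel lock,
// then hand the snapshot to the backing store.  On success, purgeable
// entries are vacuumed from memory; on failure, the dirty state is
// restored so a later SaveChanges() will retry.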
bool Directory::SaveChanges() {
  bool success = false;

  base::AutoLock scoped_lock(kernel_->save_changes_mutex);

  // Snapshot and save.
  SaveChangesSnapshot snapshot;
  TakeSnapshotForSaveChanges(&snapshot);
  success = store_->SaveChanges(snapshot);

  // Handle success or failure.
  if (success)
    success = VacuumAfterSaveChanges(snapshot);
  else
    HandleSaveChangesFailure(snapshot);
  return success;
}

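// After a successful save, drops from memory every snapshotted entry that
// SafeToPurgeFromMemory() approves: deleted entries that are clean on both
// the client and the server.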
bool Directory::VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot) {
  if (snapshot.dirty_metas.empty())
    return true;

  // Need a write transaction as we are about to permanently purge entries.
  WriteTransaction trans(FROM_HERE, VACUUM_AFTER_SAVE, this);
  ScopedKernelLock lock(this);
  // Now drop everything we can out of memory.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    EntryKernel* entry = (found == kernel_->metahandles_map.end() ?
                          NULL : found->second);
    if (entry && SafeToPurgeFromMemory(&trans, entry)) {
      // We now drop deleted metahandles that are up to date on both the client
      // and the server.
      size_t num_erased = 0;
      num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
      DCHECK_EQ(1u, num_erased);
      num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
      DCHECK_EQ(1u, num_erased);
      if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
        num_erased =
            kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
        num_erased =
            kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
        DCHECK_EQ(1u, num_erased);
      }
      if (!SyncAssert(!kernel_->parent_child_index.Contains(entry),
                      FROM_HERE,
                      "Deleted entry still present",
                      (&trans)))
        return false;
      delete entry;
    }
    if (trans.unrecoverable_error_set())
      return false;
  }
  return true;
}

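// Reverts |entry| to an "unapplied server update" state, so that the next
// sync cycle re-downloads and re-applies its server data, discarding any
// local changes.  Root nodes are left alone to preserve the type's
// initial-sync-ended marker.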
void Directory::UnapplyEntry(EntryKernel* entry) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  // Clear enough so that on the next sync cycle all local data will
  // be overwritten.
  // Note: do not modify the root node in order to preserve the
  // initial sync ended bit for this type (else on the next restart
  // this type will be treated as disabled and therefore fully purged).
  if (IsRealDataType(server_type) &&
      ModelTypeToRootTag(server_type) == entry->ref(UNIQUE_SERVER_TAG)) {
    return;
  }

  // Set the unapplied bit if this item has server data.
  if (IsRealDataType(server_type) && !entry->ref(IS_UNAPPLIED_UPDATE)) {
    entry->put(IS_UNAPPLIED_UPDATE, true);
    kernel_->unapplied_update_metahandles[server_type].insert(handle);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Unset the unsynced bit.
  if (entry->ref(IS_UNSYNCED)) {
    kernel_->unsynced_metahandles.erase(handle);
    entry->put(IS_UNSYNCED, false);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Mark the item as locally deleted. No deleted items are allowed in the
  // parent child index.
  if (!entry->ref(IS_DEL)) {
    kernel_->parent_child_index.Remove(entry);
    entry->put(IS_DEL, true);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // Set the version to the "newly created" version.
  if (entry->ref(BASE_VERSION) != CHANGES_VERSION) {
    entry->put(BASE_VERSION, CHANGES_VERSION);
    entry->mark_dirty(&kernel_->dirty_metahandles);
  }

  // At this point locally created items that aren't synced will become locally
  // deleted items, and purged on the next snapshot. All other items will match
  // the state they would have had if they were just created via a server
  // update. See MutableEntry::MutableEntry(.., CreateNewUpdateItem, ..).
}

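// Unlinks |entry| from every in-memory index and schedules its handle for
// purge from disk.  Ownership of the kernel passes to
// |entries_to_journal| when |save_to_journal| is set; otherwise it is
// freed immediately.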
void Directory::DeleteEntry(bool save_to_journal,
                            EntryKernel* entry,
                            EntryKernelSet* entries_to_journal) {
  int64 handle = entry->ref(META_HANDLE);
  ModelType server_type = GetModelTypeFromSpecifics(
      entry->ref(SERVER_SPECIFICS));

  kernel_->metahandles_to_purge.insert(handle);

  size_t num_erased = 0;
  num_erased = kernel_->metahandles_map.erase(entry->ref(META_HANDLE));
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->ids_map.erase(entry->ref(ID).value());
  DCHECK_EQ(1u, num_erased);
  num_erased = kernel_->unsynced_metahandles.erase(handle);
  DCHECK_EQ(entry->ref(IS_UNSYNCED), num_erased > 0);
  num_erased =
      kernel_->unapplied_update_metahandles[server_type].erase(handle);
  DCHECK_EQ(entry->ref(IS_UNAPPLIED_UPDATE), num_erased > 0);
  if (kernel_->parent_child_index.Contains(entry))
    kernel_->parent_child_index.Remove(entry);

  if (!entry->ref(UNIQUE_CLIENT_TAG).empty()) {
    num_erased =
        kernel_->client_tags_map.erase(entry->ref(UNIQUE_CLIENT_TAG));
    DCHECK_EQ(1u, num_erased);
  }
  if (!entry->ref(UNIQUE_SERVER_TAG).empty()) {
    num_erased =
        kernel_->server_tags_map.erase(entry->ref(UNIQUE_SERVER_TAG));
    DCHECK_EQ(1u, num_erased);
  }

  if (save_to_journal) {
    entries_to_journal->insert(entry);
  } else {
    delete entry;
  }
}

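// Purges all entries belonging to the now-disabled |disabled_types|.
// Types in |types_to_unapply| are unapplied rather than deleted (their
// progress markers are kept), and deletions of types in |types_to_journal|
// are recorded in the delete journal before the entries are freed.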
bool Directory::PurgeEntriesWithTypeIn(ModelTypeSet disabled_types,
                                       ModelTypeSet types_to_journal,
                                       ModelTypeSet types_to_unapply) {
  disabled_types.RemoveAll(ProxyTypes());

  if (disabled_types.Empty())
    return true;

  {
    WriteTransaction trans(FROM_HERE, PURGE_ENTRIES, this);

    EntryKernelSet entries_to_journal;
    STLElementDeleter<EntryKernelSet> journal_deleter(&entries_to_journal);

    {
      ScopedKernelLock lock(this);

      // We iterate in two passes to avoid a bug in STLport (which is used in
      // the Android build).  There are some versions of that library where a
      // hash_map's iterators can be invalidated when an item is erased from the
      // hash_map.
      // See http://sourceforge.net/p/stlport/bugs/239/.

      std::set<EntryKernel*> to_purge;
      for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
           it != kernel_->metahandles_map.end(); ++it) {
        const sync_pb::EntitySpecifics& local_specifics =
            it->second->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            it->second->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if ((IsRealDataType(local_type) && disabled_types.Has(local_type)) ||
            (IsRealDataType(server_type) && disabled_types.Has(server_type))) {
          to_purge.insert(it->second);
        }
      }

      for (std::set<EntryKernel*>::iterator it = to_purge.begin();
           it != to_purge.end(); ++it) {
        EntryKernel* entry = *it;

        const sync_pb::EntitySpecifics& local_specifics =
            (*it)->ref(SPECIFICS);
        const sync_pb::EntitySpecifics& server_specifics =
            (*it)->ref(SERVER_SPECIFICS);
        ModelType local_type = GetModelTypeFromSpecifics(local_specifics);
        ModelType server_type = GetModelTypeFromSpecifics(server_specifics);

        if (types_to_unapply.Has(local_type) ||
            types_to_unapply.Has(server_type)) {
          UnapplyEntry(entry);
        } else {
          bool save_to_journal =
              (types_to_journal.Has(local_type) ||
               types_to_journal.Has(server_type)) &&
              (delete_journal_->IsDeleteJournalEnabled(local_type) ||
               delete_journal_->IsDeleteJournalEnabled(server_type));
          DeleteEntry(save_to_journal, entry, &entries_to_journal);
        }
      }

      delete_journal_->AddJournalBatch(&trans, entries_to_journal);

      // Ensure meta tracking for these data types reflects the purged state.
      for (ModelTypeSet::Iterator it = disabled_types.First();
           it.Good(); it.Inc()) {
        kernel_->persisted_info.transaction_version[it.Get()] = 0;

        // Don't discard progress markers for unapplied types.
        if (!types_to_unapply.Has(it.Get()))
          kernel_->persisted_info.reset_download_progress(it.Get());
      }
    }
  }
  return true;
}

void Directory::HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot) {
  WriteTransaction trans(FROM_HERE, HANDLE_SAVE_FAILURE, this);
  ScopedKernelLock lock(this);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;

  // Because we optimistically cleared the dirty bit on the real entries when
  // taking the snapshot, we must restore it on failure.  Not doing this could
  // cause lost data, if no other changes are made to the in-memory entries
  // that would cause the dirty bit to get set again.  Setting the bit ensures
  // that SaveChanges will at least try again later.
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    MetahandlesMap::iterator found =
        kernel_->metahandles_map.find((*i)->ref(META_HANDLE));
    if (found != kernel_->metahandles_map.end()) {
      found->second->mark_dirty(&kernel_->dirty_metahandles);
    }
  }

  kernel_->metahandles_to_purge.insert(snapshot.metahandles_to_purge.begin(),
                                       snapshot.metahandles_to_purge.end());

  // Restore delete journals.
  delete_journal_->AddJournalBatch(&trans, snapshot.delete_journals);
  delete_journal_->PurgeDeleteJournals(&trans,
                                       snapshot.delete_journals_to_purge);
}

void Directory::GetDownloadProgress(
    ModelType model_type,
    sync_pb::DataTypeProgressMarker* value_out) const {
  ScopedKernelLock lock(this);
  return value_out->CopyFrom(
      kernel_->persisted_info.download_progress[model_type]);
}

void Directory::GetDownloadProgressAsString(
    ModelType model_type,
    std::string* value_out) const {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].SerializeToString(
      value_out);
}

size_t Directory::GetEntriesCount() const {
  ScopedKernelLock lock(this);
  return kernel_->metahandles_map.size();
}

void Directory::SetDownloadProgress(
    ModelType model_type,
    const sync_pb::DataTypeProgressMarker& new_progress) {
  ScopedKernelLock lock(this);
  kernel_->persisted_info.download_progress[model_type].CopyFrom(new_progress);
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

int64 Directory::GetTransactionVersion(ModelType type) const {
  kernel_->transaction_mutex.AssertAcquired();
  return kernel_->persisted_info.transaction_version[type];
}

void Directory::IncrementTransactionVersion(ModelType type) {
  kernel_->transaction_mutex.AssertAcquired();
  kernel_->persisted_info.transaction_version[type]++;
}

ModelTypeSet Directory::InitialSyncEndedTypes() {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ModelTypeSet protocol_types = ProtocolTypes();
  ModelTypeSet initial_sync_ended_types;
  for (ModelTypeSet::Iterator i = protocol_types.First(); i.Good(); i.Inc()) {
    if (InitialSyncEndedForType(&trans, i.Get())) {
      initial_sync_ended_types.Put(i.Get());
    }
  }
  return initial_sync_ended_types;
}

bool Directory::InitialSyncEndedForType(ModelType type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  return InitialSyncEndedForType(&trans, type);
}

bool Directory::InitialSyncEndedForType(
    BaseTransaction* trans, ModelType type) {
  // True iff the type's root node has been received and applied.
  syncable::Entry entry(trans,
                        syncable::GET_BY_SERVER_TAG,
                        ModelTypeToRootTag(type));
  return entry.good() && entry.Get(syncable::BASE_VERSION) != CHANGES_VERSION;
}

string Directory::store_birthday() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.store_birthday;
}

void Directory::set_store_birthday(const string& store_birthday) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.store_birthday == store_birthday)
    return;
  kernel_->persisted_info.store_birthday = store_birthday;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::bag_of_chips() const {
  ScopedKernelLock lock(this);
  return kernel_->persisted_info.bag_of_chips;
}

void Directory::set_bag_of_chips(const string& bag_of_chips) {
  ScopedKernelLock lock(this);
  if (kernel_->persisted_info.bag_of_chips == bag_of_chips)
    return;
  kernel_->persisted_info.bag_of_chips = bag_of_chips;
  kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
}

string Directory::cache_guid() const {
  // No need to lock since nothing ever writes to it after load.
  return kernel_->cache_guid;
}

NigoriHandler* Directory::GetNigoriHandler() {
  return nigori_handler_;
}

Cryptographer* Directory::GetCryptographer(const BaseTransaction* trans) {
  DCHECK_EQ(this, trans->directory());
  return cryptographer_;
}

void Directory::GetAllMetaHandles(BaseTransaction* trans,
                                  MetahandleSet* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->insert(i->first);
  }
}

void Directory::GetAllEntryKernels(BaseTransaction* trans,
                                   std::vector<const EntryKernel*>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (MetahandlesMap::iterator i = kernel_->metahandles_map.begin();
       i != kernel_->metahandles_map.end(); ++i) {
    result->push_back(i->second);
  }
}

void Directory::GetUnsyncedMetaHandles(BaseTransaction* trans,
                                       Metahandles* result) {
  result->clear();
  ScopedKernelLock lock(this);
  copy(kernel_->unsynced_metahandles.begin(),
       kernel_->unsynced_metahandles.end(), back_inserter(*result));
}

int64 Directory::unsynced_entity_count() const {
  ScopedKernelLock lock(this);
  return kernel_->unsynced_metahandles.size();
}

FullModelTypeSet Directory::GetServerTypesWithUnappliedUpdates(
    BaseTransaction* trans) const {
  FullModelTypeSet server_types;
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (!kernel_->unapplied_update_metahandles[type].empty()) {
      server_types.Put(type);
    }
  }
  return server_types;
}

void Directory::GetUnappliedUpdateMetaHandles(
    BaseTransaction* trans,
    FullModelTypeSet server_types,
    std::vector<int64>* result) {
  result->clear();
  ScopedKernelLock lock(this);
  for (int i = UNSPECIFIED; i < MODEL_TYPE_COUNT; ++i) {
    const ModelType type = ModelTypeFromInt(i);
    if (server_types.Has(type)) {
      std::copy(kernel_->unapplied_update_metahandles[type].begin(),
                kernel_->unapplied_update_metahandles[type].end(),
                back_inserter(*result));
    }
  }
}

void Directory::CollectMetaHandleCounts(
    std::vector<int>* num_entries_by_type,
    std::vector<int>* num_to_delete_entries_by_type) {
  syncable::ReadTransaction trans(FROM_HERE, this);
  ScopedKernelLock lock(this);

  for (MetahandlesMap::iterator it = kernel_->metahandles_map.begin();
       it != kernel_->metahandles_map.end(); ++it) {
    EntryKernel* entry = it->second;
    const ModelType type = GetModelTypeFromSpecifics(entry->ref(SPECIFICS));
    (*num_entries_by_type)[type]++;
    if (entry->ref(IS_DEL))
      (*num_to_delete_entries_by_type)[type]++;
  }
}

bool Directory::CheckInvariantsOnTransactionClose(
    syncable::BaseTransaction* trans,
    const EntryKernelMutationMap& mutations) {
  // NOTE: The trans may be in the process of being destructed.  Be careful if
  // you wish to call any of its virtual methods.
  MetahandleSet handles;

  switch (invariant_check_level_) {
    case FULL_DB_VERIFICATION:
      GetAllMetaHandles(trans, &handles);
      break;
    case VERIFY_CHANGES:
      for (EntryKernelMutationMap::const_iterator i = mutations.begin();
           i != mutations.end(); ++i) {
        handles.insert(i->first);
      }
      break;
    case OFF:
      break;
  }

  return CheckTreeInvariants(trans, handles);
}

bool Directory::FullyCheckTreeInvariants(syncable::BaseTransaction* trans) {
  MetahandleSet handles;
  GetAllMetaHandles(trans, &handles);
  return CheckTreeInvariants(trans, handles);
}

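// Validates the tree structure for each handle in |handles|: the root is a
// directory with no unsynced state, live entries have sane parent chains,
// and version/ID/tag combinations are mutually consistent.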
bool Directory::CheckTreeInvariants(syncable::BaseTransaction* trans,
                                    const MetahandleSet& handles) {
  MetahandleSet::const_iterator i;
  for (i = handles.begin(); i != handles.end(); ++i) {
    int64 metahandle = *i;
    Entry e(trans, GET_BY_HANDLE, metahandle);
    if (!SyncAssert(e.good(), FROM_HERE, "Entry is bad", trans))
      return false;
    syncable::Id id = e.Get(ID);
    syncable::Id parentid = e.Get(PARENT_ID);

    if (id.IsRoot()) {
      if (!SyncAssert(e.Get(IS_DIR), FROM_HERE,
                      "Entry should be a directory",
                      trans))
        return false;
      if (!SyncAssert(parentid.IsRoot(), FROM_HERE,
                      "Entry should be root",
                      trans))
        return false;
      if (!SyncAssert(!e.Get(IS_UNSYNCED), FROM_HERE,
                      "Entry should be synced",
                      trans))
        return false;
      continue;
    }

    if (!e.Get(IS_DEL)) {
      if (!SyncAssert(id != parentid, FROM_HERE,
                      "Id should be different from parent id.",
                      trans))
        return false;
      if (!SyncAssert(!e.Get(NON_UNIQUE_NAME).empty(), FROM_HERE,
                      "Non-unique name should not be empty.",
                      trans))
        return false;
      int safety_count = handles.size() + 1;
      while (!parentid.IsRoot()) {
        Entry parent(trans, GET_BY_ID, parentid);
        if (!SyncAssert(parent.good(), FROM_HERE,
                        "Parent entry is not valid.",
                        trans))
          return false;
        if (handles.end() == handles.find(parent.Get(META_HANDLE)))
          break;  // Skip further checking if parent was unmodified.
        if (!SyncAssert(parent.Get(IS_DIR), FROM_HERE,
                        "Parent should be a directory",
                        trans))
          return false;
        if (!SyncAssert(!parent.Get(IS_DEL), FROM_HERE,
                        "Parent should not have been marked for deletion.",
                        trans))
          return false;
        if (!SyncAssert(handles.end() != handles.find(parent.Get(META_HANDLE)),
                        FROM_HERE,
                        "Parent should be in the index.",
                        trans))
          return false;
        parentid = parent.Get(PARENT_ID);
        if (!SyncAssert(--safety_count > 0, FROM_HERE,
                        "Count should be greater than zero.",
                        trans))
          return false;
      }
    }
    int64 base_version = e.Get(BASE_VERSION);
    int64 server_version = e.Get(SERVER_VERSION);
    bool using_unique_client_tag = !e.Get(UNIQUE_CLIENT_TAG).empty();
    if (CHANGES_VERSION == base_version || 0 == base_version) {
      if (e.Get(IS_UNAPPLIED_UPDATE)) {
        // Must be a new item, or a de-duplicated unique client tag
        // that was created both locally and remotely.
        if (!using_unique_client_tag) {
          if (!SyncAssert(e.Get(IS_DEL), FROM_HERE,
                          "The entry should have been deleted.",
                          trans))
            return false;
        }
        // It came from the server, so it must have a server ID.
        if (!SyncAssert(id.ServerKnows(), FROM_HERE,
                        "The id should be from a server.",
                        trans))
          return false;
      } else {
        if (e.Get(IS_DIR)) {
          // TODO(chron): Implement this mode if clients ever need it.
          // For now, you can't combine a client tag and a directory.
          if (!SyncAssert(!using_unique_client_tag, FROM_HERE,
                          "Directory cannot have a client tag.",
                          trans))
            return false;
        }
        // Should be an uncommitted item, or a successfully deleted one.
        if (!e.Get(IS_DEL)) {
          if (!SyncAssert(e.Get(IS_UNSYNCED), FROM_HERE,
                          "The item should be unsynced.",
                          trans))
            return false;
        }
        // If the next check failed, it would imply that an item exists
        // on the server, isn't waiting for application locally, but either
        // is an unsynced create or a successful delete in the local copy.
        // Either way, that's a mismatch.
        if (!SyncAssert(0 == server_version, FROM_HERE,
                        "Server version should be zero.",
                        trans))
          return false;
        // Items that aren't using the unique client tag should have a zero
        // base version only if they have a local ID.  Items with unique client
        // tags are allowed to use the zero base version for undeletion and
        // de-duplication; the unique client tag trumps the server ID.
        if (!using_unique_client_tag) {
          if (!SyncAssert(!id.ServerKnows(), FROM_HERE,
                          "Should be a client only id.",
                          trans))
            return false;
        }
      }
    } else {
      if (!SyncAssert(id.ServerKnows(),
                      FROM_HERE,
                      "Should be a server id.",
                      trans))
        return false;
    }
    // Server-unknown items that are locally deleted should not be sent up to
    // the server.  They must be !IS_UNSYNCED.
    if (!SyncAssert(!(!id.ServerKnows() &&
                      e.Get(IS_DEL) &&
                      e.Get(IS_UNSYNCED)), FROM_HERE,
                    "Locally deleted item must not be unsynced.",
                    trans)) {
      return false;
    }
  }
  return true;
}

void Directory::SetInvariantCheckLevel(InvariantCheckLevel check_level) {
  invariant_check_level_ = check_level;
}

int64 Directory::NextMetahandle() {
  ScopedKernelLock lock(this);
  int64 metahandle = (kernel_->next_metahandle)++;
  return metahandle;
}

// Always returns a client ID that is the string representation of a negative
// number.
Id Directory::NextId() {
  int64 result;
  {
    ScopedKernelLock lock(this);
    result = (kernel_->persisted_info.next_id)--;
    kernel_->info_status = KERNEL_SHARE_INFO_DIRTY;
  }
  DCHECK_LT(result, 0);
  return Id::CreateFromClientString(base::Int64ToString(result));
}

bool Directory::HasChildren(BaseTransaction* trans, const Id& id) {
  ScopedKernelLock lock(this);
  return kernel_->parent_child_index.GetChildren(id) != NULL;
}

Id Directory::GetFirstChildId(BaseTransaction* trans,
                              const EntryKernel* parent) {
  DCHECK(parent);
  DCHECK(parent->ref(IS_DIR));

  ScopedKernelLock lock(this);
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent->ref(ID));

  // We're expected to return root if there are no children.
  if (!children)
    return Id();

  return (*children->begin())->ref(ID);
}

syncable::Id Directory::GetPredecessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  if (i == children->begin()) {
    return Id();
  } else {
    i--;
    return (*i)->ref(ID);
  }
}

syncable::Id Directory::GetSuccessorId(EntryKernel* e) {
  ScopedKernelLock lock(this);

  DCHECK(ParentChildIndex::ShouldInclude(e));
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));
  DCHECK(children && !children->empty());
  OrderedChildSet::const_iterator i = children->find(e);
  DCHECK(i != children->end());

  i++;
  if (i == children->end()) {
    return Id();
  } else {
    return (*i)->ref(ID);
  }
}

// TODO(rlarocque): Remove all support for placing ShouldMaintainPosition()
// items as siblings of items that do not maintain positions.  It is required
// only for tests.  See crbug.com/178282.
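// Assigns |e|'s UNIQUE_POSITION so that it sorts immediately after
// |predecessor| among its siblings.  A predecessor with the root ID means
// "insert at the front".  Non-positionable items are left untouched.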
void Directory::PutPredecessor(EntryKernel* e, EntryKernel* predecessor) {
  DCHECK(!e->ref(IS_DEL));
  if (!e->ShouldMaintainPosition()) {
    DCHECK(!e->ref(UNIQUE_POSITION).IsValid());
    return;
  }
  std::string suffix = e->ref(UNIQUE_BOOKMARK_TAG);
  DCHECK(!suffix.empty());

  // Remove our item from the ParentChildIndex and remember to re-add it later.
  ScopedKernelLock lock(this);
  ScopedParentChildIndexUpdater updater(lock, e, &kernel_->parent_child_index);

  // Note: The ScopedParentChildIndexUpdater will update this set for us as we
  // leave this function.
  const OrderedChildSet* siblings =
      kernel_->parent_child_index.GetChildren(e->ref(PARENT_ID));

  if (!siblings) {
    // This parent currently has no other children.
    DCHECK(predecessor->ref(ID).IsRoot());
    UniquePosition pos = UniquePosition::InitialPosition(suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  if (predecessor->ref(ID).IsRoot()) {
    // We have at least one sibling, and we're inserting to the left of them.
    UniquePosition successor_pos = (*siblings->begin())->ref(UNIQUE_POSITION);

    UniquePosition pos;
    if (!successor_pos.IsValid()) {
      // If all our successors are of non-positionable types, just create an
      // initial position.  We arbitrarily choose to sort invalid positions to
      // the right of the valid positions.
      //
      // We really shouldn't need to support this.  See TODO above.
      pos = UniquePosition::InitialPosition(suffix);
    } else {
      DCHECK(!siblings->empty());
      pos = UniquePosition::Before(successor_pos, suffix);
    }

    e->put(UNIQUE_POSITION, pos);
    return;
  }

  // We can't support placing an item after an invalid position.  Fortunately,
  // the tests don't exercise this particular case.  We should not support
  // siblings with invalid positions at all.  See TODO above.
  DCHECK(predecessor->ref(UNIQUE_POSITION).IsValid());

  OrderedChildSet::const_iterator neighbour = siblings->find(predecessor);
  DCHECK(neighbour != siblings->end());

  ++neighbour;
  if (neighbour == siblings->end()) {
    // Inserting at the end of the list.
    UniquePosition pos = UniquePosition::After(
        predecessor->ref(UNIQUE_POSITION),
        suffix);
    e->put(UNIQUE_POSITION, pos);
    return;
  }

  EntryKernel* successor = *neighbour;

  // Another mixed valid and invalid position case.  This one could be
  // supported in theory, but we're trying to deprecate support for siblings
  // with and without valid positions.  See TODO above.
  DCHECK(successor->ref(UNIQUE_POSITION).IsValid());

  // Finally, the normal case: inserting between two elements.
  UniquePosition pos = UniquePosition::Between(
      predecessor->ref(UNIQUE_POSITION),
      successor->ref(UNIQUE_POSITION),
      suffix);
  e->put(UNIQUE_POSITION, pos);
  return;
}

// TODO(rlarocque): Avoid this indirection.  Just return the set.
void Directory::AppendChildHandles(const ScopedKernelLock& lock,
                                   const Id& parent_id,
                                   Directory::Metahandles* result) {
  const OrderedChildSet* children =
      kernel_->parent_child_index.GetChildren(parent_id);
  if (!children)
    return;

  for (OrderedChildSet::const_iterator i = children->begin();
       i != children->end(); ++i) {
    DCHECK_EQ(parent_id, (*i)->ref(PARENT_ID));
    result->push_back((*i)->ref(META_HANDLE));
  }
}

}  // namespace syncable
}  // namespace syncer