mutable_entry.cc revision c2e0dbddbe15c98d52c4786dac06cb8952a8ae6d
1// Copyright 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "sync/syncable/mutable_entry.h"
6
7#include "base/memory/scoped_ptr.h"
8#include "sync/internal_api/public/base/unique_position.h"
9#include "sync/syncable/directory.h"
10#include "sync/syncable/scoped_index_updater.h"
11#include "sync/syncable/scoped_kernel_lock.h"
12#include "sync/syncable/scoped_parent_child_index_updater.h"
13#include "sync/syncable/syncable-inl.h"
14#include "sync/syncable/syncable_changes_version.h"
15#include "sync/syncable/syncable_util.h"
16#include "sync/syncable/syncable_write_transaction.h"
17
18using std::string;
19
20namespace syncer {
21namespace syncable {
22
// Builds the kernel for a brand-new local entry: assigns a fresh local ID
// and metahandle, fills in parent/name/timestamps and database defaults,
// and finally transfers ownership of the kernel to kernel_.
void MutableEntry::Init(WriteTransaction* trans,
                        ModelType model_type,
                        const Id& parent_id,
                        const string& name) {
  scoped_ptr<EntryKernel> kernel(new EntryKernel);
  kernel_ = NULL;  // Stays NULL until the kernel is fully initialized.

  kernel->put(ID, trans->directory_->NextId());
  kernel->put(META_HANDLE, trans->directory_->NextMetahandle());
  kernel->mark_dirty(trans->directory_->kernel_->dirty_metahandles);
  kernel->put(PARENT_ID, parent_id);
  kernel->put(NON_UNIQUE_NAME, name);
  // CTIME and MTIME start out identical for a freshly created item.
  const base::Time& now = base::Time::Now();
  kernel->put(CTIME, now);
  kernel->put(MTIME, now);
  // We match the database defaults here
  kernel->put(BASE_VERSION, CHANGES_VERSION);

  // Normally the SPECIFICS setting code is wrapped in logic to deal with
  // unknown fields and encryption.  Since all we want to do here is ensure that
  // GetModelType() returns a correct value from the very beginning, these
  // few lines are sufficient.
  sync_pb::EntitySpecifics specifics;
  AddDefaultFieldValue(model_type, &specifics);
  kernel->put(SPECIFICS, specifics);

  // Because this entry is new, it was originally deleted.
  // Snapshot the pre-transaction image with IS_DEL set, so the saved
  // "original" records the entry as not existing, then flip it live.
  kernel->put(IS_DEL, true);
  trans->SaveOriginal(kernel.get());
  kernel->put(IS_DEL, false);

  // Now swap the pointers.
  kernel_ = kernel.release();
}
57
// Creates a new entry of |model_type| under |parent_id| and inserts it
// into the directory.  Bookmarks additionally receive a unique tag and
// an initial unique position before insertion, because the item must
// have a valid position by the time it is indexed.
MutableEntry::MutableEntry(WriteTransaction* trans,
                           Create,
                           ModelType model_type,
                           const Id& parent_id,
                           const string& name)
    : Entry(trans),
      write_transaction_(trans) {
  Init(trans, model_type, parent_id, name);
  // We need to have a valid position ready before we can index the item.
  if (model_type == BOOKMARKS) {
    // Base the tag off of our cache-guid and local "c-" style ID.
    std::string unique_tag = syncable::GenerateSyncableBookmarkHash(
        trans->directory()->cache_guid(), Get(ID).GetServerId());
    kernel_->put(UNIQUE_BOOKMARK_TAG, unique_tag);
    kernel_->put(UNIQUE_POSITION, UniquePosition::InitialPosition(unique_tag));
  } else {
    // Non-bookmark types are expected not to be positioned at all.
    DCHECK(!ShouldMaintainPosition());
  }

  bool result = trans->directory()->InsertEntry(trans, kernel_);
  DCHECK(result);
}
80
// Creates a placeholder entry for an incoming update with server ID |id|.
// If an entry with that ID already exists, or directory insertion fails,
// kernel_ is left NULL and good() will report failure.
MutableEntry::MutableEntry(WriteTransaction* trans, CreateNewUpdateItem,
                           const Id& id)
    : Entry(trans), write_transaction_(trans) {
  Entry same_id(trans, GET_BY_ID, id);
  kernel_ = NULL;
  if (same_id.good()) {
    return;  // already have an item with this ID.
  }
  scoped_ptr<EntryKernel> kernel(new EntryKernel());

  kernel->put(ID, id);
  kernel->put(META_HANDLE, trans->directory_->NextMetahandle());
  kernel->mark_dirty(trans->directory_->kernel_->dirty_metahandles);
  // Like all new entries, this one starts out deleted.
  kernel->put(IS_DEL, true);
  // We match the database defaults here
  kernel->put(BASE_VERSION, CHANGES_VERSION);
  if (!trans->directory()->InsertEntry(trans, kernel.get())) {
    return;  // Failed inserting.
  }
  trans->SaveOriginal(kernel.get());

  kernel_ = kernel.release();
}
104
// Looks up an existing entry by ID for mutation; callers must check
// good() before using the result.
MutableEntry::MutableEntry(WriteTransaction* trans, GetById, const Id& id)
    : Entry(trans, GET_BY_ID, id), write_transaction_(trans) {
}
108
// Looks up an existing entry by metahandle for mutation; callers must
// check good() before using the result.
MutableEntry::MutableEntry(WriteTransaction* trans, GetByHandle,
                           int64 metahandle)
    : Entry(trans, GET_BY_HANDLE, metahandle), write_transaction_(trans) {
}
113
// Looks up an existing entry by its unique client tag for mutation;
// callers must check good() before using the result.
MutableEntry::MutableEntry(WriteTransaction* trans, GetByClientTag,
                           const std::string& tag)
    : Entry(trans, GET_BY_CLIENT_TAG, tag), write_transaction_(trans) {
}
118
// Looks up an existing entry by its unique server tag for mutation;
// callers must check good() before using the result.
MutableEntry::MutableEntry(WriteTransaction* trans, GetByServerTag,
                           const string& tag)
    : Entry(trans, GET_BY_SERVER_TAG, tag), write_transaction_(trans) {
}
123
// Sets or clears IS_DEL.  Keeps the parent-child index consistent (some
// indices exclude deleted items) and, for items the server never knew
// about, clears IS_UNSYNCED so the tombstone can be dropped entirely.
// Always returns true.
bool MutableEntry::PutIsDel(bool is_del) {
  DCHECK(kernel_);
  write_transaction_->SaveOriginal(kernel_);
  if (is_del == kernel_->ref(IS_DEL)) {
    return true;  // No change; nothing to do.
  }
  if (is_del) {
    // If the server never knew about this item and it's deleted then we don't
    // need to keep it around.  Unsetting IS_UNSYNCED will:
    // - Ensure that the item is never committed to the server.
    // - Allow any items with the same UNIQUE_CLIENT_TAG created on other
    //   clients to override this entry.
    // - Let us delete this entry permanently through
    //   DirectoryBackingStore::DropDeletedEntries() when we next restart sync.
    //   This will save memory and avoid crbug.com/125381.
    if (!Get(ID).ServerKnows()) {
      Put(IS_UNSYNCED, false);
    }
  }

  {
    ScopedKernelLock lock(dir());
    // Some indices don't include deleted items and must be updated
    // upon a value change.
    ScopedParentChildIndexUpdater updater(lock, kernel_,
        dir()->kernel_->parent_child_index);

    kernel_->put(IS_DEL, is_del);
    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
  }

  return true;
}
157
158bool MutableEntry::Put(Int64Field field, const int64& value) {
159  DCHECK(kernel_);
160  write_transaction_->SaveOriginal(kernel_);
161  if (kernel_->ref(field) != value) {
162    ScopedKernelLock lock(dir());
163    kernel_->put(field, value);
164    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
165  }
166  return true;
167}
168
169bool MutableEntry::Put(TimeField field, const base::Time& value) {
170  DCHECK(kernel_);
171  write_transaction_->SaveOriginal(kernel_);
172  if (kernel_->ref(field) != value) {
173    kernel_->put(field, value);
174    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
175  }
176  return true;
177}
178
// Writes an ID-typed field.  Changing the entry's own ID or its PARENT_ID
// requires directory index maintenance; other ID fields are written
// directly.  Returns false only if re-indexing a changed ID fails.
bool MutableEntry::Put(IdField field, const Id& value) {
  DCHECK(kernel_);
  write_transaction_->SaveOriginal(kernel_);
  if (kernel_->ref(field) != value) {
    if (ID == field) {
      // The entry's own ID keys directory indices; Directory performs the
      // re-index and the field write together.
      if (!dir()->ReindexId(write_transaction(), kernel_, value))
        return false;
    } else if (PARENT_ID == field) {
      PutParentIdPropertyOnly(value);
      if (!Get(IS_DEL)) {
        // Re-establish a position among the new siblings.  Passing a
        // default-constructed Id presumably means "no predecessor" —
        // confirm against Directory::PutPredecessor.
        if (!PutPredecessor(Id())) {
          // TODO(lipalani) : Propagate the error to caller. crbug.com/100444.
          NOTREACHED();
        }
      }
    } else {
      kernel_->put(field, value);
    }
    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
  }
  return true;
}
201
202bool MutableEntry::Put(UniquePositionField field, const UniquePosition& value) {
203  DCHECK(kernel_);
204  write_transaction_->SaveOriginal(kernel_);
205  if(!kernel_->ref(field).Equals(value)) {
206    // We should never overwrite a valid position with an invalid one.
207    DCHECK(value.IsValid());
208    ScopedKernelLock lock(dir());
209    if (UNIQUE_POSITION == field) {
210      ScopedParentChildIndexUpdater updater(
211          lock, kernel_, dir()->kernel_->parent_child_index);
212      kernel_->put(field, value);
213    } else {
214      kernel_->put(field, value);
215    }
216    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
217  }
218  return true;
219}
220
221void MutableEntry::PutParentIdPropertyOnly(const Id& parent_id) {
222  write_transaction_->SaveOriginal(kernel_);
223  dir()->ReindexParentId(write_transaction(), kernel_, parent_id);
224  kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
225}
226
227bool MutableEntry::Put(BaseVersion field, int64 value) {
228  DCHECK(kernel_);
229  write_transaction_->SaveOriginal(kernel_);
230  if (kernel_->ref(field) != value) {
231    kernel_->put(field, value);
232    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
233  }
234  return true;
235}
236
237bool MutableEntry::Put(StringField field, const string& value) {
238  DCHECK(kernel_);
239  write_transaction_->SaveOriginal(kernel_);
240  if (field == UNIQUE_CLIENT_TAG) {
241    return PutUniqueClientTag(value);
242  }
243
244  if (kernel_->ref(field) != value) {
245    kernel_->put(field, value);
246    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
247  }
248  return true;
249}
250
// Writes an EntitySpecifics field.  Because a SERVER_SPECIFICS change can
// change the entry's server model type, an unapplied update's metahandle
// is moved between the per-type unapplied_update_metahandles sets: erased
// under the old type before the write, re-inserted under the new type
// after it.  Always returns true.
bool MutableEntry::Put(ProtoField field,
                       const sync_pb::EntitySpecifics& value) {
  DCHECK(kernel_);
  write_transaction_->SaveOriginal(kernel_);
  // TODO(ncarter): This is unfortunately heavyweight.  Can we do
  // better?
  // Equality is detected by comparing serialized bytes.
  if (kernel_->ref(field).SerializeAsString() != value.SerializeAsString()) {
    const bool update_unapplied_updates_index =
        (field == SERVER_SPECIFICS) && kernel_->ref(IS_UNAPPLIED_UPDATE);
    if (update_unapplied_updates_index) {
      // Remove ourselves from unapplied_update_metahandles with our
      // old server type.
      const ModelType old_server_type = kernel_->GetServerModelType();
      const int64 metahandle = kernel_->ref(META_HANDLE);
      size_t erase_count =
          dir()->kernel_->unapplied_update_metahandles[old_server_type]
          .erase(metahandle);
      DCHECK_EQ(erase_count, 1u);
    }

    kernel_->put(field, value);
    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);

    if (update_unapplied_updates_index) {
      // Add ourselves back into unapplied_update_metahandles with our
      // new server type.
      const ModelType new_server_type = kernel_->GetServerModelType();
      const int64 metahandle = kernel_->ref(META_HANDLE);
      dir()->kernel_->unapplied_update_metahandles[new_server_type]
          .insert(metahandle);
    }
  }
  return true;
}
285
// Writes a plain bit field.  Note that the delete-journal notification
// for SERVER_IS_DEL runs unconditionally (even when the bit is
// unchanged), by design — see the comment below.  Always returns true.
bool MutableEntry::Put(BitField field, bool value) {
  DCHECK(kernel_);
  write_transaction_->SaveOriginal(kernel_);
  bool old_value = kernel_->ref(field);
  if (old_value != value) {
    kernel_->put(field, value);
    // GetDirtyIndexHelper() supplies the dirty-metahandle set to mark.
    kernel_->mark_dirty(GetDirtyIndexHelper());
  }

  // Update delete journal for existence status change on server side here
  // instead of in PutIsDel() because IS_DEL may not be updated due to
  // early returns when processing updates. And because
  // UpdateDeleteJournalForServerDelete() checks for SERVER_IS_DEL, it has
  // to be called on sync thread.
  if (field == SERVER_IS_DEL) {
    dir()->delete_journal()->UpdateDeleteJournalForServerDelete(
        write_transaction(), old_value, *kernel_);
  }

  return true;
}
307
// Helper for Put(BitField): supplies the dirty-metahandle set that a bit
// change should be marked into.  This implementation returns the
// directory's main dirty_metahandles set.
MetahandleSet* MutableEntry::GetDirtyIndexHelper() {
  return dir()->kernel_->dirty_metahandles;
}
311
// Sets UNIQUE_CLIENT_TAG while maintaining the directory's client-tag
// index.  Returns false (leaving the entry unmodified) if a different
// entry already holds |new_tag|; returns true otherwise, including the
// no-op case where the tag is unchanged.
bool MutableEntry::PutUniqueClientTag(const string& new_tag) {
  write_transaction_->SaveOriginal(kernel_);
  // There is no SERVER_UNIQUE_CLIENT_TAG. This field is similar to ID.
  string old_tag = kernel_->ref(UNIQUE_CLIENT_TAG);
  if (old_tag == new_tag) {
    return true;
  }

  ScopedKernelLock lock(dir());
  if (!new_tag.empty()) {
    // Make sure your new value is not in there already.
    // The conflict probe uses a throwaway copy of the kernel carrying the
    // candidate tag, since the index is keyed by EntryKernel*.
    EntryKernel lookup_kernel_ = *kernel_;
    lookup_kernel_.put(UNIQUE_CLIENT_TAG, new_tag);
    bool new_tag_conflicts =
        (dir()->kernel_->client_tag_index->count(&lookup_kernel_) > 0);
    if (new_tag_conflicts) {
      return false;
    }
  }

  {
    // Remove from the index, write the tag, re-insert on scope exit.
    ScopedIndexUpdater<ClientTagIndexer> index_updater(lock, kernel_,
        dir()->kernel_->client_tag_index);
    kernel_->put(UNIQUE_CLIENT_TAG, new_tag);
    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
  }
  return true;
}
340
341bool MutableEntry::Put(IndexedBitField field, bool value) {
342  DCHECK(kernel_);
343  write_transaction_->SaveOriginal(kernel_);
344  if (kernel_->ref(field) != value) {
345    MetahandleSet* index;
346    if (IS_UNSYNCED == field) {
347      index = dir()->kernel_->unsynced_metahandles;
348    } else {
349      // Use kernel_->GetServerModelType() instead of
350      // GetServerModelType() as we may trigger some DCHECKs in the
351      // latter.
352      index =
353          &dir()->kernel_->unapplied_update_metahandles[
354              kernel_->GetServerModelType()];
355    }
356
357    ScopedKernelLock lock(dir());
358    if (value) {
359      if (!SyncAssert(index->insert(kernel_->ref(META_HANDLE)).second,
360                      FROM_HERE,
361                      "Could not insert",
362                      write_transaction())) {
363        return false;
364      }
365    } else {
366      if (!SyncAssert(1U == index->erase(kernel_->ref(META_HANDLE)),
367                      FROM_HERE,
368                      "Entry Not succesfully erased",
369                      write_transaction())) {
370        return false;
371      }
372    }
373    kernel_->put(field, value);
374    kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
375  }
376  return true;
377}
378
// Sets UNIQUE_BOOKMARK_TAG.  The tag must be a valid UniquePosition
// suffix; an invalid tag is rejected (NOTREACHED) without modifying the
// entry.  NOTE(review): unlike the other mutators in this file, this does
// not call write_transaction_->SaveOriginal(kernel_) — confirm callers
// snapshot the entry first.
void MutableEntry::PutUniqueBookmarkTag(const std::string& tag) {
  // This unique tag will eventually be used as the unique suffix when adjusting
  // this bookmark's position.  Let's make sure it's a valid suffix.
  if (!UniquePosition::IsValidSuffix(tag)) {
    NOTREACHED();
    return;
  }

  if (!kernel_->ref(UNIQUE_BOOKMARK_TAG).empty()
      && tag != kernel_->ref(UNIQUE_BOOKMARK_TAG)) {
    // There is only one scenario where our tag is expected to change.  That
    // scenario occurs when our current tag is a non-correct tag assigned during
    // the UniquePosition migration.
    // (A migration-era tag was hashed with an empty cache GUID.)
    std::string migration_generated_tag =
        GenerateSyncableBookmarkHash(std::string(),
                                     kernel_->ref(ID).GetServerId());
    DCHECK_EQ(migration_generated_tag, kernel_->ref(UNIQUE_BOOKMARK_TAG));
  }

  kernel_->put(UNIQUE_BOOKMARK_TAG, tag);
  kernel_->mark_dirty(dir()->kernel_->dirty_metahandles);
}
401
402bool MutableEntry::PutPredecessor(const Id& predecessor_id) {
403  MutableEntry predecessor(write_transaction_, GET_BY_ID, predecessor_id);
404  if (!predecessor.good())
405    return false;
406  dir()->PutPredecessor(kernel_, predecessor.kernel_);
407  return true;
408}
409
// Writes a temporary bit.  Deliberately does not call SaveOriginal or
// mark the kernel dirty — BitTemp fields appear to be in-memory scratch
// state that is not persisted; confirm against EntryKernel's field
// definitions.  Always returns true.
bool MutableEntry::Put(BitTemp field, bool value) {
  DCHECK(kernel_);
  kernel_->put(field, value);
  return true;
}
415
416// This function sets only the flags needed to get this entry to sync.
417bool MarkForSyncing(MutableEntry* e) {
418  DCHECK_NE(static_cast<MutableEntry*>(NULL), e);
419  DCHECK(!e->IsRoot()) << "We shouldn't mark a permanent object for syncing.";
420  if (!(e->Put(IS_UNSYNCED, true)))
421    return false;
422  e->Put(SYNCING, false);
423  return true;
424}
425
426}  // namespace syncable
427}  // namespace syncer
428