// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_H_
#define CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_H_
#pragma once

#include <algorithm>
#include <bitset>
#include <iosfwd>
#include <limits>
#include <set>
#include <string>
#include <vector>

#include "base/atomicops.h"
#include "base/basictypes.h"
#include "base/file_path.h"
#include "base/gtest_prod_util.h"
#include "base/synchronization/lock.h"
#include "base/time.h"
#include "chrome/browser/sync/protocol/sync.pb.h"
#include "chrome/browser/sync/syncable/autofill_migration.h"
#include "chrome/browser/sync/syncable/blob.h"
#include "chrome/browser/sync/syncable/dir_open_result.h"
#include "chrome/browser/sync/syncable/directory_event.h"
#include "chrome/browser/sync/syncable/syncable_id.h"
#include "chrome/browser/sync/syncable/model_type.h"
#include "chrome/browser/sync/util/dbgq.h"
#include "chrome/common/deprecated/event_sys.h"

class DictionaryValue;
struct PurgeInfo;

namespace sync_api {
class ReadTransaction;
class WriteNode;
class ReadNode;
}

namespace syncable {
class DirectoryChangeListener;
class Entry;

std::ostream& operator<<(std::ostream& s, const Entry& e);

class DirectoryBackingStore;

static const int64 kInvalidMetaHandle = 0;

// Update syncable_enum_conversions{.h,.cc,_unittest.cc} if you change
// any fields in this file.

enum {
  BEGIN_FIELDS = 0,
  INT64_FIELDS_BEGIN = BEGIN_FIELDS
};

enum MetahandleField {
  // Primary key into the table.  Keep this as a handle to the meta entry
  // across transactions.
  META_HANDLE = INT64_FIELDS_BEGIN
};

enum BaseVersion {
  // After initial upload, the version is controlled by the server, and is
  // increased whenever the data or metadata changes on the server.
  BASE_VERSION = META_HANDLE + 1,
};

enum Int64Field {
  SERVER_VERSION = BASE_VERSION + 1,
  MTIME,
  SERVER_MTIME,
  CTIME,
  SERVER_CTIME,

  // A numeric position value that indicates the relative ordering of
  // this object among its siblings.
  SERVER_POSITION_IN_PARENT,

  LOCAL_EXTERNAL_ID,  // ID of an item in the external local storage that this
                      // entry is associated with. (such as bookmarks.js)

  INT64_FIELDS_END
};

enum {
  INT64_FIELDS_COUNT = INT64_FIELDS_END,
  ID_FIELDS_BEGIN = INT64_FIELDS_END,
};

enum IdField {
  // Code in InitializeTables relies on ID being the first IdField value.
  ID = ID_FIELDS_BEGIN,
  PARENT_ID,
  SERVER_PARENT_ID,

  PREV_ID,
  NEXT_ID,
  ID_FIELDS_END
};

enum {
  ID_FIELDS_COUNT = ID_FIELDS_END - ID_FIELDS_BEGIN,
  BIT_FIELDS_BEGIN = ID_FIELDS_END
};

enum IndexedBitField {
  IS_UNSYNCED = BIT_FIELDS_BEGIN,
  IS_UNAPPLIED_UPDATE,
  INDEXED_BIT_FIELDS_END,
};

enum IsDelField {
  IS_DEL = INDEXED_BIT_FIELDS_END,
};

enum BitField {
  IS_DIR = IS_DEL + 1,
  SERVER_IS_DIR,
  SERVER_IS_DEL,
  BIT_FIELDS_END
};

enum {
  BIT_FIELDS_COUNT = BIT_FIELDS_END - BIT_FIELDS_BEGIN,
  STRING_FIELDS_BEGIN = BIT_FIELDS_END
};

enum StringField {
  // Name, will be truncated by server. Can be duplicated in a folder.
  NON_UNIQUE_NAME = STRING_FIELDS_BEGIN,
  // The server version of |NON_UNIQUE_NAME|.
  SERVER_NON_UNIQUE_NAME,

  // A tag string which identifies this node as a particular top-level
  // permanent object.  The tag can be thought of as a unique key that
  // identifies a singleton instance.
  UNIQUE_SERVER_TAG,  // Tagged by the server
  UNIQUE_CLIENT_TAG,  // Tagged by the client
  STRING_FIELDS_END,
};

enum {
  STRING_FIELDS_COUNT = STRING_FIELDS_END - STRING_FIELDS_BEGIN,
  PROTO_FIELDS_BEGIN = STRING_FIELDS_END
};

// From looking at the sqlite3 docs, it's not directly stated, but it
// seems the overhead for storing a NULL blob is very small.
enum ProtoField {
  SPECIFICS = PROTO_FIELDS_BEGIN,
  SERVER_SPECIFICS,
  PROTO_FIELDS_END,
};

enum {
  PROTO_FIELDS_COUNT = PROTO_FIELDS_END - PROTO_FIELDS_BEGIN
};

enum {
  FIELD_COUNT = PROTO_FIELDS_END,
  // Past this point we have temporaries, stored in memory only.
  BEGIN_TEMPS = PROTO_FIELDS_END,
  BIT_TEMPS_BEGIN = BEGIN_TEMPS,
};

enum BitTemp {
  SYNCING = BIT_TEMPS_BEGIN,
  BIT_TEMPS_END,
};

enum {
  BIT_TEMPS_COUNT = BIT_TEMPS_END - BIT_TEMPS_BEGIN
};
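
// Because the field enums above are laid out contiguously, EntryKernel
// (below) can store every field in a few fixed-size arrays and bitsets,
// indexed by (field - <GROUP>_FIELDS_BEGIN).  For example, SERVER_VERSION
// lives at int64_fields[SERVER_VERSION - INT64_FIELDS_BEGIN] and IS_DEL at
// bit_fields[IS_DEL - BIT_FIELDS_BEGIN].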

class BaseTransaction;
class WriteTransaction;
class ReadTransaction;
class Directory;
class ScopedDirLookup;

// Instead of:
//   Entry e = transaction.GetById(id);
// use:
//   Entry e(transaction, GET_BY_ID, id);
//
// Why?  The former would require a copy constructor, and it would be difficult
// to enforce that an entry never outlived its transaction if there were a copy
// constructor.
enum GetById {
  GET_BY_ID
};

enum GetByClientTag {
  GET_BY_CLIENT_TAG
};

enum GetByServerTag {
  GET_BY_SERVER_TAG
};

enum GetByHandle {
  GET_BY_HANDLE
};

enum Create {
  CREATE
};

enum CreateNewUpdateItem {
  CREATE_NEW_UPDATE_ITEM
};

typedef std::set<int64> MetahandleSet;

// Why the singular enums?  So the code compile-time dispatches instead of
// runtime dispatches as it would with a single enum and an if() statement.
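//
// For example, Entry::Get(BASE_VERSION) and Entry::Get(NON_UNIQUE_NAME)
// resolve to different overloads at compile time because BaseVersion and
// StringField are distinct enum types; a single combined field enum would
// force each accessor to branch on the field value at runtime.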

// The EntryKernel class contains the actual data for an entry.
struct EntryKernel {
 private:
  std::string string_fields[STRING_FIELDS_COUNT];
  sync_pb::EntitySpecifics specifics_fields[PROTO_FIELDS_COUNT];
  int64 int64_fields[INT64_FIELDS_COUNT];
  Id id_fields[ID_FIELDS_COUNT];
  std::bitset<BIT_FIELDS_COUNT> bit_fields;
  std::bitset<BIT_TEMPS_COUNT> bit_temps;

 public:
  EntryKernel();
  ~EntryKernel();

  // Set the dirty bit, and optionally add this entry's metahandle to
  // a provided index on dirty bits in |dirty_index|. Parameter may be null,
  // and will result only in setting the dirty bit of this entry.
  inline void mark_dirty(syncable::MetahandleSet* dirty_index) {
    if (!dirty_ && dirty_index) {
      DCHECK_NE(0, ref(META_HANDLE));
      dirty_index->insert(ref(META_HANDLE));
    }
    dirty_ = true;
  }

  // Clear the dirty bit, and optionally remove this entry's metahandle from
  // a provided index on dirty bits in |dirty_index|. Parameter may be null,
  // and will result only in clearing the dirty bit of this entry.
  inline void clear_dirty(syncable::MetahandleSet* dirty_index) {
    if (dirty_ && dirty_index) {
      DCHECK_NE(0, ref(META_HANDLE));
      dirty_index->erase(ref(META_HANDLE));
    }
    dirty_ = false;
  }

  inline bool is_dirty() const {
    return dirty_;
  }

  // Setters.
  inline void put(MetahandleField field, int64 value) {
    int64_fields[field - INT64_FIELDS_BEGIN] = value;
  }
  inline void put(Int64Field field, int64 value) {
    int64_fields[field - INT64_FIELDS_BEGIN] = value;
  }
  inline void put(IdField field, const Id& value) {
    id_fields[field - ID_FIELDS_BEGIN] = value;
  }
  inline void put(BaseVersion field, int64 value) {
    int64_fields[field - INT64_FIELDS_BEGIN] = value;
  }
  inline void put(IndexedBitField field, bool value) {
    bit_fields[field - BIT_FIELDS_BEGIN] = value;
  }
  inline void put(IsDelField field, bool value) {
    bit_fields[field - BIT_FIELDS_BEGIN] = value;
  }
  inline void put(BitField field, bool value) {
    bit_fields[field - BIT_FIELDS_BEGIN] = value;
  }
  inline void put(StringField field, const std::string& value) {
    string_fields[field - STRING_FIELDS_BEGIN] = value;
  }
  inline void put(ProtoField field, const sync_pb::EntitySpecifics& value) {
    specifics_fields[field - PROTO_FIELDS_BEGIN].CopyFrom(value);
  }
  inline void put(BitTemp field, bool value) {
    bit_temps[field - BIT_TEMPS_BEGIN] = value;
  }

  // Const ref getters.
  inline int64 ref(MetahandleField field) const {
    return int64_fields[field - INT64_FIELDS_BEGIN];
  }
  inline int64 ref(Int64Field field) const {
    return int64_fields[field - INT64_FIELDS_BEGIN];
  }
  inline const Id& ref(IdField field) const {
    return id_fields[field - ID_FIELDS_BEGIN];
  }
  inline int64 ref(BaseVersion field) const {
    return int64_fields[field - INT64_FIELDS_BEGIN];
  }
  inline bool ref(IndexedBitField field) const {
    return bit_fields[field - BIT_FIELDS_BEGIN];
  }
  inline bool ref(IsDelField field) const {
    return bit_fields[field - BIT_FIELDS_BEGIN];
  }
  inline bool ref(BitField field) const {
    return bit_fields[field - BIT_FIELDS_BEGIN];
  }
  inline const std::string& ref(StringField field) const {
    return string_fields[field - STRING_FIELDS_BEGIN];
  }
  inline const sync_pb::EntitySpecifics& ref(ProtoField field) const {
    return specifics_fields[field - PROTO_FIELDS_BEGIN];
  }
  inline bool ref(BitTemp field) const {
    return bit_temps[field - BIT_TEMPS_BEGIN];
  }

  // Non-const, mutable ref getters for object types only.
  inline std::string& mutable_ref(StringField field) {
    return string_fields[field - STRING_FIELDS_BEGIN];
  }
  inline sync_pb::EntitySpecifics& mutable_ref(ProtoField field) {
    return specifics_fields[field - PROTO_FIELDS_BEGIN];
  }
  inline Id& mutable_ref(IdField field) {
    return id_fields[field - ID_FIELDS_BEGIN];
  }

  // Dumps all kernel info into a DictionaryValue and returns it.
  // Transfers ownership of the DictionaryValue to the caller.
  DictionaryValue* ToValue() const;

 private:
  // Tracks whether this entry needs to be saved to the database.
  bool dirty_;
};

// A read-only meta entry.
class Entry {
  friend class Directory;
  friend std::ostream& operator << (std::ostream& s, const Entry& e);

 public:
  // After constructing, you must check good() to test whether the Get
  // succeeded.
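  //
  // For example:
  //   Entry e(trans, GET_BY_ID, id);
  //   if (!e.good())
  //     return;  // No entry with |id| exists in the directory.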
  Entry(BaseTransaction* trans, GetByHandle, int64 handle);
  Entry(BaseTransaction* trans, GetById, const Id& id);
  Entry(BaseTransaction* trans, GetByServerTag, const std::string& tag);
  Entry(BaseTransaction* trans, GetByClientTag, const std::string& tag);

  bool good() const { return 0 != kernel_; }

  BaseTransaction* trans() const { return basetrans_; }

  // Field accessors.
  inline int64 Get(MetahandleField field) const {
    DCHECK(kernel_);
    return kernel_->ref(field);
  }
  inline Id Get(IdField field) const {
    DCHECK(kernel_);
    return kernel_->ref(field);
  }
  inline int64 Get(Int64Field field) const {
    DCHECK(kernel_);
    return kernel_->ref(field);
  }
  inline int64 Get(BaseVersion field) const {
    DCHECK(kernel_);
    return kernel_->ref(field);
  }
  inline bool Get(IndexedBitField field) const {
    DCHECK(kernel_);
    return kernel_->ref(field);
  }
  inline bool Get(IsDelField field) const {
    DCHECK(kernel_);
    return kernel_->ref(field);
  }
  inline bool Get(BitField field) const {
    DCHECK(kernel_);
    return kernel_->ref(field);
  }
  const std::string& Get(StringField field) const;
  inline const sync_pb::EntitySpecifics& Get(ProtoField field) const {
    DCHECK(kernel_);
    return kernel_->ref(field);
  }
  inline bool Get(BitTemp field) const {
    DCHECK(kernel_);
    return kernel_->ref(field);
  }

  ModelType GetServerModelType() const;
  ModelType GetModelType() const;

  // If this returns false, we shouldn't bother maintaining
  // a position value (sibling ordering) for this item.
  bool ShouldMaintainPosition() const {
    return GetModelType() == BOOKMARKS;
  }

  inline bool ExistsOnClientBecauseNameIsNonEmpty() const {
    DCHECK(kernel_);
    return !kernel_->ref(NON_UNIQUE_NAME).empty();
  }

  inline bool IsRoot() const {
    DCHECK(kernel_);
    return kernel_->ref(ID).IsRoot();
  }

  Directory* dir() const;

  const EntryKernel GetKernelCopy() const {
    return *kernel_;
  }

  // Compute a local predecessor position for |update_item|, based on its
  // absolute server position.  The returned ID will be a valid predecessor
  // under SERVER_PARENT_ID that is consistent with the
  // SERVER_POSITION_IN_PARENT ordering.
  Id ComputePrevIdFromServerPosition(const Id& parent_id) const;

  // Dumps all entry info into a DictionaryValue and returns it.
  // Transfers ownership of the DictionaryValue to the caller.
  DictionaryValue* ToValue() const;

 protected:  // Don't allow creation on heap, except by sync API wrappers.
  friend class sync_api::ReadNode;
  void* operator new(size_t size) { return (::operator new)(size); }

  inline Entry(BaseTransaction* trans)
      : basetrans_(trans),
        kernel_(NULL) { }

 protected:

  BaseTransaction* const basetrans_;

  EntryKernel* kernel_;

 private:
  // Like GetServerModelType() but without the DCHECKs.
  ModelType GetServerModelTypeHelper() const;

  DISALLOW_COPY_AND_ASSIGN(Entry);
};

// A mutable meta entry.  Changes get committed to the database when the
// WriteTransaction is destroyed.
class MutableEntry : public Entry {
  friend class WriteTransaction;
  friend class Directory;
  void Init(WriteTransaction* trans, const Id& parent_id,
      const std::string& name);
 public:
  MutableEntry(WriteTransaction* trans, Create, const Id& parent_id,
               const std::string& name);
  MutableEntry(WriteTransaction* trans, CreateNewUpdateItem, const Id& id);
  MutableEntry(WriteTransaction* trans, GetByHandle, int64);
  MutableEntry(WriteTransaction* trans, GetById, const Id&);
  MutableEntry(WriteTransaction* trans, GetByClientTag, const std::string& tag);
  MutableEntry(WriteTransaction* trans, GetByServerTag, const std::string& tag);

  inline WriteTransaction* write_transaction() const {
    return write_transaction_;
  }

  // Field Accessors.  Some of them trigger the re-indexing of the entry.
  // Return true on success, return false on failure, which means
  // that putting the value would have caused a duplicate in the index.
  // TODO(chron): Remove some of these unnecessary return values.
  bool Put(Int64Field field, const int64& value);
  bool Put(IdField field, const Id& value);

  // Do a simple property-only update of the PARENT_ID field.  Use with caution.
  //
  // The normal Put(PARENT_ID) call will move the item to the front of the
  // sibling order to maintain the linked list invariants when the parent
  // changes.  That's usually what you want to do, but it's inappropriate
  // when the caller is trying to change the parent ID of the whole set
  // of children (e.g. because the ID changed during a commit).  For those
  // cases, there's this function.  It will corrupt the sibling ordering
  // if you're not careful.
  void PutParentIdPropertyOnly(const Id& parent_id);

  bool Put(StringField field, const std::string& value);
  bool Put(BaseVersion field, int64 value);

  bool Put(ProtoField field, const sync_pb::EntitySpecifics& value);
  bool Put(BitField field, bool value);
  inline bool Put(IsDelField field, bool value) {
    return PutIsDel(value);
  }
  bool Put(IndexedBitField field, bool value);

  // Sets the position of this item, and updates the entry kernels of the
  // adjacent siblings so that list invariants are maintained.  Returns false
  // and fails if |predecessor_id| does not identify a sibling.  Pass the root
  // ID to put the node in first position.
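  //
  // For example, to move this item to the first position under its parent:
  //   e.PutPredecessor(trans->root_id());
  // where |e| is a MutableEntry and |trans| is its WriteTransaction.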
  bool PutPredecessor(const Id& predecessor_id);

  bool Put(BitTemp field, bool value);

 protected:
  syncable::MetahandleSet* GetDirtyIndexHelper();

  bool PutIsDel(bool value);

 private:  // Don't allow creation on heap, except by sync API wrappers.
  friend class sync_api::WriteNode;
  void* operator new(size_t size) { return (::operator new)(size); }

  bool PutImpl(StringField field, const std::string& value);
  bool PutUniqueClientTag(const std::string& value);

  // Adjusts the successor and predecessor entries so that they no longer
  // refer to this entry.
  void UnlinkFromOrder();

  // Kind of redundant. We should reduce the number of pointers
  // floating around if at all possible. Could we store this in Directory?
  // Scope: Set on construction, never changed after that.
  WriteTransaction* const write_transaction_;

 protected:
  MutableEntry();

  DISALLOW_COPY_AND_ASSIGN(MutableEntry);
};

class LessParentIdAndHandle;
template <typename FieldType, FieldType field_index>
class LessField;
class LessEntryMetaHandles {
 public:
  inline bool operator()(const syncable::EntryKernel& a,
                         const syncable::EntryKernel& b) const {
    return a.ref(META_HANDLE) < b.ref(META_HANDLE);
  }
};
typedef std::set<EntryKernel, LessEntryMetaHandles> OriginalEntries;

// How syncable indices & Indexers work.
//
// The syncable Directory maintains several indices on the Entries it tracks.
// The indices follow a common pattern:
//   (a) The index allows efficient lookup of an Entry* with particular
//       field values.  This is done by use of a std::set<> and a custom
//       comparator.
//   (b) There may be conditions for inclusion in the index -- for example,
//       deleted items might not be indexed.
//   (c) Because the index set contains only Entry*, one must be careful
//       to remove Entries from the set before updating the value of
//       an indexed field.
// The traits of an index are a Comparator (to define the set ordering) and a
// ShouldInclude function (to define the conditions for inclusion).  For each
// index, the traits are grouped into a class called an Indexer which
// can be used as a template type parameter.
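//
// For example, MetahandleIndexer below pairs a LessField<MetahandleField,
// META_HANDLE> comparator with a ShouldInclude() that admits every entry,
// and Index<MetahandleIndexer>::Set is the std::set type the Directory
// actually stores (see Directory::MetahandlesIndex).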

// Traits type for metahandle index.
struct MetahandleIndexer {
  // This index is of the metahandle field values.
  typedef LessField<MetahandleField, META_HANDLE> Comparator;

  // This index includes all entries.
  inline static bool ShouldInclude(const EntryKernel* a) {
    return true;
  }
};

// Traits type for ID field index.
struct IdIndexer {
  // This index is of the ID field values.
  typedef LessField<IdField, ID> Comparator;

  // This index includes all entries.
  inline static bool ShouldInclude(const EntryKernel* a) {
    return true;
  }
};

// Traits type for unique client tag index.
struct ClientTagIndexer {
  // This index is of the client-tag values.
  typedef LessField<StringField, UNIQUE_CLIENT_TAG> Comparator;

  // Items are only in this index if they have a non-empty client tag value.
  static bool ShouldInclude(const EntryKernel* a);
};

// This index contains EntryKernels ordered by parent ID and metahandle.
// It allows efficient lookup of the children of a given parent.
struct ParentIdAndHandleIndexer {
  // This index is of the parent ID and metahandle.  We use a custom
  // comparator.
  class Comparator {
   public:
    bool operator() (const syncable::EntryKernel* a,
                     const syncable::EntryKernel* b) const;
  };

  // This index does not include deleted items.
  static bool ShouldInclude(const EntryKernel* a);
};

// Given an Indexer providing the semantics of an index, defines the
// set type used to actually contain the index.
template <typename Indexer>
struct Index {
  typedef std::set<EntryKernel*, typename Indexer::Comparator> Set;
};

// A WriteTransaction has a writer tag describing which body of code is doing
// the write. This is defined up here since DirectoryChangeEvent also contains
// one.
enum WriterTag {
  INVALID,
  SYNCER,
  AUTHWATCHER,
  UNITTEST,
  VACUUM_AFTER_SAVE,
  PURGE_ENTRIES,
  SYNCAPI
};

// The name Directory in this case means the entire directory
// structure within a single user account.
//
// Sqlite is a little goofy, in that each thread must access a database
// via its own handle.  So, a Directory object should only be accessed
// from a single thread.  Use DirectoryManager's Open() method to
// always get a directory that has been properly initialized on the
// current thread.
//
// The db is protected against concurrent modification by a reader/
// writer lock, negotiated by the ReadTransaction and WriteTransaction
// friend classes.  The in-memory indices are protected against
// concurrent modification by the kernel lock.
//
// All methods which require the reader/writer lock to be held either
//   are protected and only called from friends in a transaction
//   or are public and take a Transaction* argument.
//
// All methods which require the kernel lock to be already held take a
// ScopedKernelLock* argument.
//
// To prevent deadlock, the reader writer transaction lock must always
// be held before acquiring the kernel lock.
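//
// For example (a minimal sketch; in practice the transaction classes and
// ScopedKernelLock are used by Directory internals and its friends):
//   WriteTransaction trans(dir, SYNCER, __FILE__, __LINE__);
//   ScopedKernelLock lock(dir);  // Taken only while the transaction is held.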
class ScopedKernelLock;
class IdFilter;
class DirectoryManager;

class Directory {
  friend class BaseTransaction;
  friend class Entry;
  friend class MutableEntry;
  friend class ReadTransaction;
  friend class ReadTransactionWithoutDB;
  friend class ScopedKernelLock;
  friend class ScopedKernelUnlock;
  friend class WriteTransaction;
  friend class SyncableDirectoryTest;
  FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
                           TakeSnapshotGetsAllDirtyHandlesTest);
  FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
                           TakeSnapshotGetsOnlyDirtyHandlesTest);
  FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest, TestPurgeEntriesWithTypeIn);
  FRIEND_TEST_ALL_PREFIXES(SyncableDirectoryTest,
                           TakeSnapshotGetsMetahandlesToPurge);

 public:
  class EventListenerHookup;

  // Various data that the Directory::Kernel we are backing (persisting data
  // for) needs saved across runs of the application.
  struct PersistedKernelInfo {
    AutofillMigrationDebugInfo autofill_migration_debug_info;

    PersistedKernelInfo();
    ~PersistedKernelInfo();

    // Set the |download_progress| entry for the given model to a
    // "first sync" start point.  When such a value is sent to the server,
    // a full download of all objects of the model will be initiated.
    void reset_download_progress(ModelType model_type);

    // Last sync timestamp fetched from the server.
    sync_pb::DataTypeProgressMarker download_progress[MODEL_TYPE_COUNT];
    // true iff we ever reached the end of the changelog.
    ModelTypeBitSet initial_sync_ended;
    // The store birthday we were given by the server. Contents are opaque to
    // the client.
    std::string store_birthday;
    // The next local ID that has not been used with this cache-GUID.
    int64 next_id;
    // The persisted notification state.
    std::string notification_state;

    AutofillMigrationState autofill_migration_state;
  };

  // What the Directory needs on initialization to create itself and its Kernel.
  // Filled by DirectoryBackingStore::Load.
  struct KernelLoadInfo {
    PersistedKernelInfo kernel_info;
    std::string cache_guid;  // Created on first initialization, never changes.
    int64 max_metahandle;    // Computed (using sql MAX aggregate) on init.
    KernelLoadInfo() : max_metahandle(0) {
    }
  };

  // The dirty/clean state of kernel fields backed by the share_info table.
  // This is public so it can be used in SaveChangesSnapshot for persistence.
  enum KernelShareInfoStatus {
    KERNEL_SHARE_INFO_INVALID,
    KERNEL_SHARE_INFO_VALID,
    KERNEL_SHARE_INFO_DIRTY
  };

  // When the Directory is told to SaveChanges, a SaveChangesSnapshot is
  // constructed and forms a consistent snapshot of what needs to be sent to
  // the backing store.
  struct SaveChangesSnapshot {
    SaveChangesSnapshot();
    ~SaveChangesSnapshot();

    KernelShareInfoStatus kernel_info_status;
    PersistedKernelInfo kernel_info;
    OriginalEntries dirty_metas;
    MetahandleSet metahandles_to_purge;
  };

  Directory();
  virtual ~Directory();

  DirOpenResult Open(const FilePath& file_path, const std::string& name);

  void Close();

  int64 NextMetahandle();
  // Always returns a negative id.  Positive ids are generated
  // by the server only.
  Id NextId();

  const FilePath& file_path() const { return kernel_->db_path; }
  bool good() const { return NULL != store_; }

  // The download progress is an opaque token provided by the sync server
  // to indicate the continuation state of the next GetUpdates operation.
  void GetDownloadProgress(
      ModelType type,
      sync_pb::DataTypeProgressMarker* value_out) const;
  void GetDownloadProgressAsString(
      ModelType type,
      std::string* value_out) const;
  void SetDownloadProgress(
      ModelType type,
      const sync_pb::DataTypeProgressMarker& value);

  bool initial_sync_ended_for_type(ModelType type) const;
  void set_initial_sync_ended_for_type(ModelType type, bool value);
  AutofillMigrationState get_autofill_migration_state() const;

  AutofillMigrationDebugInfo get_autofill_migration_debug_info() const;

  void set_autofill_migration_state(AutofillMigrationState state);

  void set_autofill_migration_state_debug_info(
      syncable::AutofillMigrationDebugInfo::PropertyToSet property_to_set,
      const syncable::AutofillMigrationDebugInfo& info);

  const std::string& name() const { return kernel_->name; }

  // (Account) Store birthday is opaque to the client, so we keep it in the
  // format it is in the proto buffer in case we switch to a binary birthday
  // later.
  std::string store_birthday() const;
  void set_store_birthday(const std::string& store_birthday);

  std::string GetAndClearNotificationState();
  void SetNotificationState(const std::string& notification_state);

  // Unique to each account / client pair.
  std::string cache_guid() const;

  void SetChangeListener(DirectoryChangeListener* listener);

 protected:  // for friends, mainly used by Entry constructors
  virtual EntryKernel* GetEntryByHandle(int64 handle);
  virtual EntryKernel* GetEntryByHandle(int64 metahandle,
      ScopedKernelLock* lock);
  virtual EntryKernel* GetEntryById(const Id& id);
  EntryKernel* GetEntryByServerTag(const std::string& tag);
  virtual EntryKernel* GetEntryByClientTag(const std::string& tag);
  EntryKernel* GetRootEntry();
  bool ReindexId(EntryKernel* const entry, const Id& new_id);
  void ReindexParentId(EntryKernel* const entry, const Id& new_parent_id);
  void ClearDirtyMetahandles();

  // These don't do semantic checking.
  // The semantic checking is implemented higher up.
  void UnlinkEntryFromOrder(EntryKernel* entry,
                            WriteTransaction* trans,
                            ScopedKernelLock* lock);

  // Overridden by tests.
  virtual DirectoryBackingStore* CreateBackingStore(
      const std::string& dir_name,
      const FilePath& backing_filepath);

 private:
  // These private versions expect the kernel lock to already be held
  // before calling.
  EntryKernel* GetEntryById(const Id& id, ScopedKernelLock* const lock);

  DirOpenResult OpenImpl(const FilePath& file_path, const std::string& name);

  template <class T> void TestAndSet(T* kernel_data, const T* data_to_set);

  struct DirectoryEventTraits {
    typedef DirectoryEvent EventType;
    static inline bool IsChannelShutdownEvent(const DirectoryEvent& event) {
      return DIRECTORY_DESTROYED == event;
    }
  };
 public:
  typedef EventChannel<DirectoryEventTraits, base::Lock> Channel;
  typedef std::vector<int64> ChildHandles;

  // Returns the child meta handles for given parent id.
  void GetChildHandles(BaseTransaction*, const Id& parent_id,
      ChildHandles* result);

  // Find the first or last child in the positional ordering under a parent,
  // and return its id.  Returns a root Id if parent has no children.
  virtual Id GetFirstChildId(BaseTransaction* trans, const Id& parent_id);
  Id GetLastChildId(BaseTransaction* trans, const Id& parent_id);

  // Compute a local predecessor position for |update_item|.  The position
  // is determined by the SERVER_POSITION_IN_PARENT value of |update_item|,
  // as well as the SERVER_POSITION_IN_PARENT values of any up-to-date
  // children of |parent_id|.
  Id ComputePrevIdFromServerPosition(
      const EntryKernel* update_item,
      const syncable::Id& parent_id);

  // SaveChanges works by taking a consistent snapshot of the current Directory
  // state and indices (by deep copy) under a ReadTransaction, passing this
  // snapshot to the backing store under no transaction, and finally cleaning
  // up by either purging entries no longer needed (this part done under a
  // WriteTransaction) or rolling back the dirty bits.  It also uses
  // internal locking to enforce SaveChanges operations are mutually exclusive.
  //
  // WARNING: THIS METHOD PERFORMS SYNCHRONOUS I/O VIA SQLITE.
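  //
  // Typical use (illustrative):
  //   if (!dir->SaveChanges()) {
  //     // The dirty bits were rolled back (see HandleSaveChangesFailure);
  //     // a later SaveChanges call will retry persisting them.
  //   }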
  bool SaveChanges();

  // Returns the number of entities with the unsynced bit set.
  int64 unsynced_entity_count() const;

  // GetUnsyncedMetaHandles should only be called after SaveChanges and
  // before any new entries have been created. The intention is that the
  // syncer should call it from its PerformSyncQueries member.
  typedef std::vector<int64> UnsyncedMetaHandles;
  void GetUnsyncedMetaHandles(BaseTransaction* trans,
                              UnsyncedMetaHandles* result);

  // Get all the metahandles for unapplied updates
  typedef std::vector<int64> UnappliedUpdateMetaHandles;
  void GetUnappliedUpdateMetaHandles(BaseTransaction* trans,
                                     UnappliedUpdateMetaHandles* result);

  // Get the channel for post save notification, used by the syncer.
  inline Channel* channel() const {
    return kernel_->channel;
  }

  // Checks tree metadata consistency.
  // If full_scan is false, the function will avoid pulling any entries from
  // the db and will only scan entries currently in RAM.
  // If full_scan is true, all entries will be pulled from the database.
  // No return value, CHECKs will be triggered if we're given bad
  // information.
  void CheckTreeInvariants(syncable::BaseTransaction* trans,
                           bool full_scan);

  void CheckTreeInvariants(syncable::BaseTransaction* trans,
                           const OriginalEntries* originals);

  void CheckTreeInvariants(syncable::BaseTransaction* trans,
                           const MetahandleSet& handles,
                           const IdFilter& idfilter);

  // Purges all data associated with any entries whose ModelType or
  // ServerModelType is found in |types|, from _both_ memory and disk.
  // Only valid, "real" model types are allowed in |types| (see model_type.h
  // for definitions).  "Purge" is just meant to distinguish from "deleting"
  // entries, which means something different in the syncable namespace.
  // WARNING! This can be real slow, as it iterates over all entries.
  // WARNING! Performs synchronous I/O.
  virtual void PurgeEntriesWithTypeIn(const std::set<ModelType>& types);

 private:
  // Helper to prime ids_index, parent_id_and_names_index, unsynced_metahandles
  // and unapplied_metahandles from metahandles_index.
  void InitializeIndices();

  // Constructs a consistent snapshot of the current Directory state and
  // indices (by deep copy) under a ReadTransaction for use in |snapshot|.
  // See SaveChanges() for more information.
  void TakeSnapshotForSaveChanges(SaveChangesSnapshot* snapshot);

  // Purges from memory any unused, safe to remove entries that were
  // successfully deleted on disk as a result of the SaveChanges that processed
  // |snapshot|.  See SaveChanges() for more information.
  void VacuumAfterSaveChanges(const SaveChangesSnapshot& snapshot);

  // Rolls back dirty bits in the event that the SaveChanges that
  // processed |snapshot| failed, for example, due to no disk space.
  void HandleSaveChangesFailure(const SaveChangesSnapshot& snapshot);

  // For new entry creation only
  void InsertEntry(EntryKernel* entry, ScopedKernelLock* lock);
  void InsertEntry(EntryKernel* entry);

  // Used by CheckTreeInvariants
  void GetAllMetaHandles(BaseTransaction* trans, MetahandleSet* result);
  bool SafeToPurgeFromMemory(const EntryKernel* const entry) const;

  // Internal setters that do not acquire a lock internally.  These are unsafe
  // on their own; caller must guarantee exclusive access manually by holding
  // a ScopedKernelLock.
  void set_initial_sync_ended_for_type_unsafe(ModelType type, bool x);
  void SetNotificationStateUnsafe(const std::string& notification_state);

  Directory& operator = (const Directory&);

 public:
  typedef Index<MetahandleIndexer>::Set MetahandlesIndex;
  typedef Index<IdIndexer>::Set IdsIndex;
  // All entries in memory must be in both the MetahandlesIndex and
  // the IdsIndex, but only non-deleted entries will be in the
  // ParentIdChildIndex.
  typedef Index<ParentIdAndHandleIndexer>::Set ParentIdChildIndex;

  // Contains both deleted and existing entries with tags.
  // We can't store only existing tags because the client would create
  // items that had a duplicated ID in the end, resulting in a DB key
  // violation. ID reassociation would fail after an attempted commit.
  typedef Index<ClientTagIndexer>::Set ClientTagIndex;

 protected:
  // Used by tests.
  void init_kernel(const std::string& name);

 private:

  struct Kernel {
    Kernel(const FilePath& db_path, const std::string& name,
           const KernelLoadInfo& info);

    ~Kernel();

    void AddRef();  // For convenience.
    void Release();

    FilePath const db_path;
    // TODO(timsteele): audit use of the member and remove if possible
    volatile base::subtle::AtomicWord refcount;

    // Implements ReadTransaction / WriteTransaction using a simple lock.
    base::Lock transaction_mutex;

    // The name of this directory.
    std::string const name;

    // Protects all members below.
    // The mutex effectively protects all the indices, but not the
    // entries themselves.  So once a pointer to an entry is pulled
    // from the index, the mutex can be unlocked and entry read or written.
    //
    // Never hold the mutex and do anything with the database or any
    // other buffered IO.  Violating this rule will result in deadlock.
    base::Lock mutex;
    // Entries indexed by metahandle
    MetahandlesIndex* metahandles_index;
    // Entries indexed by id
    IdsIndex* ids_index;
    ParentIdChildIndex* parent_id_child_index;
    ClientTagIndex* client_tag_index;
    // So we don't have to create an EntryKernel every time we want to
    // look something up in an index.  Needle in haystack metaphor.
    EntryKernel needle;

    // 3 in-memory indices on bits used extremely frequently by the syncer.
    MetahandleSet* const unapplied_update_metahandles;
    MetahandleSet* const unsynced_metahandles;
    // Contains metahandles that are most likely dirty (though not
    // necessarily).  Dirtiness is confirmed in TakeSnapshotForSaveChanges().
    MetahandleSet* const dirty_metahandles;

    // When a purge takes place, we remove items from all our indices and stash
    // them in here so that SaveChanges can persist their permanent deletion.
    MetahandleSet* const metahandles_to_purge;

    // TODO(ncarter): Figure out what the hell this is, and comment it.
    Channel* const channel;

    // The listener for directory change events, triggered when the transaction
    // is ending.
    DirectoryChangeListener* change_listener_;

    KernelShareInfoStatus info_status;

    // These 3 members are backed by the share_info table, and
    // their state is marked by the flag above.

    // A structure containing the Directory state that is written back into the
    // database on SaveChanges.
    PersistedKernelInfo persisted_info;

    // A unique identifier for this account's cache db, used to generate
    // unique server IDs. No need to lock, only written at init time.
    std::string cache_guid;

    // It doesn't make sense for two threads to run SaveChanges at the same
    // time; this mutex protects that activity.
    base::Lock save_changes_mutex;

    // The next metahandle is protected by kernel mutex.
    int64 next_metahandle;

    // Keep a history of recently flushed metahandles for debugging
    // purposes.  Protected by the save_changes_mutex.
    DebugQueue<int64, 1000> flushed_metahandles;
  };

  // Helper method used to do searches on |parent_id_child_index|.
  ParentIdChildIndex::iterator LocateInParentChildIndex(
      const ScopedKernelLock& lock,
      const Id& parent_id,
      int64 position_in_parent,
      const Id& item_id_for_tiebreaking);

  // Return an iterator to the beginning of the range of the children of
  // |parent_id| in the kernel's parent_id_child_index.
  ParentIdChildIndex::iterator GetParentChildIndexLowerBound(
      const ScopedKernelLock& lock,
      const Id& parent_id);

  // Return an iterator to just past the end of the range of the
  // children of |parent_id| in the kernel's parent_id_child_index.
  ParentIdChildIndex::iterator GetParentChildIndexUpperBound(
      const ScopedKernelLock& lock,
      const Id& parent_id);

  Kernel* kernel_;

  DirectoryBackingStore* store_;
};

class ScopedKernelLock {
 public:
  explicit ScopedKernelLock(const Directory*);
  ~ScopedKernelLock() {}

  base::AutoLock scoped_lock_;
  Directory* const dir_;
  DISALLOW_COPY_AND_ASSIGN(ScopedKernelLock);
};

// Transactions are now processed FIFO with a straight lock
class BaseTransaction {
  friend class Entry;
 public:
  inline Directory* directory() const { return directory_; }
  inline Id root_id() const { return Id(); }

  virtual ~BaseTransaction();

 protected:
  BaseTransaction(Directory* directory, const char* name,
                  const char* source_file, int line, WriterTag writer);

  // For unit testing. Everything will be mocked out, so there is no point in
  // initializing.
  explicit BaseTransaction(Directory* directory);

  void UnlockAndLog(OriginalEntries* entries);
  virtual bool NotifyTransactionChangingAndEnding(
      OriginalEntries* entries,
      ModelTypeBitSet* models_with_changes);
  virtual void NotifyTransactionComplete(ModelTypeBitSet models_with_changes);

  Directory* const directory_;
  Directory::Kernel* const dirkernel_;  // for brevity
  const char* const name_;
  base::TimeTicks time_acquired_;
  const char* const source_file_;
  const int line_;
  WriterTag writer_;

 private:
  void Lock();

  DISALLOW_COPY_AND_ASSIGN(BaseTransaction);
};

// Locks db in constructor, unlocks in destructor.
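//
// For example (the braces control how long the db lock is held):
//   {
//     ReadTransaction trans(dir, __FILE__, __LINE__);
//     Entry e(&trans, GET_BY_ID, id);
//     if (e.good()) {
//       // Read fields via e.Get(...).
//     }
//   }  // Lock released when |trans| goes out of scope.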
class ReadTransaction : public BaseTransaction {
 public:
  ReadTransaction(Directory* directory, const char* source_file,
                  int line);
  ReadTransaction(const ScopedDirLookup& scoped_dir,
                  const char* source_file, int line);

  virtual ~ReadTransaction();

 protected:  // Don't allow creation on heap, except by sync API wrapper.
  friend class sync_api::ReadTransaction;
  void* operator new(size_t size) { return (::operator new)(size); }

  DISALLOW_COPY_AND_ASSIGN(ReadTransaction);
};

// Locks db in constructor, unlocks in destructor.
class WriteTransaction : public BaseTransaction {
  friend class MutableEntry;
 public:
  explicit WriteTransaction(Directory* directory, WriterTag writer,
                            const char* source_file, int line);
  explicit WriteTransaction(const ScopedDirLookup& directory,
                            WriterTag writer, const char* source_file,
                            int line);
  virtual ~WriteTransaction();

  void SaveOriginal(EntryKernel* entry);

 protected:
  // Before an entry gets modified, we copy the original into a list
  // so that we can issue change notifications when the transaction
  // is done.
  OriginalEntries* const originals_;

  explicit WriteTransaction(Directory *directory);

  DISALLOW_COPY_AND_ASSIGN(WriteTransaction);
};

bool IsLegalNewParent(BaseTransaction* trans, const Id& id, const Id& parentid);

int64 Now();

// This function sets only the flags needed to get this entry to sync.
void MarkForSyncing(syncable::MutableEntry* e);

// This is not a reset.  It just sets the numeric fields which are not
// initialized by the constructor to zero.
void ZeroFields(EntryKernel* entry, int first_field);

}  // namespace syncable

std::ostream& operator <<(std::ostream&, const syncable::Blob&);

#endif  // CHROME_BROWSER_SYNC_SYNCABLE_SYNCABLE_H_