// directory_backing_store.cc revision c2e0dbddbe15c98d52c4786dac06cb8952a8ae6d
1// Copyright 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "sync/syncable/directory_backing_store.h"
6
7#include "build/build_config.h"
8
9#include <limits>
10
11#include "base/base64.h"
12#include "base/debug/trace_event.h"
13#include "base/logging.h"
14#include "base/rand_util.h"
15#include "base/stringprintf.h"
16#include "base/time.h"
17#include "sql/connection.h"
18#include "sql/statement.h"
19#include "sql/transaction.h"
20#include "sync/internal_api/public/base/node_ordinal.h"
21#include "sync/protocol/bookmark_specifics.pb.h"
22#include "sync/protocol/sync.pb.h"
23#include "sync/syncable/syncable-inl.h"
24#include "sync/syncable/syncable_columns.h"
25#include "sync/syncable/syncable_util.h"
26#include "sync/util/time.h"
27
28using std::string;
29
30namespace syncer {
31namespace syncable {
32
// This just has to be big enough to hold an UPDATE or INSERT statement that
// modifies all the columns in the entry table.
static const string::size_type kUpdateStatementBufferSize = 2048;

// Increment this version whenever updating DB tables.
// The step-by-step migration path from older versions lives in
// DirectoryBackingStore::InitializeTables().
const int32 kCurrentDBVersion = 86;
39
// Iterate over the fields of |entry| and bind each to |statement| for
// updating, in canonical field-enum order (int64s, times, ids, bits,
// strings, protos, unique positions).  This order must match the column
// order of the statement prepared by PrepareSaveEntryStatement().
void BindFields(const EntryKernel& entry,
                sql::Statement* statement) {
  int index = 0;
  int i = 0;
  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
    statement->BindInt64(index++, entry.ref(static_cast<Int64Field>(i)));
  }
  // Time fields are stored as proto-style int64 timestamps (see
  // TimeToProtoTime in sync/util/time.h).
  for ( ; i < TIME_FIELDS_END; ++i) {
    statement->BindInt64(index++,
                         TimeToProtoTime(
                             entry.ref(static_cast<TimeField>(i))));
  }
  for ( ; i < ID_FIELDS_END; ++i) {
    statement->BindString(index++, entry.ref(static_cast<IdField>(i)).s_);
  }
  for ( ; i < BIT_FIELDS_END; ++i) {
    statement->BindInt(index++, entry.ref(static_cast<BitField>(i)));
  }
  for ( ; i < STRING_FIELDS_END; ++i) {
    statement->BindString(index++, entry.ref(static_cast<StringField>(i)));
  }
  // Protobuf-valued fields are serialized and stored as blobs.
  for ( ; i < PROTO_FIELDS_END; ++i) {
    std::string temp;
    entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp);
    statement->BindBlob(index++, temp.data(), temp.length());
  }
  // Unique positions are likewise persisted in serialized proto form.
  for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
    std::string temp;
    entry.ref(static_cast<UniquePositionField>(i)).SerializeToString(&temp);
    statement->BindBlob(index++, temp.data(), temp.length());
  }
}
74
// The caller owns the returned EntryKernel*.  Assumes the statement currently
// points to a valid row in the metas table. Returns NULL to indicate that
// it detected a corruption in the data on unpacking.  The column order read
// here must match the field-enum order used by BindFields() and
// AppendColumnList().
scoped_ptr<EntryKernel> UnpackEntry(sql::Statement* statement) {
  scoped_ptr<EntryKernel> kernel(new EntryKernel());
  DCHECK_EQ(statement->ColumnCount(), static_cast<int>(FIELD_COUNT));
  int i = 0;
  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
    kernel->put(static_cast<Int64Field>(i), statement->ColumnInt64(i));
  }
  for ( ; i < TIME_FIELDS_END; ++i) {
    kernel->put(static_cast<TimeField>(i),
                ProtoTimeToTime(statement->ColumnInt64(i)));
  }
  for ( ; i < ID_FIELDS_END; ++i) {
    kernel->mutable_ref(static_cast<IdField>(i)).s_ =
        statement->ColumnString(i);
  }
  for ( ; i < BIT_FIELDS_END; ++i) {
    kernel->put(static_cast<BitField>(i), (0 != statement->ColumnInt(i)));
  }
  for ( ; i < STRING_FIELDS_END; ++i) {
    kernel->put(static_cast<StringField>(i),
                statement->ColumnString(i));
  }
  // Proto fields are parsed leniently; parse failures here leave the field
  // default-initialized rather than aborting the load.
  for ( ; i < PROTO_FIELDS_END; ++i) {
    kernel->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray(
        statement->ColumnBlob(i), statement->ColumnByteLength(i));
  }
  // Unique positions, by contrast, are validated strictly: a malformed blob
  // is treated as database corruption.
  for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
    std::string temp;
    statement->ColumnBlobAsString(i, &temp);

    sync_pb::UniquePosition proto;
    if (!proto.ParseFromString(temp)) {
      DVLOG(1) << "Unpacked invalid position.  Assuming the DB is corrupt";
      return scoped_ptr<EntryKernel>(NULL);
    }

    kernel->mutable_ref(static_cast<UniquePositionField>(i)) =
        UniquePosition::FromProto(proto);
  }
  return kernel.Pass();
}
119
120namespace {
121
122string ComposeCreateTableColumnSpecs() {
123  const ColumnSpec* begin = g_metas_columns;
124  const ColumnSpec* end = g_metas_columns + arraysize(g_metas_columns);
125  string query;
126  query.reserve(kUpdateStatementBufferSize);
127  char separator = '(';
128  for (const ColumnSpec* column = begin; column != end; ++column) {
129    query.push_back(separator);
130    separator = ',';
131    query.append(column->name);
132    query.push_back(' ');
133    query.append(column->spec);
134  }
135  query.push_back(')');
136  return query;
137}
138
139void AppendColumnList(std::string* output) {
140  const char* joiner = " ";
141  // Be explicit in SELECT order to match up with UnpackEntry.
142  for (int i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
143    output->append(joiner);
144    output->append(ColumnName(i));
145    joiner = ", ";
146  }
147}
148
149}  // namespace
150
151///////////////////////////////////////////////////////////////////////////////
152// DirectoryBackingStore implementation.
153
// Constructs a backing store for the directory named |dir_name|, creating
// and owning a fresh sql::Connection.
DirectoryBackingStore::DirectoryBackingStore(const string& dir_name)
  : db_(new sql::Connection()),
    dir_name_(dir_name),
    needs_column_refresh_(false) {
}
159
// Overload that takes ownership of a caller-supplied |db| connection
// (mirrors the primary constructor, which allocates its own).
DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
                                             sql::Connection* db)
  : db_(db),
    dir_name_(dir_name),
    needs_column_refresh_(false) {
}
166
// The owned connection (db_) is released by its own destructor.
DirectoryBackingStore::~DirectoryBackingStore() {
}
169
170bool DirectoryBackingStore::DeleteEntries(EntryTable from,
171                                          const MetahandleSet& handles) {
172  if (handles.empty())
173    return true;
174
175  sql::Statement statement;
176  // Call GetCachedStatement() separately to get different statements for
177  // different tables.
178  switch (from) {
179    case METAS_TABLE:
180      statement.Assign(db_->GetCachedStatement(
181          SQL_FROM_HERE, "DELETE FROM metas WHERE metahandle = ?"));
182      break;
183    case DELETE_JOURNAL_TABLE:
184      statement.Assign(db_->GetCachedStatement(
185          SQL_FROM_HERE, "DELETE FROM deleted_metas WHERE metahandle = ?"));
186      break;
187  }
188
189  for (MetahandleSet::const_iterator i = handles.begin(); i != handles.end();
190       ++i) {
191    statement.BindInt64(0, *i);
192    if (!statement.Run())
193      return false;
194    statement.Reset(true);
195  }
196  return true;
197}
198
// Persists everything captured in |snapshot| -- dirty entries, purged
// metahandles, delete-journal rows, and (when dirty) the share_info row plus
// per-type progress markers -- inside a single SQL transaction.  Returns
// false on any statement failure, in which case the transaction rolls back
// when it goes out of scope without Commit().
bool DirectoryBackingStore::SaveChanges(
    const Directory::SaveChangesSnapshot& snapshot) {
  DCHECK(CalledOnValidThread());
  DCHECK(db_->is_open());

  // Back out early if there is nothing to write.
  bool save_info =
    (Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status);
  if (snapshot.dirty_metas.empty() && snapshot.metahandles_to_purge.empty() &&
      snapshot.delete_journals.empty() &&
      snapshot.delete_journals_to_purge.empty() && !save_info) {
    return true;
  }

  sql::Transaction transaction(db_.get());
  if (!transaction.Begin())
    return false;

  // Write all dirty entries to the metas table.
  PrepareSaveEntryStatement(METAS_TABLE, &save_meta_statment_);
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    DCHECK((*i)->is_dirty());
    if (!SaveEntryToDB(&save_meta_statment_, **i))
      return false;
  }

  if (!DeleteEntries(METAS_TABLE, snapshot.metahandles_to_purge))
    return false;

  // Same pattern for the delete journal table.
  PrepareSaveEntryStatement(DELETE_JOURNAL_TABLE,
                            &save_delete_journal_statment_);
  for (EntryKernelSet::const_iterator i = snapshot.delete_journals.begin();
       i != snapshot.delete_journals.end(); ++i) {
    if (!SaveEntryToDB(&save_delete_journal_statment_, **i))
      return false;
  }

  if (!DeleteEntries(DELETE_JOURNAL_TABLE, snapshot.delete_journals_to_purge))
    return false;

  if (save_info) {
    const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
    sql::Statement s1(db_->GetCachedStatement(
            SQL_FROM_HERE,
            "UPDATE share_info "
            "SET store_birthday = ?, "
            "next_id = ?, "
            "bag_of_chips = ?"));
    s1.BindString(0, info.store_birthday);
    s1.BindInt64(1, info.next_id);
    s1.BindBlob(2, info.bag_of_chips.data(), info.bag_of_chips.size());

    if (!s1.Run())
      return false;
    // share_info is expected to contain exactly one row.
    DCHECK_EQ(db_->GetLastChangeCount(), 1);

    sql::Statement s2(db_->GetCachedStatement(
            SQL_FROM_HERE,
            "INSERT OR REPLACE "
            "INTO models (model_id, progress_marker, transaction_version) "
            "VALUES (?, ?, ?)"));

    ModelTypeSet protocol_types = ProtocolTypes();
    for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
         iter.Inc()) {
      ModelType type = iter.Get();
      // We persist not ModelType but rather a protobuf-derived ID.
      string model_id = ModelTypeEnumToModelId(type);
      string progress_marker;
      info.download_progress[type].SerializeToString(&progress_marker);
      s2.BindBlob(0, model_id.data(), model_id.length());
      s2.BindBlob(1, progress_marker.data(), progress_marker.length());
      s2.BindInt64(2, info.transaction_version[type]);
      if (!s2.Run())
        return false;
      DCHECK_EQ(db_->GetLastChangeCount(), 1);
      s2.Reset(true);
    }
  }

  return transaction.Commit();
}
281
// Brings the on-disk schema up to kCurrentDBVersion inside one transaction.
// Each MigrateVersionXToY step advances |version_on_disk| by one; if the
// chain cannot reach the current version (or the DB is newer than we
// understand, or brand new) the fallback path drops all tables and
// recreates them from scratch, forcing a full re-sync.
bool DirectoryBackingStore::InitializeTables() {
  sql::Transaction transaction(db_.get());
  if (!transaction.Begin())
    return false;

  int version_on_disk = GetVersion();

  // Upgrade from version 67. Version 67 was widely distributed as the original
  // Bookmark Sync release. Version 68 removed unique naming.
  if (version_on_disk == 67) {
    if (MigrateVersion67To68())
      version_on_disk = 68;
  }
  // Version 69 introduced additional datatypes.
  if (version_on_disk == 68) {
    if (MigrateVersion68To69())
      version_on_disk = 69;
  }

  if (version_on_disk == 69) {
    if (MigrateVersion69To70())
      version_on_disk = 70;
  }

  // Version 71 changed the sync progress information to be per-datatype.
  if (version_on_disk == 70) {
    if (MigrateVersion70To71())
      version_on_disk = 71;
  }

  // Version 72 removed extended attributes, a legacy way to do extensible
  // key/value information, stored in their own table.
  if (version_on_disk == 71) {
    if (MigrateVersion71To72())
      version_on_disk = 72;
  }

  // Version 73 added a field for notification state.
  if (version_on_disk == 72) {
    if (MigrateVersion72To73())
      version_on_disk = 73;
  }

  // Version 74 added state for the autofill migration.
  if (version_on_disk == 73) {
    if (MigrateVersion73To74())
      version_on_disk = 74;
  }

  // Version 75 migrated from int64-based timestamps to per-datatype tokens.
  if (version_on_disk == 74) {
    if (MigrateVersion74To75())
      version_on_disk = 75;
  }

  // Version 76 removed all (5) autofill migration related columns.
  if (version_on_disk == 75) {
    if (MigrateVersion75To76())
      version_on_disk = 76;
  }

  // Version 77 standardized all time fields to ms since the Unix
  // epoch.
  if (version_on_disk == 76) {
    if (MigrateVersion76To77())
      version_on_disk = 77;
  }

  // Version 78 added the column base_server_specifics to the metas table.
  if (version_on_disk == 77) {
    if (MigrateVersion77To78())
      version_on_disk = 78;
  }

  // Version 79 migration is a one-time fix for some users in a bad state.
  if (version_on_disk == 78) {
    if (MigrateVersion78To79())
      version_on_disk = 79;
  }

  // Version 80 migration is adding the bag_of_chips column.
  if (version_on_disk == 79) {
    if (MigrateVersion79To80())
      version_on_disk = 80;
  }

  // Version 81 replaces the int64 server_position_in_parent_field
  // with a blob server_ordinal_in_parent field.
  if (version_on_disk == 80) {
    if (MigrateVersion80To81())
      version_on_disk = 81;
  }

  // Version 82 migration added transaction_version column per data type.
  if (version_on_disk == 81) {
    if (MigrateVersion81To82())
      version_on_disk = 82;
  }

  // Version 83 migration added transaction_version column per sync entry.
  if (version_on_disk == 82) {
    if (MigrateVersion82To83())
      version_on_disk = 83;
  }

  // Version 84 migration added deleted_metas table.
  if (version_on_disk == 83) {
    if (MigrateVersion83To84())
      version_on_disk = 84;
  }

  // Version 85 migration removes the initial_sync_ended bits.
  if (version_on_disk == 84) {
    if (MigrateVersion84To85())
      version_on_disk = 85;
  }

  // Version 86 migration converts bookmarks to the unique positioning system.
  // It also introduces a new field to store a unique ID for each bookmark.
  if (version_on_disk == 85) {
    if (MigrateVersion85To86())
      version_on_disk = 86;
  }

  // If one of the migrations requested it, drop columns that aren't current.
  // It's only safe to do this after migrating all the way to the current
  // version.
  if (version_on_disk == kCurrentDBVersion && needs_column_refresh_) {
    // A failed refresh forces the fallback (drop & recreate) path below.
    if (!RefreshColumns())
      version_on_disk = 0;
  }

  // A final, alternative catch-all migration to simply re-sync everything.
  if (version_on_disk != kCurrentDBVersion) {
    if (version_on_disk > kCurrentDBVersion)
      return false;

    // Fallback (re-sync everything) migration path.
    DVLOG(1) << "Old/null sync database, version " << version_on_disk;
    // Delete the existing database (if any), and create a fresh one.
    DropAllTables();
    if (!CreateTables())
      return false;
  }

  // Sanity-read the creation metadata; failure here means the share_info
  // table is unusable.
  sql::Statement s(db_->GetUniqueStatement(
          "SELECT db_create_version, db_create_time FROM share_info"));
  if (!s.Step())
    return false;
  string db_create_version = s.ColumnString(0);
  int db_create_time = s.ColumnInt(1);
  DVLOG(1) << "DB created at " << db_create_time << " by version " <<
      db_create_version;

  return transaction.Commit();
}
438
// This function drops unused columns by creating a new table that contains only
// the currently used columns then copying all rows from the old tables into
// this new one.  The tables are then rearranged so the new replaces the old.
// Applied to both the metas and share_info tables.  Returns false if any
// step fails; callers then fall back to recreating the database.
bool DirectoryBackingStore::RefreshColumns() {
  DCHECK(needs_column_refresh_);

  // Create a new table named temp_metas.
  SafeDropTable("temp_metas");
  if (!CreateMetasTable(true))
    return false;

  // Populate temp_metas from metas.
  //
  // At this point, the metas table may contain columns belonging to obsolete
  // schema versions.  This statement explicitly lists only the columns that
  // belong to the current schema version, so the obsolete columns will be
  // effectively dropped once we rename temp_metas over top of metas.
  std::string query = "INSERT INTO temp_metas (";
  AppendColumnList(&query);
  query.append(") SELECT ");
  AppendColumnList(&query);
  query.append(" FROM metas");
  if (!db_->Execute(query.c_str()))
    return false;

  // Drop metas.
  SafeDropTable("metas");

  // Rename temp_metas -> metas.
  if (!db_->Execute("ALTER TABLE temp_metas RENAME TO metas"))
    return false;

  // Repeat the process for share_info.
  SafeDropTable("temp_share_info");
  if (!CreateShareInfoTable(true))
    return false;

  // TODO(rlarocque, 124140): Remove notification_state.
  if (!db_->Execute(
          "INSERT INTO temp_share_info (id, name, store_birthday, "
          "db_create_version, db_create_time, next_id, cache_guid,"
          "notification_state, bag_of_chips) "
          "SELECT id, name, store_birthday, db_create_version, "
          "db_create_time, next_id, cache_guid, notification_state, "
          "bag_of_chips "
          "FROM share_info"))
    return false;

  SafeDropTable("share_info");
  if (!db_->Execute("ALTER TABLE temp_share_info RENAME TO share_info"))
    return false;

  needs_column_refresh_ = false;
  return true;
}
494
// Loads all rows of the metas table into |entry_bucket|.
bool DirectoryBackingStore::LoadEntries(MetahandlesIndex* entry_bucket) {
  return LoadEntriesInternal("metas", entry_bucket);
}
498
// Loads all rows of the deleted_metas table into |delete_journals|.
bool DirectoryBackingStore::LoadDeleteJournals(
    JournalIndex* delete_journals) {
  return LoadEntriesInternal("deleted_metas", delete_journals);
}
503
// Loads the directory-wide state: the single share_info row, the per-type
// progress markers / transaction versions from the models table, and the
// maximum metahandle currently in use.  Returns false if any of the reads
// fails outright.
bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
  {
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT store_birthday, next_id, cache_guid, bag_of_chips "
            "FROM share_info"));
    if (!s.Step())
      return false;

    info->kernel_info.store_birthday = s.ColumnString(0);
    info->kernel_info.next_id = s.ColumnInt64(1);
    info->cache_guid = s.ColumnString(2);
    s.ColumnBlobAsString(3, &(info->kernel_info.bag_of_chips));

    // Verify there was only one row returned.
    DCHECK(!s.Step());
    DCHECK(s.Succeeded());
  }

  {
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT model_id, progress_marker, "
            "transaction_version FROM models"));

    while (s.Step()) {
      ModelType type = ModelIdToModelTypeEnum(s.ColumnBlob(0),
                                              s.ColumnByteLength(0));
      // Rows with unrecognized or non-syncable model ids are skipped.
      if (type != UNSPECIFIED && type != TOP_LEVEL_FOLDER) {
        info->kernel_info.download_progress[type].ParseFromArray(
            s.ColumnBlob(1), s.ColumnByteLength(1));
        info->kernel_info.transaction_version[type] = s.ColumnInt64(2);
      }
    }
    if (!s.Succeeded())
      return false;
  }
  {
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT MAX(metahandle) FROM metas"));
    if (!s.Step())
      return false;

    info->max_metahandle = s.ColumnInt64(0);

    // Verify only one row was returned.
    DCHECK(!s.Step());
    DCHECK(s.Succeeded());
  }
  return true;
}
556
/* static */
// Resets |save_statement| (clearing previous bindings), binds every field of
// |entry| to it, and executes it.  Returns the statement's success.
bool DirectoryBackingStore::SaveEntryToDB(sql::Statement* save_statement,
                                          const EntryKernel& entry) {
  save_statement->Reset(true);
  BindFields(entry, save_statement);
  return save_statement->Run();
}
564
// Purges deleted entries that no longer need to be kept: first those with
// no pending local change (not unsynced) and no unapplied server update,
// then deleted entries whose id begins with 'c'.
// NOTE(review): the 'c%' pattern appears to target client-assigned ids that
// were never committed to the server -- confirm against the syncable::Id
// naming scheme.
bool DirectoryBackingStore::DropDeletedEntries() {
  if (!db_->Execute("DELETE FROM metas "
                    "WHERE is_del > 0 "
                    "AND is_unsynced < 1 "
                    "AND is_unapplied_update < 1")) {
    return false;
  }
  if (!db_->Execute("DELETE FROM metas "
                    "WHERE is_del > 0 "
                    "AND id LIKE 'c%'")) {
    return false;
  }
  return true;
}
579
580bool DirectoryBackingStore::SafeDropTable(const char* table_name) {
581  string query = "DROP TABLE IF EXISTS ";
582  query.append(table_name);
583  return db_->Execute(query.c_str());
584}
585
// Drops every table this store has ever created (including temp and legacy
// tables), leaving an empty database ready for CreateTables().  Also clears
// the pending column-refresh flag, since there are no columns left to fix.
void DirectoryBackingStore::DropAllTables() {
  SafeDropTable("metas");
  SafeDropTable("temp_metas");
  SafeDropTable("share_info");
  SafeDropTable("temp_share_info");
  SafeDropTable("share_version");
  SafeDropTable("extended_attributes");
  SafeDropTable("models");
  SafeDropTable("temp_models");
  needs_column_refresh_ = false;
}
597
598// static
599ModelType DirectoryBackingStore::ModelIdToModelTypeEnum(
600    const void* data, int size) {
601  sync_pb::EntitySpecifics specifics;
602  if (!specifics.ParseFromArray(data, size))
603    return UNSPECIFIED;
604  return GetModelTypeFromSpecifics(specifics);
605}
606
// static
// Encodes |model_type| as a persisted model id: an EntitySpecifics proto
// with the type's default field set, serialized to a string.  Inverse of
// ModelIdToModelTypeEnum().
string DirectoryBackingStore::ModelTypeEnumToModelId(ModelType model_type) {
  sync_pb::EntitySpecifics specifics;
  AddDefaultFieldValue(model_type, &specifics);
  return specifics.SerializeAsString();
}
613
614// static
615std::string DirectoryBackingStore::GenerateCacheGUID() {
616  // Generate a GUID with 128 bits of randomness.
617  const int kGuidBytes = 128 / 8;
618  std::string guid;
619  base::Base64Encode(base::RandBytesAsString(kGuidBytes), &guid);
620  return guid;
621}
622
// Generic helper for folding legacy columns into a protobuf specifics
// column.  For every metas row it reads the current |specifics_column| blob
// plus the comma-separated legacy |old_columns|, lets |handler_function|
// merge the legacy values (starting at column index 2 of the query) into
// the EntitySpecifics, and writes the updated proto back by metahandle.
// Returns false if any UPDATE fails or the scan errors out.
bool DirectoryBackingStore::MigrateToSpecifics(
    const char* old_columns,
    const char* specifics_column,
    void (*handler_function)(sql::Statement* old_value_query,
                             int old_value_column,
                             sync_pb::EntitySpecifics* mutable_new_value)) {
  std::string query_sql = base::StringPrintf(
      "SELECT metahandle, %s, %s FROM metas", specifics_column, old_columns);
  std::string update_sql = base::StringPrintf(
      "UPDATE metas SET %s = ? WHERE metahandle = ?", specifics_column);

  sql::Statement query(db_->GetUniqueStatement(query_sql.c_str()));
  sql::Statement update(db_->GetUniqueStatement(update_sql.c_str()));

  while (query.Step()) {
    int64 metahandle = query.ColumnInt64(0);
    std::string new_value_bytes;
    query.ColumnBlobAsString(1, &new_value_bytes);
    sync_pb::EntitySpecifics new_value;
    new_value.ParseFromString(new_value_bytes);
    handler_function(&query, 2, &new_value);
    new_value.SerializeToString(&new_value_bytes);

    update.BindBlob(0, new_value_bytes.data(), new_value_bytes.length());
    update.BindInt64(1, metahandle);
    if (!update.Run())
      return false;
    update.Reset(true);
  }
  return query.Succeeded();
}
654
655bool DirectoryBackingStore::SetVersion(int version) {
656  sql::Statement s(db_->GetCachedStatement(
657          SQL_FROM_HERE, "UPDATE share_version SET data = ?"));
658  s.BindInt(0, version);
659
660  return s.Run();
661}
662
663int DirectoryBackingStore::GetVersion() {
664  if (!db_->DoesTableExist("share_version"))
665    return 0;
666
667  sql::Statement statement(db_->GetUniqueStatement(
668          "SELECT data FROM share_version"));
669  if (statement.Step()) {
670    return statement.ColumnInt(0);
671  } else {
672    return 0;
673  }
674}
675
bool DirectoryBackingStore::MigrateVersion67To68() {
  // This change simply removed three columns:
  //   string NAME
  //   string UNSANITIZED_NAME
  //   string SERVER_NAME
  // No data migration is necessary, but we should do a column refresh.
  // The obsolete columns are physically dropped later, by RefreshColumns().
  SetVersion(68);
  needs_column_refresh_ = true;
  return true;
}
686
bool DirectoryBackingStore::MigrateVersion69To70() {
  // Added "unique_client_tag", renamed "singleton_tag" to unique_server_tag.
  // Note the version is bumped before the ALTERs; the transaction opened in
  // InitializeTables() rolls everything back together on failure.
  SetVersion(70);
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN unique_server_tag varchar"))
    return false;
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN unique_client_tag varchar"))
    return false;
  // The old singleton_tag column is dropped later by RefreshColumns().
  needs_column_refresh_ = true;

  if (!db_->Execute(
          "UPDATE metas SET unique_server_tag = singleton_tag"))
    return false;

  return true;
}
704
705namespace {
706
// Callback passed to MigrateToSpecifics for the v68->v69 migration.  See
// MigrateVersion68To69().  Reads four legacy columns starting at
// |old_value_column| (is_bookmark_object, url, favicon, is_dir) and folds
// them into |mutable_new_value|'s BookmarkSpecifics.
void EncodeBookmarkURLAndFavicon(sql::Statement* old_value_query,
                                 int old_value_column,
                                 sync_pb::EntitySpecifics* mutable_new_value) {
  // Extract data from the column trio we expect.
  bool old_is_bookmark_object = old_value_query->ColumnBool(old_value_column);
  std::string old_url = old_value_query->ColumnString(old_value_column + 1);
  std::string old_favicon;
  old_value_query->ColumnBlobAsString(old_value_column + 2, &old_favicon);
  bool old_is_dir = old_value_query->ColumnBool(old_value_column + 3);

  if (old_is_bookmark_object) {
    sync_pb::BookmarkSpecifics* bookmark_data =
        mutable_new_value->mutable_bookmark();
    // Folders carry an (empty) bookmark message but no url/favicon.
    if (!old_is_dir) {
      bookmark_data->set_url(old_url);
      bookmark_data->set_favicon(old_favicon);
    }
  }
}
728
729}  // namespace
730
bool DirectoryBackingStore::MigrateVersion68To69() {
  // In Version 68, there were columns on table 'metas':
  //   string BOOKMARK_URL
  //   string SERVER_BOOKMARK_URL
  //   blob BOOKMARK_FAVICON
  //   blob SERVER_BOOKMARK_FAVICON
  // In version 69, these columns went away in favor of storing
  // a serialized EntrySpecifics protobuf in the columns:
  //   protobuf blob SPECIFICS
  //   protobuf blob SERVER_SPECIFICS
  // For bookmarks, EntrySpecifics is extended as per
  // bookmark_specifics.proto. This migration converts bookmarks from the
  // former scheme to the latter scheme.

  // First, add the two new columns to the schema.
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN specifics blob"))
    return false;
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN server_specifics blob"))
    return false;

  // Next, fold data from the old columns into the new protobuf columns,
  // once for the local fields and once for their server-side counterparts.
  if (!MigrateToSpecifics(("is_bookmark_object, bookmark_url, "
                           "bookmark_favicon, is_dir"),
                          "specifics",
                          &EncodeBookmarkURLAndFavicon)) {
    return false;
  }
  if (!MigrateToSpecifics(("server_is_bookmark_object, "
                           "server_bookmark_url, "
                           "server_bookmark_favicon, "
                           "server_is_dir"),
                          "server_specifics",
                          &EncodeBookmarkURLAndFavicon)) {
    return false;
  }

  // Lastly, fix up the "Google Chrome" folder, which is of the TOP_LEVEL_FOLDER
  // ModelType: it shouldn't have BookmarkSpecifics.
  if (!db_->Execute(
          "UPDATE metas SET specifics = NULL, server_specifics = NULL WHERE "
          "singleton_tag IN ('google_chrome')"))
    return false;

  SetVersion(69);
  needs_column_refresh_ = true;  // Trigger deletion of old columns.
  return true;
}
780
// Version 71, the columns 'initial_sync_ended' and 'last_sync_timestamp'
// were removed from the share_info table.  They were replaced by
// the 'models' table, which has these values on a per-datatype basis.
bool DirectoryBackingStore::MigrateVersion70To71() {
  if (!CreateV71ModelsTable())
    return false;

  // Move data from the old share_info columns to the new models table.
  // Pre-v71 sync only tracked a single (bookmark) datatype, so the lone
  // share_info values seed the BOOKMARKS row.
  {
    sql::Statement fetch(db_->GetUniqueStatement(
            "SELECT last_sync_timestamp, initial_sync_ended FROM share_info"));
    if (!fetch.Step())
      return false;

    int64 last_sync_timestamp = fetch.ColumnInt64(0);
    bool initial_sync_ended = fetch.ColumnBool(1);

    // Verify there were no additional rows returned.
    DCHECK(!fetch.Step());
    DCHECK(fetch.Succeeded());

    sql::Statement update(db_->GetUniqueStatement(
            "INSERT INTO models (model_id, "
            "last_download_timestamp, initial_sync_ended) VALUES (?, ?, ?)"));
    string bookmark_model_id = ModelTypeEnumToModelId(BOOKMARKS);
    update.BindBlob(0, bookmark_model_id.data(), bookmark_model_id.size());
    update.BindInt64(1, last_sync_timestamp);
    update.BindBool(2, initial_sync_ended);

    if (!update.Run())
      return false;
  }

  // Drop the columns from the old share_info table via a temp table.
  const bool kCreateAsTempShareInfo = true;

  if (!CreateShareInfoTableVersion71(kCreateAsTempShareInfo))
    return false;
  if (!db_->Execute(
          "INSERT INTO temp_share_info (id, name, store_birthday, "
          "db_create_version, db_create_time, next_id, cache_guid) "
          "SELECT id, name, store_birthday, db_create_version, "
          "db_create_time, next_id, cache_guid FROM share_info"))
    return false;
  SafeDropTable("share_info");
  if (!db_->Execute(
          "ALTER TABLE temp_share_info RENAME TO share_info"))
    return false;
  SetVersion(71);
  return true;
}
832
bool DirectoryBackingStore::MigrateVersion71To72() {
  // Version 72 removed a table 'extended_attributes', whose
  // contents didn't matter, so no data is preserved.
  SafeDropTable("extended_attributes");
  SetVersion(72);
  return true;
}
840
bool DirectoryBackingStore::MigrateVersion72To73() {
  // Version 73 added one column to the table 'share_info': notification_state.
  // The column starts out NULL for existing rows.
  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN notification_state BLOB"))
    return false;
  SetVersion(73);
  return true;
}
849
bool DirectoryBackingStore::MigrateVersion73To74() {
  // Version 74 added the following columns to the table 'share_info',
  // each defaulting to 0:
  //   autofill_migration_state
  //   bookmarks_added_during_autofill_migration
  //   autofill_migration_time
  //   autofill_entries_added_during_migration
  //   autofill_profiles_added_during_migration

  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN "
          "autofill_migration_state INT default 0"))
    return false;

  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN "
          "bookmarks_added_during_autofill_migration "
          "INT default 0"))
    return false;

  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN autofill_migration_time "
          "INT default 0"))
    return false;

  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN "
          "autofill_entries_added_during_migration "
          "INT default 0"))
    return false;

  if (!db_->Execute(
          "ALTER TABLE share_info ADD COLUMN "
          "autofill_profiles_added_during_migration "
          "INT default 0"))
    return false;

  SetVersion(74);
  return true;
}
889
bool DirectoryBackingStore::MigrateVersion74To75() {
  // In version 74, there was a table 'models':
  //     blob model_id (entity specifics, primary key)
  //     int last_download_timestamp
  //     boolean initial_sync_ended
  // In version 75, we deprecated the integer-valued last_download_timestamp,
  // using instead a protobuf-valued progress_marker field:
  //     blob progress_marker
  // The progress_marker values are initialized from the value of
  // last_download_timestamp, thereby preserving the download state.

  // Move aside the old table and create a new empty one at the current schema.
  if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
    return false;
  if (!CreateV75ModelsTable())
    return false;

  sql::Statement query(db_->GetUniqueStatement(
          "SELECT model_id, last_download_timestamp, initial_sync_ended "
          "FROM temp_models"));

  sql::Statement update(db_->GetUniqueStatement(
          "INSERT INTO models (model_id, "
          "progress_marker, initial_sync_ended) VALUES (?, ?, ?)"));

  while (query.Step()) {
    ModelType type = ModelIdToModelTypeEnum(query.ColumnBlob(0),
                                            query.ColumnByteLength(0));
    // Rows whose model_id no longer decodes to a known type are dropped.
    if (type != UNSPECIFIED) {
      // Set the |timestamp_token_for_migration| on a new
      // DataTypeProgressMarker, using the old value of last_download_timestamp.
      // The server will turn this into a real token on our behalf the next
      // time we check for updates.
      sync_pb::DataTypeProgressMarker progress_marker;
      progress_marker.set_data_type_id(
          GetSpecificsFieldNumberFromModelType(type));
      progress_marker.set_timestamp_token_for_migration(query.ColumnInt64(1));
      std::string progress_blob;
      progress_marker.SerializeToString(&progress_blob);

      update.BindBlob(0, query.ColumnBlob(0), query.ColumnByteLength(0));
      update.BindBlob(1, progress_blob.data(), progress_blob.length());
      update.BindBool(2, query.ColumnBool(2));
      if (!update.Run())
        return false;
      update.Reset(true);
    }
  }
  if (!query.Succeeded())
    return false;

  // Drop the old table.
  SafeDropTable("temp_models");

  SetVersion(75);
  return true;
}
947
948bool DirectoryBackingStore::MigrateVersion75To76() {
949  // This change removed five columns:
950  //   autofill_migration_state
951  //   bookmarks_added_during_autofill_migration
952  //   autofill_migration_time
953  //   autofill_entries_added_during_migration
954  //   autofill_profiles_added_during_migration
955  // No data migration is necessary, but we should do a column refresh.
956  SetVersion(76);
957  needs_column_refresh_ = true;
958  return true;
959}
960
bool DirectoryBackingStore::MigrateVersion76To77() {
  // This change changes the format of stored timestamps to ms since
  // the Unix epoch.
#if defined(OS_WIN)
// On Windows, we used to store timestamps in FILETIME format (100s of
// ns since Jan 1, 1601).  Magic numbers taken from
// http://stackoverflow.com/questions/5398557/
//     java-library-for-dealing-with-win32-filetime
// .
#define TO_UNIX_TIME_MS(x) #x " = " #x " / 10000 - 11644473600000"
#else
// On other platforms, we used to store timestamps in time_t format (s
// since the Unix epoch).
#define TO_UNIX_TIME_MS(x) #x " = " #x " * 1000"
#endif
  // TO_UNIX_TIME_MS(col) stringizes to the SQL fragment "col = <expr>", so
  // the statement below converts all four time columns in a single UPDATE.
  sql::Statement update_timestamps(db_->GetUniqueStatement(
          "UPDATE metas SET "
          TO_UNIX_TIME_MS(mtime) ", "
          TO_UNIX_TIME_MS(server_mtime) ", "
          TO_UNIX_TIME_MS(ctime) ", "
          TO_UNIX_TIME_MS(server_ctime)));
#undef TO_UNIX_TIME_MS
  if (!update_timestamps.Run())
    return false;
  SetVersion(77);
  return true;
}
988
989bool DirectoryBackingStore::MigrateVersion77To78() {
990  // Version 78 added one column to table 'metas': base_server_specifics.
991  if (!db_->Execute(
992          "ALTER TABLE metas ADD COLUMN base_server_specifics BLOB")) {
993    return false;
994  }
995  SetVersion(78);
996  return true;
997}
998
999bool DirectoryBackingStore::MigrateVersion78To79() {
1000  // Some users are stuck with a DB that causes them to reuse existing IDs.  We
1001  // perform this one-time fixup on all users to help the few that are stuck.
1002  // See crbug.com/142987 for details.
1003  if (!db_->Execute(
1004          "UPDATE share_info SET next_id = next_id - 65536")) {
1005    return false;
1006  }
1007  SetVersion(79);
1008  return true;
1009}
1010
1011bool DirectoryBackingStore::MigrateVersion79To80() {
1012  if (!db_->Execute(
1013          "ALTER TABLE share_info ADD COLUMN bag_of_chips BLOB"))
1014    return false;
1015  sql::Statement update(db_->GetUniqueStatement(
1016          "UPDATE share_info SET bag_of_chips = ?"));
1017  // An empty message is serialized to an empty string.
1018  update.BindBlob(0, NULL, 0);
1019  if (!update.Run())
1020    return false;
1021  SetVersion(80);
1022  return true;
1023}
1024
1025bool DirectoryBackingStore::MigrateVersion80To81() {
1026  if(!db_->Execute(
1027         "ALTER TABLE metas ADD COLUMN server_ordinal_in_parent BLOB"))
1028    return false;
1029
1030  sql::Statement get_positions(db_->GetUniqueStatement(
1031      "SELECT metahandle, server_position_in_parent FROM metas"));
1032
1033  sql::Statement put_ordinals(db_->GetUniqueStatement(
1034      "UPDATE metas SET server_ordinal_in_parent = ?"
1035      "WHERE metahandle = ?"));
1036
1037  while(get_positions.Step()) {
1038    int64 metahandle = get_positions.ColumnInt64(0);
1039    int64 position = get_positions.ColumnInt64(1);
1040
1041    const std::string& ordinal = Int64ToNodeOrdinal(position).ToInternalValue();
1042    put_ordinals.BindBlob(0, ordinal.data(), ordinal.length());
1043    put_ordinals.BindInt64(1, metahandle);
1044
1045    if(!put_ordinals.Run())
1046      return false;
1047    put_ordinals.Reset(true);
1048  }
1049
1050  SetVersion(81);
1051  needs_column_refresh_ = true;
1052  return true;
1053}
1054
1055bool DirectoryBackingStore::MigrateVersion81To82() {
1056  if (!db_->Execute(
1057      "ALTER TABLE models ADD COLUMN transaction_version BIGINT default 0"))
1058    return false;
1059  sql::Statement update(db_->GetUniqueStatement(
1060      "UPDATE models SET transaction_version = 0"));
1061  if (!update.Run())
1062    return false;
1063  SetVersion(82);
1064  return true;
1065}
1066
1067bool DirectoryBackingStore::MigrateVersion82To83() {
1068  // Version 83 added transaction_version on sync node.
1069  if (!db_->Execute(
1070      "ALTER TABLE metas ADD COLUMN transaction_version BIGINT default 0"))
1071    return false;
1072  sql::Statement update(db_->GetUniqueStatement(
1073      "UPDATE metas SET transaction_version = 0"));
1074  if (!update.Run())
1075    return false;
1076  SetVersion(83);
1077  return true;
1078}
1079
1080bool DirectoryBackingStore::MigrateVersion83To84() {
1081  // Version 84 added deleted_metas table to store deleted metas until we know
1082  // for sure that the deletions are persisted in native models.
1083  string query = "CREATE TABLE deleted_metas ";
1084  query.append(ComposeCreateTableColumnSpecs());
1085  if (!db_->Execute(query.c_str()))
1086    return false;
1087  SetVersion(84);
1088  return true;
1089}
1090
1091bool DirectoryBackingStore::MigrateVersion84To85() {
1092  // Version 85 removes the initial_sync_ended flag.
1093  if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
1094    return false;
1095  if (!CreateModelsTable())
1096    return false;
1097  if (!db_->Execute("INSERT INTO models SELECT "
1098                    "model_id, progress_marker, transaction_version "
1099                    "FROM temp_models")) {
1100    return false;
1101  }
1102  SafeDropTable("temp_models");
1103
1104  SetVersion(85);
1105  return true;
1106}
1107
bool DirectoryBackingStore::MigrateVersion85To86() {
  // Version 86 removes both server ordinals and local NEXT_ID, PREV_ID and
  // SERVER_{POSITION,ORDINAL}_IN_PARENT and replaces them with UNIQUE_POSITION
  // and SERVER_UNIQUE_POSITION.
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "server_unique_position BLOB")) {
    return false;
  }
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "unique_position BLOB")) {
    return false;
  }
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "unique_bookmark_tag VARCHAR")) {
    return false;
  }

  // Fetch the cache_guid from the DB, because we don't otherwise have access to
  // it from here.
  sql::Statement get_cache_guid(db_->GetUniqueStatement(
      "SELECT cache_guid FROM share_info"));
  if (!get_cache_guid.Step()) {
    return false;
  }
  std::string cache_guid = get_cache_guid.ColumnString(0);
  // share_info is expected to contain exactly one row.
  DCHECK(!get_cache_guid.Step());
  DCHECK(get_cache_guid.Succeeded());

  sql::Statement get(db_->GetUniqueStatement(
      "SELECT "
      "  metahandle, "
      "  id, "
      "  specifics, "
      "  is_dir, "
      "  unique_server_tag, "
      "  server_ordinal_in_parent "
      "FROM metas"));

  // Note that we set both the local and server position based on the server
  // position.  We will lose any unsynced local position changes.
  // Unfortunately, there's nothing we can do to avoid that.  The NEXT_ID /
  // PREV_ID values can't be translated into a UNIQUE_POSITION in a reliable
  // way.
  //
  // NOTE(review): the "= ?" and "WHERE" literals below concatenate with no
  // intervening space, producing "= ?WHERE"; this appears to rely on the SQL
  // tokenizer splitting them — confirm, and consider adding a space.
  sql::Statement put(db_->GetCachedStatement(
      SQL_FROM_HERE,
      "UPDATE metas SET"
      "  server_unique_position = ?,"
      "  unique_position = ?,"
      "  unique_bookmark_tag = ?"
      "WHERE metahandle = ?"));

  while (get.Step()) {
    int64 metahandle = get.ColumnInt64(0);

    std::string id_string;
    get.ColumnBlobAsString(1, &id_string);

    sync_pb::EntitySpecifics specifics;
    specifics.ParseFromArray(
        get.ColumnBlob(2), get.ColumnByteLength(2));

    bool is_dir = get.ColumnBool(3);

    std::string server_unique_tag = get.ColumnString(4);

    std::string ordinal_string;
    get.ColumnBlobAsString(5, &ordinal_string);
    NodeOrdinal ordinal(ordinal_string);


    std::string unique_bookmark_tag;

    // We only maintain positions for bookmarks that are not server-defined
    // top-level folders.
    UniquePosition position;
    if (GetModelTypeFromSpecifics(specifics) == BOOKMARKS
        && !(is_dir && !server_unique_tag.empty())) {
      // A leading 'c' on the ID marks a client-created (uncommitted) item.
      if (id_string.at(0) == 'c') {
        // We found an uncommitted item.  This is rare, but fortunate.  This
        // means we can set the bookmark tag according to the originator client
        // item ID and originator cache guid, because (unlike the other case) we
        // know that this client is the originator.
        unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
            cache_guid,
            id_string.substr(1));
      } else {
        // If we've already committed the item, then we don't know who the
        // originator was.  We do not have access to the originator client item
        // ID and originator cache guid at this point.
        //
        // We will base our hash entirely on the server ID instead.  This is
        // incorrect, but at least all clients that undergo this migration step
        // will be incorrect in the same way.
        //
        // To get everyone back into a synced state, we will update the bookmark
        // tag according to the originator_cache_guid and originator_item_id
        // when we see updates for this item.  That should ensure that commonly
        // modified items will end up with the proper tag values eventually.
        unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
            std::string(), // cache_guid left intentionally blank.
            id_string.substr(1));
      }

      int64 int_position = NodeOrdinalToInt64(ordinal);
      position = UniquePosition::FromInt64(int_position, unique_bookmark_tag);
    } else {
      // Leave bookmark_tag and position at their default (invalid) values.
    }

    std::string position_blob;
    position.SerializeToString(&position_blob);
    // Server and local positions are intentionally identical; see the note
    // above the UPDATE statement.
    put.BindBlob(0, position_blob.data(), position_blob.length());
    put.BindBlob(1, position_blob.data(), position_blob.length());
    put.BindBlob(2, unique_bookmark_tag.data(), unique_bookmark_tag.length());
    put.BindInt64(3, metahandle);

    if (!put.Run())
      return false;
    put.Reset(true);
  }

  SetVersion(86);
  needs_column_refresh_ = true;
  return true;
}
1232
// Creates and seeds every table for a brand-new database: share_version,
// share_info (one row), models, metas (plus deleted_metas via
// CreateMetasTable), and the root entry in metas.  Returns false on the
// first failed statement.
bool DirectoryBackingStore::CreateTables() {
  DVLOG(1) << "First run, creating tables";
  // Create two little tables share_version and share_info
  if (!db_->Execute(
          "CREATE TABLE share_version ("
          "id VARCHAR(128) primary key, data INT)")) {
    return false;
  }

  {
    sql::Statement s(db_->GetUniqueStatement(
            "INSERT INTO share_version VALUES(?, ?)"));
    s.BindString(0, dir_name_);
    s.BindInt(1, kCurrentDBVersion);

    if (!s.Run())
      return false;
  }

  const bool kCreateAsTempShareInfo = false;
  if (!CreateShareInfoTable(kCreateAsTempShareInfo)) {
    return false;
  }

  {
    sql::Statement s(db_->GetUniqueStatement(
            "INSERT INTO share_info VALUES"
            "(?, "  // id
            "?, "   // name
            "?, "   // store_birthday
            "?, "   // db_create_version
            "?, "   // db_create_time
            "-2, "  // next_id
            "?, "   // cache_guid
            // TODO(rlarocque, 124140): Remove notification_state field.
            "?, "   // notification_state
            "?);"));  // bag_of_chips
    s.BindString(0, dir_name_);                   // id
    s.BindString(1, dir_name_);                   // name
    s.BindString(2, std::string());               // store_birthday
    // TODO(akalin): Remove this unused db_create_version field. (Or
    // actually use it for something.) http://crbug.com/118356
    s.BindString(3, "Unknown");                   // db_create_version
    s.BindInt(4, static_cast<int32>(time(0)));    // db_create_time
    s.BindString(5, GenerateCacheGUID());         // cache_guid
    // TODO(rlarocque, 124140): Remove this unused notification-state field.
    s.BindBlob(6, NULL, 0);                       // notification_state
    s.BindBlob(7, NULL, 0);                       // bag_of_chips
    if (!s.Run())
      return false;
  }

  if (!CreateModelsTable())
    return false;

  // Create the big metas table.
  if (!CreateMetasTable(false))
    return false;

  {
    // Insert the entry for the root into the metas table.
    const int64 now = TimeToProtoTime(base::Time::Now());
    // NOTE(review): "r" is double-quoted; standard SQL would read that as an
    // identifier, so this appears to rely on SQLite's fallback of treating
    // unknown double-quoted identifiers as string literals — confirm.
    sql::Statement s(db_->GetUniqueStatement(
            "INSERT INTO metas "
            "( id, metahandle, is_dir, ctime, mtime ) "
            "VALUES ( \"r\", 1, 1, ?, ? )"));
    s.BindInt64(0, now);
    s.BindInt64(1, now);

    if (!s.Run())
      return false;
  }

  return true;
}
1308
1309bool DirectoryBackingStore::CreateMetasTable(bool is_temporary) {
1310  string query = "CREATE TABLE ";
1311  query.append(is_temporary ? "temp_metas" : "metas");
1312  query.append(ComposeCreateTableColumnSpecs());
1313  if (!db_->Execute(query.c_str()))
1314    return false;
1315
1316  // Create a deleted_metas table to save copies of deleted metas until the
1317  // deletions are persisted. For simplicity, don't try to migrate existing
1318  // data because it's rarely used.
1319  SafeDropTable("deleted_metas");
1320  query = "CREATE TABLE deleted_metas ";
1321  query.append(ComposeCreateTableColumnSpecs());
1322  return db_->Execute(query.c_str());
1323}
1324
1325bool DirectoryBackingStore::CreateV71ModelsTable() {
1326  // This is an old schema for the Models table, used from versions 71 to 74.
1327  return db_->Execute(
1328      "CREATE TABLE models ("
1329      "model_id BLOB primary key, "
1330      "last_download_timestamp INT, "
1331      // Gets set if the syncer ever gets updates from the
1332      // server and the server returns 0.  Lets us detect the
1333      // end of the initial sync.
1334      "initial_sync_ended BOOLEAN default 0)");
1335}
1336
1337bool DirectoryBackingStore::CreateV75ModelsTable() {
1338  // This is an old schema for the Models table, used from versions 75 to 80.
1339  return db_->Execute(
1340      "CREATE TABLE models ("
1341      "model_id BLOB primary key, "
1342      "progress_marker BLOB, "
1343      // Gets set if the syncer ever gets updates from the
1344      // server and the server returns 0.  Lets us detect the
1345      // end of the initial sync.
1346      "initial_sync_ended BOOLEAN default 0)");
1347}
1348
bool DirectoryBackingStore::CreateModelsTable() {
  // This is the current schema for the Models table, from version 81
  // onward.  If you change the schema, you'll probably want to double-check
  // the use of this function in the v84-v85 migration.
  return db_->Execute(
      "CREATE TABLE models ("
      "model_id BLOB primary key, "
      "progress_marker BLOB, "
      // Added in v81 (default 0); the initial_sync_ended column that the
      // older schemas carried here was removed in v85.
      "transaction_version BIGINT default 0)");
}
1362
1363bool DirectoryBackingStore::CreateShareInfoTable(bool is_temporary) {
1364  const char* name = is_temporary ? "temp_share_info" : "share_info";
1365  string query = "CREATE TABLE ";
1366  query.append(name);
1367  // This is the current schema for the ShareInfo table, from version 76
1368  // onward.
1369  query.append(" ("
1370      "id TEXT primary key, "
1371      "name TEXT, "
1372      "store_birthday TEXT, "
1373      "db_create_version TEXT, "
1374      "db_create_time INT, "
1375      "next_id INT default -2, "
1376      "cache_guid TEXT, "
1377      // TODO(rlarocque, 124140): Remove notification_state field.
1378      "notification_state BLOB, "
1379      "bag_of_chips BLOB"
1380      ")");
1381  return db_->Execute(query.c_str());
1382}
1383
1384bool DirectoryBackingStore::CreateShareInfoTableVersion71(
1385    bool is_temporary) {
1386  const char* name = is_temporary ? "temp_share_info" : "share_info";
1387  string query = "CREATE TABLE ";
1388  query.append(name);
1389  // This is the schema for the ShareInfo table used from versions 71 to 72.
1390  query.append(" ("
1391      "id TEXT primary key, "
1392      "name TEXT, "
1393      "store_birthday TEXT, "
1394      "db_create_version TEXT, "
1395      "db_create_time INT, "
1396      "next_id INT default -2, "
1397      "cache_guid TEXT )");
1398  return db_->Execute(query.c_str());
1399}
1400
1401// This function checks to see if the given list of Metahandles has any nodes
1402// whose PARENT_ID values refer to ID values that do not actually exist.
1403// Returns true on success.
1404bool DirectoryBackingStore::VerifyReferenceIntegrity(
1405    const syncable::MetahandlesIndex &index) {
1406  TRACE_EVENT0("sync", "SyncDatabaseIntegrityCheck");
1407  using namespace syncable;
1408  typedef base::hash_set<std::string> IdsSet;
1409
1410  IdsSet ids_set;
1411  bool is_ok = true;
1412
1413  for (MetahandlesIndex::const_iterator it = index.begin();
1414       it != index.end(); ++it) {
1415    EntryKernel* entry = *it;
1416    bool is_duplicate_id = !(ids_set.insert(entry->ref(ID).value()).second);
1417    is_ok = is_ok && !is_duplicate_id;
1418  }
1419
1420  IdsSet::iterator end = ids_set.end();
1421  for (MetahandlesIndex::const_iterator it = index.begin();
1422       it != index.end(); ++it) {
1423    EntryKernel* entry = *it;
1424    bool parent_exists = (ids_set.find(entry->ref(PARENT_ID).value()) != end);
1425    if (!parent_exists) {
1426      return false;
1427    }
1428  }
1429  return is_ok;
1430}
1431
1432template<class T>
1433bool DirectoryBackingStore::LoadEntriesInternal(const std::string& table,
1434                                                T* bucket) {
1435  string select;
1436  select.reserve(kUpdateStatementBufferSize);
1437  select.append("SELECT ");
1438  AppendColumnList(&select);
1439  select.append(" FROM " + table);
1440
1441  sql::Statement s(db_->GetUniqueStatement(select.c_str()));
1442
1443  while (s.Step()) {
1444    scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
1445    // A null kernel is evidence of external data corruption.
1446    if (!kernel)
1447      return false;
1448    bucket->insert(kernel.release());
1449  }
1450  return s.Succeeded();
1451}
1452
1453void DirectoryBackingStore::PrepareSaveEntryStatement(
1454    EntryTable table, sql::Statement* save_statement) {
1455  if (save_statement->is_valid())
1456    return;
1457
1458  string query;
1459  query.reserve(kUpdateStatementBufferSize);
1460  switch (table) {
1461    case METAS_TABLE:
1462      query.append("INSERT OR REPLACE INTO metas ");
1463      break;
1464    case DELETE_JOURNAL_TABLE:
1465      query.append("INSERT OR REPLACE INTO deleted_metas ");
1466      break;
1467  }
1468
1469  string values;
1470  values.reserve(kUpdateStatementBufferSize);
1471  values.append(" VALUES ");
1472  const char* separator = "( ";
1473  int i = 0;
1474  for (i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
1475    query.append(separator);
1476    values.append(separator);
1477    separator = ", ";
1478    query.append(ColumnName(i));
1479    values.append("?");
1480  }
1481  query.append(" ) ");
1482  values.append(" )");
1483  query.append(values);
1484  save_statement->Assign(db_->GetUniqueStatement(
1485      base::StringPrintf(query.c_str(), "metas").c_str()));
1486}
1487
1488}  // namespace syncable
1489}  // namespace syncer
1490