directory_backing_store.cc revision f2477e01787aa58f445919b809d89e252beef54f
1// Copyright 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "sync/syncable/directory_backing_store.h"
6
7#include "build/build_config.h"
8
9#include <limits>
10
11#include "base/base64.h"
12#include "base/debug/trace_event.h"
13#include "base/logging.h"
14#include "base/rand_util.h"
15#include "base/strings/stringprintf.h"
16#include "base/time/time.h"
17#include "sql/connection.h"
18#include "sql/statement.h"
19#include "sql/transaction.h"
20#include "sync/internal_api/public/base/node_ordinal.h"
21#include "sync/protocol/bookmark_specifics.pb.h"
22#include "sync/protocol/sync.pb.h"
23#include "sync/syncable/syncable-inl.h"
24#include "sync/syncable/syncable_columns.h"
25#include "sync/syncable/syncable_util.h"
26#include "sync/util/time.h"
27
28using std::string;
29
30namespace syncer {
31namespace syncable {
32
// This just has to be big enough to hold an UPDATE or INSERT statement that
// modifies all the columns in the entry table.
static const string::size_type kUpdateStatementBufferSize = 2048;

// Increment this version whenever updating DB tables.
// InitializeTables() migrates older on-disk versions up to this value
// step-by-step; a version from the future causes initialization to fail.
const int32 kCurrentDBVersion = 86;
39
// Iterate over the fields of |entry| and bind each to |statement| for
// updating, in canonical field-enum order (int64s, times, ids, bits,
// strings, protos, unique positions).  This bind order must match the
// column order produced by AppendColumnList().
void BindFields(const EntryKernel& entry,
                sql::Statement* statement) {
  int index = 0;
  int i = 0;
  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
    statement->BindInt64(index++, entry.ref(static_cast<Int64Field>(i)));
  }
  // Times are stored as proto-time int64s, not platform-specific形 values.
  for ( ; i < TIME_FIELDS_END; ++i) {
    statement->BindInt64(index++,
                         TimeToProtoTime(
                             entry.ref(static_cast<TimeField>(i))));
  }
  // Ids are stored via their raw string representation.
  for ( ; i < ID_FIELDS_END; ++i) {
    statement->BindString(index++, entry.ref(static_cast<IdField>(i)).s_);
  }
  for ( ; i < BIT_FIELDS_END; ++i) {
    statement->BindInt(index++, entry.ref(static_cast<BitField>(i)));
  }
  for ( ; i < STRING_FIELDS_END; ++i) {
    statement->BindString(index++, entry.ref(static_cast<StringField>(i)));
  }
  // Protobuf-valued fields are serialized and stored as blobs.
  for ( ; i < PROTO_FIELDS_END; ++i) {
    std::string temp;
    entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp);
    statement->BindBlob(index++, temp.data(), temp.length());
  }
  for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
    std::string temp;
    entry.ref(static_cast<UniquePositionField>(i)).SerializeToString(&temp);
    statement->BindBlob(index++, temp.data(), temp.length());
  }
}
74
// The caller owns the returned EntryKernel*.  Assumes the statement currently
// points to a valid row in the metas table. Returns NULL to indicate that
// it detected a corruption in the data on unpacking.
// Column order must match BindFields()/AppendColumnList().
scoped_ptr<EntryKernel> UnpackEntry(sql::Statement* statement) {
  scoped_ptr<EntryKernel> kernel(new EntryKernel());
  DCHECK_EQ(statement->ColumnCount(), static_cast<int>(FIELD_COUNT));
  int i = 0;
  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
    kernel->put(static_cast<Int64Field>(i), statement->ColumnInt64(i));
  }
  // Times are persisted as proto-time int64s; convert back on load.
  for ( ; i < TIME_FIELDS_END; ++i) {
    kernel->put(static_cast<TimeField>(i),
                ProtoTimeToTime(statement->ColumnInt64(i)));
  }
  for ( ; i < ID_FIELDS_END; ++i) {
    kernel->mutable_ref(static_cast<IdField>(i)).s_ =
        statement->ColumnString(i);
  }
  // Bit fields are stored as ints; any non-zero value means true.
  for ( ; i < BIT_FIELDS_END; ++i) {
    kernel->put(static_cast<BitField>(i), (0 != statement->ColumnInt(i)));
  }
  for ( ; i < STRING_FIELDS_END; ++i) {
    kernel->put(static_cast<StringField>(i),
                statement->ColumnString(i));
  }
  // Note: proto parse failures here are silently ignored (ParseFromArray's
  // return value is not checked), unlike unique positions below.
  for ( ; i < PROTO_FIELDS_END; ++i) {
    kernel->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray(
        statement->ColumnBlob(i), statement->ColumnByteLength(i));
  }
  for ( ; i < UNIQUE_POSITION_FIELDS_END; ++i) {
    std::string temp;
    statement->ColumnBlobAsString(i, &temp);

    sync_pb::UniquePosition proto;
    if (!proto.ParseFromString(temp)) {
      DVLOG(1) << "Unpacked invalid position.  Assuming the DB is corrupt";
      // NULL return signals corruption to the caller.
      return scoped_ptr<EntryKernel>();
    }

    kernel->mutable_ref(static_cast<UniquePositionField>(i)) =
        UniquePosition::FromProto(proto);
  }
  return kernel.Pass();
}
119
120namespace {
121
122string ComposeCreateTableColumnSpecs() {
123  const ColumnSpec* begin = g_metas_columns;
124  const ColumnSpec* end = g_metas_columns + arraysize(g_metas_columns);
125  string query;
126  query.reserve(kUpdateStatementBufferSize);
127  char separator = '(';
128  for (const ColumnSpec* column = begin; column != end; ++column) {
129    query.push_back(separator);
130    separator = ',';
131    query.append(column->name);
132    query.push_back(' ');
133    query.append(column->spec);
134  }
135  query.push_back(')');
136  return query;
137}
138
139void AppendColumnList(std::string* output) {
140  const char* joiner = " ";
141  // Be explicit in SELECT order to match up with UnpackEntry.
142  for (int i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
143    output->append(joiner);
144    output->append(ColumnName(i));
145    joiner = ", ";
146  }
147}
148
149}  // namespace
150
151///////////////////////////////////////////////////////////////////////////////
152// DirectoryBackingStore implementation.
153
// Creates a backing store that owns a fresh sql::Connection, tuned for the
// sync directory database (4K pages, small page cache, histogram tagging).
DirectoryBackingStore::DirectoryBackingStore(const string& dir_name)
  : db_(new sql::Connection()),
    dir_name_(dir_name),
    needs_column_refresh_(false) {
  db_->set_histogram_tag("SyncDirectory");
  db_->set_page_size(4096);
  db_->set_cache_size(32);
}
162
// Takes ownership of a caller-supplied |db| connection (presumably used by
// tests to inject a pre-configured connection — confirm against callers).
// Note this path skips the page-size/cache tuning done by the other ctor.
DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
                                             sql::Connection* db)
  : db_(db),
    dir_name_(dir_name),
    needs_column_refresh_(false) {
}
169
// The scoped db_ member closes the connection on destruction.
DirectoryBackingStore::~DirectoryBackingStore() {
}
172
173bool DirectoryBackingStore::DeleteEntries(EntryTable from,
174                                          const MetahandleSet& handles) {
175  if (handles.empty())
176    return true;
177
178  sql::Statement statement;
179  // Call GetCachedStatement() separately to get different statements for
180  // different tables.
181  switch (from) {
182    case METAS_TABLE:
183      statement.Assign(db_->GetCachedStatement(
184          SQL_FROM_HERE, "DELETE FROM metas WHERE metahandle = ?"));
185      break;
186    case DELETE_JOURNAL_TABLE:
187      statement.Assign(db_->GetCachedStatement(
188          SQL_FROM_HERE, "DELETE FROM deleted_metas WHERE metahandle = ?"));
189      break;
190  }
191
192  for (MetahandleSet::const_iterator i = handles.begin(); i != handles.end();
193       ++i) {
194    statement.BindInt64(0, *i);
195    if (!statement.Run())
196      return false;
197    statement.Reset(true);
198  }
199  return true;
200}
201
// Writes |snapshot| to the database inside a single transaction: dirty
// entries, purged metahandles, delete-journal rows, and (when dirty) the
// share_info/models bookkeeping.  Returns false on any failure, in which
// case the transaction is rolled back on destruction.
bool DirectoryBackingStore::SaveChanges(
    const Directory::SaveChangesSnapshot& snapshot) {
  DCHECK(CalledOnValidThread());
  DCHECK(db_->is_open());

  // Back out early if there is nothing to write.
  bool save_info =
    (Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status);
  if (snapshot.dirty_metas.empty() && snapshot.metahandles_to_purge.empty() &&
      snapshot.delete_journals.empty() &&
      snapshot.delete_journals_to_purge.empty() && !save_info) {
    return true;
  }

  sql::Transaction transaction(db_.get());
  if (!transaction.Begin())
    return false;

  // Write every dirty entry into the metas table.
  PrepareSaveEntryStatement(METAS_TABLE, &save_meta_statment_);
  for (EntryKernelSet::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    DCHECK((*i)->is_dirty());
    if (!SaveEntryToDB(&save_meta_statment_, **i))
      return false;
  }

  if (!DeleteEntries(METAS_TABLE, snapshot.metahandles_to_purge))
    return false;

  // Mirror the same save/purge pattern for the delete journal table.
  PrepareSaveEntryStatement(DELETE_JOURNAL_TABLE,
                            &save_delete_journal_statment_);
  for (EntryKernelSet::const_iterator i = snapshot.delete_journals.begin();
       i != snapshot.delete_journals.end(); ++i) {
    if (!SaveEntryToDB(&save_delete_journal_statment_, **i))
      return false;
  }

  if (!DeleteEntries(DELETE_JOURNAL_TABLE, snapshot.delete_journals_to_purge))
    return false;

  if (save_info) {
    // Update the singleton share_info row.
    const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
    sql::Statement s1(db_->GetCachedStatement(
            SQL_FROM_HERE,
            "UPDATE share_info "
            "SET store_birthday = ?, "
            "next_id = ?, "
            "bag_of_chips = ?"));
    s1.BindString(0, info.store_birthday);
    s1.BindInt64(1, info.next_id);
    s1.BindBlob(2, info.bag_of_chips.data(), info.bag_of_chips.size());

    if (!s1.Run())
      return false;
    DCHECK_EQ(db_->GetLastChangeCount(), 1);

    // Upsert one models row per protocol type with its progress marker and
    // transaction version.
    sql::Statement s2(db_->GetCachedStatement(
            SQL_FROM_HERE,
            "INSERT OR REPLACE "
            "INTO models (model_id, progress_marker, transaction_version) "
            "VALUES (?, ?, ?)"));

    ModelTypeSet protocol_types = ProtocolTypes();
    for (ModelTypeSet::Iterator iter = protocol_types.First(); iter.Good();
         iter.Inc()) {
      ModelType type = iter.Get();
      // We persist not ModelType but rather a protobuf-derived ID.
      string model_id = ModelTypeEnumToModelId(type);
      string progress_marker;
      info.download_progress[type].SerializeToString(&progress_marker);
      s2.BindBlob(0, model_id.data(), model_id.length());
      s2.BindBlob(1, progress_marker.data(), progress_marker.length());
      s2.BindInt64(2, info.transaction_version[type]);
      if (!s2.Run())
        return false;
      DCHECK_EQ(db_->GetLastChangeCount(), 1);
      s2.Reset(true);
    }
  }

  return transaction.Commit();
}
284
// Brings the on-disk schema up to kCurrentDBVersion by applying each
// migration step in sequence inside one transaction.  Versions too old (or
// unknown/zero) fall through to a drop-and-recreate; a version *newer* than
// this client understands returns false.
bool DirectoryBackingStore::InitializeTables() {
  sql::Transaction transaction(db_.get());
  if (!transaction.Begin())
    return false;

  int version_on_disk = GetVersion();

  // Upgrade from version 67. Version 67 was widely distributed as the original
  // Bookmark Sync release. Version 68 removed unique naming.
  if (version_on_disk == 67) {
    if (MigrateVersion67To68())
      version_on_disk = 68;
  }
  // Version 69 introduced additional datatypes.
  if (version_on_disk == 68) {
    if (MigrateVersion68To69())
      version_on_disk = 69;
  }

  if (version_on_disk == 69) {
    if (MigrateVersion69To70())
      version_on_disk = 70;
  }

  // Version 71 changed the sync progress information to be per-datatype.
  if (version_on_disk == 70) {
    if (MigrateVersion70To71())
      version_on_disk = 71;
  }

  // Version 72 removed extended attributes, a legacy way to do extensible
  // key/value information, stored in their own table.
  if (version_on_disk == 71) {
    if (MigrateVersion71To72())
      version_on_disk = 72;
  }

  // Version 73 added a field for notification state.
  if (version_on_disk == 72) {
    if (MigrateVersion72To73())
      version_on_disk = 73;
  }

  // Version 74 added state for the autofill migration.
  if (version_on_disk == 73) {
    if (MigrateVersion73To74())
      version_on_disk = 74;
  }

  // Version 75 migrated from int64-based timestamps to per-datatype tokens.
  if (version_on_disk == 74) {
    if (MigrateVersion74To75())
      version_on_disk = 75;
  }

  // Version 76 removed all (5) autofill migration related columns.
  if (version_on_disk == 75) {
    if (MigrateVersion75To76())
      version_on_disk = 76;
  }

  // Version 77 standardized all time fields to ms since the Unix
  // epoch.
  if (version_on_disk == 76) {
    if (MigrateVersion76To77())
      version_on_disk = 77;
  }

  // Version 78 added the column base_server_specifics to the metas table.
  if (version_on_disk == 77) {
    if (MigrateVersion77To78())
      version_on_disk = 78;
  }

  // Version 79 migration is a one-time fix for some users in a bad state.
  if (version_on_disk == 78) {
    if (MigrateVersion78To79())
      version_on_disk = 79;
  }

  // Version 80 migration is adding the bag_of_chips column.
  if (version_on_disk == 79) {
    if (MigrateVersion79To80())
      version_on_disk = 80;
  }

  // Version 81 replaces the int64 server_position_in_parent_field
  // with a blob server_ordinal_in_parent field.
  if (version_on_disk == 80) {
    if (MigrateVersion80To81())
      version_on_disk = 81;
  }

  // Version 82 migration added transaction_version column per data type.
  if (version_on_disk == 81) {
    if (MigrateVersion81To82())
      version_on_disk = 82;
  }

  // Version 83 migration added transaction_version column per sync entry.
  if (version_on_disk == 82) {
    if (MigrateVersion82To83())
      version_on_disk = 83;
  }

  // Version 84 migration added deleted_metas table.
  if (version_on_disk == 83) {
    if (MigrateVersion83To84())
      version_on_disk = 84;
  }

  // Version 85 migration removes the initial_sync_ended bits.
  if (version_on_disk == 84) {
    if (MigrateVersion84To85())
      version_on_disk = 85;
  }

  // Version 86 migration converts bookmarks to the unique positioning system.
  // It also introduces a new field to store a unique ID for each bookmark.
  if (version_on_disk == 85) {
    if (MigrateVersion85To86())
      version_on_disk = 86;
  }

  // If one of the migrations requested it, drop columns that aren't current.
  // It's only safe to do this after migrating all the way to the current
  // version.
  if (version_on_disk == kCurrentDBVersion && needs_column_refresh_) {
    if (!RefreshColumns())
      version_on_disk = 0;  // Forces the drop-and-recreate path below.
  }

  // A final, alternative catch-all migration to simply re-sync everything.
  if (version_on_disk != kCurrentDBVersion) {
    if (version_on_disk > kCurrentDBVersion)
      return false;

    // Fallback (re-sync everything) migration path.
    DVLOG(1) << "Old/null sync database, version " << version_on_disk;
    // Delete the existing database (if any), and create a fresh one.
    DropAllTables();
    if (!CreateTables())
      return false;
  }

  // Sanity-check that share_info is readable before committing.
  sql::Statement s(db_->GetUniqueStatement(
          "SELECT db_create_version, db_create_time FROM share_info"));
  if (!s.Step())
    return false;
  string db_create_version = s.ColumnString(0);
  int db_create_time = s.ColumnInt(1);
  DVLOG(1) << "DB created at " << db_create_time << " by version " <<
      db_create_version;

  return transaction.Commit();
}
441
// This function drops unused columns by creating a new table that contains only
// the currently used columns then copying all rows from the old tables into
// this new one.  The tables are then rearranged so the new replaces the old.
// (SQLite has no DROP COLUMN, so this copy-and-rename dance is required.)
bool DirectoryBackingStore::RefreshColumns() {
  DCHECK(needs_column_refresh_);

  // Create a new table named temp_metas.
  SafeDropTable("temp_metas");
  if (!CreateMetasTable(true))
    return false;

  // Populate temp_metas from metas.
  //
  // At this point, the metas table may contain columns belonging to obsolete
  // schema versions.  This statement explicitly lists only the columns that
  // belong to the current schema version, so the obsolete columns will be
  // effectively dropped once we rename temp_metas over top of metas.
  std::string query = "INSERT INTO temp_metas (";
  AppendColumnList(&query);
  query.append(") SELECT ");
  AppendColumnList(&query);
  query.append(" FROM metas");
  if (!db_->Execute(query.c_str()))
    return false;

  // Drop metas.
  SafeDropTable("metas");

  // Rename temp_metas -> metas.
  if (!db_->Execute("ALTER TABLE temp_metas RENAME TO metas"))
    return false;

  // Repeat the process for share_info.
  SafeDropTable("temp_share_info");
  if (!CreateShareInfoTable(true))
    return false;

  // TODO(rlarocque, 124140): Remove notification_state.
  if (!db_->Execute(
          "INSERT INTO temp_share_info (id, name, store_birthday, "
          "db_create_version, db_create_time, next_id, cache_guid,"
          "notification_state, bag_of_chips) "
          "SELECT id, name, store_birthday, db_create_version, "
          "db_create_time, next_id, cache_guid, notification_state, "
          "bag_of_chips "
          "FROM share_info"))
    return false;

  SafeDropTable("share_info");
  if (!db_->Execute("ALTER TABLE temp_share_info RENAME TO share_info"))
    return false;

  // The schema now matches the current column set.
  needs_column_refresh_ = false;
  return true;
}
497
498bool DirectoryBackingStore::LoadEntries(
499    Directory::MetahandlesMap* handles_map) {
500  string select;
501  select.reserve(kUpdateStatementBufferSize);
502  select.append("SELECT ");
503  AppendColumnList(&select);
504  select.append(" FROM metas");
505
506  sql::Statement s(db_->GetUniqueStatement(select.c_str()));
507
508  while (s.Step()) {
509    scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
510    // A null kernel is evidence of external data corruption.
511    if (!kernel)
512      return false;
513
514    int64 handle = kernel->ref(META_HANDLE);
515    (*handles_map)[handle] = kernel.release();
516  }
517  return s.Succeeded();
518}
519
520bool DirectoryBackingStore::LoadDeleteJournals(
521    JournalIndex* delete_journals) {
522  string select;
523  select.reserve(kUpdateStatementBufferSize);
524  select.append("SELECT ");
525  AppendColumnList(&select);
526  select.append(" FROM deleted_metas");
527
528  sql::Statement s(db_->GetUniqueStatement(select.c_str()));
529
530  while (s.Step()) {
531    scoped_ptr<EntryKernel> kernel = UnpackEntry(&s);
532    // A null kernel is evidence of external data corruption.
533    if (!kernel)
534      return false;
535    delete_journals->insert(kernel.release());
536  }
537  return s.Succeeded();
538}
539
// Populates |info| from the share_info, models, and metas tables.  Returns
// false if any of the three reads fails.
bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
  {
    // share_info holds a single row of store-wide state.
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT store_birthday, next_id, cache_guid, bag_of_chips "
            "FROM share_info"));
    if (!s.Step())
      return false;

    info->kernel_info.store_birthday = s.ColumnString(0);
    info->kernel_info.next_id = s.ColumnInt64(1);
    info->cache_guid = s.ColumnString(2);
    s.ColumnBlobAsString(3, &(info->kernel_info.bag_of_chips));

    // Verify there was only one row returned.
    DCHECK(!s.Step());
    DCHECK(s.Succeeded());
  }

  {
    // One models row per datatype: progress marker and transaction version.
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT model_id, progress_marker, "
            "transaction_version FROM models"));

    while (s.Step()) {
      ModelType type = ModelIdToModelTypeEnum(s.ColumnBlob(0),
                                              s.ColumnByteLength(0));
      // Skip unrecognized model ids (e.g. from newer clients) and the
      // top-level folder pseudo-type.
      if (type != UNSPECIFIED && type != TOP_LEVEL_FOLDER) {
        info->kernel_info.download_progress[type].ParseFromArray(
            s.ColumnBlob(1), s.ColumnByteLength(1));
        info->kernel_info.transaction_version[type] = s.ColumnInt64(2);
      }
    }
    if (!s.Succeeded())
      return false;
  }
  {
    // Largest metahandle in use, so new handles can be allocated above it.
    sql::Statement s(
        db_->GetUniqueStatement(
            "SELECT MAX(metahandle) FROM metas"));
    if (!s.Step())
      return false;

    info->max_metahandle = s.ColumnInt64(0);

    // Verify only one row was returned.
    DCHECK(!s.Step());
    DCHECK(s.Succeeded());
  }
  return true;
}
592
/* static */
// Resets |save_statement| (clearing any previous bindings so it can be
// reused), binds all of |entry|'s fields, and executes it.
bool DirectoryBackingStore::SaveEntryToDB(sql::Statement* save_statement,
                                          const EntryKernel& entry) {
  save_statement->Reset(true);
  BindFields(entry, save_statement);
  return save_statement->Run();
}
600
601bool DirectoryBackingStore::DropDeletedEntries() {
602  if (!db_->Execute("DELETE FROM metas "
603                    "WHERE is_del > 0 "
604                    "AND is_unsynced < 1 "
605                    "AND is_unapplied_update < 1")) {
606    return false;
607  }
608  if (!db_->Execute("DELETE FROM metas "
609                    "WHERE is_del > 0 "
610                    "AND id LIKE 'c%'")) {
611    return false;
612  }
613  return true;
614}
615
616bool DirectoryBackingStore::SafeDropTable(const char* table_name) {
617  string query = "DROP TABLE IF EXISTS ";
618  query.append(table_name);
619  return db_->Execute(query.c_str());
620}
621
622void DirectoryBackingStore::DropAllTables() {
623  SafeDropTable("metas");
624  SafeDropTable("temp_metas");
625  SafeDropTable("share_info");
626  SafeDropTable("temp_share_info");
627  SafeDropTable("share_version");
628  SafeDropTable("extended_attributes");
629  SafeDropTable("models");
630  SafeDropTable("temp_models");
631  needs_column_refresh_ = false;
632}
633
634// static
635ModelType DirectoryBackingStore::ModelIdToModelTypeEnum(
636    const void* data, int size) {
637  sync_pb::EntitySpecifics specifics;
638  if (!specifics.ParseFromArray(data, size))
639    return UNSPECIFIED;
640  return GetModelTypeFromSpecifics(specifics);
641}
642
// static
// Encodes |model_type| as its persisted model ID: an EntitySpecifics proto
// with that type's default field set, serialized to a string.  Inverse of
// ModelIdToModelTypeEnum().
string DirectoryBackingStore::ModelTypeEnumToModelId(ModelType model_type) {
  sync_pb::EntitySpecifics specifics;
  AddDefaultFieldValue(model_type, &specifics);
  return specifics.SerializeAsString();
}
649
650// static
651std::string DirectoryBackingStore::GenerateCacheGUID() {
652  // Generate a GUID with 128 bits of randomness.
653  const int kGuidBytes = 128 / 8;
654  std::string guid;
655  base::Base64Encode(base::RandBytesAsString(kGuidBytes), &guid);
656  return guid;
657}
658
// Generic helper for folding legacy per-field columns into a protobuf
// specifics column: for each metas row, reads the current |specifics_column|
// blob plus the legacy |old_columns|, lets |handler_function| merge the old
// values into the proto, and writes the re-serialized proto back.  Returns
// false on any update failure.
bool DirectoryBackingStore::MigrateToSpecifics(
    const char* old_columns,
    const char* specifics_column,
    void (*handler_function)(sql::Statement* old_value_query,
                             int old_value_column,
                             sync_pb::EntitySpecifics* mutable_new_value)) {
  std::string query_sql = base::StringPrintf(
      "SELECT metahandle, %s, %s FROM metas", specifics_column, old_columns);
  std::string update_sql = base::StringPrintf(
      "UPDATE metas SET %s = ? WHERE metahandle = ?", specifics_column);

  sql::Statement query(db_->GetUniqueStatement(query_sql.c_str()));
  sql::Statement update(db_->GetUniqueStatement(update_sql.c_str()));

  while (query.Step()) {
    int64 metahandle = query.ColumnInt64(0);
    std::string new_value_bytes;
    query.ColumnBlobAsString(1, &new_value_bytes);
    sync_pb::EntitySpecifics new_value;
    new_value.ParseFromString(new_value_bytes);
    // Legacy column values start at result column 2, after metahandle and
    // the specifics blob.
    handler_function(&query, 2, &new_value);
    new_value.SerializeToString(&new_value_bytes);

    update.BindBlob(0, new_value_bytes.data(), new_value_bytes.length());
    update.BindInt64(1, metahandle);
    if (!update.Run())
      return false;
    update.Reset(true);
  }
  return query.Succeeded();
}
690
// Persists |version| as the schema version in share_version.  Note this is
// an UPDATE: if the share_version row does not exist yet, the statement
// still succeeds without writing anything.
bool DirectoryBackingStore::SetVersion(int version) {
  sql::Statement s(db_->GetCachedStatement(
          SQL_FROM_HERE, "UPDATE share_version SET data = ?"));
  s.BindInt(0, version);

  return s.Run();
}
698
699int DirectoryBackingStore::GetVersion() {
700  if (!db_->DoesTableExist("share_version"))
701    return 0;
702
703  sql::Statement statement(db_->GetUniqueStatement(
704          "SELECT data FROM share_version"));
705  if (statement.Step()) {
706    return statement.ColumnInt(0);
707  } else {
708    return 0;
709  }
710}
711
bool DirectoryBackingStore::MigrateVersion67To68() {
  // This change simply removed three columns:
  //   string NAME
  //   string UNSANITIZED_NAME
  //   string SERVER_NAME
  // No data migration is necessary, but we should do a column refresh.
  // The actual column drop happens later via RefreshColumns(), once all
  // migrations have run.
  SetVersion(68);
  needs_column_refresh_ = true;
  return true;
}
722
bool DirectoryBackingStore::MigrateVersion69To70() {
  // Added "unique_client_tag", renamed "singleton_tag" to unique_server_tag
  SetVersion(70);
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN unique_server_tag varchar"))
    return false;
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN unique_client_tag varchar"))
    return false;
  // The obsolete singleton_tag column is dropped later by RefreshColumns(),
  // after its data is copied below.
  needs_column_refresh_ = true;

  if (!db_->Execute(
          "UPDATE metas SET unique_server_tag = singleton_tag"))
    return false;

  return true;
}
740
741namespace {
742
743// Callback passed to MigrateToSpecifics for the v68->v69 migration.  See
744// MigrateVersion68To69().
745void EncodeBookmarkURLAndFavicon(sql::Statement* old_value_query,
746                                 int old_value_column,
747                                 sync_pb::EntitySpecifics* mutable_new_value) {
748  // Extract data from the column trio we expect.
749  bool old_is_bookmark_object = old_value_query->ColumnBool(old_value_column);
750  std::string old_url = old_value_query->ColumnString(old_value_column + 1);
751  std::string old_favicon;
752  old_value_query->ColumnBlobAsString(old_value_column + 2, &old_favicon);
753  bool old_is_dir = old_value_query->ColumnBool(old_value_column + 3);
754
755  if (old_is_bookmark_object) {
756    sync_pb::BookmarkSpecifics* bookmark_data =
757        mutable_new_value->mutable_bookmark();
758    if (!old_is_dir) {
759      bookmark_data->set_url(old_url);
760      bookmark_data->set_favicon(old_favicon);
761    }
762  }
763}
764
765}  // namespace
766
bool DirectoryBackingStore::MigrateVersion68To69() {
  // In Version 68, there were columns on table 'metas':
  //   string BOOKMARK_URL
  //   string SERVER_BOOKMARK_URL
  //   blob BOOKMARK_FAVICON
  //   blob SERVER_BOOKMARK_FAVICON
  // In version 69, these columns went away in favor of storing
  // a serialized EntrySpecifics protobuf in the columns:
  //   protobuf blob SPECIFICS
  //   protobuf blob SERVER_SPECIFICS
  // For bookmarks, EntrySpecifics is extended as per
  // bookmark_specifics.proto. This migration converts bookmarks from the
  // former scheme to the latter scheme.

  // First, add the two new columns to the schema.
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN specifics blob"))
    return false;
  if (!db_->Execute(
          "ALTER TABLE metas ADD COLUMN server_specifics blob"))
    return false;

  // Next, fold data from the old columns into the new protobuf columns,
  // once for the local fields and once for the server-side fields.
  if (!MigrateToSpecifics(("is_bookmark_object, bookmark_url, "
                           "bookmark_favicon, is_dir"),
                          "specifics",
                          &EncodeBookmarkURLAndFavicon)) {
    return false;
  }
  if (!MigrateToSpecifics(("server_is_bookmark_object, "
                           "server_bookmark_url, "
                           "server_bookmark_favicon, "
                           "server_is_dir"),
                          "server_specifics",
                          &EncodeBookmarkURLAndFavicon)) {
    return false;
  }

  // Lastly, fix up the "Google Chrome" folder, which is of the TOP_LEVEL_FOLDER
  // ModelType: it shouldn't have BookmarkSpecifics.
  if (!db_->Execute(
          "UPDATE metas SET specifics = NULL, server_specifics = NULL WHERE "
          "singleton_tag IN ('google_chrome')"))
    return false;

  SetVersion(69);
  needs_column_refresh_ = true;  // Trigger deletion of old columns.
  return true;
}
816
// Version 71, the columns 'initial_sync_ended' and 'last_sync_timestamp'
// were removed from the share_info table.  They were replaced by
// the 'models' table, which has these values on a per-datatype basis.
bool DirectoryBackingStore::MigrateVersion70To71() {
  if (!CreateV71ModelsTable())
    return false;

  // Move data from the old share_info columns to the new models table.
  {
    sql::Statement fetch(db_->GetUniqueStatement(
            "SELECT last_sync_timestamp, initial_sync_ended FROM share_info"));
    if (!fetch.Step())
      return false;

    int64 last_sync_timestamp = fetch.ColumnInt64(0);
    bool initial_sync_ended = fetch.ColumnBool(1);

    // Verify there were no additional rows returned.
    DCHECK(!fetch.Step());
    DCHECK(fetch.Succeeded());

    // At this schema version only bookmarks synced, so the single old value
    // pair becomes the BOOKMARKS row of the models table.
    sql::Statement update(db_->GetUniqueStatement(
            "INSERT INTO models (model_id, "
            "last_download_timestamp, initial_sync_ended) VALUES (?, ?, ?)"));
    string bookmark_model_id = ModelTypeEnumToModelId(BOOKMARKS);
    update.BindBlob(0, bookmark_model_id.data(), bookmark_model_id.size());
    update.BindInt64(1, last_sync_timestamp);
    update.BindBool(2, initial_sync_ended);

    if (!update.Run())
      return false;
  }

  // Drop the columns from the old share_info table via a temp table.
  const bool kCreateAsTempShareInfo = true;

  if (!CreateShareInfoTableVersion71(kCreateAsTempShareInfo))
    return false;
  if (!db_->Execute(
          "INSERT INTO temp_share_info (id, name, store_birthday, "
          "db_create_version, db_create_time, next_id, cache_guid) "
          "SELECT id, name, store_birthday, db_create_version, "
          "db_create_time, next_id, cache_guid FROM share_info"))
    return false;
  SafeDropTable("share_info");
  if (!db_->Execute(
          "ALTER TABLE temp_share_info RENAME TO share_info"))
    return false;
  SetVersion(71);
  return true;
}
868
bool DirectoryBackingStore::MigrateVersion71To72() {
  // Version 72 removed a table 'extended_attributes', whose
  // contents didn't matter.  SafeDropTable succeeds even if the table is
  // already absent.
  SafeDropTable("extended_attributes");
  SetVersion(72);
  return true;
}
876
877bool DirectoryBackingStore::MigrateVersion72To73() {
878  // Version 73 added one column to the table 'share_info': notification_state
879  if (!db_->Execute(
880          "ALTER TABLE share_info ADD COLUMN notification_state BLOB"))
881    return false;
882  SetVersion(73);
883  return true;
884}
885
886bool DirectoryBackingStore::MigrateVersion73To74() {
887  // Version 74 added the following columns to the table 'share_info':
888  //   autofill_migration_state
889  //   bookmarks_added_during_autofill_migration
890  //   autofill_migration_time
891  //   autofill_entries_added_during_migration
892  //   autofill_profiles_added_during_migration
893
894  if (!db_->Execute(
895          "ALTER TABLE share_info ADD COLUMN "
896          "autofill_migration_state INT default 0"))
897    return false;
898
899  if (!db_->Execute(
900          "ALTER TABLE share_info ADD COLUMN "
901          "bookmarks_added_during_autofill_migration "
902          "INT default 0"))
903    return false;
904
905  if (!db_->Execute(
906          "ALTER TABLE share_info ADD COLUMN autofill_migration_time "
907          "INT default 0"))
908    return false;
909
910  if (!db_->Execute(
911          "ALTER TABLE share_info ADD COLUMN "
912          "autofill_entries_added_during_migration "
913          "INT default 0"))
914    return false;
915
916  if (!db_->Execute(
917          "ALTER TABLE share_info ADD COLUMN "
918          "autofill_profiles_added_during_migration "
919          "INT default 0"))
920    return false;
921
922  SetVersion(74);
923  return true;
924}
925
926bool DirectoryBackingStore::MigrateVersion74To75() {
927  // In version 74, there was a table 'models':
928  //     blob model_id (entity specifics, primary key)
929  //     int last_download_timestamp
930  //     boolean initial_sync_ended
931  // In version 75, we deprecated the integer-valued last_download_timestamp,
932  // using insted a protobuf-valued progress_marker field:
933  //     blob progress_marker
934  // The progress_marker values are initialized from the value of
935  // last_download_timestamp, thereby preserving the download state.
936
937  // Move aside the old table and create a new empty one at the current schema.
938  if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
939    return false;
940  if (!CreateV75ModelsTable())
941    return false;
942
943  sql::Statement query(db_->GetUniqueStatement(
944          "SELECT model_id, last_download_timestamp, initial_sync_ended "
945          "FROM temp_models"));
946
947  sql::Statement update(db_->GetUniqueStatement(
948          "INSERT INTO models (model_id, "
949          "progress_marker, initial_sync_ended) VALUES (?, ?, ?)"));
950
951  while (query.Step()) {
952    ModelType type = ModelIdToModelTypeEnum(query.ColumnBlob(0),
953                                            query.ColumnByteLength(0));
954    if (type != UNSPECIFIED) {
955      // Set the |timestamp_token_for_migration| on a new
956      // DataTypeProgressMarker, using the old value of last_download_timestamp.
957      // The server will turn this into a real token on our behalf the next
958      // time we check for updates.
959      sync_pb::DataTypeProgressMarker progress_marker;
960      progress_marker.set_data_type_id(
961          GetSpecificsFieldNumberFromModelType(type));
962      progress_marker.set_timestamp_token_for_migration(query.ColumnInt64(1));
963      std::string progress_blob;
964      progress_marker.SerializeToString(&progress_blob);
965
966      update.BindBlob(0, query.ColumnBlob(0), query.ColumnByteLength(0));
967      update.BindBlob(1, progress_blob.data(), progress_blob.length());
968      update.BindBool(2, query.ColumnBool(2));
969      if (!update.Run())
970        return false;
971      update.Reset(true);
972    }
973  }
974  if (!query.Succeeded())
975    return false;
976
977  // Drop the old table.
978  SafeDropTable("temp_models");
979
980  SetVersion(75);
981  return true;
982}
983
984bool DirectoryBackingStore::MigrateVersion75To76() {
985  // This change removed five columns:
986  //   autofill_migration_state
987  //   bookmarks_added_during_autofill_migration
988  //   autofill_migration_time
989  //   autofill_entries_added_during_migration
990  //   autofill_profiles_added_during_migration
991  // No data migration is necessary, but we should do a column refresh.
992  SetVersion(76);
993  needs_column_refresh_ = true;
994  return true;
995}
996
bool DirectoryBackingStore::MigrateVersion76To77() {
  // This change changes the format of stored timestamps (mtime, server_mtime,
  // ctime, server_ctime) to ms since the Unix epoch.
#if defined(OS_WIN)
// On Windows, we used to store timestamps in FILETIME format (100s of
// ns since Jan 1, 1601).  Magic numbers taken from
// http://stackoverflow.com/questions/5398557/
//     java-library-for-dealing-with-win32-filetime
// .
#define TO_UNIX_TIME_MS(x) #x " = " #x " / 10000 - 11644473600000"
#else
// On other platforms, we used to store timestamps in time_t format (s
// since the Unix epoch).
#define TO_UNIX_TIME_MS(x) #x " = " #x " * 1000"
#endif
  // Each macro use expands (via the # stringizing operator) to one SQL
  // assignment, e.g. "mtime = mtime * 1000", so a single UPDATE converts all
  // four columns in place.
  sql::Statement update_timestamps(db_->GetUniqueStatement(
          "UPDATE metas SET "
          TO_UNIX_TIME_MS(mtime) ", "
          TO_UNIX_TIME_MS(server_mtime) ", "
          TO_UNIX_TIME_MS(ctime) ", "
          TO_UNIX_TIME_MS(server_ctime)));
#undef TO_UNIX_TIME_MS
  if (!update_timestamps.Run())
    return false;
  SetVersion(77);
  return true;
}
1024
1025bool DirectoryBackingStore::MigrateVersion77To78() {
1026  // Version 78 added one column to table 'metas': base_server_specifics.
1027  if (!db_->Execute(
1028          "ALTER TABLE metas ADD COLUMN base_server_specifics BLOB")) {
1029    return false;
1030  }
1031  SetVersion(78);
1032  return true;
1033}
1034
1035bool DirectoryBackingStore::MigrateVersion78To79() {
1036  // Some users are stuck with a DB that causes them to reuse existing IDs.  We
1037  // perform this one-time fixup on all users to help the few that are stuck.
1038  // See crbug.com/142987 for details.
1039  if (!db_->Execute(
1040          "UPDATE share_info SET next_id = next_id - 65536")) {
1041    return false;
1042  }
1043  SetVersion(79);
1044  return true;
1045}
1046
1047bool DirectoryBackingStore::MigrateVersion79To80() {
1048  if (!db_->Execute(
1049          "ALTER TABLE share_info ADD COLUMN bag_of_chips BLOB"))
1050    return false;
1051  sql::Statement update(db_->GetUniqueStatement(
1052          "UPDATE share_info SET bag_of_chips = ?"));
1053  // An empty message is serialized to an empty string.
1054  update.BindBlob(0, NULL, 0);
1055  if (!update.Run())
1056    return false;
1057  SetVersion(80);
1058  return true;
1059}
1060
bool DirectoryBackingStore::MigrateVersion80To81() {
  // Schema version 81 added the NodeOrdinal-valued 'server_ordinal_in_parent'
  // blob to 'metas', converting each row's int64 server_position_in_parent
  // into its ordinal representation.
  if(!db_->Execute(
         "ALTER TABLE metas ADD COLUMN server_ordinal_in_parent BLOB"))
    return false;

  sql::Statement get_positions(db_->GetUniqueStatement(
      "SELECT metahandle, server_position_in_parent FROM metas"));

  // NOTE(review): the two literals below concatenate with no space, giving
  // "... = ?WHERE metahandle = ?".  SQLite's tokenizer still parses this
  // ('?' ends at the first non-digit), but add the missing space if this
  // statement is ever edited.
  sql::Statement put_ordinals(db_->GetUniqueStatement(
      "UPDATE metas SET server_ordinal_in_parent = ?"
      "WHERE metahandle = ?"));

  while(get_positions.Step()) {
    int64 metahandle = get_positions.ColumnInt64(0);
    int64 position = get_positions.ColumnInt64(1);

    // The const reference keeps the temporary NodeOrdinal's backing string
    // alive for the BindBlob call below.
    const std::string& ordinal = Int64ToNodeOrdinal(position).ToInternalValue();
    put_ordinals.BindBlob(0, ordinal.data(), ordinal.length());
    put_ordinals.BindInt64(1, metahandle);

    if(!put_ordinals.Run())
      return false;
    put_ordinals.Reset(true);  // Clear bindings for the next row.
  }

  SetVersion(81);
  // The now-unused server_position_in_parent column is dropped by the next
  // column refresh.
  needs_column_refresh_ = true;
  return true;
}
1090
1091bool DirectoryBackingStore::MigrateVersion81To82() {
1092  if (!db_->Execute(
1093      "ALTER TABLE models ADD COLUMN transaction_version BIGINT default 0"))
1094    return false;
1095  sql::Statement update(db_->GetUniqueStatement(
1096      "UPDATE models SET transaction_version = 0"));
1097  if (!update.Run())
1098    return false;
1099  SetVersion(82);
1100  return true;
1101}
1102
1103bool DirectoryBackingStore::MigrateVersion82To83() {
1104  // Version 83 added transaction_version on sync node.
1105  if (!db_->Execute(
1106      "ALTER TABLE metas ADD COLUMN transaction_version BIGINT default 0"))
1107    return false;
1108  sql::Statement update(db_->GetUniqueStatement(
1109      "UPDATE metas SET transaction_version = 0"));
1110  if (!update.Run())
1111    return false;
1112  SetVersion(83);
1113  return true;
1114}
1115
1116bool DirectoryBackingStore::MigrateVersion83To84() {
1117  // Version 84 added deleted_metas table to store deleted metas until we know
1118  // for sure that the deletions are persisted in native models.
1119  string query = "CREATE TABLE deleted_metas ";
1120  query.append(ComposeCreateTableColumnSpecs());
1121  if (!db_->Execute(query.c_str()))
1122    return false;
1123  SetVersion(84);
1124  return true;
1125}
1126
1127bool DirectoryBackingStore::MigrateVersion84To85() {
1128  // Version 85 removes the initial_sync_ended flag.
1129  if (!db_->Execute("ALTER TABLE models RENAME TO temp_models"))
1130    return false;
1131  if (!CreateModelsTable())
1132    return false;
1133  if (!db_->Execute("INSERT INTO models SELECT "
1134                    "model_id, progress_marker, transaction_version "
1135                    "FROM temp_models")) {
1136    return false;
1137  }
1138  SafeDropTable("temp_models");
1139
1140  SetVersion(85);
1141  return true;
1142}
1143
bool DirectoryBackingStore::MigrateVersion85To86() {
  // Version 86 removes both server ordinals and local NEXT_ID, PREV_ID and
  // SERVER_{POSITION,ORDINAL}_IN_PARENT and replaces them with UNIQUE_POSITION
  // and SERVER_UNIQUE_POSITION.
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "server_unique_position BLOB")) {
    return false;
  }
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "unique_position BLOB")) {
    return false;
  }
  if (!db_->Execute("ALTER TABLE metas ADD COLUMN "
                    "unique_bookmark_tag VARCHAR")) {
    return false;
  }

  // Fetch the cache_guid from the DB, because we don't otherwise have access to
  // it from here.
  sql::Statement get_cache_guid(db_->GetUniqueStatement(
      "SELECT cache_guid FROM share_info"));
  if (!get_cache_guid.Step()) {
    return false;
  }
  std::string cache_guid = get_cache_guid.ColumnString(0);
  // share_info is expected to hold exactly one row.
  DCHECK(!get_cache_guid.Step());
  DCHECK(get_cache_guid.Succeeded());

  sql::Statement get(db_->GetUniqueStatement(
      "SELECT "
      "  metahandle, "
      "  id, "
      "  specifics, "
      "  is_dir, "
      "  unique_server_tag, "
      "  server_ordinal_in_parent "
      "FROM metas"));

  // Note that we set both the local and server position based on the server
  // position.  We will lose any unsynced local position changes.  Unfortunately,
  // there's nothing we can do to avoid that.  The NEXT_ID / PREV_ID values
  // can't be translated into a UNIQUE_POSITION in a reliable way.
  sql::Statement put(db_->GetCachedStatement(
      SQL_FROM_HERE,
      "UPDATE metas SET"
      "  server_unique_position = ?,"
      "  unique_position = ?,"
      "  unique_bookmark_tag = ?"
      "WHERE metahandle = ?"));

  while (get.Step()) {
    int64 metahandle = get.ColumnInt64(0);

    std::string id_string;
    get.ColumnBlobAsString(1, &id_string);

    sync_pb::EntitySpecifics specifics;
    specifics.ParseFromArray(
        get.ColumnBlob(2), get.ColumnByteLength(2));

    bool is_dir = get.ColumnBool(3);

    std::string server_unique_tag = get.ColumnString(4);

    std::string ordinal_string;
    get.ColumnBlobAsString(5, &ordinal_string);
    NodeOrdinal ordinal(ordinal_string);


    std::string unique_bookmark_tag;

    // We only maintain positions for bookmarks that are not server-defined
    // top-level folders.
    UniquePosition position;
    if (GetModelTypeFromSpecifics(specifics) == BOOKMARKS
        && !(is_dir && !server_unique_tag.empty())) {
      // A leading 'c' in the ID marks a client-created (uncommitted) item;
      // see the branch comments below.
      if (id_string.at(0) == 'c') {
        // We found an uncommitted item.  This is rare, but fortunate.  This
        // means we can set the bookmark tag according to the originator client
        // item ID and originator cache guid, because (unlike the other case) we
        // know that this client is the originator.
        unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
            cache_guid,
            id_string.substr(1));
      } else {
        // If we've already committed the item, then we don't know who the
        // originator was.  We do not have access to the originator client item
        // ID and originator cache guid at this point.
        //
        // We will base our hash entirely on the server ID instead.  This is
        // incorrect, but at least all clients that undergo this migration step
        // will be incorrect in the same way.
        //
        // To get everyone back into a synced state, we will update the bookmark
        // tag according to the originator_cache_guid and originator_item_id
        // when we see updates for this item.  That should ensure that commonly
        // modified items will end up with the proper tag values eventually.
        unique_bookmark_tag = syncable::GenerateSyncableBookmarkHash(
            std::string(), // cache_guid left intentionally blank.
            id_string.substr(1));
      }

      int64 int_position = NodeOrdinalToInt64(ordinal);
      position = UniquePosition::FromInt64(int_position, unique_bookmark_tag);
    } else {
      // Leave bookmark_tag and position at their default (invalid) values.
    }

    std::string position_blob;
    position.SerializeToString(&position_blob);
    // Server and local position are deliberately identical; see the comment
    // above the UPDATE statement.
    put.BindBlob(0, position_blob.data(), position_blob.length());
    put.BindBlob(1, position_blob.data(), position_blob.length());
    put.BindBlob(2, unique_bookmark_tag.data(), unique_bookmark_tag.length());
    put.BindInt64(3, metahandle);

    if (!put.Run())
      return false;
    put.Reset(true);
  }

  SetVersion(86);
  // The superseded position columns are dropped by the next column refresh.
  needs_column_refresh_ = true;
  return true;
}
1268
// Builds a fresh directory database at the current schema version: the
// share_version, share_info, models, and metas tables, plus the root entry.
// Returns false on the first failed statement.
bool DirectoryBackingStore::CreateTables() {
  DVLOG(1) << "First run, creating tables";
  // Create two little tables share_version and share_info
  if (!db_->Execute(
          "CREATE TABLE share_version ("
          "id VARCHAR(128) primary key, data INT)")) {
    return false;
  }

  {
    // Record the directory name and current schema version.
    sql::Statement s(db_->GetUniqueStatement(
            "INSERT INTO share_version VALUES(?, ?)"));
    s.BindString(0, dir_name_);
    s.BindInt(1, kCurrentDBVersion);

    if (!s.Run())
      return false;
  }

  const bool kCreateAsTempShareInfo = false;
  if (!CreateShareInfoTable(kCreateAsTempShareInfo)) {
    return false;
  }

  {
    // Populate the single share_info row.
    sql::Statement s(db_->GetUniqueStatement(
            "INSERT INTO share_info VALUES"
            "(?, "  // id
            "?, "   // name
            "?, "   // store_birthday
            "?, "   // db_create_version
            "?, "   // db_create_time
            "-2, "  // next_id
            "?, "   // cache_guid
            // TODO(rlarocque, 124140): Remove notification_state field.
            "?, "   // notification_state
            "?);"));  // bag_of_chips
    s.BindString(0, dir_name_);                   // id
    s.BindString(1, dir_name_);                   // name
    s.BindString(2, std::string());               // store_birthday
    // TODO(akalin): Remove this unused db_create_version field. (Or
    // actually use it for something.) http://crbug.com/118356
    s.BindString(3, "Unknown");                   // db_create_version
    s.BindInt(4, static_cast<int32>(time(0)));    // db_create_time
    s.BindString(5, GenerateCacheGUID());         // cache_guid
    // TODO(rlarocque, 124140): Remove this unused notification-state field.
    s.BindBlob(6, NULL, 0);                       // notification_state
    s.BindBlob(7, NULL, 0);                       // bag_of_chips
    if (!s.Run())
      return false;
  }

  if (!CreateModelsTable())
    return false;

  // Create the big metas table.
  if (!CreateMetasTable(false))
    return false;

  {
    // Insert the entry for the root into the metas table.
    const int64 now = TimeToProtoTime(base::Time::Now());
    sql::Statement s(db_->GetUniqueStatement(
            "INSERT INTO metas "
            "( id, metahandle, is_dir, ctime, mtime ) "
            "VALUES ( \"r\", 1, 1, ?, ? )"));
    s.BindInt64(0, now);
    s.BindInt64(1, now);

    if (!s.Run())
      return false;
  }

  return true;
}
1344
1345bool DirectoryBackingStore::CreateMetasTable(bool is_temporary) {
1346  string query = "CREATE TABLE ";
1347  query.append(is_temporary ? "temp_metas" : "metas");
1348  query.append(ComposeCreateTableColumnSpecs());
1349  if (!db_->Execute(query.c_str()))
1350    return false;
1351
1352  // Create a deleted_metas table to save copies of deleted metas until the
1353  // deletions are persisted. For simplicity, don't try to migrate existing
1354  // data because it's rarely used.
1355  SafeDropTable("deleted_metas");
1356  query = "CREATE TABLE deleted_metas ";
1357  query.append(ComposeCreateTableColumnSpecs());
1358  return db_->Execute(query.c_str());
1359}
1360
1361bool DirectoryBackingStore::CreateV71ModelsTable() {
1362  // This is an old schema for the Models table, used from versions 71 to 74.
1363  return db_->Execute(
1364      "CREATE TABLE models ("
1365      "model_id BLOB primary key, "
1366      "last_download_timestamp INT, "
1367      // Gets set if the syncer ever gets updates from the
1368      // server and the server returns 0.  Lets us detect the
1369      // end of the initial sync.
1370      "initial_sync_ended BOOLEAN default 0)");
1371}
1372
1373bool DirectoryBackingStore::CreateV75ModelsTable() {
1374  // This is an old schema for the Models table, used from versions 75 to 80.
1375  return db_->Execute(
1376      "CREATE TABLE models ("
1377      "model_id BLOB primary key, "
1378      "progress_marker BLOB, "
1379      // Gets set if the syncer ever gets updates from the
1380      // server and the server returns 0.  Lets us detect the
1381      // end of the initial sync.
1382      "initial_sync_ended BOOLEAN default 0)");
1383}
1384
bool DirectoryBackingStore::CreateModelsTable() {
  // This is the current schema for the Models table, from version 81
  // onward.  If you change the schema, you'll probably want to double-check
  // the use of this function in the v84-v85 migration.
  return db_->Execute(
      "CREATE TABLE models ("
      "model_id BLOB primary key, "
      "progress_marker BLOB, "
      // Per-model transaction version, introduced by the v81-v82 migration.
      // (The comment formerly here described the removed initial_sync_ended
      // column and was stale.)
      "transaction_version BIGINT default 0)");
}
1398
1399bool DirectoryBackingStore::CreateShareInfoTable(bool is_temporary) {
1400  const char* name = is_temporary ? "temp_share_info" : "share_info";
1401  string query = "CREATE TABLE ";
1402  query.append(name);
1403  // This is the current schema for the ShareInfo table, from version 76
1404  // onward.
1405  query.append(" ("
1406      "id TEXT primary key, "
1407      "name TEXT, "
1408      "store_birthday TEXT, "
1409      "db_create_version TEXT, "
1410      "db_create_time INT, "
1411      "next_id INT default -2, "
1412      "cache_guid TEXT, "
1413      // TODO(rlarocque, 124140): Remove notification_state field.
1414      "notification_state BLOB, "
1415      "bag_of_chips BLOB"
1416      ")");
1417  return db_->Execute(query.c_str());
1418}
1419
1420bool DirectoryBackingStore::CreateShareInfoTableVersion71(
1421    bool is_temporary) {
1422  const char* name = is_temporary ? "temp_share_info" : "share_info";
1423  string query = "CREATE TABLE ";
1424  query.append(name);
1425  // This is the schema for the ShareInfo table used from versions 71 to 72.
1426  query.append(" ("
1427      "id TEXT primary key, "
1428      "name TEXT, "
1429      "store_birthday TEXT, "
1430      "db_create_version TEXT, "
1431      "db_create_time INT, "
1432      "next_id INT default -2, "
1433      "cache_guid TEXT )");
1434  return db_->Execute(query.c_str());
1435}
1436
1437// This function checks to see if the given list of Metahandles has any nodes
1438// whose PARENT_ID values refer to ID values that do not actually exist.
1439// Returns true on success.
1440bool DirectoryBackingStore::VerifyReferenceIntegrity(
1441    const Directory::MetahandlesMap* handles_map) {
1442  TRACE_EVENT0("sync", "SyncDatabaseIntegrityCheck");
1443  using namespace syncable;
1444  typedef base::hash_set<std::string> IdsSet;
1445
1446  IdsSet ids_set;
1447  bool is_ok = true;
1448
1449  for (Directory::MetahandlesMap::const_iterator it = handles_map->begin();
1450       it != handles_map->end(); ++it) {
1451    EntryKernel* entry = it->second;
1452    bool is_duplicate_id = !(ids_set.insert(entry->ref(ID).value()).second);
1453    is_ok = is_ok && !is_duplicate_id;
1454  }
1455
1456  IdsSet::iterator end = ids_set.end();
1457  for (Directory::MetahandlesMap::const_iterator it = handles_map->begin();
1458       it != handles_map->end(); ++it) {
1459    EntryKernel* entry = it->second;
1460    bool parent_exists = (ids_set.find(entry->ref(PARENT_ID).value()) != end);
1461    if (!parent_exists) {
1462      return false;
1463    }
1464  }
1465  return is_ok;
1466}
1467
1468void DirectoryBackingStore::PrepareSaveEntryStatement(
1469    EntryTable table, sql::Statement* save_statement) {
1470  if (save_statement->is_valid())
1471    return;
1472
1473  string query;
1474  query.reserve(kUpdateStatementBufferSize);
1475  switch (table) {
1476    case METAS_TABLE:
1477      query.append("INSERT OR REPLACE INTO metas ");
1478      break;
1479    case DELETE_JOURNAL_TABLE:
1480      query.append("INSERT OR REPLACE INTO deleted_metas ");
1481      break;
1482  }
1483
1484  string values;
1485  values.reserve(kUpdateStatementBufferSize);
1486  values.append(" VALUES ");
1487  const char* separator = "( ";
1488  int i = 0;
1489  for (i = BEGIN_FIELDS; i < FIELD_COUNT; ++i) {
1490    query.append(separator);
1491    values.append(separator);
1492    separator = ", ";
1493    query.append(ColumnName(i));
1494    values.append("?");
1495  }
1496  query.append(" ) ");
1497  values.append(" )");
1498  query.append(values);
1499  save_statement->Assign(db_->GetUniqueStatement(
1500      base::StringPrintf(query.c_str(), "metas").c_str()));
1501}
1502
1503}  // namespace syncable
1504}  // namespace syncer
1505