1// Copyright (c) 2010 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "chrome/browser/sync/syncable/directory_backing_store.h"
6
7#include "build/build_config.h"
8
9#if defined(OS_MACOSX)
10#include <CoreFoundation/CoreFoundation.h>
11#endif
12
13#include <limits>
14
15#include "base/file_util.h"
16#include "base/hash_tables.h"
17#include "base/logging.h"
18#include "base/metrics/histogram.h"
19#include "base/stl_util-inl.h"
20#include "base/string_number_conversions.h"
21#include "base/string_util.h"
22#include "chrome/browser/sync/protocol/bookmark_specifics.pb.h"
23#include "chrome/browser/sync/protocol/service_constants.h"
24#include "chrome/browser/sync/protocol/sync.pb.h"
25#include "chrome/browser/sync/syncable/syncable-inl.h"
26#include "chrome/browser/sync/syncable/syncable_columns.h"
27#include "chrome/browser/sync/util/crypto_helpers.h"
28#include "chrome/common/sqlite_utils.h"
29#include "third_party/sqlite/sqlite3.h"
30
// Sometimes threads contend on the DB lock itself, especially when one thread
// is calling SaveChanges.  In the worst case scenario, the user can put his
// laptop to sleep during db contention, and wake up the laptop days later, so
// infinity seems like the best choice here.
const int kDirectoryBackingStoreBusyTimeoutMs = std::numeric_limits<int>::max();

using std::string;

namespace syncable {

// This just has to be big enough to hold an UPDATE or INSERT statement that
// modifies all the columns in the entry table.  Used as a reserve() hint to
// avoid repeated reallocation while building SQL strings.
static const string::size_type kUpdateStatementBufferSize = 2048;

// Increment this version whenever updating DB tables.  Each bump must be
// paired with a MigrateVersionNToN+1 step in InitializeTables() below.
extern const int32 kCurrentDBVersion;  // Global visibility for our unittest.
const int32 kCurrentDBVersion = 75;
48
49namespace {
50
51int ExecQuery(sqlite3* dbhandle, const char* query) {
52  SQLStatement statement;
53  int result = statement.prepare(dbhandle, query);
54  if (SQLITE_OK != result)
55    return result;
56  do {
57    result = statement.step();
58  } while (SQLITE_ROW == result);
59
60  return result;
61}
62
// Returns a fresh random 128-bit identifier, hex-encoded, used to uniquely
// tag this client's database instance.
string GenerateCacheGUID() {
  return Generate128BitRandomHexString();
}
66
67}  // namespace
68
69
70// Iterate over the fields of |entry| and bind each to |statement| for
71// updating.  Returns the number of args bound.
72int BindFields(const EntryKernel& entry, SQLStatement* statement) {
73  int index = 0;
74  int i = 0;
75  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
76    statement->bind_int64(index++, entry.ref(static_cast<Int64Field>(i)));
77  }
78  for ( ; i < ID_FIELDS_END; ++i) {
79    statement->bind_string(index++, entry.ref(static_cast<IdField>(i)).s_);
80  }
81  for ( ; i < BIT_FIELDS_END; ++i) {
82    statement->bind_int(index++, entry.ref(static_cast<BitField>(i)));
83  }
84  for ( ; i < STRING_FIELDS_END; ++i) {
85    statement->bind_string(index++, entry.ref(static_cast<StringField>(i)));
86  }
87  std::string temp;
88  for ( ; i < PROTO_FIELDS_END; ++i) {
89    entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp);
90    statement->bind_blob(index++, temp.data(), temp.length());
91  }
92  return index;
93}
94
// Reads the next row from |statement| into a freshly-allocated EntryKernel.
// The caller owns the returned EntryKernel*.  On SQLITE_ROW, *kernel holds
// the new entry; otherwise *kernel is left NULL.  Returns the sqlite result
// code from the step.  The column order must match AppendColumnList.
int UnpackEntry(SQLStatement* statement, EntryKernel** kernel) {
  *kernel = NULL;
  int query_result = statement->step();
  if (SQLITE_ROW == query_result) {
    *kernel = new EntryKernel;
    (*kernel)->clear_dirty(NULL);
    DCHECK(statement->column_count() == static_cast<int>(FIELD_COUNT));
    int i = 0;
    for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
      (*kernel)->put(static_cast<Int64Field>(i), statement->column_int64(i));
    }
    for ( ; i < ID_FIELDS_END; ++i) {
      (*kernel)->mutable_ref(static_cast<IdField>(i)).s_ =
          statement->column_string(i);
    }
    for ( ; i < BIT_FIELDS_END; ++i) {
      (*kernel)->put(static_cast<BitField>(i), (0 != statement->column_int(i)));
    }
    for ( ; i < STRING_FIELDS_END; ++i) {
      (*kernel)->put(static_cast<StringField>(i),
          statement->column_string(i));
    }
    for ( ; i < PROTO_FIELDS_END; ++i) {
      (*kernel)->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray(
          statement->column_blob(i), statement->column_bytes(i));
    }
    // Zero out any remaining (non-persisted) fields.
    ZeroFields((*kernel), i);
  } else {
    // Anything other than DONE here indicates a read error.
    DCHECK(SQLITE_DONE == query_result);
    (*kernel) = NULL;
  }
  return query_result;
}
129
130namespace {
131
132string ComposeCreateTableColumnSpecs() {
133  const ColumnSpec* begin = g_metas_columns;
134  const ColumnSpec* end = g_metas_columns + arraysize(g_metas_columns);
135  string query;
136  query.reserve(kUpdateStatementBufferSize);
137  char separator = '(';
138  for (const ColumnSpec* column = begin; column != end; ++column) {
139    query.push_back(separator);
140    separator = ',';
141    query.append(column->name);
142    query.push_back(' ');
143    query.append(column->spec);
144  }
145  query.push_back(')');
146  return query;
147}
148
149void AppendColumnList(std::string* output) {
150  const char* joiner = " ";
151  // Be explicit in SELECT order to match up with UnpackEntry.
152  for (int i = BEGIN_FIELDS; i < BEGIN_FIELDS + FIELD_COUNT; ++i) {
153    output->append(joiner);
154    output->append(ColumnName(i));
155    joiner = ", ";
156  }
157}
158
159}  // namespace
160
161///////////////////////////////////////////////////////////////////////////////
162// DirectoryBackingStore implementation.
163
// Constructs a backing store for the directory |dir_name| persisted at
// |backing_filepath|.  Database handles are opened lazily, not here.
DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
                                             const FilePath& backing_filepath)
    : load_dbhandle_(NULL),
      save_dbhandle_(NULL),
      dir_name_(dir_name),
      backing_filepath_(backing_filepath),
      needs_column_refresh_(false) {
}
172
// Closes any sqlite handles still open from Load() or SaveChanges().
DirectoryBackingStore::~DirectoryBackingStore() {
  if (NULL != load_dbhandle_) {
    sqlite3_close(load_dbhandle_);
    load_dbhandle_ = NULL;
  }
  if (NULL != save_dbhandle_) {
    sqlite3_close(save_dbhandle_);
    save_dbhandle_ = NULL;
  }
}
183
184bool DirectoryBackingStore::OpenAndConfigureHandleHelper(
185    sqlite3** handle) const {
186  if (SQLITE_OK == sqlite_utils::OpenSqliteDb(backing_filepath_, handle)) {
187    sqlite_utils::scoped_sqlite_db_ptr scoped_handle(*handle);
188    sqlite3_busy_timeout(scoped_handle.get(), std::numeric_limits<int>::max());
189    {
190      string integrity_error;
191      bool is_ok = CheckIntegrity(scoped_handle.get(), &integrity_error);
192      if (!is_ok) {
193        LOG(ERROR) << "Integrity check failed: " << integrity_error;
194        return false;
195      }
196    }
197    {
198      SQLStatement statement;
199      statement.prepare(scoped_handle.get(), "PRAGMA fullfsync = 1");
200      if (SQLITE_DONE != statement.step()) {
201        LOG(ERROR) << sqlite3_errmsg(scoped_handle.get());
202        return false;
203      }
204    }
205    {
206      SQLStatement statement;
207      statement.prepare(scoped_handle.get(), "PRAGMA synchronous = 2");
208      if (SQLITE_DONE != statement.step()) {
209        LOG(ERROR) << sqlite3_errmsg(scoped_handle.get());
210        return false;
211      }
212    }
213    sqlite3_busy_timeout(scoped_handle.release(),
214                         kDirectoryBackingStoreBusyTimeoutMs);
215#if defined(OS_WIN)
216    // Do not index this file. Scanning can occur every time we close the file,
217    // which causes long delays in SQLite's file locking.
218    const DWORD attrs = GetFileAttributes(backing_filepath_.value().c_str());
219    const BOOL attrs_set =
220      SetFileAttributes(backing_filepath_.value().c_str(),
221                        attrs | FILE_ATTRIBUTE_NOT_CONTENT_INDEXED);
222#endif
223
224    return true;
225  }
226  return false;
227}
228
229bool DirectoryBackingStore::CheckIntegrity(sqlite3* handle, string* error)
230    const {
231  SQLStatement statement;
232  statement.prepare(handle, "PRAGMA integrity_check(1)");
233  if (SQLITE_ROW != statement.step()) {
234    *error =  sqlite3_errmsg(handle);
235    return false;
236  }
237  string integrity_result = statement.column_text(0);
238  if (integrity_result != "ok") {
239    *error = integrity_result;
240    return false;
241  }
242  return true;
243}
244
245DirOpenResult DirectoryBackingStore::DoLoad(MetahandlesIndex* entry_bucket,
246    Directory::KernelLoadInfo* kernel_load_info) {
247  {
248    DirOpenResult result = InitializeTables();
249    if (result != OPENED)
250      return result;
251  }
252
253  if (!DropDeletedEntries())
254    return FAILED_DATABASE_CORRUPT;
255  if (!LoadEntries(entry_bucket))
256    return FAILED_DATABASE_CORRUPT;
257  if (!LoadInfo(kernel_load_info))
258    return FAILED_DATABASE_CORRUPT;
259
260  return OPENED;
261}
262
263DirOpenResult DirectoryBackingStore::Load(MetahandlesIndex* entry_bucket,
264    Directory::KernelLoadInfo* kernel_load_info) {
265
266  // Open database handle.
267  if (!BeginLoad())
268    return FAILED_OPEN_DATABASE;
269
270  // Load data from the database.
271  DirOpenResult result = DoLoad(entry_bucket, kernel_load_info);
272
273  // Clean up partial results after failure.
274  if (result != OPENED)
275    STLDeleteElements(entry_bucket);
276
277  // Close database handle.
278  EndLoad();
279
280  return result;
281}
282
// Opens load_dbhandle_.  If the first attempt fails (e.g. corruption), the
// database file is deleted and recreated, and the failure is recorded in a
// per-platform histogram.  Returns true if a usable handle was obtained.
bool DirectoryBackingStore::BeginLoad() {
  DCHECK(load_dbhandle_ == NULL);
  bool ret = OpenAndConfigureHandleHelper(&load_dbhandle_);
  if (ret)
    return true;
  // Something's gone wrong. Nuke the database and try again.
  using ::operator<<;  // For string16.
  LOG(ERROR) << "Sync database " << backing_filepath_.value()
             << " corrupt. Deleting and recreating.";
  file_util::Delete(backing_filepath_, false);
  bool failed_again = !OpenAndConfigureHandleHelper(&load_dbhandle_);

  // Using failed_again here lets us distinguish from cases where corruption
  // occurred even when re-opening a fresh directory (they'll go in a separate
  // double weight histogram bucket).  Failing twice in a row means we disable
  // sync, so it's useful to see this number separately.
  int bucket = failed_again ? 2 : 1;
#if defined(OS_WIN)
  UMA_HISTOGRAM_COUNTS_100("Sync.DirectoryOpenFailedWin", bucket);
#elif defined(OS_MACOSX)
  UMA_HISTOGRAM_COUNTS_100("Sync.DirectoryOpenFailedMac", bucket);
#else
  UMA_HISTOGRAM_COUNTS_100("Sync.DirectoryOpenFailedNotWinMac", bucket);

#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
  UMA_HISTOGRAM_COUNTS_100("Sync.DirectoryOpenFailedLinux", bucket);
#elif defined(OS_CHROMEOS)
  UMA_HISTOGRAM_COUNTS_100("Sync.DirectoryOpenFailedCros", bucket);
#else
  UMA_HISTOGRAM_COUNTS_100("Sync.DirectoryOpenFailedOther", bucket);
#endif  // OS_LINUX && !OS_CHROMEOS
#endif  // OS_WIN
  return !failed_again;
}
317
// Closes the handle opened by BeginLoad(); loading is complete.
void DirectoryBackingStore::EndLoad() {
  sqlite3_close(load_dbhandle_);
  load_dbhandle_ = NULL;  // No longer used.
}
322
// Closes the lazily-opened save handle used by SaveChanges().
void DirectoryBackingStore::EndSave() {
  sqlite3_close(save_dbhandle_);
  save_dbhandle_ = NULL;
}
327
328bool DirectoryBackingStore::DeleteEntries(const MetahandleSet& handles) {
329  if (handles.empty())
330    return true;
331
332  sqlite3* dbhandle = LazyGetSaveHandle();
333
334  string query = "DELETE FROM metas WHERE metahandle IN (";
335  for (MetahandleSet::const_iterator it = handles.begin(); it != handles.end();
336       ++it) {
337    if (it != handles.begin())
338      query.append(",");
339    query.append(base::Int64ToString(*it));
340  }
341  query.append(")");
342  SQLStatement statement;
343  int result = statement.prepare(dbhandle, query.data(), query.size());
344  if (SQLITE_OK == result)
345    result = statement.step();
346
347  return SQLITE_DONE == result;
348}
349
// Writes a snapshot of dirty entries, purged handles, and (optionally) the
// updated kernel share_info/models state to the database inside one
// exclusive transaction.  Returns false if any step fails; in that case the
// transaction is never committed.
bool DirectoryBackingStore::SaveChanges(
    const Directory::SaveChangesSnapshot& snapshot) {
  sqlite3* dbhandle = LazyGetSaveHandle();

  // SQLTransaction::BeginExclusive causes a disk write to occur. This is not
  // something that should happen every 10 seconds when this function runs, so
  // just stop here if there's nothing to save.
  bool save_info =
    (Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status);
  if (snapshot.dirty_metas.size() < 1 && !save_info)
    return true;

  SQLTransaction transaction(dbhandle);
  if (SQLITE_OK != transaction.BeginExclusive())
    return false;

  // Write each dirty entry; SaveEntryToDB requires exactly one row changed.
  for (OriginalEntries::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    DCHECK(i->is_dirty());
    if (!SaveEntryToDB(*i))
      return false;
  }

  if (!DeleteEntries(snapshot.metahandles_to_purge))
    return false;

  if (save_info) {
    const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
    SQLStatement update;
    update.prepare(dbhandle, "UPDATE share_info "
                   "SET store_birthday = ?, "
                   "next_id = ?, "
                   "notification_state = ?, "
                   "autofill_migration_state = ?, "
                   "bookmarks_added_during_autofill_migration = ?, "
                   "autofill_migration_time = ?, "
                   "autofill_entries_added_during_migration = ?, "
                   "autofill_profiles_added_during_migration = ? ");

    const syncable::AutofillMigrationDebugInfo& debug_info =
        info.autofill_migration_debug_info;
    update.bind_string(0, info.store_birthday);
    update.bind_int64(1, info.next_id);
    update.bind_blob(2, info.notification_state.data(),
                     info.notification_state.size());
    update.bind_int(3, info.autofill_migration_state);
    update.bind_int(4,
        debug_info.bookmarks_added_during_migration);
    update.bind_int64(5,
        debug_info.autofill_migration_time);
    update.bind_int(6,
        debug_info.autofill_entries_added_during_migration);
    update.bind_int(7,
        debug_info.autofill_profile_added_during_migration);

    // share_info is expected to hold a single row, so exactly one row must
    // be affected.
    if (!(SQLITE_DONE == update.step() &&
          SQLITE_OK == update.reset() &&
          1 == update.changes())) {
      return false;
    }

    // Persist per-datatype download progress and initial-sync state.
    for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
      SQLStatement op;
      op.prepare(dbhandle, "INSERT OR REPLACE INTO models (model_id, "
      "progress_marker, initial_sync_ended) VALUES ( ?, ?, ?)");
      // We persist not ModelType but rather a protobuf-derived ID.
      string model_id = ModelTypeEnumToModelId(ModelTypeFromInt(i));
      string progress_marker;
      info.download_progress[i].SerializeToString(&progress_marker);
      op.bind_blob(0, model_id.data(), model_id.length());
      op.bind_blob(1, progress_marker.data(), progress_marker.length());
      op.bind_bool(2, info.initial_sync_ended[i]);

      if (!(SQLITE_DONE == op.step() &&
            SQLITE_OK == op.reset() &&
            1 == op.changes())) {
        return false;
      }
    }
  }

  return (SQLITE_OK == transaction.Commit());
}
433
// Brings the on-disk schema up to kCurrentDBVersion inside an exclusive
// transaction, applying one MigrateVersionNToN+1 step per known version.
// Unknown/too-old versions fall back to dropping and recreating all tables;
// a version newer than this build reports FAILED_NEWER_VERSION.
DirOpenResult DirectoryBackingStore::InitializeTables() {
  SQLTransaction transaction(load_dbhandle_);
  if (SQLITE_OK != transaction.BeginExclusive()) {
    return FAILED_DISK_FULL;
  }
  int version_on_disk = GetVersion();
  int last_result = SQLITE_DONE;

  // Upgrade from version 67. Version 67 was widely distributed as the original
  // Bookmark Sync release. Version 68 removed unique naming.
  if (version_on_disk == 67) {
    if (MigrateVersion67To68())
      version_on_disk = 68;
  }
  // Version 69 introduced additional datatypes.
  if (version_on_disk == 68) {
    if (MigrateVersion68To69())
      version_on_disk = 69;
  }

  if (version_on_disk == 69) {
    if (MigrateVersion69To70())
      version_on_disk = 70;
  }

  // Version 71 changed the sync progress information to be per-datatype.
  if (version_on_disk == 70) {
    if (MigrateVersion70To71())
      version_on_disk = 71;
  }

  // Version 72 removed extended attributes, a legacy way to do extensible
  // key/value information, stored in their own table.
  if (version_on_disk == 71) {
    if (MigrateVersion71To72())
      version_on_disk = 72;
  }

  // Version 73 added a field for notification state.
  if (version_on_disk == 72) {
    if (MigrateVersion72To73())
      version_on_disk = 73;
  }

  // Version 74 added state for the autofill migration.
  if (version_on_disk == 73) {
    if (MigrateVersion73To74())
      version_on_disk = 74;
  }

  // Version 75 migrated from int64-based timestamps to per-datatype tokens.
  if (version_on_disk == 74) {
    if (MigrateVersion74To75())
      version_on_disk = 75;
  }

  // If one of the migrations requested it, drop columns that aren't current.
  // It's only safe to do this after migrating all the way to the current
  // version.
  if (version_on_disk == kCurrentDBVersion && needs_column_refresh_) {
    if (!RefreshColumns())
      version_on_disk = 0;
  }

  // A final, alternative catch-all migration to simply re-sync everything.
  if (version_on_disk != kCurrentDBVersion) {
    if (version_on_disk > kCurrentDBVersion) {
      transaction.Rollback();
      return FAILED_NEWER_VERSION;
    }
    // Fallback (re-sync everything) migration path.
    VLOG(1) << "Old/null sync database, version " << version_on_disk;
    // Delete the existing database (if any), and create a fresh one.
    DropAllTables();
    last_result = CreateTables();
  }
  if (SQLITE_DONE == last_result) {
    {
      // Sanity-read the creation metadata; failure here means the share_info
      // table is unusable.
      SQLStatement statement;
      statement.prepare(load_dbhandle_,
          "SELECT db_create_version, db_create_time FROM share_info");
      if (SQLITE_ROW != statement.step()) {
        transaction.Rollback();
        return FAILED_DISK_FULL;
      }
      string db_create_version = statement.column_text(0);
      int db_create_time = statement.column_int(1);
      statement.reset();
      VLOG(1) << "DB created at " << db_create_time << " by version " <<
          db_create_version;
    }
    // COMMIT TRANSACTION rolls back on failure.
    if (SQLITE_OK == transaction.Commit())
      return OPENED;
  } else {
    transaction.Rollback();
  }
  return FAILED_DISK_FULL;
}
533
534bool DirectoryBackingStore::RefreshColumns() {
535  DCHECK(needs_column_refresh_);
536
537  // Create a new table named temp_metas.
538  SafeDropTable("temp_metas");
539  if (CreateMetasTable(true) != SQLITE_DONE)
540    return false;
541
542  // Populate temp_metas from metas.
543  std::string query = "INSERT INTO temp_metas (";
544  AppendColumnList(&query);
545  query.append(") SELECT ");
546  AppendColumnList(&query);
547  query.append(" FROM metas");
548  if (ExecQuery(load_dbhandle_, query.c_str()) != SQLITE_DONE)
549    return false;
550
551  // Drop metas.
552  SafeDropTable("metas");
553
554  // Rename temp_metas -> metas.
555  int result = ExecQuery(load_dbhandle_,
556                         "ALTER TABLE temp_metas RENAME TO metas");
557  if (result != SQLITE_DONE)
558    return false;
559  needs_column_refresh_ = false;
560  return true;
561}
562
563bool DirectoryBackingStore::LoadEntries(MetahandlesIndex* entry_bucket) {
564  string select;
565  select.reserve(kUpdateStatementBufferSize);
566  select.append("SELECT ");
567  AppendColumnList(&select);
568  select.append(" FROM metas ");
569  SQLStatement statement;
570  statement.prepare(load_dbhandle_, select.c_str());
571  base::hash_set<int64> handles;
572  EntryKernel* kernel = NULL;
573  int query_result;
574  while (SQLITE_ROW == (query_result = UnpackEntry(&statement, &kernel))) {
575    DCHECK(handles.insert(kernel->ref(META_HANDLE)).second);  // Only in debug.
576    entry_bucket->insert(kernel);
577  }
578  return SQLITE_DONE == query_result;
579}
580
// Reads the single-row share_info table, the per-datatype rows of the models
// table, and the maximum metahandle in use into |info|.  Returns false if a
// required row is missing.
bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
  {
    SQLStatement query;
    query.prepare(load_dbhandle_,
                  "SELECT store_birthday, next_id, cache_guid, "
                  "notification_state, autofill_migration_state, "
                  "bookmarks_added_during_autofill_migration, "
                  "autofill_migration_time, "
                  "autofill_entries_added_during_migration, "
                  "autofill_profiles_added_during_migration "
                  "FROM share_info");
    if (SQLITE_ROW != query.step())
      return false;
    info->kernel_info.store_birthday = query.column_string(0);
    info->kernel_info.next_id = query.column_int64(1);
    info->cache_guid = query.column_string(2);
    query.column_blob_as_string(3, &info->kernel_info.notification_state);
    info->kernel_info.autofill_migration_state =
        static_cast<AutofillMigrationState> (query.column_int(4));
    syncable::AutofillMigrationDebugInfo& debug_info =
        info->kernel_info.autofill_migration_debug_info;
    debug_info.bookmarks_added_during_migration =
      query.column_int(5);
    debug_info.autofill_migration_time =
      query.column_int64(6);
    debug_info.autofill_entries_added_during_migration =
      query.column_int(7);
    debug_info.autofill_profile_added_during_migration =
      query.column_int(8);
  }
  {
    // Per-datatype download progress and initial-sync flags; model IDs that
    // don't map to a real datatype are skipped.
    SQLStatement query;
    query.prepare(load_dbhandle_,
        "SELECT model_id, progress_marker, initial_sync_ended "
        "FROM models");
    while (SQLITE_ROW == query.step()) {
      ModelType type = ModelIdToModelTypeEnum(query.column_blob(0),
                                              query.column_bytes(0));
      if (type != UNSPECIFIED && type != TOP_LEVEL_FOLDER) {
        info->kernel_info.download_progress[type].ParseFromArray(
            query.column_blob(1), query.column_bytes(1));
        info->kernel_info.initial_sync_ended[type] = query.column_bool(2);
      }
    }
  }
  {
    // Record the largest metahandle in use so new entries get unique handles.
    SQLStatement query;
    query.prepare(load_dbhandle_,
                  "SELECT MAX(metahandle) FROM metas");
    if (SQLITE_ROW != query.step())
      return false;
    info->max_metahandle = query.column_int64(0);
  }
  return true;
}
636
637bool DirectoryBackingStore::SaveEntryToDB(const EntryKernel& entry) {
638  DCHECK(save_dbhandle_);
639  string query;
640  query.reserve(kUpdateStatementBufferSize);
641  query.append("INSERT OR REPLACE INTO metas ");
642  string values;
643  values.reserve(kUpdateStatementBufferSize);
644  values.append("VALUES ");
645  const char* separator = "( ";
646  int i = 0;
647  for (i = BEGIN_FIELDS; i < PROTO_FIELDS_END; ++i) {
648    query.append(separator);
649    values.append(separator);
650    separator = ", ";
651    query.append(ColumnName(i));
652    values.append("?");
653  }
654
655  query.append(" ) ");
656  values.append(" )");
657  query.append(values);
658  SQLStatement statement;
659  statement.prepare(save_dbhandle_, query.c_str());
660  BindFields(entry, &statement);
661  return (SQLITE_DONE == statement.step() &&
662          SQLITE_OK == statement.reset() &&
663          1 == statement.changes());
664}
665
666bool DirectoryBackingStore::DropDeletedEntries() {
667  static const char delete_metas[] = "DELETE FROM metas WHERE metahandle IN "
668                                     "(SELECT metahandle from death_row)";
669  // Put all statements into a transaction for better performance
670  SQLTransaction transaction(load_dbhandle_);
671  transaction.Begin();
672  if (SQLITE_DONE != ExecQuery(
673                         load_dbhandle_,
674                         "CREATE TEMP TABLE death_row (metahandle BIGINT)")) {
675    return false;
676  }
677  if (SQLITE_DONE != ExecQuery(load_dbhandle_,
678                               "INSERT INTO death_row "
679                               "SELECT metahandle from metas WHERE is_del > 0 "
680                               " AND is_unsynced < 1"
681                               " AND is_unapplied_update < 1")) {
682    return false;
683  }
684  if (SQLITE_DONE != ExecQuery(load_dbhandle_, delete_metas)) {
685    return false;
686  }
687  if (SQLITE_DONE != ExecQuery(load_dbhandle_, "DROP TABLE death_row")) {
688    return false;
689  }
690  transaction.Commit();
691  return true;
692}
693
694int DirectoryBackingStore::SafeDropTable(const char* table_name) {
695  string query = "DROP TABLE IF EXISTS ";
696  query.append(table_name);
697  SQLStatement statement;
698  int result = statement.prepare(load_dbhandle_, query.data(),
699                                 query.size());
700  if (SQLITE_OK == result) {
701    result = statement.step();
702    if (SQLITE_DONE == result)
703      statement.finalize();
704  }
705
706  return result;
707}
708
709void DirectoryBackingStore::DropAllTables() {
710  SafeDropTable("metas");
711  SafeDropTable("temp_metas");
712  SafeDropTable("share_info");
713  SafeDropTable("temp_share_info");
714  SafeDropTable("share_version");
715  SafeDropTable("extended_attributes");
716  SafeDropTable("models");
717  SafeDropTable("temp_models");
718  needs_column_refresh_ = false;
719}
720
// static
// Decodes a stored model ID (a serialized EntitySpecifics blob; see
// ModelTypeEnumToModelId) back into its ModelType.  Returns UNSPECIFIED if
// the blob does not parse.
ModelType DirectoryBackingStore::ModelIdToModelTypeEnum(
    const void* data, int size) {
  sync_pb::EntitySpecifics specifics;
  if (!specifics.ParseFromArray(data, size))
    return syncable::UNSPECIFIED;
  return syncable::GetModelTypeFromSpecifics(specifics);
}
729
// static
// Encodes |model_type| as its stable on-disk ID: an EntitySpecifics protobuf
// with that type's default extension value set, serialized to bytes.
string DirectoryBackingStore::ModelTypeEnumToModelId(ModelType model_type) {
  sync_pb::EntitySpecifics specifics;
  syncable::AddDefaultExtensionValue(model_type, &specifics);
  return specifics.SerializeAsString();
}
736
737bool DirectoryBackingStore::MigrateToSpecifics(
738    const char* old_columns,
739    const char* specifics_column,
740    void (*handler_function)(SQLStatement* old_value_query,
741                             int old_value_column,
742                             sync_pb::EntitySpecifics* mutable_new_value)) {
743  std::string query_sql = StringPrintf("SELECT metahandle, %s, %s FROM metas",
744                                       specifics_column, old_columns);
745  std::string update_sql = StringPrintf(
746      "UPDATE metas SET %s = ? WHERE metahandle = ?", specifics_column);
747  SQLStatement query;
748  query.prepare(load_dbhandle_, query_sql.c_str());
749  while (query.step() == SQLITE_ROW) {
750    int64 metahandle = query.column_int64(0);
751    std::string new_value_bytes;
752    query.column_blob_as_string(1, &new_value_bytes);
753    sync_pb::EntitySpecifics new_value;
754    new_value.ParseFromString(new_value_bytes);
755    handler_function(&query, 2, &new_value);
756    new_value.SerializeToString(&new_value_bytes);
757
758    SQLStatement update;
759    update.prepare(load_dbhandle_, update_sql.data(), update_sql.length());
760    update.bind_blob(0, new_value_bytes.data(), new_value_bytes.length());
761    update.bind_int64(1, metahandle);
762    if (update.step() != SQLITE_DONE) {
763      NOTREACHED();
764      return false;
765    }
766  }
767  return true;
768}
769
770bool DirectoryBackingStore::AddColumn(const ColumnSpec* column) {
771  SQLStatement add_column;
772  std::string sql = StringPrintf("ALTER TABLE metas ADD COLUMN %s %s",
773                                 column->name, column->spec);
774  add_column.prepare(load_dbhandle_, sql.c_str());
775  return add_column.step() == SQLITE_DONE;
776}
777
778bool DirectoryBackingStore::SetVersion(int version) {
779  SQLStatement statement;
780  statement.prepare(load_dbhandle_, "UPDATE share_version SET data = ?");
781  statement.bind_int(0, version);
782  return statement.step() == SQLITE_DONE;
783}
784
785int DirectoryBackingStore::GetVersion() {
786  if (!sqlite_utils::DoesSqliteTableExist(load_dbhandle_, "share_version"))
787    return 0;
788  SQLStatement version_query;
789  version_query.prepare(load_dbhandle_, "SELECT data from share_version");
790  if (SQLITE_ROW != version_query.step())
791    return 0;
792  int value = version_query.column_int(0);
793  if (version_query.reset() != SQLITE_OK)
794    return 0;
795  return value;
796}
797
// Migration 67 -> 68: removes the unique-naming columns.
bool DirectoryBackingStore::MigrateVersion67To68() {
  // This change simply removed three columns:
  //   string NAME
  //   string UNSANITIZED_NAME
  //   string SERVER_NAME
  // No data migration is necessary, but we should do a column refresh.
  // (The actual column drop happens in RefreshColumns once all migrations
  // have run.)
  SetVersion(68);
  needs_column_refresh_ = true;
  return true;
}
808
809bool DirectoryBackingStore::MigrateVersion69To70() {
810  // Added "unique_client_tag", renamed "singleton_tag" to unique_server_tag
811  SetVersion(70);
812  // We use these metas column names but if in the future
813  // we rename the column again, we need to inline the old
814  // intermediate name / column spec.
815  if (!AddColumn(&g_metas_columns[UNIQUE_SERVER_TAG])) {
816    return false;
817  }
818  if (!AddColumn(&g_metas_columns[UNIQUE_CLIENT_TAG])) {
819    return false;
820  }
821  needs_column_refresh_ = true;
822
823  SQLStatement statement;
824  statement.prepare(load_dbhandle_,
825      "UPDATE metas SET unique_server_tag = singleton_tag");
826  return statement.step() == SQLITE_DONE;
827}
828
829namespace {
830
// Callback passed to MigrateToSpecifics for the v68->v69 migration.  See
// MigrateVersion68To69().  Reads four legacy columns starting at
// |old_value_column| (is_bookmark_object, url, favicon, is_dir) and folds
// them into the bookmark extension of |mutable_new_value|.
void EncodeBookmarkURLAndFavicon(SQLStatement* old_value_query,
                                 int old_value_column,
                                 sync_pb::EntitySpecifics* mutable_new_value) {
  // Extract data from the column trio we expect.
  bool old_is_bookmark_object = old_value_query->column_bool(old_value_column);
  std::string old_url = old_value_query->column_string(old_value_column + 1);
  std::string old_favicon;
  old_value_query->column_blob_as_string(old_value_column + 2, &old_favicon);
  bool old_is_dir = old_value_query->column_bool(old_value_column + 3);

  if (old_is_bookmark_object) {
    sync_pb::BookmarkSpecifics* bookmark_data =
        mutable_new_value->MutableExtension(sync_pb::bookmark);
    // Folders carry no URL or favicon; only leaf bookmarks do.
    if (!old_is_dir) {
      bookmark_data->set_url(old_url);
      bookmark_data->set_favicon(old_favicon);
    }
  }
}
852
853}  // namespace
854
// Migration 68 -> 69: folds legacy bookmark columns into protobuf specifics.
bool DirectoryBackingStore::MigrateVersion68To69() {
  // In Version 68, there were columns on table 'metas':
  //   string BOOKMARK_URL
  //   string SERVER_BOOKMARK_URL
  //   blob BOOKMARK_FAVICON
  //   blob SERVER_BOOKMARK_FAVICON
  // In version 69, these columns went away in favor of storing
  // a serialized EntrySpecifics protobuf in the columns:
  //   protobuf blob SPECIFICS
  //   protobuf blob SERVER_SPECIFICS
  // For bookmarks, EntrySpecifics is extended as per
  // bookmark_specifics.proto. This migration converts bookmarks from the
  // former scheme to the latter scheme.

  // First, add the two new columns to the schema.
  if (!AddColumn(&g_metas_columns[SPECIFICS]))
    return false;
  if (!AddColumn(&g_metas_columns[SERVER_SPECIFICS]))
    return false;

  // Next, fold data from the old columns into the new protobuf columns,
  // once for the local values and once for the server-side values.
  if (!MigrateToSpecifics(("is_bookmark_object, bookmark_url, "
                           "bookmark_favicon, is_dir"),
                          "specifics",
                          &EncodeBookmarkURLAndFavicon)) {
    return false;
  }
  if (!MigrateToSpecifics(("server_is_bookmark_object, "
                           "server_bookmark_url, "
                           "server_bookmark_favicon, "
                           "server_is_dir"),
                          "server_specifics",
                          &EncodeBookmarkURLAndFavicon)) {
    return false;
  }

  // Lastly, fix up the "Google Chrome" folder, which is of the TOP_LEVEL_FOLDER
  // ModelType: it shouldn't have BookmarkSpecifics.
  SQLStatement clear_permanent_items;
  clear_permanent_items.prepare(load_dbhandle_,
      "UPDATE metas SET specifics = NULL, server_specifics = NULL WHERE "
      "singleton_tag IN ('google_chrome')");
  if (clear_permanent_items.step() != SQLITE_DONE)
    return false;

  SetVersion(69);
  needs_column_refresh_ = true;  // Trigger deletion of old columns.
  return true;
}
904
// Version 71, the columns 'initial_sync_ended' and 'last_sync_timestamp'
// were removed from the share_info table.  They were replaced by
// the 'models' table, which has these values on a per-datatype basis.
bool DirectoryBackingStore::MigrateVersion70To71() {
  if (SQLITE_DONE != CreateV71ModelsTable())
    return false;

  // Move data from the old share_info columns to the new models table.
  {
    SQLStatement fetch;
    fetch.prepare(load_dbhandle_,
        "SELECT last_sync_timestamp, initial_sync_ended FROM share_info");

    // Require exactly one row: the first step() must yield it, and the
    // second step() must report completion.
    if (SQLITE_ROW != fetch.step())
      return false;
    int64 last_sync_timestamp = fetch.column_int64(0);
    bool initial_sync_ended = fetch.column_bool(1);
    if (SQLITE_DONE != fetch.step())
      return false;
    SQLStatement update;
    update.prepare(load_dbhandle_, "INSERT INTO models (model_id, "
        "last_download_timestamp, initial_sync_ended) VALUES (?, ?, ?)");
    // At version 70 only bookmarks were synced, so the single pair of
    // share_info values maps onto the BOOKMARKS model row.
    // Note: SQLStatement bind indices are 0-based.
    string bookmark_model_id = ModelTypeEnumToModelId(BOOKMARKS);
    update.bind_blob(0, bookmark_model_id.data(), bookmark_model_id.size());
    update.bind_int64(1, last_sync_timestamp);
    update.bind_bool(2, initial_sync_ended);
    if (SQLITE_DONE != update.step())
      return false;
  }

  // Drop the columns from the old share_info table via a temp table.
  const bool kCreateAsTempShareInfo = true;

  int result =
      CreateShareInfoTableVersion71(kCreateAsTempShareInfo);
  if (result != SQLITE_DONE)
    return false;
  result = ExecQuery(load_dbhandle_,
                     "INSERT INTO temp_share_info (id, name, store_birthday, "
                     "db_create_version, db_create_time, next_id, cache_guid) "
                     "SELECT id, name, store_birthday, db_create_version, "
                     "db_create_time, next_id, cache_guid FROM share_info");
  if (result != SQLITE_DONE)
    return false;
  SafeDropTable("share_info");
  result = ExecQuery(load_dbhandle_,
      "ALTER TABLE temp_share_info RENAME TO share_info");
  if (result != SQLITE_DONE)
    return false;
  SetVersion(71);
  return true;
}
957
// Migrates the database schema from version 71 to 72 by dropping the
// obsolete 'extended_attributes' table.  Always succeeds.
bool DirectoryBackingStore::MigrateVersion71To72() {
  // Version 72 removed a table 'extended_attributes', whose
  // contents didn't matter.
  SafeDropTable("extended_attributes");
  SetVersion(72);
  return true;
}
965
966bool DirectoryBackingStore::MigrateVersion72To73() {
967  // Version 73 added one column to the table 'share_info': notification_state
968  int result =
969      ExecQuery(load_dbhandle_,
970                "ALTER TABLE share_info ADD COLUMN notification_state BLOB");
971  if (result != SQLITE_DONE)
972    return false;
973  SetVersion(73);
974  return true;
975}
976
977bool DirectoryBackingStore::MigrateVersion73To74() {
978  // Version 74 added the following columns to the table 'share_info':
979  //   autofill_migration_state
980  //   bookmarks_added_during_autofill_migration
981  //   autofill_migration_time
982  //   autofill_entries_added_during_migration
983  //   autofill_profiles_added_during_migration
984
985  int result =
986      ExecQuery(load_dbhandle_,
987                "ALTER TABLE share_info ADD COLUMN autofill_migration_state "
988                "INT default 0");
989  if (result != SQLITE_DONE)
990    return false;
991
992  result = ExecQuery(load_dbhandle_,
993                "ALTER TABLE share_info ADD COLUMN "
994                "bookmarks_added_during_autofill_migration "
995                "INT default 0");
996
997  if (result != SQLITE_DONE)
998    return false;
999
1000  result = ExecQuery(load_dbhandle_,
1001                "ALTER TABLE share_info ADD COLUMN autofill_migration_time "
1002                "INT default 0");
1003
1004  if (result != SQLITE_DONE)
1005    return false;
1006
1007  result = ExecQuery(load_dbhandle_,
1008                "ALTER TABLE share_info ADD COLUMN "
1009                "autofill_entries_added_during_migration "
1010                "INT default 0");
1011
1012  if (result != SQLITE_DONE)
1013    return false;
1014
1015  result = ExecQuery(load_dbhandle_,
1016                "ALTER TABLE share_info ADD COLUMN "
1017                "autofill_profiles_added_during_migration "
1018                "INT default 0");
1019
1020  if (result != SQLITE_DONE)
1021    return false;
1022
1023  SetVersion(74);
1024  return true;
1025}
1026
1027bool DirectoryBackingStore::MigrateVersion74To75() {
1028  // In version 74, there was a table 'models':
1029  //     blob model_id (entity specifics, primary key)
1030  //     int last_download_timestamp
1031  //     boolean initial_sync_ended
1032  // In version 75, we deprecated the integer-valued last_download_timestamp,
1033  // using insted a protobuf-valued progress_marker field:
1034  //     blob progress_marker
1035  // The progress_marker values are initialized from the value of
1036  // last_download_timestamp, thereby preserving the download state.
1037
1038  // Move aside the old table and create a new empty one at the current schema.
1039  if (SQLITE_DONE != ExecQuery(load_dbhandle_,
1040          "ALTER TABLE models RENAME TO temp_models")) {
1041    return false;
1042  }
1043  if (!CreateModelsTable())
1044    return false;
1045
1046  SQLStatement query;
1047  query.prepare(load_dbhandle_,
1048      "SELECT model_id, last_download_timestamp, initial_sync_ended "
1049      "FROM temp_models");
1050  while (SQLITE_ROW == query.step()) {
1051    ModelType type = ModelIdToModelTypeEnum(query.column_blob(0),
1052                                            query.column_bytes(0));
1053    if (type != UNSPECIFIED) {
1054      // Set the |timestamp_token_for_migration| on a new
1055      // DataTypeProgressMarker, using the old value of last_download_timestamp.
1056      // The server will turn this into a real token on our behalf the next
1057      // time we check for updates.
1058      sync_pb::DataTypeProgressMarker progress_marker;
1059      progress_marker.set_data_type_id(
1060          GetExtensionFieldNumberFromModelType(type));
1061      progress_marker.set_timestamp_token_for_migration(query.column_int64(1));
1062      std::string progress_blob;
1063      progress_marker.SerializeToString(&progress_blob);
1064
1065      SQLStatement update;
1066      update.prepare(load_dbhandle_, "INSERT INTO models (model_id, "
1067          "progress_marker, initial_sync_ended) VALUES (?, ?, ?)");
1068      update.bind_blob(0, query.column_blob(0), query.column_bytes(0));
1069      update.bind_blob(1, progress_blob.data(), progress_blob.length());
1070      update.bind_bool(2, query.column_bool(2));
1071      if (SQLITE_DONE != update.step())
1072        return false;
1073    }
1074  }
1075  // Drop the old table.
1076  SafeDropTable("temp_models");
1077
1078  SetVersion(75);
1079  return true;
1080}
1081
// Creates the full schema for a fresh database at kCurrentDBVersion and
// inserts the initial rows (share_version, share_info, root metas entry).
// Returns the sqlite result code of the last operation; SQLITE_DONE on
// success.
int DirectoryBackingStore::CreateTables() {
  VLOG(1) << "First run, creating tables";
  // Create two little tables share_version and share_info
  int result = ExecQuery(load_dbhandle_,
                         "CREATE TABLE share_version ("
                         "id VARCHAR(128) primary key, data INT)");
  if (result != SQLITE_DONE)
    return result;
  {
    SQLStatement statement;
    statement.prepare(load_dbhandle_, "INSERT INTO share_version VALUES(?, ?)");
    statement.bind_string(0, dir_name_);
    statement.bind_int(1, kCurrentDBVersion);
    result = statement.step();
  }
  if (result != SQLITE_DONE)
    return result;

  const bool kCreateAsTempShareInfo = false;
  result =
      CreateShareInfoTable(kCreateAsTempShareInfo);
  if (result != SQLITE_DONE)
    return result;
  {
    // next_id is hard-coded to -2 in the statement text rather than bound,
    // so bind indices 5 and up are shifted relative to the column list.
    SQLStatement statement;
    statement.prepare(load_dbhandle_, "INSERT INTO share_info VALUES"
                                      "(?, "  // id
                                      "?, "   // name
                                      "?, "   // store_birthday
                                      "?, "   // db_create_version
                                      "?, "   // db_create_time
                                      "-2, "  // next_id
                                      "?, "   // cache_guid
                                      "?, "   // autofill_migration_state
                                      "?, "   // bookmarks_added
                                              // _during_autofill_migration
                                      "?, "   // autofill_migration_time
                                      "?, "   // autofill_entries
                                              // _added_during_migration
                                      "?, "   // autofill_profiles_added
                                              // _during_migration
                                      "?);");  // notification_state
    statement.bind_string(0, dir_name_);                   // id
    statement.bind_string(1, dir_name_);                   // name
    statement.bind_string(2, "");                          // store_birthday
    statement.bind_string(3, SYNC_ENGINE_VERSION_STRING);  // db_create_version
    // NOTE(review): time(0) is truncated to int32 here.
    statement.bind_int(4, static_cast<int32>(time(0)));    // db_create_time
    statement.bind_string(5, GenerateCacheGUID());         // cache_guid
    statement.bind_int(6, 0);  // autofill_migration_state
    statement.bind_int(7, 0);  // bookmarks_added_during_autofill_migration
    statement.bind_int(8, 0);  // autofill_migration_time
    statement.bind_int(9, 0);  // autofill_entries_added_during_migration
    statement.bind_int(10, 0);  // autofill_profiles_added_during_migration
    statement.bind_blob(11, NULL, 0);                      // notification_state
    result = statement.step();
  }
  if (result != SQLITE_DONE)
    return result;

  result = CreateModelsTable();
  if (result != SQLITE_DONE)
    return result;
  // Create the big metas table.
  result = CreateMetasTable(false);
  if (result != SQLITE_DONE)
    return result;
  {
    // Insert the entry for the root into the metas table.
    const int64 now = Now();
    SQLStatement statement;
    statement.prepare(load_dbhandle_,
                      "INSERT INTO metas "
                      "( id, metahandle, is_dir, ctime, mtime) "
                      "VALUES ( \"r\", 1, 1, ?, ?)");
    statement.bind_int64(0, now);
    statement.bind_int64(1, now);
    result = statement.step();
  }
  return result;
}
1162
1163sqlite3* DirectoryBackingStore::LazyGetSaveHandle() {
1164  if (!save_dbhandle_ && !OpenAndConfigureHandleHelper(&save_dbhandle_)) {
1165    NOTREACHED() << "Unable to open handle for saving";
1166    return NULL;
1167  }
1168  return save_dbhandle_;
1169}
1170
1171int DirectoryBackingStore::CreateMetasTable(bool is_temporary) {
1172  const char* name = is_temporary ? "temp_metas" : "metas";
1173  string query = "CREATE TABLE ";
1174  query.append(name);
1175  query.append(ComposeCreateTableColumnSpecs());
1176  return ExecQuery(load_dbhandle_, query.c_str());
1177}
1178
1179int DirectoryBackingStore::CreateV71ModelsTable() {
1180  // This is an old schema for the Models table, used from versions 71 to 74.
1181  return ExecQuery(load_dbhandle_,
1182      "CREATE TABLE models ("
1183      "model_id BLOB primary key, "
1184      "last_download_timestamp INT, "
1185      // Gets set if the syncer ever gets updates from the
1186      // server and the server returns 0.  Lets us detect the
1187      // end of the initial sync.
1188      "initial_sync_ended BOOLEAN default 0)");
1189}
1190
1191int DirectoryBackingStore::CreateModelsTable() {
1192  // This is the current schema for the Models table, from version 75
1193  // onward.  If you change the schema, you'll probably want to double-check
1194  // the use of this function in the v74-v75 migration.
1195  return ExecQuery(load_dbhandle_,
1196      "CREATE TABLE models ("
1197      "model_id BLOB primary key, "
1198      "progress_marker BLOB, "
1199      // Gets set if the syncer ever gets updates from the
1200      // server and the server returns 0.  Lets us detect the
1201      // end of the initial sync.
1202      "initial_sync_ended BOOLEAN default 0)");
1203}
1204
1205int DirectoryBackingStore::CreateShareInfoTable(bool is_temporary) {
1206  const char* name = is_temporary ? "temp_share_info" : "share_info";
1207  string query = "CREATE TABLE ";
1208  query.append(name);
1209  // This is the current schema for the ShareInfo table, from version 74
1210  // onward.
1211  query.append(" ("
1212      "id TEXT primary key, "
1213      "name TEXT, "
1214      "store_birthday TEXT, "
1215      "db_create_version TEXT, "
1216      "db_create_time INT, "
1217      "next_id INT default -2, "
1218      "cache_guid TEXT, "
1219      "autofill_migration_state INT default 0, "
1220      "bookmarks_added_during_autofill_migration INT default 0, "
1221      "autofill_migration_time INT default 0, "
1222      "autofill_entries_added_during_migration INT default 0, "
1223      "autofill_profiles_added_during_migration INT default 0 ");
1224
1225  query.append(", notification_state BLOB");
1226  query.append(")");
1227  return ExecQuery(load_dbhandle_, query.c_str());
1228}
1229
1230int DirectoryBackingStore::CreateShareInfoTableVersion71(
1231    bool is_temporary) {
1232  const char* name = is_temporary ? "temp_share_info" : "share_info";
1233  string query = "CREATE TABLE ";
1234  query.append(name);
1235  // This is the schema for the ShareInfo table used from versions 71 to 72.
1236  query.append(" ("
1237      "id TEXT primary key, "
1238      "name TEXT, "
1239      "store_birthday TEXT, "
1240      "db_create_version TEXT, "
1241      "db_create_time INT, "
1242      "next_id INT default -2, "
1243      "cache_guid TEXT )");
1244  return ExecQuery(load_dbhandle_, query.c_str());
1245}
1246}  // namespace syncable
1247