directory_backing_store.cc revision 3345a6884c488ff3a535c2c9acdd33d74b37e311
1// Copyright (c) 2010 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "chrome/browser/sync/syncable/directory_backing_store.h"
6
7#include "build/build_config.h"
8
9#if defined(OS_MACOSX)
10#include <CoreFoundation/CoreFoundation.h>
11#endif
12
13#include <limits>
14
15#include "base/file_util.h"
16#include "base/hash_tables.h"
17#include "base/histogram.h"
18#include "base/logging.h"
19#include "base/stl_util-inl.h"
20#include "base/string_number_conversions.h"
21#include "base/string_util.h"
22#include "chrome/browser/sync/protocol/bookmark_specifics.pb.h"
23#include "chrome/browser/sync/protocol/service_constants.h"
24#include "chrome/browser/sync/protocol/sync.pb.h"
25#include "chrome/browser/sync/syncable/syncable-inl.h"
26#include "chrome/browser/sync/syncable/syncable_columns.h"
27#include "chrome/browser/sync/util/crypto_helpers.h"
28#include "chrome/common/sqlite_utils.h"
29#include "third_party/sqlite/sqlite3.h"
30
// Busy timeout (milliseconds) applied to sync's sqlite handles.
// Sometimes threads contend on the DB lock itself, especially when one thread
// is calling SaveChanges.  In the worst case scenario, the user can put his
// laptop to sleep during db contention, and wake up the laptop days later, so
// infinity seems like the best choice here.
const int kDirectoryBackingStoreBusyTimeoutMs = std::numeric_limits<int>::max();
36
37using std::string;
38
39namespace syncable {
40
// This just has to be big enough to hold an UPDATE or INSERT statement that
// modifies all the columns in the entry table.  Used purely as a
// string::reserve() hint; statements may exceed it without harm.
static const string::size_type kUpdateStatementBufferSize = 2048;

// Increment this version whenever updating DB tables.
// Declared extern first so the symbol has external linkage.
extern const int32 kCurrentDBVersion;  // Global visibility for our unittest.
const int32 kCurrentDBVersion = 72;
48
49namespace {
50
51int ExecQuery(sqlite3* dbhandle, const char* query) {
52  SQLStatement statement;
53  int result = statement.prepare(dbhandle, query);
54  if (SQLITE_OK != result)
55    return result;
56  do {
57    result = statement.step();
58  } while (SQLITE_ROW == result);
59
60  return result;
61}
62
63string GenerateCacheGUID() {
64  return Generate128BitRandomHexString();
65}
66
67}  // namespace
68
69
70// Iterate over the fields of |entry| and bind each to |statement| for
71// updating.  Returns the number of args bound.
72int BindFields(const EntryKernel& entry, SQLStatement* statement) {
73  int index = 0;
74  int i = 0;
75  for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
76    statement->bind_int64(index++, entry.ref(static_cast<Int64Field>(i)));
77  }
78  for ( ; i < ID_FIELDS_END; ++i) {
79    statement->bind_string(index++, entry.ref(static_cast<IdField>(i)).s_);
80  }
81  for ( ; i < BIT_FIELDS_END; ++i) {
82    statement->bind_int(index++, entry.ref(static_cast<BitField>(i)));
83  }
84  for ( ; i < STRING_FIELDS_END; ++i) {
85    statement->bind_string(index++, entry.ref(static_cast<StringField>(i)));
86  }
87  std::string temp;
88  for ( ; i < PROTO_FIELDS_END; ++i) {
89    entry.ref(static_cast<ProtoField>(i)).SerializeToString(&temp);
90    statement->bind_blob(index++, temp.data(), temp.length());
91  }
92  return index;
93}
94
// Steps |statement| once and, if a row is available, unpacks it into a newly
// allocated EntryKernel returned through |kernel|.  The caller owns the
// returned EntryKernel*.  Returns the step result: SQLITE_ROW when a kernel
// was produced, SQLITE_DONE when no rows remain (|*kernel| set to NULL), or
// an error code.  Column order must match AppendColumnList().
int UnpackEntry(SQLStatement* statement, EntryKernel** kernel) {
  *kernel = NULL;
  int query_result = statement->step();
  if (SQLITE_ROW == query_result) {
    *kernel = new EntryKernel;
    (*kernel)->clear_dirty(NULL);
    DCHECK(statement->column_count() == static_cast<int>(FIELD_COUNT));
    // |i| serves as both the field enum value and the result-column index,
    // since the SELECT lists columns in field-declaration order.
    int i = 0;
    for (i = BEGIN_FIELDS; i < INT64_FIELDS_END; ++i) {
      (*kernel)->put(static_cast<Int64Field>(i), statement->column_int64(i));
    }
    for ( ; i < ID_FIELDS_END; ++i) {
      (*kernel)->mutable_ref(static_cast<IdField>(i)).s_ =
          statement->column_string(i);
    }
    for ( ; i < BIT_FIELDS_END; ++i) {
      (*kernel)->put(static_cast<BitField>(i), (0 != statement->column_int(i)));
    }
    for ( ; i < STRING_FIELDS_END; ++i) {
      (*kernel)->put(static_cast<StringField>(i),
          statement->column_string(i));
    }
    for ( ; i < PROTO_FIELDS_END; ++i) {
      (*kernel)->mutable_ref(static_cast<ProtoField>(i)).ParseFromArray(
          statement->column_blob(i), statement->column_bytes(i));
    }
    // Remaining (non-persisted) fields start zeroed from index |i| onward.
    ZeroFields((*kernel), i);
  } else {
    DCHECK(SQLITE_DONE == query_result);
    (*kernel) = NULL;
  }
  return query_result;
}
129
130namespace {
131
132string ComposeCreateTableColumnSpecs() {
133  const ColumnSpec* begin = g_metas_columns;
134  const ColumnSpec* end = g_metas_columns + arraysize(g_metas_columns);
135  string query;
136  query.reserve(kUpdateStatementBufferSize);
137  char separator = '(';
138  for (const ColumnSpec* column = begin; column != end; ++column) {
139    query.push_back(separator);
140    separator = ',';
141    query.append(column->name);
142    query.push_back(' ');
143    query.append(column->spec);
144  }
145  query.push_back(')');
146  return query;
147}
148
149void AppendColumnList(std::string* output) {
150  const char* joiner = " ";
151  // Be explicit in SELECT order to match up with UnpackEntry.
152  for (int i = BEGIN_FIELDS; i < BEGIN_FIELDS + FIELD_COUNT; ++i) {
153    output->append(joiner);
154    output->append(ColumnName(i));
155    joiner = ", ";
156  }
157}
158
159}  // namespace
160
161///////////////////////////////////////////////////////////////////////////////
162// DirectoryBackingStore implementation.
163
// Constructs a backing store for directory |dir_name| persisted at
// |backing_filepath|.  No sqlite handle is opened here; handles are created
// later (see BeginLoad and LazyGetSaveHandle usage elsewhere in this file).
DirectoryBackingStore::DirectoryBackingStore(const string& dir_name,
                                             const FilePath& backing_filepath)
    : load_dbhandle_(NULL),
      save_dbhandle_(NULL),
      dir_name_(dir_name),
      backing_filepath_(backing_filepath),
      needs_column_refresh_(false) {
}
172
173DirectoryBackingStore::~DirectoryBackingStore() {
174  if (NULL != load_dbhandle_) {
175    sqlite3_close(load_dbhandle_);
176    load_dbhandle_ = NULL;
177  }
178  if (NULL != save_dbhandle_) {
179    sqlite3_close(save_dbhandle_);
180    save_dbhandle_ = NULL;
181  }
182}
183
// Opens the sqlite database at |backing_filepath_| and configures it:
// integrity-checked, fullfsync enabled, synchronous=FULL, and an effectively
// infinite busy timeout.  On success, the configured handle is returned
// through |handle| and true is returned; on failure the handle is closed by
// scoped_sqlite_db_ptr and false is returned.
bool DirectoryBackingStore::OpenAndConfigureHandleHelper(
    sqlite3** handle) const {
  if (SQLITE_OK == sqlite_utils::OpenSqliteDb(backing_filepath_, handle)) {
    sqlite_utils::scoped_sqlite_db_ptr scoped_handle(*handle);
    // Set a maximal busy timeout immediately so the integrity check and
    // PRAGMAs below don't fail spuriously under lock contention.
    sqlite3_busy_timeout(scoped_handle.get(), std::numeric_limits<int>::max());
    {
      string integrity_error;
      bool is_ok = CheckIntegrity(scoped_handle.get(), &integrity_error);
      if (!is_ok) {
        LOG(ERROR) << "Integrity check failed: " << integrity_error;
        return false;  // scoped_handle closes the db on the way out.
      }
    }
    {
      // Request full fsync behavior where supported.
      SQLStatement statement;
      statement.prepare(scoped_handle.get(), "PRAGMA fullfsync = 1");
      if (SQLITE_DONE != statement.step()) {
        LOG(ERROR) << sqlite3_errmsg(scoped_handle.get());
        return false;
      }
    }
    {
      // synchronous = 2 (FULL): don't return from commits until data is on
      // disk.
      SQLStatement statement;
      statement.prepare(scoped_handle.get(), "PRAGMA synchronous = 2");
      if (SQLITE_DONE != statement.step()) {
        LOG(ERROR) << sqlite3_errmsg(scoped_handle.get());
        return false;
      }
    }
    // Success: release ownership to the caller.  The busy timeout is applied
    // (again) on the released handle; kDirectoryBackingStoreBusyTimeoutMs is
    // the same INT_MAX value used above.
    sqlite3_busy_timeout(scoped_handle.release(),
                         kDirectoryBackingStoreBusyTimeoutMs);
#if defined(OS_WIN)
    // Do not index this file. Scanning can occur every time we close the file,
    // which causes long delays in SQLite's file locking.
    const DWORD attrs = GetFileAttributes(backing_filepath_.value().c_str());
    const BOOL attrs_set =
      SetFileAttributes(backing_filepath_.value().c_str(),
                        attrs | FILE_ATTRIBUTE_NOT_CONTENT_INDEXED);
#endif

    return true;
  }
  return false;
}
228
229bool DirectoryBackingStore::CheckIntegrity(sqlite3* handle, string* error)
230    const {
231  SQLStatement statement;
232  statement.prepare(handle, "PRAGMA integrity_check(1)");
233  if (SQLITE_ROW != statement.step()) {
234    *error =  sqlite3_errmsg(handle);
235    return false;
236  }
237  string integrity_result = statement.column_text(0);
238  if (integrity_result != "ok") {
239    *error = integrity_result;
240    return false;
241  }
242  return true;
243}
244
245DirOpenResult DirectoryBackingStore::DoLoad(MetahandlesIndex* entry_bucket,
246    Directory::KernelLoadInfo* kernel_load_info) {
247  {
248    DirOpenResult result = InitializeTables();
249    if (result != OPENED)
250      return result;
251  }
252
253  if (!DropDeletedEntries())
254    return FAILED_DATABASE_CORRUPT;
255  if (!LoadEntries(entry_bucket))
256    return FAILED_DATABASE_CORRUPT;
257  if (!LoadInfo(kernel_load_info))
258    return FAILED_DATABASE_CORRUPT;
259
260  return OPENED;
261}
262
263DirOpenResult DirectoryBackingStore::Load(MetahandlesIndex* entry_bucket,
264    Directory::KernelLoadInfo* kernel_load_info) {
265
266  // Open database handle.
267  if (!BeginLoad())
268    return FAILED_OPEN_DATABASE;
269
270  // Load data from the database.
271  DirOpenResult result = DoLoad(entry_bucket, kernel_load_info);
272
273  // Clean up partial results after failure.
274  if (result != OPENED)
275    STLDeleteElements(entry_bucket);
276
277  // Close database handle.
278  EndLoad();
279
280  return result;
281}
282
// Opens |load_dbhandle_|.  If the first attempt fails the database is assumed
// corrupt: the file is deleted and the open retried once, with the outcome
// recorded in a per-platform histogram (bucket 1 = recovered after delete,
// 2 = failed again).  Returns true iff a usable handle was obtained.
bool DirectoryBackingStore::BeginLoad() {
  DCHECK(load_dbhandle_ == NULL);
  bool ret = OpenAndConfigureHandleHelper(&load_dbhandle_);
  if (ret)
    return true;
  // Something's gone wrong. Nuke the database and try again.
  using ::operator<<;  // For string16.
  LOG(ERROR) << "Sync database " << backing_filepath_.value()
             << " corrupt. Deleting and recreating.";
  file_util::Delete(backing_filepath_, false);
  bool failed_again = !OpenAndConfigureHandleHelper(&load_dbhandle_);

  // Using failed_again here lets us distinguish from cases where corruption
  // occurred even when re-opening a fresh directory (they'll go in a separate
  // double weight histogram bucket).  Failing twice in a row means we disable
  // sync, so it's useful to see this number separately.
  int bucket = failed_again ? 2 : 1;
#if defined(OS_WIN)
  UMA_HISTOGRAM_COUNTS_100("Sync.DirectoryOpenFailedWin", bucket);
#elif defined(OS_MACOSX)
  UMA_HISTOGRAM_COUNTS_100("Sync.DirectoryOpenFailedMac", bucket);
#else
  // Non-Win/Mac platforms log both a combined and a per-platform histogram.
  UMA_HISTOGRAM_COUNTS_100("Sync.DirectoryOpenFailedNotWinMac", bucket);

#if defined(OS_LINUX) && !defined(OS_CHROMEOS)
  UMA_HISTOGRAM_COUNTS_100("Sync.DirectoryOpenFailedLinux", bucket);
#elif defined(OS_CHROMEOS)
  UMA_HISTOGRAM_COUNTS_100("Sync.DirectoryOpenFailedCros", bucket);
#else
  UMA_HISTOGRAM_COUNTS_100("Sync.DirectoryOpenFailedOther", bucket);
#endif  // OS_LINUX && !OS_CHROMEOS
#endif  // OS_WIN
  return !failed_again;
}
317
// Closes the load-time handle opened by BeginLoad().
void DirectoryBackingStore::EndLoad() {
  sqlite3_close(load_dbhandle_);
  load_dbhandle_ = NULL;  // No longer used.
}
322
// Closes the save-time handle (see LazyGetSaveHandle usage in SaveChanges).
void DirectoryBackingStore::EndSave() {
  sqlite3_close(save_dbhandle_);
  save_dbhandle_ = NULL;
}
327
328bool DirectoryBackingStore::DeleteEntries(const MetahandleSet& handles) {
329  if (handles.empty())
330    return true;
331
332  sqlite3* dbhandle = LazyGetSaveHandle();
333
334  string query = "DELETE FROM metas WHERE metahandle IN (";
335  for (MetahandleSet::const_iterator it = handles.begin(); it != handles.end();
336       ++it) {
337    if (it != handles.begin())
338      query.append(",");
339    query.append(base::Int64ToString(*it));
340  }
341  query.append(")");
342  SQLStatement statement;
343  int result = statement.prepare(dbhandle, query.data(), query.size());
344  if (SQLITE_OK == result)
345    result = statement.step();
346
347  return SQLITE_DONE == result;
348}
349
// Flushes |snapshot| (dirty entries, purged handles, and optionally the
// share-wide kernel info) to disk inside one exclusive transaction.  Returns
// true on success.  NOTE(review): early 'return false' paths rely on the
// SQLTransaction destructor to roll back -- confirm its semantics.
bool DirectoryBackingStore::SaveChanges(
    const Directory::SaveChangesSnapshot& snapshot) {
  sqlite3* dbhandle = LazyGetSaveHandle();

  // SQLTransaction::BeginExclusive causes a disk write to occur. This is not
  // something that should happen every 10 seconds when this function runs, so
  // just stop here if there's nothing to save.
  bool save_info =
    (Directory::KERNEL_SHARE_INFO_DIRTY == snapshot.kernel_info_status);
  if (snapshot.dirty_metas.size() < 1 && !save_info)
    return true;

  SQLTransaction transaction(dbhandle);
  if (SQLITE_OK != transaction.BeginExclusive())
    return false;

  // Write every dirty entry back into the metas table.
  for (OriginalEntries::const_iterator i = snapshot.dirty_metas.begin();
       i != snapshot.dirty_metas.end(); ++i) {
    DCHECK(i->is_dirty());
    if (!SaveEntryToDB(*i))
      return false;
  }

  if (!DeleteEntries(snapshot.metahandles_to_purge))
    return false;

  if (save_info) {
    // Update the singleton share_info row; changes() must equal 1 or the
    // update is treated as a failure.
    const Directory::PersistedKernelInfo& info = snapshot.kernel_info;
    SQLStatement update;
    update.prepare(dbhandle, "UPDATE share_info "
                   "SET store_birthday = ?, "
                   "next_id = ?");
    update.bind_string(0, info.store_birthday);
    update.bind_int64(1, info.next_id);

    if (!(SQLITE_DONE == update.step() &&
          SQLITE_OK == update.reset() &&
          1 == update.changes())) {
      return false;
    }

    // Upsert per-datatype progress into the models table.
    for (int i = FIRST_REAL_MODEL_TYPE; i < MODEL_TYPE_COUNT; ++i) {
      SQLStatement op;
      op.prepare(dbhandle, "INSERT OR REPLACE INTO models (model_id, "
      "last_download_timestamp, initial_sync_ended) VALUES ( ?, ?, ?)");
      // We persist not ModelType but rather a protobuf-derived ID.
      string model_id = ModelTypeEnumToModelId(ModelTypeFromInt(i));
      op.bind_blob(0, model_id.data(), model_id.length());
      op.bind_int64(1, info.last_download_timestamp[i]);
      op.bind_bool(2, info.initial_sync_ended[i]);

      if (!(SQLITE_DONE == op.step() &&
            SQLITE_OK == op.reset() &&
            1 == op.changes())) {
        return false;
      }
    }
  }

  return (SQLITE_OK == transaction.Commit());
}
411
// Migrates the on-disk schema, stepwise, up to kCurrentDBVersion inside one
// exclusive transaction, recreating everything from scratch when the version
// is unknown or a migration fails.  Returns OPENED on success,
// FAILED_NEWER_VERSION when the schema came from a future client, and
// FAILED_DISK_FULL for any other failure (the catch-all result on this path).
DirOpenResult DirectoryBackingStore::InitializeTables() {
  SQLTransaction transaction(load_dbhandle_);
  if (SQLITE_OK != transaction.BeginExclusive()) {
    return FAILED_DISK_FULL;
  }
  int version_on_disk = GetVersion();
  int last_result = SQLITE_OK;

  // Each migration advances |version_on_disk| only on success, so a failed
  // step leaves the version stale and triggers the fallback path below.
  // Upgrade from version 67. Version 67 was widely distributed as the original
  // Bookmark Sync release. Version 68 removed unique naming.
  if (version_on_disk == 67) {
    if (MigrateVersion67To68())
      version_on_disk = 68;
  }
  // Version 69 introduced additional datatypes.
  if (version_on_disk == 68) {
    if (MigrateVersion68To69())
      version_on_disk = 69;
  }

  if (version_on_disk == 69) {
    if (MigrateVersion69To70())
      version_on_disk = 70;
  }

  // Version 71 changed the sync progress information to be per-datatype.
  if (version_on_disk == 70) {
    if (MigrateVersion70To71())
      version_on_disk = 71;
  }

  // Version 72 removed extended attributes, a legacy way to do extensible
  // key/value information, stored in their own table.
  if (version_on_disk == 71) {
    if (MigrateVersion71To72())
      version_on_disk = 72;
  }

  // If one of the migrations requested it, drop columns that aren't current.
  // It's only safe to do this after migrating all the way to the current
  // version.
  if (version_on_disk == kCurrentDBVersion && needs_column_refresh_) {
    if (!RefreshColumns())
      version_on_disk = 0;
  }

  // A final, alternative catch-all migration to simply re-sync everything.
  if (version_on_disk != kCurrentDBVersion) {
    if (version_on_disk > kCurrentDBVersion) {
      transaction.Rollback();
      return FAILED_NEWER_VERSION;
    }
    // Fallback (re-sync everything) migration path.
    LOG(INFO) << "Old/null sync database, version " << version_on_disk;
    // Delete the existing database (if any), and create a fresh one.
    // NOTE(review): if CreateTables() fails, |last_result| stays SQLITE_OK and
    // failure is only caught by the share_info SELECT below -- confirm.
    if (SQLITE_OK == last_result) {
      DropAllTables();
      if (SQLITE_DONE == CreateTables()) {
        last_result = SQLITE_OK;
      }
    }
  }
  if (SQLITE_OK == last_result) {
    {
      // Sanity-read the share_info row; failure here means the schema is
      // unusable even after the fallback path.
      SQLStatement statement;
      statement.prepare(load_dbhandle_,
          "SELECT db_create_version, db_create_time FROM share_info");
      if (SQLITE_ROW != statement.step()) {
        transaction.Rollback();
        return FAILED_DISK_FULL;
      }
      string db_create_version = statement.column_text(0);
      int db_create_time = statement.column_int(1);
      statement.reset();
      LOG(INFO) << "DB created at " << db_create_time << " by version " <<
          db_create_version;
    }
    // COMMIT TRANSACTION rolls back on failure.
    if (SQLITE_OK == transaction.Commit())
      return OPENED;
  } else {
    transaction.Rollback();
  }
  return FAILED_DISK_FULL;
}
497
498bool DirectoryBackingStore::RefreshColumns() {
499  DCHECK(needs_column_refresh_);
500
501  // Create a new table named temp_metas.
502  SafeDropTable("temp_metas");
503  if (CreateMetasTable(true) != SQLITE_DONE)
504    return false;
505
506  // Populate temp_metas from metas.
507  std::string query = "INSERT INTO temp_metas (";
508  AppendColumnList(&query);
509  query.append(") SELECT ");
510  AppendColumnList(&query);
511  query.append(" FROM metas");
512  if (ExecQuery(load_dbhandle_, query.c_str()) != SQLITE_DONE)
513    return false;
514
515  // Drop metas.
516  SafeDropTable("metas");
517
518  // Rename temp_metas -> metas.
519  int result = ExecQuery(load_dbhandle_,
520                         "ALTER TABLE temp_metas RENAME TO metas");
521  if (result != SQLITE_DONE)
522    return false;
523  needs_column_refresh_ = false;
524  return true;
525}
526
527bool DirectoryBackingStore::LoadEntries(MetahandlesIndex* entry_bucket) {
528  string select;
529  select.reserve(kUpdateStatementBufferSize);
530  select.append("SELECT ");
531  AppendColumnList(&select);
532  select.append(" FROM metas ");
533  SQLStatement statement;
534  statement.prepare(load_dbhandle_, select.c_str());
535  base::hash_set<int64> handles;
536  EntryKernel* kernel = NULL;
537  int query_result;
538  while (SQLITE_ROW == (query_result = UnpackEntry(&statement, &kernel))) {
539    DCHECK(handles.insert(kernel->ref(META_HANDLE)).second);  // Only in debug.
540    entry_bucket->insert(kernel);
541  }
542  return SQLITE_DONE == query_result;
543}
544
// Populates |info| from the share_info, models, and metas tables.  Returns
// false if the share_info row or the MAX(metahandle) query is missing.
bool DirectoryBackingStore::LoadInfo(Directory::KernelLoadInfo* info) {
  {
    // Share-wide bookkeeping values; exactly one row is expected.
    SQLStatement query;
    query.prepare(load_dbhandle_,
                  "SELECT store_birthday, next_id, cache_guid "
                  "FROM share_info");
    if (SQLITE_ROW != query.step())
      return false;
    info->kernel_info.store_birthday = query.column_string(0);
    info->kernel_info.next_id = query.column_int64(1);
    info->cache_guid = query.column_string(2);
  }
  {
    // Per-datatype progress.  Rows whose model_id doesn't map to a known
    // ModelType (e.g. from a newer client) are silently skipped.
    SQLStatement query;
    query.prepare(load_dbhandle_,
        "SELECT model_id, last_download_timestamp, initial_sync_ended "
        "FROM models");
    while (SQLITE_ROW == query.step()) {
      string model_id;
      query.column_blob_as_string(0, &model_id);
      ModelType type = ModelIdToModelTypeEnum(model_id);
      if (type != UNSPECIFIED) {
        info->kernel_info.last_download_timestamp[type] = query.column_int64(1);
        info->kernel_info.initial_sync_ended[type] = query.column_bool(2);
      }
    }
  }
  {
    // Largest metahandle in use, so new handles can be allocated above it.
    SQLStatement query;
    query.prepare(load_dbhandle_,
                  "SELECT MAX(metahandle) FROM metas");
    if (SQLITE_ROW != query.step())
      return false;
    info->max_metahandle = query.column_int64(0);
  }
  return true;
}
582
583bool DirectoryBackingStore::SaveEntryToDB(const EntryKernel& entry) {
584  DCHECK(save_dbhandle_);
585  string query;
586  query.reserve(kUpdateStatementBufferSize);
587  query.append("INSERT OR REPLACE INTO metas ");
588  string values;
589  values.reserve(kUpdateStatementBufferSize);
590  values.append("VALUES ");
591  const char* separator = "( ";
592  int i = 0;
593  for (i = BEGIN_FIELDS; i < PROTO_FIELDS_END; ++i) {
594    query.append(separator);
595    values.append(separator);
596    separator = ", ";
597    query.append(ColumnName(i));
598    values.append("?");
599  }
600
601  query.append(" ) ");
602  values.append(" )");
603  query.append(values);
604  SQLStatement statement;
605  statement.prepare(save_dbhandle_, query.c_str());
606  BindFields(entry, &statement);
607  return (SQLITE_DONE == statement.step() &&
608          SQLITE_OK == statement.reset() &&
609          1 == statement.changes());
610}
611
// Purges metas rows that are deleted (is_del) and fully settled (neither
// unsynced nor an unapplied update), staging candidate handles in a temp
// death_row table.  Returns false on any statement failure.
// NOTE(review): Begin()'s result is unchecked and early returns skip an
// explicit rollback; presumably SQLTransaction's destructor handles rollback
// -- confirm.
bool DirectoryBackingStore::DropDeletedEntries() {
  static const char delete_metas[] = "DELETE FROM metas WHERE metahandle IN "
                                     "(SELECT metahandle from death_row)";
  // Put all statements into a transaction for better performance
  SQLTransaction transaction(load_dbhandle_);
  transaction.Begin();
  if (SQLITE_DONE != ExecQuery(
                         load_dbhandle_,
                         "CREATE TEMP TABLE death_row (metahandle BIGINT)")) {
    return false;
  }
  if (SQLITE_DONE != ExecQuery(load_dbhandle_,
                               "INSERT INTO death_row "
                               "SELECT metahandle from metas WHERE is_del > 0 "
                               " AND is_unsynced < 1"
                               " AND is_unapplied_update < 1")) {
    return false;
  }
  if (SQLITE_DONE != ExecQuery(load_dbhandle_, delete_metas)) {
    return false;
  }
  if (SQLITE_DONE != ExecQuery(load_dbhandle_, "DROP TABLE death_row")) {
    return false;
  }
  transaction.Commit();
  return true;
}
639
640int DirectoryBackingStore::SafeDropTable(const char* table_name) {
641  string query = "DROP TABLE IF EXISTS ";
642  query.append(table_name);
643  SQLStatement statement;
644  int result = statement.prepare(load_dbhandle_, query.data(),
645                                 query.size());
646  if (SQLITE_OK == result) {
647    result = statement.step();
648    if (SQLITE_DONE == result)
649      statement.finalize();
650  }
651
652  return result;
653}
654
655void DirectoryBackingStore::DropAllTables() {
656  SafeDropTable("metas");
657  SafeDropTable("temp_metas");
658  SafeDropTable("share_info");
659  SafeDropTable("temp_share_info");
660  SafeDropTable("share_version");
661  SafeDropTable("extended_attributes");
662  SafeDropTable("models");
663  needs_column_refresh_ = false;
664}
665
666// static
667ModelType DirectoryBackingStore::ModelIdToModelTypeEnum(
668    const string& model_id) {
669  sync_pb::EntitySpecifics specifics;
670  if (!specifics.ParseFromString(model_id))
671    return syncable::UNSPECIFIED;
672  return syncable::GetModelTypeFromSpecifics(specifics);
673}
674
675// static
676string DirectoryBackingStore::ModelTypeEnumToModelId(ModelType model_type) {
677  sync_pb::EntitySpecifics specifics;
678  syncable::AddDefaultExtensionValue(model_type, &specifics);
679  return specifics.SerializeAsString();
680}
681
// Shared migration helper: for every metas row, reads the current
// |specifics_column| blob plus the legacy |old_columns|, lets
// |handler_function| fold the old values into the parsed EntitySpecifics,
// then writes the re-serialized blob back by metahandle.  Returns false as
// soon as any UPDATE fails.
bool DirectoryBackingStore::MigrateToSpecifics(
    const char* old_columns,
    const char* specifics_column,
    void (*handler_function)(SQLStatement* old_value_query,
                             int old_value_column,
                             sync_pb::EntitySpecifics* mutable_new_value)) {
  std::string query_sql = StringPrintf("SELECT metahandle, %s, %s FROM metas",
                                       specifics_column, old_columns);
  std::string update_sql = StringPrintf(
      "UPDATE metas SET %s = ? WHERE metahandle = ?", specifics_column);
  SQLStatement query;
  query.prepare(load_dbhandle_, query_sql.c_str());
  while (query.step() == SQLITE_ROW) {
    int64 metahandle = query.column_int64(0);
    std::string new_value_bytes;
    query.column_blob_as_string(1, &new_value_bytes);
    sync_pb::EntitySpecifics new_value;
    new_value.ParseFromString(new_value_bytes);
    // Old columns start at result index 2 (after metahandle and specifics).
    handler_function(&query, 2, &new_value);
    new_value.SerializeToString(&new_value_bytes);

    SQLStatement update;
    update.prepare(load_dbhandle_, update_sql.data(), update_sql.length());
    update.bind_blob(0, new_value_bytes.data(), new_value_bytes.length());
    update.bind_int64(1, metahandle);
    if (update.step() != SQLITE_DONE) {
      NOTREACHED();
      return false;
    }
  }
  return true;
}
714
715bool DirectoryBackingStore::AddColumn(const ColumnSpec* column) {
716  SQLStatement add_column;
717  std::string sql = StringPrintf("ALTER TABLE metas ADD COLUMN %s %s",
718                                 column->name, column->spec);
719  add_column.prepare(load_dbhandle_, sql.c_str());
720  return add_column.step() == SQLITE_DONE;
721}
722
723bool DirectoryBackingStore::SetVersion(int version) {
724  SQLStatement statement;
725  statement.prepare(load_dbhandle_, "UPDATE share_version SET data = ?");
726  statement.bind_int(0, version);
727  return statement.step() == SQLITE_DONE;
728}
729
730int DirectoryBackingStore::GetVersion() {
731  if (!sqlite_utils::DoesSqliteTableExist(load_dbhandle_, "share_version"))
732    return 0;
733  SQLStatement version_query;
734  version_query.prepare(load_dbhandle_, "SELECT data from share_version");
735  if (SQLITE_ROW != version_query.step())
736    return 0;
737  int value = version_query.column_int(0);
738  if (version_query.reset() != SQLITE_OK)
739    return 0;
740  return value;
741}
742
743bool DirectoryBackingStore::MigrateVersion67To68() {
744  // This change simply removed three columns:
745  //   string NAME
746  //   string UNSANITIZED_NAME
747  //   string SERVER_NAME
748  // No data migration is necessary, but we should do a column refresh.
749  SetVersion(68);
750  needs_column_refresh_ = true;
751  return true;
752}
753
// 69 -> 70: added "unique_client_tag" and renamed "singleton_tag" to
// "unique_server_tag" (implemented as add-column + copy; the old column is
// dropped later by the column refresh).  Note the version is bumped before
// the columns are added.
bool DirectoryBackingStore::MigrateVersion69To70() {
  // Added "unique_client_tag", renamed "singleton_tag" to unique_server_tag
  SetVersion(70);
  // We use these metas column names but if in the future
  // we rename the column again, we need to inline the old
  // intermediate name / column spec.
  if (!AddColumn(&g_metas_columns[UNIQUE_SERVER_TAG])) {
    return false;
  }
  if (!AddColumn(&g_metas_columns[UNIQUE_CLIENT_TAG])) {
    return false;
  }
  needs_column_refresh_ = true;

  // Copy the legacy singleton_tag values into the new column.
  SQLStatement statement;
  statement.prepare(load_dbhandle_,
      "UPDATE metas SET unique_server_tag = singleton_tag");
  return statement.step() == SQLITE_DONE;
}
773
774namespace {
775
776// Callback passed to MigrateToSpecifics for the v68->v69 migration.  See
777// MigrateVersion68To69().
778void EncodeBookmarkURLAndFavicon(SQLStatement* old_value_query,
779                                 int old_value_column,
780                                 sync_pb::EntitySpecifics* mutable_new_value) {
781  // Extract data from the column trio we expect.
782  bool old_is_bookmark_object = old_value_query->column_bool(old_value_column);
783  std::string old_url = old_value_query->column_string(old_value_column + 1);
784  std::string old_favicon;
785  old_value_query->column_blob_as_string(old_value_column + 2, &old_favicon);
786  bool old_is_dir = old_value_query->column_bool(old_value_column + 3);
787
788  if (old_is_bookmark_object) {
789    sync_pb::BookmarkSpecifics* bookmark_data =
790        mutable_new_value->MutableExtension(sync_pb::bookmark);
791    if (!old_is_dir) {
792      bookmark_data->set_url(old_url);
793      bookmark_data->set_favicon(old_favicon);
794    }
795  }
796}
797
798}  // namespace
799
// 68 -> 69: folds the four legacy bookmark columns into serialized
// EntitySpecifics blobs (SPECIFICS / SERVER_SPECIFICS), then clears specifics
// on permanent folders.  Returns false on any step failure.
bool DirectoryBackingStore::MigrateVersion68To69() {
  // In Version 68, there were columns on table 'metas':
  //   string BOOKMARK_URL
  //   string SERVER_BOOKMARK_URL
  //   blob BOOKMARK_FAVICON
  //   blob SERVER_BOOKMARK_FAVICON
  // In version 69, these columns went away in favor of storing
  // a serialized EntrySpecifics protobuf in the columns:
  //   protobuf blob SPECIFICS
  //   protobuf blob SERVER_SPECIFICS
  // For bookmarks, EntrySpecifics is extended as per
  // bookmark_specifics.proto. This migration converts bookmarks from the
  // former scheme to the latter scheme.

  // First, add the two new columns to the schema.
  if (!AddColumn(&g_metas_columns[SPECIFICS]))
    return false;
  if (!AddColumn(&g_metas_columns[SERVER_SPECIFICS]))
    return false;

  // Next, fold data from the old columns into the new protobuf columns.
  if (!MigrateToSpecifics(("is_bookmark_object, bookmark_url, "
                           "bookmark_favicon, is_dir"),
                          "specifics",
                          &EncodeBookmarkURLAndFavicon)) {
    return false;
  }
  if (!MigrateToSpecifics(("server_is_bookmark_object, "
                           "server_bookmark_url, "
                           "server_bookmark_favicon, "
                           "server_is_dir"),
                          "server_specifics",
                          &EncodeBookmarkURLAndFavicon)) {
    return false;
  }

  // Lastly, fix up the "Google Chrome" folder, which is of the TOP_LEVEL_FOLDER
  // ModelType: it shouldn't have BookmarkSpecifics.
  SQLStatement clear_permanent_items;
  clear_permanent_items.prepare(load_dbhandle_,
      "UPDATE metas SET specifics = NULL, server_specifics = NULL WHERE "
      "singleton_tag IN ('google_chrome')");
  if (clear_permanent_items.step() != SQLITE_DONE)
    return false;

  SetVersion(69);
  needs_column_refresh_ = true;  // Trigger deletion of old columns.
  return true;
}
849
850// Version 71, the columns 'initial_sync_ended' and 'last_sync_timestamp'
851// were removed from the share_info table.  They were replaced by
852// the 'models' table, which has these values on a per-datatype basis.
853bool DirectoryBackingStore::MigrateVersion70To71() {
854  if (SQLITE_DONE != CreateModelsTable())
855    return false;
856
857  // Move data from the old share_info columns to the new models table.
858  {
859    SQLStatement fetch;
860    fetch.prepare(load_dbhandle_,
861        "SELECT last_sync_timestamp, initial_sync_ended FROM share_info");
862
863    if (SQLITE_ROW != fetch.step())
864      return false;
865    int64 last_sync_timestamp = fetch.column_int64(0);
866    bool initial_sync_ended = fetch.column_bool(1);
867    if (SQLITE_DONE != fetch.step())
868      return false;
869    SQLStatement update;
870    update.prepare(load_dbhandle_, "INSERT INTO models (model_id, "
871        "last_download_timestamp, initial_sync_ended) VALUES (?, ?, ?)");
872    string bookmark_model_id = ModelTypeEnumToModelId(BOOKMARKS);
873    update.bind_blob(0, bookmark_model_id.data(), bookmark_model_id.size());
874    update.bind_int64(1, last_sync_timestamp);
875    update.bind_bool(2, initial_sync_ended);
876    if (SQLITE_DONE != update.step())
877      return false;
878  }
879
880  // Drop the columns from the old share_info table via a temp table.
881  const bool kCreateAsTempShareInfo = true;
882  int result = CreateShareInfoTable(kCreateAsTempShareInfo);
883  if (result != SQLITE_DONE)
884    return false;
885  ExecQuery(load_dbhandle_,
886            "INSERT INTO temp_share_info (id, name, store_birthday, "
887            "db_create_version, db_create_time, next_id, cache_guid) "
888            "SELECT id, name, store_birthday, db_create_version, "
889            "db_create_time, next_id, cache_guid FROM share_info");
890  if (result != SQLITE_DONE)
891    return false;
892  SafeDropTable("share_info");
893  result = ExecQuery(load_dbhandle_,
894      "ALTER TABLE temp_share_info RENAME TO share_info");
895  if (result != SQLITE_DONE)
896    return false;
897  SetVersion(71);
898  return true;
899}
900
// In version 72, the 'extended_attributes' table was removed from the
// schema; dropping that table is the entire migration.
bool DirectoryBackingStore::MigrateVersion71To72() {
  SafeDropTable("extended_attributes");
  SetVersion(72);
  return true;
}
906
907int DirectoryBackingStore::CreateTables() {
908  LOG(INFO) << "First run, creating tables";
909  // Create two little tables share_version and share_info
910  int result = ExecQuery(load_dbhandle_,
911                         "CREATE TABLE share_version ("
912                         "id VARCHAR(128) primary key, data INT)");
913  if (result != SQLITE_DONE)
914    return result;
915  {
916    SQLStatement statement;
917    statement.prepare(load_dbhandle_, "INSERT INTO share_version VALUES(?, ?)");
918    statement.bind_string(0, dir_name_);
919    statement.bind_int(1, kCurrentDBVersion);
920    result = statement.step();
921  }
922  if (result != SQLITE_DONE)
923    return result;
924
925  result = CreateShareInfoTable(false);
926  if (result != SQLITE_DONE)
927    return result;
928  {
929    SQLStatement statement;
930    statement.prepare(load_dbhandle_, "INSERT INTO share_info VALUES"
931                                      "(?, "  // id
932                                      "?, "   // name
933                                      "?, "   // store_birthday
934                                      "?, "   // db_create_version
935                                      "?, "   // db_create_time
936                                      "-2, "  // next_id
937                                      "?)");   // cache_guid);
938    statement.bind_string(0, dir_name_);                   // id
939    statement.bind_string(1, dir_name_);                   // name
940    statement.bind_string(2, "");                          // store_birthday
941    statement.bind_string(3, SYNC_ENGINE_VERSION_STRING);  // db_create_version
942    statement.bind_int(4, static_cast<int32>(time(0)));    // db_create_time
943    statement.bind_string(5, GenerateCacheGUID());         // cache_guid
944    result = statement.step();
945  }
946  if (result != SQLITE_DONE)
947    return result;
948
949  result = CreateModelsTable();
950  if (result != SQLITE_DONE)
951    return result;
952  // Create the big metas table.
953  result = CreateMetasTable(false);
954  if (result != SQLITE_DONE)
955    return result;
956  {
957    // Insert the entry for the root into the metas table.
958    const int64 now = Now();
959    SQLStatement statement;
960    statement.prepare(load_dbhandle_,
961                      "INSERT INTO metas "
962                      "( id, metahandle, is_dir, ctime, mtime) "
963                      "VALUES ( \"r\", 1, 1, ?, ?)");
964    statement.bind_int64(0, now);
965    statement.bind_int64(1, now);
966    result = statement.step();
967  }
968  return result;
969}
970
971sqlite3* DirectoryBackingStore::LazyGetSaveHandle() {
972  if (!save_dbhandle_ && !OpenAndConfigureHandleHelper(&save_dbhandle_)) {
973    NOTREACHED() << "Unable to open handle for saving";
974    return NULL;
975  }
976  return save_dbhandle_;
977}
978
979int DirectoryBackingStore::CreateMetasTable(bool is_temporary) {
980  const char* name = is_temporary ? "temp_metas" : "metas";
981  string query = "CREATE TABLE ";
982  query.append(name);
983  query.append(ComposeCreateTableColumnSpecs());
984  return ExecQuery(load_dbhandle_, query.c_str());
985}
986
987int DirectoryBackingStore::CreateModelsTable() {
988  // This is the current schema for the Models table, from version 71
989  // onward.  If you change the schema, you'll probably want to double-check
990  // the use of this function in the v70-v71 migration.
991  return ExecQuery(load_dbhandle_,
992      "CREATE TABLE models ("
993      "model_id BLOB primary key, "
994      "last_download_timestamp INT, "
995      // Gets set if the syncer ever gets updates from the
996      // server and the server returns 0.  Lets us detect the
997      // end of the initial sync.
998      "initial_sync_ended BOOLEAN default 0)");
999}
1000
1001int DirectoryBackingStore::CreateShareInfoTable(bool is_temporary) {
1002  const char* name = is_temporary ? "temp_share_info" : "share_info";
1003  string query = "CREATE TABLE ";
1004  query.append(name);
1005  // This is the current schema for the ShareInfo table, from version 71
1006  // onward.  If you change the schema, you'll probably want to double-check
1007  // the use of this function in the v70-v71 migration.
1008  query.append(" ("
1009      "id TEXT primary key, "
1010      "name TEXT, "
1011      "store_birthday TEXT, "
1012      "db_create_version TEXT, "
1013      "db_create_time INT, "
1014      "next_id INT default -2, "
1015      "cache_guid TEXT)");
1016  return ExecQuery(load_dbhandle_, query.c_str());
1017}
1018
1019}  // namespace syncable
1020