//
// Copyright (C) 2012 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "update_engine/payload_consumer/delta_performer.h"

#include <endian.h>
#include <errno.h>
#include <linux/fs.h>

#include <algorithm>
#include <cstring>
#include <memory>
#include <string>
#include <vector>

#include <base/files/file_util.h>
#include <base/format_macros.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
#include <brillo/data_encoding.h>
#include <brillo/make_unique_ptr.h>
#include <google/protobuf/repeated_field.h>

#include "update_engine/common/constants.h"
#include "update_engine/common/hardware_interface.h"
#include "update_engine/common/prefs_interface.h"
#include "update_engine/common/subprocess.h"
#include "update_engine/common/terminator.h"
#include "update_engine/payload_consumer/bzip_extent_writer.h"
#include "update_engine/payload_consumer/download_action.h"
#include "update_engine/payload_consumer/extent_writer.h"
#if USE_MTD
#include "update_engine/payload_consumer/mtd_file_descriptor.h"
#endif
#include "update_engine/payload_consumer/payload_constants.h"
#include "update_engine/payload_consumer/payload_verifier.h"
#include "update_engine/payload_consumer/xz_extent_writer.h"

using google::protobuf::RepeatedPtrField;
using std::min;
using std::string;
using std::vector;

namespace chromeos_update_engine {

const uint64_t DeltaPerformer::kDeltaVersionOffset = sizeof(kDeltaMagic);
const uint64_t DeltaPerformer::kDeltaVersionSize = 8;
const uint64_t DeltaPerformer::kDeltaManifestSizeOffset =
    kDeltaVersionOffset + kDeltaVersionSize;
const uint64_t DeltaPerformer::kDeltaManifestSizeSize = 8;
const uint64_t DeltaPerformer::kDeltaMetadataSignatureSizeSize = 4;
const uint64_t DeltaPerformer::kMaxPayloadHeaderSize = 24;
const uint64_t DeltaPerformer::kSupportedMajorPayloadVersion = 2;
const uint32_t DeltaPerformer::kSupportedMinorPayloadVersion = 3;

const unsigned DeltaPerformer::kProgressLogMaxChunks = 10;
const unsigned DeltaPerformer::kProgressLogTimeoutSeconds = 30;
const unsigned DeltaPerformer::kProgressDownloadWeight = 50;
const unsigned DeltaPerformer::kProgressOperationsWeight = 50;

namespace {
const int kUpdateStateOperationInvalid = -1;
const int kMaxResumedUpdateFailures = 10;
#if USE_MTD
const int kUbiVolumeAttachTimeout = 5 * 60;
#endif

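// Returns a FileDescriptor suitable for |path|: when built with USE_MTD and
// |path| refers to a UBI or MTD device, a UBI/MTD-aware descriptor is
// returned; otherwise a plain EINTR-safe descriptor is used.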
FileDescriptorPtr CreateFileDescriptor(const char* path) {
  FileDescriptorPtr ret;
#if USE_MTD
  if (strstr(path, "/dev/ubi") == path) {
    if (!UbiFileDescriptor::IsUbi(path)) {
      // The volume might not have been attached at boot time.
      int volume_no;
      if (utils::SplitPartitionName(path, nullptr, &volume_no)) {
        utils::TryAttachingUbiVolume(volume_no, kUbiVolumeAttachTimeout);
      }
    }
    if (UbiFileDescriptor::IsUbi(path)) {
      LOG(INFO) << path << " is a UBI device.";
      ret.reset(new UbiFileDescriptor);
    }
  } else if (MtdFileDescriptor::IsMtd(path)) {
    LOG(INFO) << path << " is an MTD device.";
    ret.reset(new MtdFileDescriptor);
  } else {
    LOG(INFO) << path << " is neither an MTD nor a UBI device.";
#endif
    ret.reset(new EintrSafeFileDescriptor);
#if USE_MTD
  }
#endif
  return ret;
}

// Opens |path| with the given |mode|. On success returns an open
// FileDescriptor and sets *err to 0. On failure, sets *err to errno and
// returns nullptr.
FileDescriptorPtr OpenFile(const char* path, int mode, int* err) {
  // Try to mark the block device read-only based on the mode. Ignore any
  // failure since this won't work when passing regular files.
  utils::SetBlockDeviceReadOnly(path, (mode & O_ACCMODE) == O_RDONLY);

  FileDescriptorPtr fd = CreateFileDescriptor(path);
#if USE_MTD
  // On NAND devices, we can either read or write, but not both. So here we
  // use O_WRONLY.
  if (UbiFileDescriptor::IsUbi(path) || MtdFileDescriptor::IsMtd(path)) {
    mode = O_WRONLY;
  }
#endif
  if (!fd->Open(path, mode, 000)) {
    *err = errno;
    PLOG(ERROR) << "Unable to open file " << path;
    return nullptr;
  }
  *err = 0;
  return fd;
}
}  // namespace


// Computes the ratio of |part| and |total|, scaled to |norm|, using integer
// arithmetic.
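// The result truncates toward zero; for example, IntRatio(150, 200, 100)
// returns 75 and IntRatio(1, 3, 100) returns 33.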
static uint64_t IntRatio(uint64_t part, uint64_t total, uint64_t norm) {
  return part * norm / total;
}

void DeltaPerformer::LogProgress(const char* message_prefix) {
  // Format operations total count and percentage.
  string total_operations_str("?");
  string completed_percentage_str("");
  if (num_total_operations_) {
    total_operations_str = std::to_string(num_total_operations_);
    // Upcast to 64-bit to avoid overflow when computing the percentage.
    completed_percentage_str =
        base::StringPrintf(" (%" PRIu64 "%%)",
                           IntRatio(next_operation_num_, num_total_operations_,
                                    100));
  }

  // Format download total count and percentage.
  size_t payload_size = install_plan_->payload_size;
  string payload_size_str("?");
  string downloaded_percentage_str("");
  if (payload_size) {
    payload_size_str = std::to_string(payload_size);
    // Upcast to 64-bit to avoid overflow when computing the percentage.
    downloaded_percentage_str =
        base::StringPrintf(" (%" PRIu64 "%%)",
                           IntRatio(total_bytes_received_, payload_size, 100));
  }

  LOG(INFO) << (message_prefix ? message_prefix : "") << next_operation_num_
            << "/" << total_operations_str << " operations"
            << completed_percentage_str << ", " << total_bytes_received_
            << "/" << payload_size_str << " bytes downloaded"
            << downloaded_percentage_str << ", overall progress "
            << overall_progress_ << "%";
}

void DeltaPerformer::UpdateOverallProgress(bool force_log,
                                           const char* message_prefix) {
  // Compute our download and overall progress.
  unsigned new_overall_progress = 0;
  static_assert(kProgressDownloadWeight + kProgressOperationsWeight == 100,
                "Progress weights don't add up");
  // Only consider download progress if its total size is known; otherwise
  // adjust the operations weight to compensate for the absence of download
  // progress. Also, make sure to cap the download portion at
  // kProgressDownloadWeight, in case we end up downloading more than we
  // initially expected (this indicates a problem, but could generally happen).
  // TODO(garnold) the correction of operations weight when we do not have the
  // total payload size, as well as the conditional guard below, should both be
  // eliminated once we ensure that the payload_size in the install plan is
  // always given and is non-zero. This currently isn't the case during unit
  // tests (see chromium-os:37969).
  size_t payload_size = install_plan_->payload_size;
  unsigned actual_operations_weight = kProgressOperationsWeight;
  if (payload_size)
    new_overall_progress += min(
        static_cast<unsigned>(IntRatio(total_bytes_received_, payload_size,
                                       kProgressDownloadWeight)),
        kProgressDownloadWeight);
  else
    actual_operations_weight += kProgressDownloadWeight;

  // Only add completed operations if their total number is known; we definitely
  // expect an update to have at least one operation, so the expectation is that
  // this will eventually reach |actual_operations_weight|.
  if (num_total_operations_)
    new_overall_progress += IntRatio(next_operation_num_, num_total_operations_,
                                     actual_operations_weight);

  // Progress ratio cannot recede, unless our assumptions about the total
  // payload size, total number of operations, or the monotonicity of progress
  // are breached.
  if (new_overall_progress < overall_progress_) {
    LOG(WARNING) << "progress counter receded from " << overall_progress_
                 << "% down to " << new_overall_progress << "%; this is a bug";
    force_log = true;
  }
  overall_progress_ = new_overall_progress;

  // Update the progress chunk index and log as needed: if forced by the
  // caller, if we completed a progress chunk, or if a timeout has expired.
  base::Time curr_time = base::Time::Now();
  unsigned curr_progress_chunk =
      overall_progress_ * kProgressLogMaxChunks / 100;
  if (force_log || curr_progress_chunk > last_progress_chunk_ ||
      curr_time > forced_progress_log_time_) {
    forced_progress_log_time_ = curr_time + forced_progress_log_wait_;
    LogProgress(message_prefix);
  }
  last_progress_chunk_ = curr_progress_chunk;
}


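// Copies at most |max| - |buffer_.size()| bytes (and no more than |*count_p|)
// from |*bytes_p| into |buffer_|, advancing |*bytes_p| and decrementing
// |*count_p| accordingly. Returns the number of bytes copied.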
size_t DeltaPerformer::CopyDataToBuffer(const char** bytes_p, size_t* count_p,
                                        size_t max) {
  const size_t count = *count_p;
  if (!count)
    return 0;  // Special case shortcut.
  size_t read_len = min(count, max - buffer_.size());
  const char* bytes_start = *bytes_p;
  const char* bytes_end = bytes_start + read_len;
  buffer_.insert(buffer_.end(), bytes_start, bytes_end);
  *bytes_p = bytes_end;
  *count_p = count - read_len;
  return read_len;
}


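// Passes a successful |op_result| through unchanged. On failure, logs the
// failed operation type and index and sets |*error| to
// kDownloadOperationExecutionError before returning false.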
bool DeltaPerformer::HandleOpResult(bool op_result, const char* op_type_name,
                                    ErrorCode* error) {
  if (op_result)
    return true;

  LOG(ERROR) << "Failed to perform " << op_type_name << " operation "
             << next_operation_num_;
  *error = ErrorCode::kDownloadOperationExecutionError;
  return false;
}

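// Returns 0 on success; otherwise a negative value (a negated errno from
// closing a partition, or -1 if unused downloaded bytes had to be discarded).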
int DeltaPerformer::Close() {
  int err = -CloseCurrentPartition();
  LOG_IF(ERROR, !payload_hash_calculator_.Finalize() ||
                !signed_hash_calculator_.Finalize())
      << "Unable to finalize the hash.";
  if (!buffer_.empty()) {
    LOG(INFO) << "Discarding " << buffer_.size() << " unused downloaded bytes";
    if (err >= 0)
      err = 1;
  }
  return -err;
}

int DeltaPerformer::CloseCurrentPartition() {
  int err = 0;
  if (source_fd_ && !source_fd_->Close()) {
    err = errno;
    PLOG(ERROR) << "Error closing source partition";
    if (!err)
      err = 1;
  }
  source_fd_.reset();
  source_path_.clear();

  if (target_fd_ && !target_fd_->Close()) {
    err = errno;
    PLOG(ERROR) << "Error closing target partition";
    if (!err)
      err = 1;
  }
  target_fd_.reset();
  target_path_.clear();
  return -err;
}

bool DeltaPerformer::OpenCurrentPartition() {
  if (current_partition_ >= partitions_.size())
    return false;

  const PartitionUpdate& partition = partitions_[current_partition_];
  // Open source fds if we have a delta payload with minor version >= 2.
  if (install_plan_->payload_type == InstallPayloadType::kDelta &&
      GetMinorVersion() != kInPlaceMinorPayloadVersion) {
    source_path_ = install_plan_->partitions[current_partition_].source_path;
    int err;
    source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, &err);
    if (!source_fd_) {
      LOG(ERROR) << "Unable to open source partition "
                 << partition.partition_name() << " on slot "
                 << BootControlInterface::SlotName(install_plan_->source_slot)
                 << ", file " << source_path_;
      return false;
    }
  }

  target_path_ = install_plan_->partitions[current_partition_].target_path;
  int err;
  target_fd_ = OpenFile(target_path_.c_str(), O_RDWR, &err);
  if (!target_fd_) {
    LOG(ERROR) << "Unable to open target partition "
               << partition.partition_name() << " on slot "
               << BootControlInterface::SlotName(install_plan_->target_slot)
               << ", file " << target_path_;
    return false;
  }
  return true;
}

namespace {

void LogPartitionInfoHash(const PartitionInfo& info, const string& tag) {
  string sha256 = brillo::data_encoding::Base64Encode(info.hash());
  LOG(INFO) << "PartitionInfo " << tag << " sha256: " << sha256
            << " size: " << info.size();
}

void LogPartitionInfo(const vector<PartitionUpdate>& partitions) {
  for (const PartitionUpdate& partition : partitions) {
    LogPartitionInfoHash(partition.old_partition_info(),
                         "old " + partition.partition_name());
    LogPartitionInfoHash(partition.new_partition_info(),
                         "new " + partition.partition_name());
  }
}

}  // namespace

bool DeltaPerformer::GetMetadataSignatureSizeOffset(
    uint64_t* out_offset) const {
  if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
    return true;
  }
  return false;
}

bool DeltaPerformer::GetManifestOffset(uint64_t* out_offset) const {
  // The actual manifest begins right after the manifest size field, or right
  // after the metadata signature size field if major version >= 2.
  if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
    return true;
  }
  if (major_payload_version_ == kBrilloMajorPayloadVersion) {
    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize +
                  kDeltaMetadataSignatureSizeSize;
    return true;
  }
  LOG(ERROR) << "Unknown major payload version: " << major_payload_version_;
  return false;
}

uint64_t DeltaPerformer::GetMetadataSize() const {
  return metadata_size_;
}

uint64_t DeltaPerformer::GetMajorVersion() const {
  return major_payload_version_;
}

uint32_t DeltaPerformer::GetMinorVersion() const {
  if (manifest_.has_minor_version()) {
    return manifest_.minor_version();
  } else {
    return install_plan_->payload_type == InstallPayloadType::kDelta
               ? kSupportedMinorPayloadVersion
               : kFullPayloadMinorVersion;
  }
}

bool DeltaPerformer::GetManifest(DeltaArchiveManifest* out_manifest_p) const {
  if (!manifest_parsed_)
    return false;
  *out_manifest_p = manifest_;
  return true;
}

bool DeltaPerformer::IsHeaderParsed() const {
  return metadata_size_ != 0;
}

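// Payload layout, as parsed below: the delta magic, an 8-byte big-endian
// major version, an 8-byte big-endian manifest size, a 4-byte big-endian
// metadata signature size (major version 2 only), the manifest itself, and,
// for major version 2, the metadata signature.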
DeltaPerformer::MetadataParseResult DeltaPerformer::ParsePayloadMetadata(
    const brillo::Blob& payload, ErrorCode* error) {
  *error = ErrorCode::kSuccess;
  uint64_t manifest_offset;

  if (!IsHeaderParsed()) {
    // Ensure we have data to cover the major payload version.
    if (payload.size() < kDeltaManifestSizeOffset)
      return kMetadataParseInsufficientData;

    // Validate the magic string.
    if (memcmp(payload.data(), kDeltaMagic, sizeof(kDeltaMagic)) != 0) {
      LOG(ERROR) << "Bad payload format -- invalid delta magic.";
      *error = ErrorCode::kDownloadInvalidMetadataMagicString;
      return kMetadataParseError;
    }

    // Extract the payload version from the metadata.
    static_assert(sizeof(major_payload_version_) == kDeltaVersionSize,
                  "Major payload version size mismatch");
    memcpy(&major_payload_version_,
           &payload[kDeltaVersionOffset],
           kDeltaVersionSize);
    // Switch big endian to host byte order.
    major_payload_version_ = be64toh(major_payload_version_);

    if (major_payload_version_ != supported_major_version_ &&
        major_payload_version_ != kChromeOSMajorPayloadVersion) {
      LOG(ERROR) << "Bad payload format -- unsupported payload version: "
          << major_payload_version_;
      *error = ErrorCode::kUnsupportedMajorPayloadVersion;
      return kMetadataParseError;
    }

    // Get the manifest offset now that we have the payload version.
    if (!GetManifestOffset(&manifest_offset)) {
      *error = ErrorCode::kUnsupportedMajorPayloadVersion;
      return kMetadataParseError;
    }
    // Check again with the manifest offset.
    if (payload.size() < manifest_offset)
      return kMetadataParseInsufficientData;

    // Next, parse the manifest size.
    static_assert(sizeof(manifest_size_) == kDeltaManifestSizeSize,
                  "manifest_size size mismatch");
    memcpy(&manifest_size_,
           &payload[kDeltaManifestSizeOffset],
           kDeltaManifestSizeSize);
    manifest_size_ = be64toh(manifest_size_);  // Switch big endian to host.

    if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
      // Parse the metadata signature size.
      static_assert(sizeof(metadata_signature_size_) ==
                    kDeltaMetadataSignatureSizeSize,
                    "metadata_signature_size size mismatch");
      uint64_t metadata_signature_size_offset;
      if (!GetMetadataSignatureSizeOffset(&metadata_signature_size_offset)) {
        *error = ErrorCode::kError;
        return kMetadataParseError;
      }
      memcpy(&metadata_signature_size_,
             &payload[metadata_signature_size_offset],
             kDeltaMetadataSignatureSizeSize);
      metadata_signature_size_ = be32toh(metadata_signature_size_);
    }

    // If the metadata size is present in the install plan, check it
    // immediately, even before that many bytes of the payload have been
    // downloaded. This prevents any attack which relies on us downloading
    // data beyond the expected metadata size.
    metadata_size_ = manifest_offset + manifest_size_;
    if (install_plan_->hash_checks_mandatory) {
      if (install_plan_->metadata_size != metadata_size_) {
        LOG(ERROR) << "Mandatory metadata size in Omaha response ("
                   << install_plan_->metadata_size
                   << ") is missing/incorrect, actual = " << metadata_size_;
        *error = ErrorCode::kDownloadInvalidMetadataSize;
        return kMetadataParseError;
      }
    }
  }

  // Now that we have validated the metadata size, we should wait for the full
  // metadata and its signature (if it exists) to be read in before we can
  // parse it.
  if (payload.size() < metadata_size_ + metadata_signature_size_)
    return kMetadataParseInsufficientData;

  // Log whether we validated the size or are simply trusting the value in the
  // payload. This is logged here (after the full metadata has been received)
  // so that we log only once (instead of n times) if it takes n
  // DeltaPerformer::Write calls to download the full manifest.
  if (install_plan_->metadata_size == metadata_size_) {
    LOG(INFO) << "Manifest size in payload matches expected value from Omaha";
  } else {
    // For mandatory cases, we'd have already returned a kMetadataParseError
    // above. We'll be here only for non-mandatory cases. Just send a UMA stat.
    LOG(WARNING) << "Ignoring missing/incorrect metadata size ("
                 << install_plan_->metadata_size
                 << ") in Omaha response as validation is not mandatory. "
                 << "Trusting metadata size in payload = " << metadata_size_;
  }

  // We have the full metadata in |payload|. Verify its integrity
  // and authenticity based on the information we have in the Omaha response.
  *error = ValidateMetadataSignature(payload);
  if (*error != ErrorCode::kSuccess) {
    if (install_plan_->hash_checks_mandatory) {
      // The autoupdate_CatchBadSignatures test checks for this string
      // in log-files. Keep in sync.
      LOG(ERROR) << "Mandatory metadata signature validation failed";
      return kMetadataParseError;
    }

    // For non-mandatory cases, just send a UMA stat.
    LOG(WARNING) << "Ignoring metadata signature validation failures";
    *error = ErrorCode::kSuccess;
  }

  if (!GetManifestOffset(&manifest_offset)) {
    *error = ErrorCode::kUnsupportedMajorPayloadVersion;
    return kMetadataParseError;
  }
  // The payload metadata is deemed valid, so it's safe to parse the protobuf.
  if (!manifest_.ParseFromArray(&payload[manifest_offset], manifest_size_)) {
    LOG(ERROR) << "Unable to parse manifest in update file.";
    *error = ErrorCode::kDownloadManifestParseError;
    return kMetadataParseError;
  }

  manifest_parsed_ = true;
  return kMetadataParseSuccess;
}

// Wrapper around write. Returns true if all requested bytes
// were written, or false on any error, regardless of progress,
// and stores an action exit code in |error|.
bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode *error) {
  *error = ErrorCode::kSuccess;

  const char* c_bytes = reinterpret_cast<const char*>(bytes);

  // Update the total bytes downloaded count and the progress logs.
  total_bytes_received_ += count;
  UpdateOverallProgress(false, "Completed ");

  while (!manifest_valid_) {
    // Read data up to the needed limit; this is either the maximum payload
    // header size, or the full metadata size (once it becomes known).
    const bool do_read_header = !IsHeaderParsed();
    CopyDataToBuffer(&c_bytes, &count,
                     (do_read_header ? kMaxPayloadHeaderSize :
                      metadata_size_ + metadata_signature_size_));

    MetadataParseResult result = ParsePayloadMetadata(buffer_, error);
    if (result == kMetadataParseError)
      return false;
    if (result == kMetadataParseInsufficientData) {
      // If we just processed the header, make an attempt on the manifest.
      if (do_read_header && IsHeaderParsed())
        continue;

      return true;
    }

    // Checks the integrity of the payload manifest.
    if ((*error = ValidateManifest()) != ErrorCode::kSuccess)
      return false;
    manifest_valid_ = true;

    // Clear the download buffer.
    DiscardBuffer(false, metadata_size_);

    // This populates |partitions_| and the |install_plan.partitions| with the
    // list of partitions from the manifest.
    if (!ParseManifestPartitions(error))
      return false;

    num_total_operations_ = 0;
    for (const auto& partition : partitions_) {
      num_total_operations_ += partition.operations_size();
      acc_num_operations_.push_back(num_total_operations_);
    }

    LOG_IF(WARNING, !prefs_->SetInt64(kPrefsManifestMetadataSize,
                                      metadata_size_))
        << "Unable to save the manifest metadata size.";
    LOG_IF(WARNING, !prefs_->SetInt64(kPrefsManifestSignatureSize,
                                      metadata_signature_size_))
        << "Unable to save the manifest signature size.";

    if (!PrimeUpdateState()) {
      *error = ErrorCode::kDownloadStateInitializationError;
      LOG(ERROR) << "Unable to prime the update state.";
      return false;
    }

    if (!OpenCurrentPartition()) {
      *error = ErrorCode::kInstallDeviceOpenError;
      return false;
    }

    if (next_operation_num_ > 0)
      UpdateOverallProgress(true, "Resuming after ");
    LOG(INFO) << "Starting to apply update payload operations";
  }

  while (next_operation_num_ < num_total_operations_) {
    // Check if we should cancel the current attempt for any reason.
    // In this case, *error will have already been populated with the reason
    // why we're canceling.
    if (download_delegate_ && download_delegate_->ShouldCancel(error))
      return false;

    // We know there are more operations to perform because we didn't reach the
    // |num_total_operations_| limit yet.
    while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
      CloseCurrentPartition();
      current_partition_++;
      if (!OpenCurrentPartition()) {
        *error = ErrorCode::kInstallDeviceOpenError;
        return false;
      }
    }
    const size_t partition_operation_num = next_operation_num_ - (
        current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0);

    const InstallOperation& op =
        partitions_[current_partition_].operations(partition_operation_num);

    CopyDataToBuffer(&c_bytes, &count, op.data_length());

    // Check whether we received all of the next operation's data payload.
    if (!CanPerformInstallOperation(op))
      return true;

    // Validate the operation only if the metadata signature is present.
    // Otherwise, keep the old behavior. This serves as a knob to disable
    // the validation logic in case we find some regression after rollout.
    // NOTE: If hash checks are mandatory and metadata_signature is empty,
    // we would have already failed in the ParsePayloadMetadata method and
    // thus not even be here. So no need to handle that case again here.
    if (!install_plan_->metadata_signature.empty()) {
      // Note: ValidateOperationHash must be called only after
      // CanPerformInstallOperation has succeeded. Otherwise, we might fail
      // operations simply because there isn't yet sufficient data to compute
      // the proper hash.
      *error = ValidateOperationHash(op);
      if (*error != ErrorCode::kSuccess) {
        if (install_plan_->hash_checks_mandatory) {
          LOG(ERROR) << "Mandatory operation hash check failed";
          return false;
        }

        // For non-mandatory cases, just send a UMA stat.
        LOG(WARNING) << "Ignoring operation validation errors";
        *error = ErrorCode::kSuccess;
      }
    }

    // Makes sure we unblock exit when this operation completes.
    ScopedTerminatorExitUnblocker exit_unblocker =
        ScopedTerminatorExitUnblocker();  // Avoids a compiler unused var bug.

    bool op_result;
    switch (op.type()) {
      case InstallOperation::REPLACE:
      case InstallOperation::REPLACE_BZ:
      case InstallOperation::REPLACE_XZ:
        op_result = PerformReplaceOperation(op);
        break;
      case InstallOperation::ZERO:
      case InstallOperation::DISCARD:
        op_result = PerformZeroOrDiscardOperation(op);
        break;
      case InstallOperation::MOVE:
        op_result = PerformMoveOperation(op);
        break;
      case InstallOperation::BSDIFF:
        op_result = PerformBsdiffOperation(op);
        break;
      case InstallOperation::SOURCE_COPY:
        op_result = PerformSourceCopyOperation(op);
        break;
      case InstallOperation::SOURCE_BSDIFF:
        op_result = PerformSourceBsdiffOperation(op);
        break;
      default:
        op_result = false;
    }
    if (!HandleOpResult(op_result, InstallOperationTypeName(op.type()), error))
      return false;

    next_operation_num_++;
    UpdateOverallProgress(false, "Completed ");
    CheckpointUpdateProgress();
  }

  // In major version 2, we don't add a dummy operation to the payload.
  // If we have already extracted the signature, skip this step.
  if (major_payload_version_ == kBrilloMajorPayloadVersion &&
      manifest_.has_signatures_offset() && manifest_.has_signatures_size() &&
      signatures_message_data_.empty()) {
    if (manifest_.signatures_offset() != buffer_offset_) {
      LOG(ERROR) << "Payload signatures offset points to blob offset "
                 << manifest_.signatures_offset()
                 << " but signatures are expected at offset "
                 << buffer_offset_;
      *error = ErrorCode::kDownloadPayloadVerificationError;
      return false;
    }
    CopyDataToBuffer(&c_bytes, &count, manifest_.signatures_size());
    // Needs more data to cover the entire signature.
    if (buffer_.size() < manifest_.signatures_size())
      return true;
    if (!ExtractSignatureMessage()) {
      LOG(ERROR) << "Extracting the payload signature failed.";
      *error = ErrorCode::kDownloadPayloadVerificationError;
      return false;
    }
    DiscardBuffer(true, 0);
    // Since we extracted the SignatureMessage we need to advance the
    // checkpoint, otherwise we would reload the signature and try to extract
    // it again.
    CheckpointUpdateProgress();
  }

  return true;
}

bool DeltaPerformer::IsManifestValid() {
  return manifest_valid_;
}

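// Populates |partitions_| from the manifest. For a major version 1 payload,
// the legacy rootfs/kernel info and install operations are first converted
// into equivalent PartitionUpdate entries.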
bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) {
  if (major_payload_version_ == kBrilloMajorPayloadVersion) {
    partitions_.clear();
    for (const PartitionUpdate& partition : manifest_.partitions()) {
      partitions_.push_back(partition);
    }
    manifest_.clear_partitions();
  } else if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
    LOG(INFO) << "Converting update information from old format.";
    PartitionUpdate root_part;
    root_part.set_partition_name(kLegacyPartitionNameRoot);
#ifdef __ANDROID__
    LOG(WARNING) << "Legacy payload major version provided to an Android "
                    "build. Assuming no post-install. Please use major version "
                    "2 or newer.";
    root_part.set_run_postinstall(false);
#else
    root_part.set_run_postinstall(true);
#endif  // __ANDROID__
    if (manifest_.has_old_rootfs_info()) {
      *root_part.mutable_old_partition_info() = manifest_.old_rootfs_info();
      manifest_.clear_old_rootfs_info();
    }
    if (manifest_.has_new_rootfs_info()) {
      *root_part.mutable_new_partition_info() = manifest_.new_rootfs_info();
      manifest_.clear_new_rootfs_info();
    }
    *root_part.mutable_operations() = manifest_.install_operations();
    manifest_.clear_install_operations();
    partitions_.push_back(std::move(root_part));

    PartitionUpdate kern_part;
    kern_part.set_partition_name(kLegacyPartitionNameKernel);
    kern_part.set_run_postinstall(false);
    if (manifest_.has_old_kernel_info()) {
      *kern_part.mutable_old_partition_info() = manifest_.old_kernel_info();
      manifest_.clear_old_kernel_info();
    }
    if (manifest_.has_new_kernel_info()) {
      *kern_part.mutable_new_partition_info() = manifest_.new_kernel_info();
      manifest_.clear_new_kernel_info();
    }
    *kern_part.mutable_operations() = manifest_.kernel_install_operations();
    manifest_.clear_kernel_install_operations();
    partitions_.push_back(std::move(kern_part));
  }

  // TODO(deymo): Remove this block of code once we switch to optional
  // source partition verification. This list of partitions in the InstallPlan
  // is initialized with the expected hashes in payload major version 1, so we
  // need to check those now if they are already set. See b/23182225.
  if (!install_plan_->partitions.empty()) {
    if (!VerifySourcePartitions()) {
      *error = ErrorCode::kDownloadStateInitializationError;
      return false;
    }
  }

  // Fill in the InstallPlan::partitions based on the partitions from the
  // payload.
  install_plan_->partitions.clear();
  for (const auto& partition : partitions_) {
    InstallPlan::Partition install_part;
    install_part.name = partition.partition_name();
    install_part.run_postinstall =
        partition.has_run_postinstall() && partition.run_postinstall();
    if (install_part.run_postinstall) {
      install_part.postinstall_path =
          (partition.has_postinstall_path() ? partition.postinstall_path()
                                            : kPostinstallDefaultScript);
      install_part.filesystem_type = partition.filesystem_type();
    }

    if (partition.has_old_partition_info()) {
      const PartitionInfo& info = partition.old_partition_info();
      install_part.source_size = info.size();
      install_part.source_hash.assign(info.hash().begin(), info.hash().end());
    }

    if (!partition.has_new_partition_info()) {
      LOG(ERROR) << "Unable to get new partition hash info on partition "
                 << install_part.name << ".";
      *error = ErrorCode::kDownloadNewPartitionInfoError;
      return false;
    }
    const PartitionInfo& info = partition.new_partition_info();
    install_part.target_size = info.size();
    install_part.target_hash.assign(info.hash().begin(), info.hash().end());

    install_plan_->partitions.push_back(install_part);
  }

  if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) {
    LOG(ERROR) << "Unable to determine all the partition devices.";
    *error = ErrorCode::kInstallDeviceOpenError;
    return false;
  }
  LogPartitionInfo(partitions_);
  return true;
}

bool DeltaPerformer::CanPerformInstallOperation(
    const chromeos_update_engine::InstallOperation& operation) {
  // If the operation doesn't have a data blob, we can apply it right away.
  if (!operation.has_data_offset() && !operation.has_data_length())
    return true;

  // See if we have the entire data blob in the buffer.
  if (operation.data_offset() < buffer_offset_) {
    LOG(ERROR) << "we threw away data it seems?";
    return false;
  }

  return (operation.data_offset() + operation.data_length() <=
          buffer_offset_ + buffer_.size());
}

bool DeltaPerformer::PerformReplaceOperation(
    const InstallOperation& operation) {
  CHECK(operation.type() == InstallOperation::REPLACE ||
        operation.type() == InstallOperation::REPLACE_BZ ||
        operation.type() == InstallOperation::REPLACE_XZ);

  // Since we delete data off the beginning of the buffer as we use it,
  // the data we need should be exactly at the beginning of the buffer.
  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());

  // Extract the signature message if it's in this operation.
  if (ExtractSignatureMessageFromOperation(operation)) {
    // If this is a dummy replace operation, we ignore it after extracting the
    // signature.
    DiscardBuffer(true, 0);
    return true;
  }

  // Set up the ExtentWriter stack based on the operation type.
  std::unique_ptr<ExtentWriter> writer =
    brillo::make_unique_ptr(new ZeroPadExtentWriter(
      brillo::make_unique_ptr(new DirectExtentWriter())));

  if (operation.type() == InstallOperation::REPLACE_BZ) {
    writer.reset(new BzipExtentWriter(std::move(writer)));
  } else if (operation.type() == InstallOperation::REPLACE_XZ) {
    writer.reset(new XzExtentWriter(std::move(writer)));
  }

  // Create a vector of extents to pass to the ExtentWriter.
  vector<Extent> extents;
  for (int i = 0; i < operation.dst_extents_size(); i++) {
    extents.push_back(operation.dst_extents(i));
  }

  TEST_AND_RETURN_FALSE(writer->Init(target_fd_, extents, block_size_));
  TEST_AND_RETURN_FALSE(writer->Write(buffer_.data(), operation.data_length()));
  TEST_AND_RETURN_FALSE(writer->End());

  // Update the buffer.
  DiscardBuffer(true, buffer_.size());
  return true;
}

bool DeltaPerformer::PerformZeroOrDiscardOperation(
    const InstallOperation& operation) {
  CHECK(operation.type() == InstallOperation::DISCARD ||
        operation.type() == InstallOperation::ZERO);

  // These operations have no blob.
  TEST_AND_RETURN_FALSE(!operation.has_data_offset());
  TEST_AND_RETURN_FALSE(!operation.has_data_length());

#ifdef BLKZEROOUT
  bool attempt_ioctl = true;
  int request =
      (operation.type() == InstallOperation::ZERO ? BLKZEROOUT : BLKDISCARD);
#else  // !defined(BLKZEROOUT)
  bool attempt_ioctl = false;
  int request = 0;
#endif  // !defined(BLKZEROOUT)

  brillo::Blob zeros;
  for (int i = 0; i < operation.dst_extents_size(); i++) {
    Extent extent = operation.dst_extents(i);
    const uint64_t start = extent.start_block() * block_size_;
    const uint64_t length = extent.num_blocks() * block_size_;
    if (attempt_ioctl) {
      int result = 0;
      if (target_fd_->BlkIoctl(request, start, length, &result) && result == 0)
        continue;
      attempt_ioctl = false;
      zeros.resize(16 * block_size_);
    }
    // In case of failure, we fall back to writing 0 to the selected region.
    for (uint64_t offset = 0; offset < length; offset += zeros.size()) {
      uint64_t chunk_length = min(length - offset,
                                  static_cast<uint64_t>(zeros.size()));
      TEST_AND_RETURN_FALSE(utils::PWriteAll(
          target_fd_, zeros.data(), chunk_length, start + offset));
    }
  }
  return true;
}

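// MOVE reads the source blocks from the target partition itself (the
// in-place update path) and rewrites them at the destination extents.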
bool DeltaPerformer::PerformMoveOperation(const InstallOperation& operation) {
  // Calculate buffer size. Note, this function doesn't do a sliding
  // window to copy in case the source and destination blocks overlap.
  // If we wanted to do a sliding window, we could program the server
  // to generate deltas that effectively did a sliding window.

  uint64_t blocks_to_read = 0;
  for (int i = 0; i < operation.src_extents_size(); i++)
    blocks_to_read += operation.src_extents(i).num_blocks();

  uint64_t blocks_to_write = 0;
  for (int i = 0; i < operation.dst_extents_size(); i++)
    blocks_to_write += operation.dst_extents(i).num_blocks();

  DCHECK_EQ(blocks_to_write, blocks_to_read);
  brillo::Blob buf(blocks_to_write * block_size_);

  // Read in bytes.
  ssize_t bytes_read = 0;
  for (int i = 0; i < operation.src_extents_size(); i++) {
    ssize_t bytes_read_this_iteration = 0;
    const Extent& extent = operation.src_extents(i);
    const size_t bytes = extent.num_blocks() * block_size_;
    TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole);
    TEST_AND_RETURN_FALSE(utils::PReadAll(target_fd_,
                                          &buf[bytes_read],
                                          bytes,
                                          extent.start_block() * block_size_,
                                          &bytes_read_this_iteration));
    TEST_AND_RETURN_FALSE(
        bytes_read_this_iteration == static_cast<ssize_t>(bytes));
    bytes_read += bytes_read_this_iteration;
  }

  // Write bytes out.
  ssize_t bytes_written = 0;
  for (int i = 0; i < operation.dst_extents_size(); i++) {
    const Extent& extent = operation.dst_extents(i);
    const size_t bytes = extent.num_blocks() * block_size_;
    TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole);
    TEST_AND_RETURN_FALSE(utils::PWriteAll(target_fd_,
                                           &buf[bytes_written],
                                           bytes,
                                           extent.start_block() * block_size_));
    bytes_written += bytes;
  }
  DCHECK_EQ(bytes_written, bytes_read);
  DCHECK_EQ(bytes_written, static_cast<ssize_t>(buf.size()));
  return true;
}

namespace {

// Takes |extents| and fills an empty vector |blocks| with a block index for
// each block in |extents|. For example, [(3, 2), (8, 1)] would give [3, 4, 8].
void ExtentsToBlocks(const RepeatedPtrField<Extent>& extents,
                     vector<uint64_t>* blocks) {
  for (Extent ext : extents) {
    for (uint64_t j = 0; j < ext.num_blocks(); j++)
      blocks->push_back(ext.start_block() + j);
  }
}

// Takes |extents| and returns the number of blocks in those extents.
uint64_t GetBlockCount(const RepeatedPtrField<Extent>& extents) {
  uint64_t sum = 0;
  for (Extent ext : extents) {
    sum += ext.num_blocks();
  }
  return sum;
}

// Compares |calculated_hash| with the source hash in |operation|; returns
// false and dumps both hashes if they don't match.
bool ValidateSourceHash(const brillo::Blob& calculated_hash,
                        const InstallOperation& operation) {
  brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
                                    operation.src_sha256_hash().end());
  if (calculated_hash != expected_source_hash) {
    LOG(ERROR) << "Hash verification failed. Expected hash = ";
    utils::HexDumpVector(expected_source_hash);
    LOG(ERROR) << "Calculated hash = ";
    utils::HexDumpVector(calculated_hash);
    return false;
  }
  return true;
}

}  // namespace

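// SOURCE_COPY copies the blocks listed in src_extents from the source
// partition to the dst_extents in the target partition, one block at a time,
// hashing the source data when the operation carries a source hash.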
bool DeltaPerformer::PerformSourceCopyOperation(
    const InstallOperation& operation) {
  if (operation.has_src_length())
    TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
  if (operation.has_dst_length())
    TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);

  uint64_t blocks_to_read = GetBlockCount(operation.src_extents());
  uint64_t blocks_to_write = GetBlockCount(operation.dst_extents());
  TEST_AND_RETURN_FALSE(blocks_to_write == blocks_to_read);

  // Create vectors of all the individual src/dst blocks.
  vector<uint64_t> src_blocks;
  vector<uint64_t> dst_blocks;
  ExtentsToBlocks(operation.src_extents(), &src_blocks);
  ExtentsToBlocks(operation.dst_extents(), &dst_blocks);
  DCHECK_EQ(src_blocks.size(), blocks_to_read);
  DCHECK_EQ(src_blocks.size(), dst_blocks.size());

  brillo::Blob buf(block_size_);
  ssize_t bytes_read = 0;
  HashCalculator source_hasher;
  // Read/write one block at a time.
  for (uint64_t i = 0; i < blocks_to_read; i++) {
    ssize_t bytes_read_this_iteration = 0;
    uint64_t src_block = src_blocks[i];
    uint64_t dst_block = dst_blocks[i];

    // Read in bytes.
    TEST_AND_RETURN_FALSE(
        utils::PReadAll(source_fd_,
                        buf.data(),
                        block_size_,
                        src_block * block_size_,
                        &bytes_read_this_iteration));

    // Write bytes out.
    TEST_AND_RETURN_FALSE(
        utils::PWriteAll(target_fd_,
                         buf.data(),
                         block_size_,
                         dst_block * block_size_));

    bytes_read += bytes_read_this_iteration;
    TEST_AND_RETURN_FALSE(bytes_read_this_iteration ==
                          static_cast<ssize_t>(block_size_));

    if (operation.has_src_sha256_hash())
      TEST_AND_RETURN_FALSE(source_hasher.Update(buf.data(), buf.size()));
  }

  if (operation.has_src_sha256_hash()) {
    TEST_AND_RETURN_FALSE(source_hasher.Finalize());
    TEST_AND_RETURN_FALSE(
        ValidateSourceHash(source_hasher.raw_hash(), operation));
  }

  DCHECK_EQ(bytes_read, static_cast<ssize_t>(blocks_to_read * block_size_));
  return true;
}

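// Converts |extents| into the comma-separated "offset:length" list that
// bspatch expects. For example, with a 4096-byte block size and a
// |full_length| of 8192, extents starting at blocks 2 and 10 (one block each)
// become "8192:4096,40960:4096".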
bool DeltaPerformer::ExtentsToBsdiffPositionsString(
    const RepeatedPtrField<Extent>& extents,
    uint64_t block_size,
    uint64_t full_length,
    string* positions_string) {
  string ret;
  uint64_t length = 0;
  for (int i = 0; i < extents.size(); i++) {
    Extent extent = extents.Get(i);
    int64_t start = extent.start_block() * block_size;
    uint64_t this_length = min(full_length - length,
                               extent.num_blocks() * block_size);
    ret += base::StringPrintf("%" PRIi64 ":%" PRIu64 ",", start, this_length);
    length += this_length;
  }
  TEST_AND_RETURN_FALSE(length == full_length);
  if (!ret.empty())
    ret.resize(ret.size() - 1);  // Strip trailing comma off
  *positions_string = ret;
  return true;
}

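// BSDIFF writes the downloaded patch blob to a temporary file and invokes
// bspatch with the source/target extents encoded as position strings; the
// target partition serves as both input and output.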
bool DeltaPerformer::PerformBsdiffOperation(const InstallOperation& operation) {
  // Since we delete data off the beginning of the buffer as we use it,
  // the data we need should be exactly at the beginning of the buffer.
  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());

  string input_positions;
  TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.src_extents(),
                                                       block_size_,
                                                       operation.src_length(),
                                                       &input_positions));
  string output_positions;
  TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.dst_extents(),
                                                       block_size_,
                                                       operation.dst_length(),
                                                       &output_positions));

  string temp_filename;
  TEST_AND_RETURN_FALSE(utils::MakeTempFile("au_patch.XXXXXX",
                                            &temp_filename,
                                            nullptr));
  ScopedPathUnlinker path_unlinker(temp_filename);
  {
    int fd = open(temp_filename.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0644);
    ScopedFdCloser fd_closer(&fd);
    TEST_AND_RETURN_FALSE(
        utils::WriteAll(fd, buffer_.data(), operation.data_length()));
  }

  // Update the buffer to release the patch data memory as soon as the patch
  // file is written out.
  DiscardBuffer(true, buffer_.size());

  vector<string> cmd{kBspatchPath, target_path_, target_path_, temp_filename,
                     input_positions, output_positions};

  int return_code = 0;
  TEST_AND_RETURN_FALSE(
      Subprocess::SynchronousExecFlags(cmd, Subprocess::kSearchPath,
                                       &return_code, nullptr));
  TEST_AND_RETURN_FALSE(return_code == 0);

  if (operation.dst_length() % block_size_) {
    // Zero out rest of final block.
    // TODO(adlr): build this into bspatch; it's more efficient that way.
    const Extent& last_extent =
        operation.dst_extents(operation.dst_extents_size() - 1);
    const uint64_t end_byte =
        (last_extent.start_block() + last_extent.num_blocks()) * block_size_;
    const uint64_t begin_byte =
        end_byte - (block_size_ - operation.dst_length() % block_size_);
    brillo::Blob zeros(end_byte - begin_byte);
    TEST_AND_RETURN_FALSE(utils::PWriteAll(
        target_fd_, zeros.data(), end_byte - begin_byte, begin_byte));
  }
  return true;
}

bool DeltaPerformer::PerformSourceBsdiffOperation(
    const InstallOperation& operation) {
  // Since we delete data off the beginning of the buffer as we use it,
  // the data we need should be exactly at the beginning of the buffer.
  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
  if (operation.has_src_length())
    TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
  if (operation.has_dst_length())
    TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);

  if (operation.has_src_sha256_hash()) {
    HashCalculator source_hasher;
    const uint64_t kMaxBlocksToRead = 512;  // 2MB if block size is 4KB
    brillo::Blob buf(kMaxBlocksToRead * block_size_);
    for (const Extent& extent : operation.src_extents()) {
      for (uint64_t i = 0; i < extent.num_blocks(); i += kMaxBlocksToRead) {
        uint64_t blocks_to_read =
            min(kMaxBlocksToRead, extent.num_blocks() - i);
        ssize_t bytes_to_read = blocks_to_read * block_size_;
        ssize_t bytes_read_this_iteration = 0;
        TEST_AND_RETURN_FALSE(
            utils::PReadAll(source_fd_, buf.data(), bytes_to_read,
                            (extent.start_block() + i) * block_size_,
                            &bytes_read_this_iteration));
        TEST_AND_RETURN_FALSE(bytes_read_this_iteration == bytes_to_read);
        TEST_AND_RETURN_FALSE(source_hasher.Update(buf.data(), bytes_to_read));
      }
    }
    TEST_AND_RETURN_FALSE(source_hasher.Finalize());
    TEST_AND_RETURN_FALSE(
        ValidateSourceHash(source_hasher.raw_hash(), operation));
  }

  string input_positions;
  TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.src_extents(),
                                                       block_size_,
                                                       operation.src_length(),
                                                       &input_positions));
  string output_positions;
  TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.dst_extents(),
                                                       block_size_,
                                                       operation.dst_length(),
                                                       &output_positions));

  string temp_filename;
  TEST_AND_RETURN_FALSE(utils::MakeTempFile("au_patch.XXXXXX",
                                            &temp_filename,
                                            nullptr));
  ScopedPathUnlinker path_unlinker(temp_filename);
  {
    int fd = open(temp_filename.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0644);
    ScopedFdCloser fd_closer(&fd);
    TEST_AND_RETURN_FALSE(
        utils::WriteAll(fd, buffer_.data(), operation.data_length()));
  }

  // Update the buffer to release the patch data memory as soon as the patch
  // file is written out.
  DiscardBuffer(true, buffer_.size());

  vector<string> cmd{kBspatchPath, source_path_, target_path_, temp_filename,
                     input_positions, output_positions};

  int return_code = 0;
  TEST_AND_RETURN_FALSE(
      Subprocess::SynchronousExecFlags(cmd, Subprocess::kSearchPath,
                                       &return_code, nullptr));
  TEST_AND_RETURN_FALSE(return_code == 0);
  return true;
}

bool DeltaPerformer::ExtractSignatureMessageFromOperation(
    const InstallOperation& operation) {
  if (operation.type() != InstallOperation::REPLACE ||
      !manifest_.has_signatures_offset() ||
      manifest_.signatures_offset() != operation.data_offset()) {
    return false;
  }
  TEST_AND_RETURN_FALSE(manifest_.has_signatures_size() &&
                        manifest_.signatures_size() == operation.data_length());
  TEST_AND_RETURN_FALSE(ExtractSignatureMessage());
  return true;
}

bool DeltaPerformer::ExtractSignatureMessage() {
  TEST_AND_RETURN_FALSE(signatures_message_data_.empty());
  TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset());
  TEST_AND_RETURN_FALSE(buffer_.size() >= manifest_.signatures_size());
  signatures_message_data_.assign(
      buffer_.begin(),
      buffer_.begin() + manifest_.signatures_size());

  // Save the signature blob because if the update is interrupted after the
  // download phase we don't go through this path anymore. Some alternatives to
  // consider:
  //
  // 1. On resume, re-download the signature blob from the server and re-verify
  // it.
  //
  // 2. Verify the signature as soon as it's received and don't checkpoint the
  // blob and the signed sha-256 context.
  LOG_IF(WARNING, !prefs_->SetString(kPrefsUpdateStateSignatureBlob,
                                     string(signatures_message_data_.begin(),
                                            signatures_message_data_.end())))
      << "Unable to store the signature blob.";

  LOG(INFO) << "Extracted signature data of size "
            << manifest_.signatures_size() << " at "
            << manifest_.signatures_offset();
  return true;
}

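// The public key from the Omaha response is only used on non-official (test)
// builds that do not already have a key installed at |public_key_path_| and
// only when the response actually carries a key.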
bool DeltaPerformer::GetPublicKeyFromResponse(base::FilePath *out_tmp_key) {
  if (hardware_->IsOfficialBuild() ||
      utils::FileExists(public_key_path_.c_str()) ||
      install_plan_->public_key_rsa.empty())
    return false;

  if (!utils::DecodeAndStoreBase64String(install_plan_->public_key_rsa,
                                         out_tmp_key))
    return false;

  return true;
}

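// The metadata signature can come from two places: the base64-encoded value
// in the Omaha response, or the signature blob placed right after the
// metadata in major version 2 payloads. At least one of them is required
// when hash checks are mandatory.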
1291ErrorCode DeltaPerformer::ValidateMetadataSignature(
1292    const brillo::Blob& payload) {
1293  if (payload.size() < metadata_size_ + metadata_signature_size_)
1294    return ErrorCode::kDownloadMetadataSignatureError;
1295
1296  brillo::Blob metadata_signature_blob, metadata_signature_protobuf_blob;
1297  if (!install_plan_->metadata_signature.empty()) {
1298    // Convert base64-encoded signature to raw bytes.
1299    if (!brillo::data_encoding::Base64Decode(
1300        install_plan_->metadata_signature, &metadata_signature_blob)) {
1301      LOG(ERROR) << "Unable to decode base64 metadata signature: "
1302                 << install_plan_->metadata_signature;
1303      return ErrorCode::kDownloadMetadataSignatureError;
1304    }
1305  } else if (major_payload_version_ == kBrilloMajorPayloadVersion) {
1306    metadata_signature_protobuf_blob.assign(payload.begin() + metadata_size_,
1307                                            payload.begin() + metadata_size_ +
1308                                            metadata_signature_size_);
1309  }
1310
1311  if (metadata_signature_blob.empty() &&
1312      metadata_signature_protobuf_blob.empty()) {
1313    if (install_plan_->hash_checks_mandatory) {
1314      LOG(ERROR) << "Missing mandatory metadata signature in both Omaha "
1315                 << "response and payload.";
1316      return ErrorCode::kDownloadMetadataSignatureMissingError;
1317    }
1318
1319    LOG(WARNING) << "Cannot validate metadata as the signature is empty";
1320    return ErrorCode::kSuccess;
1321  }
1322
1323  // See if we should use the public RSA key in the Omaha response.
1324  base::FilePath path_to_public_key(public_key_path_);
1325  base::FilePath tmp_key;
1326  if (GetPublicKeyFromResponse(&tmp_key))
1327    path_to_public_key = tmp_key;
1328  ScopedPathUnlinker tmp_key_remover(tmp_key.value());
1329  if (tmp_key.empty())
1330    tmp_key_remover.set_should_remove(false);
1331
1332  LOG(INFO) << "Verifying metadata hash signature using public key: "
1333            << path_to_public_key.value();
1334
1335  HashCalculator metadata_hasher;
1336  metadata_hasher.Update(payload.data(), metadata_size_);
1337  if (!metadata_hasher.Finalize()) {
1338    LOG(ERROR) << "Unable to compute actual hash of manifest";
1339    return ErrorCode::kDownloadMetadataSignatureVerificationError;
1340  }
1341
  brillo::Blob calculated_metadata_hash = metadata_hasher.raw_hash();
  PayloadVerifier::PadRSA2048SHA256Hash(&calculated_metadata_hash);
  if (calculated_metadata_hash.empty()) {
    LOG(ERROR) << "Computed actual hash of metadata is empty.";
    return ErrorCode::kDownloadMetadataSignatureVerificationError;
  }

  if (!metadata_signature_blob.empty()) {
    brillo::Blob expected_metadata_hash;
    if (!PayloadVerifier::GetRawHashFromSignature(metadata_signature_blob,
                                                  path_to_public_key.value(),
                                                  &expected_metadata_hash)) {
      LOG(ERROR) << "Unable to compute expected hash from metadata signature";
      return ErrorCode::kDownloadMetadataSignatureError;
    }
    if (calculated_metadata_hash != expected_metadata_hash) {
      LOG(ERROR) << "Manifest hash verification failed. Expected hash = ";
      utils::HexDumpVector(expected_metadata_hash);
      LOG(ERROR) << "Calculated hash = ";
      utils::HexDumpVector(calculated_metadata_hash);
      return ErrorCode::kDownloadMetadataSignatureMismatch;
    }
  } else {
    if (!PayloadVerifier::VerifySignature(metadata_signature_protobuf_blob,
                                          path_to_public_key.value(),
                                          calculated_metadata_hash)) {
      LOG(ERROR) << "Manifest hash verification failed.";
      return ErrorCode::kDownloadMetadataSignatureMismatch;
    }
  }

  // The autoupdate_CatchBadSignatures test checks for this string in
  // log-files. Keep in sync.
  LOG(INFO) << "Metadata hash signature matches value in Omaha response.";
  return ErrorCode::kSuccess;
}

ErrorCode DeltaPerformer::ValidateManifest() {
  // Perform assorted checks to validate the manifest: make sure it matches
  // data from other sources and that it is a supported version.

  bool has_old_fields =
      (manifest_.has_old_kernel_info() || manifest_.has_old_rootfs_info());
  for (const PartitionUpdate& partition : manifest_.partitions()) {
    has_old_fields = has_old_fields || partition.has_old_partition_info();
  }

  // The presence of an old partition hash is the sole indicator for a delta
  // update.
  InstallPayloadType actual_payload_type =
      has_old_fields ? InstallPayloadType::kDelta : InstallPayloadType::kFull;

  if (install_plan_->payload_type == InstallPayloadType::kUnknown) {
    LOG(INFO) << "Detected a '"
              << InstallPayloadTypeToString(actual_payload_type)
              << "' payload.";
    install_plan_->payload_type = actual_payload_type;
  } else if (install_plan_->payload_type != actual_payload_type) {
    LOG(ERROR) << "InstallPlan expected a '"
               << InstallPayloadTypeToString(install_plan_->payload_type)
               << "' payload but the downloaded manifest contains a '"
               << InstallPayloadTypeToString(actual_payload_type)
               << "' payload.";
    return ErrorCode::kPayloadMismatchedType;
  }

  // Check that the minor version is compatible.
  if (actual_payload_type == InstallPayloadType::kFull) {
    if (manifest_.minor_version() != kFullPayloadMinorVersion) {
      LOG(ERROR) << "Manifest contains minor version "
                 << manifest_.minor_version()
                 << ", but all full payloads should have version "
                 << kFullPayloadMinorVersion << ".";
      return ErrorCode::kUnsupportedMinorPayloadVersion;
    }
  } else {
    if (manifest_.minor_version() != supported_minor_version_) {
      LOG(ERROR) << "Manifest contains minor version "
                 << manifest_.minor_version()
                 << ", not the supported "
                 << supported_minor_version_;
      return ErrorCode::kUnsupportedMinorPayloadVersion;
    }
  }

  if (major_payload_version_ != kChromeOSMajorPayloadVersion) {
    if (manifest_.has_old_rootfs_info() ||
        manifest_.has_new_rootfs_info() ||
        manifest_.has_old_kernel_info() ||
        manifest_.has_new_kernel_info() ||
        manifest_.install_operations_size() != 0 ||
        manifest_.kernel_install_operations_size() != 0) {
      LOG(ERROR) << "Manifest contains deprecated fields that are only "
                 << "supported in major payload version 1, but the payload "
                 << "major version is " << major_payload_version_;
      return ErrorCode::kPayloadMismatchedType;
    }
  }

  // TODO(garnold) we should be adding more and more manifest checks, such as
  // partition boundaries etc (see chromium-os:37661).

  return ErrorCode::kSuccess;
}

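// Validates the SHA-256 hash of a single operation's data blob, which at this
// point is expected to be fully present in |buffer_|. Operations without a
// data blob are covered by the already-validated metadata and are accepted
// as-is; the trailing signature operation is also exempt because its hash is
// not known when the manifest is generated.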
ErrorCode DeltaPerformer::ValidateOperationHash(
    const InstallOperation& operation) {
  if (!operation.data_sha256_hash().size()) {
    if (!operation.data_length()) {
      // Operations that do not have any data blob won't have any operation
      // hash either. So, these operations are always considered validated
      // since the metadata that contains all the non-data-blob portions of
      // the operation has already been validated. This is true for both HTTP
      // and HTTPS cases.
      return ErrorCode::kSuccess;
    }

    // No hash is present for an operation that has data blobs. This shouldn't
    // happen normally for any client that has this code, because the
    // corresponding update should have been produced with the operation
    // hashes. So if it happens it means either we've turned operation hash
    // generation off in DeltaDiffGenerator or it's a regression of some sort.
    // One caveat though: The last operation is a dummy signature operation
    // that doesn't have a hash at the time the manifest is created. So we
    // should not complain about that operation. This operation can be
    // recognized by the fact that its data offset matches the signatures
    // offset in the manifest.
    if (manifest_.signatures_offset() &&
        manifest_.signatures_offset() == operation.data_offset()) {
      LOG(INFO) << "Skipping hash verification for signature operation "
                << next_operation_num_ + 1;
    } else {
      if (install_plan_->hash_checks_mandatory) {
        LOG(ERROR) << "Missing mandatory operation hash for operation "
                   << next_operation_num_ + 1;
        return ErrorCode::kDownloadOperationHashMissingError;
      }

      LOG(WARNING) << "Cannot validate operation " << next_operation_num_ + 1
                   << " as there's no operation hash in manifest";
    }
    return ErrorCode::kSuccess;
  }

  brillo::Blob expected_op_hash;
  expected_op_hash.assign(operation.data_sha256_hash().data(),
                          (operation.data_sha256_hash().data() +
                           operation.data_sha256_hash().size()));

  HashCalculator operation_hasher;
  operation_hasher.Update(buffer_.data(), operation.data_length());
  if (!operation_hasher.Finalize()) {
    LOG(ERROR) << "Unable to compute actual hash of operation "
               << next_operation_num_;
    return ErrorCode::kDownloadOperationHashVerificationError;
  }

  brillo::Blob calculated_op_hash = operation_hasher.raw_hash();
  if (calculated_op_hash != expected_op_hash) {
    LOG(ERROR) << "Hash verification failed for operation "
               << next_operation_num_ << ". Expected hash = ";
    utils::HexDumpVector(expected_op_hash);
    LOG(ERROR) << "Calculated hash over " << operation.data_length()
               << " bytes at offset: " << operation.data_offset() << " = ";
    utils::HexDumpVector(calculated_op_hash);
    return ErrorCode::kDownloadOperationHashMismatch;
  }

  return ErrorCode::kSuccess;
}

#define TEST_AND_RETURN_VAL(_retval, _condition)                \
  do {                                                          \
    if (!(_condition)) {                                        \
      LOG(ERROR) << "VerifyPayload failure: " << #_condition;   \
      return _retval;                                           \
    }                                                           \
  } while (0)

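// Verifies the payload as a whole once it has been fully consumed. Roughly,
// with the payload laid out as
//   [header | manifest][metadata signature][data blobs...][payload signature]
// this checks that: (1) metadata_size_ + metadata_signature_size_ +
// buffer_offset_ equals the size advertised in the Omaha response; (2) the
// hash over the entire payload matches |update_check_response_hash|; and
// (3) if a public key is available, the payload signature verifies against
// the signed hash, which covers everything up to the signature blob itself.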
ErrorCode DeltaPerformer::VerifyPayload(
    const string& update_check_response_hash,
    const uint64_t update_check_response_size) {

  // See if we should use the public RSA key in the Omaha response.
  base::FilePath path_to_public_key(public_key_path_);
  base::FilePath tmp_key;
  if (GetPublicKeyFromResponse(&tmp_key))
    path_to_public_key = tmp_key;
  ScopedPathUnlinker tmp_key_remover(tmp_key.value());
  if (tmp_key.empty())
    tmp_key_remover.set_should_remove(false);

  LOG(INFO) << "Verifying payload using public key: "
            << path_to_public_key.value();

  // Verifies the download size.
  TEST_AND_RETURN_VAL(ErrorCode::kPayloadSizeMismatchError,
                      update_check_response_size ==
                      metadata_size_ + metadata_signature_size_ +
                      buffer_offset_);

  // Verifies the payload hash.
  const string& payload_hash_data = payload_hash_calculator_.hash();
  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError,
                      !payload_hash_data.empty());
  TEST_AND_RETURN_VAL(ErrorCode::kPayloadHashMismatchError,
                      payload_hash_data == update_check_response_hash);

  // Verifies the signed payload hash.
  if (!utils::FileExists(path_to_public_key.value().c_str())) {
    LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
    return ErrorCode::kSuccess;
  }
  TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError,
                      !signatures_message_data_.empty());
  brillo::Blob hash_data = signed_hash_calculator_.raw_hash();
  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
                      PayloadVerifier::PadRSA2048SHA256Hash(&hash_data));
  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
                      !hash_data.empty());

  if (!PayloadVerifier::VerifySignature(
      signatures_message_data_, path_to_public_key.value(), hash_data)) {
    // The autoupdate_CatchBadSignatures test checks for this string
    // in log-files. Keep in sync.
    LOG(ERROR) << "Public key verification failed, thus update failed.";
    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
  }

  LOG(INFO) << "Payload hash matches value in payload.";

  // At this point, we are guaranteed to have downloaded a full payload, i.e.,
  // one whose size matches the size mentioned in the Omaha response. If any
  // errors happen after this, it's likely a problem with the payload itself or
  // the state of the system and not a problem with the URL or network.  So,
  // indicate that to the download delegate so that AU can back off
  // appropriately.
  if (download_delegate_)
    download_delegate_->DownloadComplete();

  return ErrorCode::kSuccess;
}

namespace {
void LogVerifyError(const string& type,
                    const string& device,
                    uint64_t size,
                    const string& local_hash,
                    const string& expected_hash) {
  LOG(ERROR) << "This is a server-side error due to "
             << "mismatched delta update image!";
  LOG(ERROR) << "The delta I've been given contains a " << type << " delta "
             << "update that must be applied over a " << type << " with "
             << "a specific checksum, but the " << type << " we're starting "
             << "with doesn't have that checksum! This means that "
             << "the delta I've been given doesn't match my existing "
             << "system. The " << type << " partition I have has hash: "
             << local_hash << " but the update expected me to have "
             << expected_hash << " .";
  LOG(INFO) << "To get the checksum of the " << type << " partition run this "
               "command: dd if=" << device << " bs=1M count=" << size
            << " iflag=count_bytes 2>/dev/null | openssl dgst -sha256 -binary "
               "| openssl base64";
  LOG(INFO) << "To get the checksum of partitions in a bin file, "
            << "run: .../src/scripts/sha256_partitions.sh .../file.bin";
}

string StringForHashBytes(const void* bytes, size_t size) {
  return brillo::data_encoding::Base64Encode(bytes, size);
}
}  // namespace

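// For a delta update, checks that the partitions we are about to read from are
// the ones the payload was generated against: the InstallPlan and the payload
// must list the same partitions, in the same order and with the same names,
// and each source hash precomputed in the InstallPlan must match the
// corresponding old_partition_info hash from the manifest.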
bool DeltaPerformer::VerifySourcePartitions() {
  LOG(INFO) << "Verifying source partitions.";
  CHECK(manifest_valid_);
  CHECK(install_plan_);
  if (install_plan_->partitions.size() != partitions_.size()) {
    DLOG(ERROR) << "The list of partitions in the InstallPlan doesn't match the "
                   "list received in the payload. The InstallPlan has "
                << install_plan_->partitions.size()
                << " partitions while the payload has " << partitions_.size()
                << " partitions.";
    return false;
  }
  for (size_t i = 0; i < partitions_.size(); ++i) {
    if (partitions_[i].partition_name() != install_plan_->partitions[i].name) {
      DLOG(ERROR) << "The InstallPlan's partition " << i << " is \""
                  << install_plan_->partitions[i].name
                  << "\" but the payload expects it to be \""
                  << partitions_[i].partition_name()
                  << "\". This is an error in the DeltaPerformer setup.";
      return false;
    }
    if (!partitions_[i].has_old_partition_info())
      continue;
    const PartitionInfo& info = partitions_[i].old_partition_info();
    const InstallPlan::Partition& plan_part = install_plan_->partitions[i];
    bool valid =
        !plan_part.source_hash.empty() &&
        plan_part.source_hash.size() == info.hash().size() &&
        memcmp(plan_part.source_hash.data(),
               info.hash().data(),
               plan_part.source_hash.size()) == 0;
    if (!valid) {
      LogVerifyError(partitions_[i].partition_name(),
                     plan_part.source_path,
                     info.size(),
                     StringForHashBytes(plan_part.source_hash.data(),
                                        plan_part.source_hash.size()),
                     StringForHashBytes(info.hash().data(),
                                        info.hash().size()));
      return false;
    }
  }
  return true;
}

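// Feeds the pending |buffer_| contents to both running hash calculators and
// then releases the buffer's memory. The payload hash covers every byte
// consumed, while the signed hash only covers the first
// |signed_hash_buffer_size| bytes, which lets callers keep the signature blob
// itself out of the signed hash.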
void DeltaPerformer::DiscardBuffer(bool do_advance_offset,
                                   size_t signed_hash_buffer_size) {
  // Update the buffer offset.
  if (do_advance_offset)
    buffer_offset_ += buffer_.size();

  // Hash the content.
  payload_hash_calculator_.Update(buffer_.data(), buffer_.size());
  signed_hash_calculator_.Update(buffer_.data(), signed_hash_buffer_size);

  // Swap content with an empty vector to ensure that all memory is released.
  brillo::Blob().swap(buffer_);
}

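// Decides, based only on the persisted prefs, whether an interrupted update
// can be resumed: there must be a valid next-operation index, the stored
// update hash must match the current Omaha response hash, the resume-failure
// counter (if present) must not exceed kMaxResumedUpdateFailures, and the
// remaining offsets, hash context and metadata sizes must look sane.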
bool DeltaPerformer::CanResumeUpdate(PrefsInterface* prefs,
                                     string update_check_response_hash) {
  int64_t next_operation = kUpdateStateOperationInvalid;
  if (!(prefs->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) &&
        next_operation != kUpdateStateOperationInvalid &&
        next_operation > 0))
    return false;

  string interrupted_hash;
  if (!(prefs->GetString(kPrefsUpdateCheckResponseHash, &interrupted_hash) &&
        !interrupted_hash.empty() &&
        interrupted_hash == update_check_response_hash))
    return false;

  int64_t resumed_update_failures;
  // Note that storing this value is optional, but if it is present it should
  // not exceed the limit.
  if (prefs->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures) &&
      resumed_update_failures > kMaxResumedUpdateFailures)
    return false;

  // Sanity check the rest.
  int64_t next_data_offset = -1;
  if (!(prefs->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
        next_data_offset >= 0))
    return false;

  string sha256_context;
  if (!(prefs->GetString(kPrefsUpdateStateSHA256Context, &sha256_context) &&
        !sha256_context.empty()))
    return false;

  int64_t manifest_metadata_size = 0;
  if (!(prefs->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
        manifest_metadata_size > 0))
    return false;

  int64_t manifest_signature_size = 0;
  if (!(prefs->GetInt64(kPrefsManifestSignatureSize,
                        &manifest_signature_size) &&
        manifest_signature_size >= 0))
    return false;

  return true;
}

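// Clears the persisted resume state. A |quick| reset only invalidates the
// next-operation index, which is enough to make CanResumeUpdate() return
// false; a full reset also wipes the stored hashes, contexts, sizes and the
// resume-failure counter.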
bool DeltaPerformer::ResetUpdateProgress(PrefsInterface* prefs, bool quick) {
  TEST_AND_RETURN_FALSE(prefs->SetInt64(kPrefsUpdateStateNextOperation,
                                        kUpdateStateOperationInvalid));
  if (!quick) {
    prefs->SetString(kPrefsUpdateCheckResponseHash, "");
    prefs->SetInt64(kPrefsUpdateStateNextDataOffset, -1);
    prefs->SetInt64(kPrefsUpdateStateNextDataLength, 0);
    prefs->SetString(kPrefsUpdateStateSHA256Context, "");
    prefs->SetString(kPrefsUpdateStateSignedSHA256Context, "");
    prefs->SetString(kPrefsUpdateStateSignatureBlob, "");
    prefs->SetInt64(kPrefsManifestMetadataSize, -1);
    prefs->SetInt64(kPrefsManifestSignatureSize, -1);
    prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
  }
  return true;
}

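// Persists enough state in prefs to resume the update after an interruption.
// Note the ordering: when the buffer offset has advanced, the progress is
// first invalidated with a quick reset, then the hash contexts and the data
// offset are written, and the next-operation index is written last. If we
// crash part-way through, the invalid next-operation index keeps
// CanResumeUpdate() from resuming with inconsistent state.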
bool DeltaPerformer::CheckpointUpdateProgress() {
  Terminator::set_exit_blocked(true);
  if (last_updated_buffer_offset_ != buffer_offset_) {
    // Resets the progress in case we die in the middle of the state update.
    ResetUpdateProgress(prefs_, true);
    TEST_AND_RETURN_FALSE(
        prefs_->SetString(kPrefsUpdateStateSHA256Context,
                          payload_hash_calculator_.GetContext()));
    TEST_AND_RETURN_FALSE(
        prefs_->SetString(kPrefsUpdateStateSignedSHA256Context,
                          signed_hash_calculator_.GetContext()));
    TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataOffset,
                                           buffer_offset_));
    last_updated_buffer_offset_ = buffer_offset_;

    if (next_operation_num_ < num_total_operations_) {
      size_t partition_index = current_partition_;
      while (next_operation_num_ >= acc_num_operations_[partition_index])
        partition_index++;
      const size_t partition_operation_num = next_operation_num_ - (
          partition_index ? acc_num_operations_[partition_index - 1] : 0);
      const InstallOperation& op =
          partitions_[partition_index].operations(partition_operation_num);
      TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataLength,
                                             op.data_length()));
    } else {
      TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataLength,
                                             0));
    }
  }
  TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextOperation,
                                         next_operation_num_));
  return true;
}

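// Restores the performer's state from prefs when resuming an interrupted
// update: the next operation index, the data offset, the hash calculator
// contexts, any already-extracted signature blob, and the metadata and
// metadata-signature sizes. Also bumps the resume-failure counter so the
// attempt is speculatively counted as a failure.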
bool DeltaPerformer::PrimeUpdateState() {
  CHECK(manifest_valid_);
  block_size_ = manifest_.block_size();

  int64_t next_operation = kUpdateStateOperationInvalid;
  if (!prefs_->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) ||
      next_operation == kUpdateStateOperationInvalid ||
      next_operation <= 0) {
    // Initiating a new update, no more state needs to be initialized.
    return true;
  }
  next_operation_num_ = next_operation;

  // Resuming an update -- load the rest of the update state.
  int64_t next_data_offset = -1;
  TEST_AND_RETURN_FALSE(prefs_->GetInt64(kPrefsUpdateStateNextDataOffset,
                                         &next_data_offset) &&
                        next_data_offset >= 0);
  buffer_offset_ = next_data_offset;

  // The signed hash context and the signature blob may be empty if the
  // interrupted update didn't reach the signature.
  string signed_hash_context;
  if (prefs_->GetString(kPrefsUpdateStateSignedSHA256Context,
                        &signed_hash_context)) {
    TEST_AND_RETURN_FALSE(
        signed_hash_calculator_.SetContext(signed_hash_context));
  }

  string signature_blob;
  if (prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signature_blob)) {
    signatures_message_data_.assign(signature_blob.begin(),
                                    signature_blob.end());
  }

  string hash_context;
  TEST_AND_RETURN_FALSE(prefs_->GetString(kPrefsUpdateStateSHA256Context,
                                          &hash_context) &&
                        payload_hash_calculator_.SetContext(hash_context));

  int64_t manifest_metadata_size = 0;
  TEST_AND_RETURN_FALSE(prefs_->GetInt64(kPrefsManifestMetadataSize,
                                         &manifest_metadata_size) &&
                        manifest_metadata_size > 0);
  metadata_size_ = manifest_metadata_size;

  int64_t manifest_signature_size = 0;
  TEST_AND_RETURN_FALSE(
      prefs_->GetInt64(kPrefsManifestSignatureSize, &manifest_signature_size) &&
      manifest_signature_size >= 0);
  metadata_signature_size_ = manifest_signature_size;

  // Advance the download progress to reflect what doesn't need to be
  // re-downloaded.
  total_bytes_received_ += buffer_offset_;

  // Speculatively count the resume as a failure.
  int64_t resumed_update_failures;
  if (prefs_->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures)) {
    resumed_update_failures++;
  } else {
    resumed_update_failures = 1;
  }
  prefs_->SetInt64(kPrefsResumedUpdateFailures, resumed_update_failures);
  return true;
}

}  // namespace chromeos_update_engine