delta_performer.cc revision 889c65d1914ed8a151f451a2933fa94d6e577aa6
1//
2// Copyright (C) 2012 The Android Open Source Project
3//
4// Licensed under the Apache License, Version 2.0 (the "License");
5// you may not use this file except in compliance with the License.
6// You may obtain a copy of the License at
7//
8//      http://www.apache.org/licenses/LICENSE-2.0
9//
10// Unless required by applicable law or agreed to in writing, software
11// distributed under the License is distributed on an "AS IS" BASIS,
12// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13// See the License for the specific language governing permissions and
14// limitations under the License.
15//
16
17#include "update_engine/payload_consumer/delta_performer.h"
18
19#include <endian.h>
20#include <errno.h>
21#include <linux/fs.h>
22
23#include <algorithm>
24#include <cstring>
25#include <memory>
26#include <string>
27#include <vector>
28
29#include <base/files/file_util.h>
30#include <base/format_macros.h>
31#include <base/strings/string_util.h>
32#include <base/strings/stringprintf.h>
33#include <brillo/data_encoding.h>
34#include <brillo/make_unique_ptr.h>
35#include <google/protobuf/repeated_field.h>
36
37#include "update_engine/common/constants.h"
38#include "update_engine/common/hardware_interface.h"
39#include "update_engine/common/prefs_interface.h"
40#include "update_engine/common/subprocess.h"
41#include "update_engine/common/terminator.h"
42#include "update_engine/payload_consumer/bzip_extent_writer.h"
43#include "update_engine/payload_consumer/download_action.h"
44#include "update_engine/payload_consumer/extent_writer.h"
45#if USE_MTD
46#include "update_engine/payload_consumer/mtd_file_descriptor.h"
47#endif
48#include "update_engine/payload_consumer/payload_constants.h"
49#include "update_engine/payload_consumer/payload_verifier.h"
50#include "update_engine/payload_consumer/xz_extent_writer.h"
51
52using google::protobuf::RepeatedPtrField;
53using std::min;
54using std::string;
55using std::vector;
56
57namespace chromeos_update_engine {
58
59const uint64_t DeltaPerformer::kDeltaVersionOffset = sizeof(kDeltaMagic);
60const uint64_t DeltaPerformer::kDeltaVersionSize = 8;
61const uint64_t DeltaPerformer::kDeltaManifestSizeOffset =
62    kDeltaVersionOffset + kDeltaVersionSize;
63const uint64_t DeltaPerformer::kDeltaManifestSizeSize = 8;
64const uint64_t DeltaPerformer::kDeltaMetadataSignatureSizeSize = 4;
65const uint64_t DeltaPerformer::kMaxPayloadHeaderSize = 24;
66const uint64_t DeltaPerformer::kSupportedMajorPayloadVersion = 2;
67const uint32_t DeltaPerformer::kSupportedMinorPayloadVersion = 3;
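// A sketch of the payload header layout implied by the constants above (not an
// authoritative spec; offsets in bytes, multi-byte integers are big-endian on
// the wire, see the be64toh()/be32toh() conversions below):
//
//   [ 0,  4)  kDeltaMagic bytes
//   [ 4, 12)  major payload version   (kDeltaVersionSize)
//   [12, 20)  manifest size           (kDeltaManifestSizeSize)
//   [20, 24)  metadata signature size (kDeltaMetadataSignatureSizeSize,
//                                      major version 2 only)
//
// which is how kMaxPayloadHeaderSize comes out to 24 bytes.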
68
69const unsigned DeltaPerformer::kProgressLogMaxChunks = 10;
70const unsigned DeltaPerformer::kProgressLogTimeoutSeconds = 30;
71const unsigned DeltaPerformer::kProgressDownloadWeight = 50;
72const unsigned DeltaPerformer::kProgressOperationsWeight = 50;
73
74namespace {
75const int kUpdateStateOperationInvalid = -1;
76const int kMaxResumedUpdateFailures = 10;
77#if USE_MTD
78const int kUbiVolumeAttachTimeout = 5 * 60;
79#endif
80
81FileDescriptorPtr CreateFileDescriptor(const char* path) {
82  FileDescriptorPtr ret;
83#if USE_MTD
84  if (strstr(path, "/dev/ubi") == path) {
85    if (!UbiFileDescriptor::IsUbi(path)) {
86      // The volume might not have been attached at boot time.
87      int volume_no;
88      if (utils::SplitPartitionName(path, nullptr, &volume_no)) {
89        utils::TryAttachingUbiVolume(volume_no, kUbiVolumeAttachTimeout);
90      }
91    }
92    if (UbiFileDescriptor::IsUbi(path)) {
93      LOG(INFO) << path << " is a UBI device.";
94      ret.reset(new UbiFileDescriptor);
95    }
96  } else if (MtdFileDescriptor::IsMtd(path)) {
97    LOG(INFO) << path << " is an MTD device.";
98    ret.reset(new MtdFileDescriptor);
99  } else {
100    LOG(INFO) << path << " is neither an MTD nor a UBI device.";
101#endif
102    ret.reset(new EintrSafeFileDescriptor);
103#if USE_MTD
104  }
105#endif
106  return ret;
107}
108
109// Opens path for read/write. On success returns an open FileDescriptor
110// and sets *err to 0. On failure, sets *err to errno and returns nullptr.
111FileDescriptorPtr OpenFile(const char* path, int mode, int* err) {
112  FileDescriptorPtr fd = CreateFileDescriptor(path);
113#if USE_MTD
114  // On NAND devices, we can either read or write, but not both. So here we
115  // use O_WRONLY.
116  if (UbiFileDescriptor::IsUbi(path) || MtdFileDescriptor::IsMtd(path)) {
117    mode = O_WRONLY;
118  }
119#endif
120  if (!fd->Open(path, mode, 000)) {
121    *err = errno;
122    PLOG(ERROR) << "Unable to open file " << path;
123    return nullptr;
124  }
125  *err = 0;
126  return fd;
127}
128}  // namespace
129
130
131// Computes the ratio of |part| and |total|, scaled to |norm|, using integer
132// arithmetic.
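// A couple of worked examples (illustrative only): IntRatio(3, 4, 100) == 75,
// and IntRatio(1, 3, 50) == 16, since the division truncates toward zero.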
133static uint64_t IntRatio(uint64_t part, uint64_t total, uint64_t norm) {
134  return part * norm / total;
135}
136
137void DeltaPerformer::LogProgress(const char* message_prefix) {
138  // Format operations total count and percentage.
139  string total_operations_str("?");
140  string completed_percentage_str("");
141  if (num_total_operations_) {
142    total_operations_str = std::to_string(num_total_operations_);
143    // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
144    completed_percentage_str =
145        base::StringPrintf(" (%" PRIu64 "%%)",
146                           IntRatio(next_operation_num_, num_total_operations_,
147                                    100));
148  }
149
150  // Format download total count and percentage.
151  size_t payload_size = install_plan_->payload_size;
152  string payload_size_str("?");
153  string downloaded_percentage_str("");
154  if (payload_size) {
155    payload_size_str = std::to_string(payload_size);
156    // Upcasting to 64-bit to avoid overflow, back to size_t for formatting.
157    downloaded_percentage_str =
158        base::StringPrintf(" (%" PRIu64 "%%)",
159                           IntRatio(total_bytes_received_, payload_size, 100));
160  }
161
162  LOG(INFO) << (message_prefix ? message_prefix : "") << next_operation_num_
163            << "/" << total_operations_str << " operations"
164            << completed_percentage_str << ", " << total_bytes_received_
165            << "/" << payload_size_str << " bytes downloaded"
166            << downloaded_percentage_str << ", overall progress "
167            << overall_progress_ << "%";
168}
169
170void DeltaPerformer::UpdateOverallProgress(bool force_log,
171                                           const char* message_prefix) {
172  // Compute our download and overall progress.
173  unsigned new_overall_progress = 0;
174  COMPILE_ASSERT(kProgressDownloadWeight + kProgressOperationsWeight == 100,
175                 progress_weight_dont_add_up);
176  // Only consider download progress if its total size is known; otherwise
177  // adjust the operations weight to compensate for the absence of download
178  // progress. Also, make sure to cap the download portion at
179  // kProgressDownloadWeight, in case we end up downloading more than we
180  // initially expected (this indicates a problem, but can happen in practice).
181  // TODO(garnold) the correction of operations weight when we do not have the
182  // total payload size, as well as the conditional guard below, should both be
183  // eliminated once we ensure that the payload_size in the install plan is
184  // always given and is non-zero. This currently isn't the case during unit
185  // tests (see chromium-os:37969).
186  size_t payload_size = install_plan_->payload_size;
187  unsigned actual_operations_weight = kProgressOperationsWeight;
188  if (payload_size)
189    new_overall_progress += min(
190        static_cast<unsigned>(IntRatio(total_bytes_received_, payload_size,
191                                       kProgressDownloadWeight)),
192        kProgressDownloadWeight);
193  else
194    actual_operations_weight += kProgressDownloadWeight;
195
196  // Only add completed operations if their total number is known; we definitely
197  // expect an update to have at least one operation, so the expectation is that
198  // this will eventually reach |actual_operations_weight|.
199  if (num_total_operations_)
200    new_overall_progress += IntRatio(next_operation_num_, num_total_operations_,
201                                     actual_operations_weight);
202
203  // Progress ratio cannot recede, unless our assumptions about the total
204  // payload size, total number of operations, or the monotonicity of progress
205  // are breached.
206  if (new_overall_progress < overall_progress_) {
207    LOG(WARNING) << "progress counter receded from " << overall_progress_
208                 << "% down to " << new_overall_progress << "%; this is a bug";
209    force_log = true;
210  }
211  overall_progress_ = new_overall_progress;
212
213  // Update the chunk index and log as needed: if forced by the caller, if we
214  // completed a progress chunk, or if a timeout has expired.
215  base::Time curr_time = base::Time::Now();
216  unsigned curr_progress_chunk =
217      overall_progress_ * kProgressLogMaxChunks / 100;
218  if (force_log || curr_progress_chunk > last_progress_chunk_ ||
219      curr_time > forced_progress_log_time_) {
220    forced_progress_log_time_ = curr_time + forced_progress_log_wait_;
221    LogProgress(message_prefix);
222  }
223  last_progress_chunk_ = curr_progress_chunk;
224}
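// A worked example of the weighting above, with made-up numbers: when
// payload_size is known, kProgressDownloadWeight and kProgressOperationsWeight
// are both 50, so 50% of the bytes received plus 25% of the operations applied
// yields min(25, 50) + 12 = 37% overall (integer arithmetic truncates). When
// payload_size is unknown, the operations carry the full weight of 100, and
// the same 25% of operations reports 25% overall.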
225
226
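// Usage sketch with hypothetical numbers: if buffer_ already holds 8 bytes and
// the caller invokes CopyDataToBuffer(&p, &n, 24) with n == 100, then 16 bytes
// are appended to buffer_, p advances by 16, and n drops to 84; the remaining
// 84 bytes stay with the caller.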
227size_t DeltaPerformer::CopyDataToBuffer(const char** bytes_p, size_t* count_p,
228                                        size_t max) {
229  const size_t count = *count_p;
230  if (!count)
231    return 0;  // Special case shortcut.
232  size_t read_len = min(count, max - buffer_.size());
233  const char* bytes_start = *bytes_p;
234  const char* bytes_end = bytes_start + read_len;
235  buffer_.insert(buffer_.end(), bytes_start, bytes_end);
236  *bytes_p = bytes_end;
237  *count_p = count - read_len;
238  return read_len;
239}
240
241
242bool DeltaPerformer::HandleOpResult(bool op_result, const char* op_type_name,
243                                    ErrorCode* error) {
244  if (op_result)
245    return true;
246
247  LOG(ERROR) << "Failed to perform " << op_type_name << " operation "
248             << next_operation_num_;
249  *error = ErrorCode::kDownloadOperationExecutionError;
250  return false;
251}
252
253int DeltaPerformer::Close() {
254  int err = -CloseCurrentPartition();
255  LOG_IF(ERROR, !payload_hash_calculator_.Finalize() ||
256                !signed_hash_calculator_.Finalize())
257      << "Unable to finalize the hash.";
258  if (!buffer_.empty()) {
259    LOG(INFO) << "Discarding " << buffer_.size() << " unused downloaded bytes";
260    if (err >= 0)
261      err = 1;
262  }
263  return -err;
264}
265
266int DeltaPerformer::CloseCurrentPartition() {
267  int err = 0;
268  if (source_fd_ && !source_fd_->Close()) {
269    err = errno;
270    PLOG(ERROR) << "Error closing source partition";
271    if (!err)
272      err = 1;
273  }
274  source_fd_.reset();
275  source_path_.clear();
276
277  if (target_fd_ && !target_fd_->Close()) {
278    err = errno;
279    PLOG(ERROR) << "Error closing target partition";
280    if (!err)
281      err = 1;
282  }
283  target_fd_.reset();
284  target_path_.clear();
285  return -err;
286}
287
288bool DeltaPerformer::OpenCurrentPartition() {
289  if (current_partition_ >= partitions_.size())
290    return false;
291
292  const PartitionUpdate& partition = partitions_[current_partition_];
293  // Open source fds if we have a delta payload with minor version >= 2.
294  if (!install_plan_->is_full_update &&
295      GetMinorVersion() != kInPlaceMinorPayloadVersion) {
296    source_path_ = install_plan_->partitions[current_partition_].source_path;
297    int err;
298    source_fd_ = OpenFile(source_path_.c_str(), O_RDONLY, &err);
299    if (!source_fd_) {
300      LOG(ERROR) << "Unable to open source partition "
301                 << partition.partition_name() << " on slot "
302                 << BootControlInterface::SlotName(install_plan_->source_slot)
303                 << ", file " << source_path_;
304      return false;
305    }
306  }
307
308  target_path_ = install_plan_->partitions[current_partition_].target_path;
309  int err;
310  target_fd_ = OpenFile(target_path_.c_str(), O_RDWR, &err);
311  if (!target_fd_) {
312    LOG(ERROR) << "Unable to open target partition "
313               << partition.partition_name() << " on slot "
314               << BootControlInterface::SlotName(install_plan_->target_slot)
315               << ", file " << target_path_;
316    return false;
317  }
318  return true;
319}
320
321namespace {
322
323void LogPartitionInfoHash(const PartitionInfo& info, const string& tag) {
324  string sha256 = brillo::data_encoding::Base64Encode(info.hash());
325  LOG(INFO) << "PartitionInfo " << tag << " sha256: " << sha256
326            << " size: " << info.size();
327}
328
329void LogPartitionInfo(const vector<PartitionUpdate>& partitions) {
330  for (const PartitionUpdate& partition : partitions) {
331    LogPartitionInfoHash(partition.old_partition_info(),
332                         "old " + partition.partition_name());
333    LogPartitionInfoHash(partition.new_partition_info(),
334                         "new " + partition.partition_name());
335  }
336}
337
338}  // namespace
339
340bool DeltaPerformer::GetMetadataSignatureSizeOffset(
341    uint64_t* out_offset) const {
342  if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
343    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
344    return true;
345  }
346  return false;
347}
348
349bool DeltaPerformer::GetManifestOffset(uint64_t* out_offset) const {
350  // The actual manifest begins right after the manifest size field, or after
351  // the metadata signature size field if the major version is >= 2.
352  if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
353    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize;
354    return true;
355  }
356  if (major_payload_version_ == kBrilloMajorPayloadVersion) {
357    *out_offset = kDeltaManifestSizeOffset + kDeltaManifestSizeSize +
358                  kDeltaMetadataSignatureSizeSize;
359    return true;
360  }
361  LOG(ERROR) << "Unknown major payload version: " << major_payload_version_;
362  return false;
363}
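// Given the constants defined above, this places the manifest at byte offset
// 20 for kChromeOSMajorPayloadVersion payloads and at byte offset 24 (right
// after the metadata signature size field) for kBrilloMajorPayloadVersion
// payloads.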
364
365uint64_t DeltaPerformer::GetMetadataSize() const {
366  return metadata_size_;
367}
368
369uint64_t DeltaPerformer::GetMajorVersion() const {
370  return major_payload_version_;
371}
372
373uint32_t DeltaPerformer::GetMinorVersion() const {
374  if (manifest_.has_minor_version()) {
375    return manifest_.minor_version();
376  } else {
377    return (install_plan_->is_full_update ?
378            kFullPayloadMinorVersion :
379            kSupportedMinorPayloadVersion);
380  }
381}
382
383bool DeltaPerformer::GetManifest(DeltaArchiveManifest* out_manifest_p) const {
384  if (!manifest_parsed_)
385    return false;
386  *out_manifest_p = manifest_;
387  return true;
388}
389
390bool DeltaPerformer::IsHeaderParsed() const {
391  return metadata_size_ != 0;
392}
393
394DeltaPerformer::MetadataParseResult DeltaPerformer::ParsePayloadMetadata(
395    const brillo::Blob& payload, ErrorCode* error) {
396  *error = ErrorCode::kSuccess;
397  uint64_t manifest_offset;
398
399  if (!IsHeaderParsed()) {
400    // Ensure we have data to cover the major payload version.
401    if (payload.size() < kDeltaManifestSizeOffset)
402      return kMetadataParseInsufficientData;
403
404    // Validate the magic string.
405    if (memcmp(payload.data(), kDeltaMagic, sizeof(kDeltaMagic)) != 0) {
406      LOG(ERROR) << "Bad payload format -- invalid delta magic.";
407      *error = ErrorCode::kDownloadInvalidMetadataMagicString;
408      return kMetadataParseError;
409    }
410
411    // Extract the payload version from the metadata.
412    COMPILE_ASSERT(sizeof(major_payload_version_) == kDeltaVersionSize,
413                   major_payload_version_size_mismatch);
414    memcpy(&major_payload_version_,
415           &payload[kDeltaVersionOffset],
416           kDeltaVersionSize);
417    // switch big endian to host
418    major_payload_version_ = be64toh(major_payload_version_);
419
420    if (major_payload_version_ != supported_major_version_ &&
421        major_payload_version_ != kChromeOSMajorPayloadVersion) {
422      LOG(ERROR) << "Bad payload format -- unsupported payload version: "
423          << major_payload_version_;
424      *error = ErrorCode::kUnsupportedMajorPayloadVersion;
425      return kMetadataParseError;
426    }
427
428    // Get the manifest offset now that we have payload version.
429    if (!GetManifestOffset(&manifest_offset)) {
430      *error = ErrorCode::kUnsupportedMajorPayloadVersion;
431      return kMetadataParseError;
432    }
433    // Check again with the manifest offset.
434    if (payload.size() < manifest_offset)
435      return kMetadataParseInsufficientData;
436
437    // Next, parse the manifest size.
438    COMPILE_ASSERT(sizeof(manifest_size_) == kDeltaManifestSizeSize,
439                   manifest_size_size_mismatch);
440    memcpy(&manifest_size_,
441           &payload[kDeltaManifestSizeOffset],
442           kDeltaManifestSizeSize);
443    manifest_size_ = be64toh(manifest_size_);  // switch big endian to host
444
445    if (GetMajorVersion() == kBrilloMajorPayloadVersion) {
446      // Parse the metadata signature size.
447      COMPILE_ASSERT(sizeof(metadata_signature_size_) ==
448                     kDeltaMetadataSignatureSizeSize,
449                     metadata_signature_size_size_mismatch);
450      uint64_t metadata_signature_size_offset;
451      if (!GetMetadataSignatureSizeOffset(&metadata_signature_size_offset)) {
452        *error = ErrorCode::kError;
453        return kMetadataParseError;
454      }
455      memcpy(&metadata_signature_size_,
456             &payload[metadata_signature_size_offset],
457             kDeltaMetadataSignatureSizeSize);
458      metadata_signature_size_ = be32toh(metadata_signature_size_);
459    }
460
461    // If the metadata size is present in the install plan, check it immediately,
462    // even before waiting for that many bytes to be downloaded in the
463    // payload. This will prevent any attack which relies on us downloading data
464    // beyond the expected metadata size.
465    metadata_size_ = manifest_offset + manifest_size_;
466    if (install_plan_->hash_checks_mandatory) {
467      if (install_plan_->metadata_size != metadata_size_) {
468        LOG(ERROR) << "Mandatory metadata size in Omaha response ("
469                   << install_plan_->metadata_size
470                   << ") is missing/incorrect, actual = " << metadata_size_;
471        *error = ErrorCode::kDownloadInvalidMetadataSize;
472        return kMetadataParseError;
473      }
474    }
475  }
476
477  // Now that we have validated the metadata size, we should wait for the full
478  // metadata and its signature (if present) to be read in before we can parse it.
479  if (payload.size() < metadata_size_ + metadata_signature_size_)
480    return kMetadataParseInsufficientData;
481
482  // Log whether we validated the size or are simply trusting what's in the
483  // payload. This is logged here (after we received the full metadata) so
484  // that we just log once (instead of logging n times) if it takes n
485  // DeltaPerformer::Write calls to download the full manifest.
486  if (install_plan_->metadata_size == metadata_size_) {
487    LOG(INFO) << "Manifest size in payload matches expected value from Omaha";
488  } else {
489    // For mandatory cases, we'd have already returned a kMetadataParseError
490    // above. We'll be here only for non-mandatory cases. Just send a UMA stat.
491    LOG(WARNING) << "Ignoring missing/incorrect metadata size ("
492                 << install_plan_->metadata_size
493                 << ") in Omaha response as validation is not mandatory. "
494                 << "Trusting metadata size in payload = " << metadata_size_;
495  }
496
497  // We have the full metadata in |payload|. Verify its integrity
498  // and authenticity based on the information we have in Omaha response.
499  *error = ValidateMetadataSignature(payload);
500  if (*error != ErrorCode::kSuccess) {
501    if (install_plan_->hash_checks_mandatory) {
502      // The autoupdate_CatchBadSignatures test checks for this string
503      // in log-files. Keep in sync.
504      LOG(ERROR) << "Mandatory metadata signature validation failed";
505      return kMetadataParseError;
506    }
507
508    // For non-mandatory cases, just send a UMA stat.
509    LOG(WARNING) << "Ignoring metadata signature validation failures";
510    *error = ErrorCode::kSuccess;
511  }
512
513  if (!GetManifestOffset(&manifest_offset)) {
514    *error = ErrorCode::kUnsupportedMajorPayloadVersion;
515    return kMetadataParseError;
516  }
517  // The payload metadata is deemed valid, it's safe to parse the protobuf.
518  if (!manifest_.ParseFromArray(&payload[manifest_offset], manifest_size_)) {
519    LOG(ERROR) << "Unable to parse manifest in update file.";
520    *error = ErrorCode::kDownloadManifestParseError;
521    return kMetadataParseError;
522  }
523
524  manifest_parsed_ = true;
525  return kMetadataParseSuccess;
526}
527
528// Wrapper around write. Returns true if all requested bytes were written, or
529// false on any error, regardless of progress, and stores an action exit code
530// in |error|.
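// Roughly, Write() below proceeds in three stages (this is a summary of the
// code, not additional behavior): (1) buffer incoming bytes until the payload
// header and then the full metadata are available, validate them, and set up
// the per-partition state; (2) buffer each operation's data blob and apply the
// operation to the open partition, checkpointing progress along the way; and
// (3) for kBrilloMajorPayloadVersion payloads, buffer and extract the trailing
// payload signature blob.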
531bool DeltaPerformer::Write(const void* bytes, size_t count, ErrorCode *error) {
532  *error = ErrorCode::kSuccess;
533
534  const char* c_bytes = reinterpret_cast<const char*>(bytes);
535
536  // Update the total byte downloaded count and the progress logs.
537  total_bytes_received_ += count;
538  UpdateOverallProgress(false, "Completed ");
539
540  while (!manifest_valid_) {
541    // Read data up to the needed limit; this is either the maximum payload header
542    // size, or the full metadata size (once it becomes known).
543    const bool do_read_header = !IsHeaderParsed();
544    CopyDataToBuffer(&c_bytes, &count,
545                     (do_read_header ? kMaxPayloadHeaderSize :
546                      metadata_size_ + metadata_signature_size_));
547
548    MetadataParseResult result = ParsePayloadMetadata(buffer_, error);
549    if (result == kMetadataParseError)
550      return false;
551    if (result == kMetadataParseInsufficientData) {
552      // If we just processed the header, make an attempt on the manifest.
553      if (do_read_header && IsHeaderParsed())
554        continue;
555
556      return true;
557    }
558
559    // Checks the integrity of the payload manifest.
560    if ((*error = ValidateManifest()) != ErrorCode::kSuccess)
561      return false;
562    manifest_valid_ = true;
563
564    // Clear the download buffer.
565    DiscardBuffer(false, metadata_size_);
566
567    // This populates |partitions_| and the |install_plan.partitions| with the
568    // list of partitions from the manifest.
569    if (!ParseManifestPartitions(error))
570      return false;
571
572    num_total_operations_ = 0;
573    for (const auto& partition : partitions_) {
574      num_total_operations_ += partition.operations_size();
575      acc_num_operations_.push_back(num_total_operations_);
576    }
577
578    LOG_IF(WARNING, !prefs_->SetInt64(kPrefsManifestMetadataSize,
579                                      metadata_size_))
580        << "Unable to save the manifest metadata size.";
581
582    if (!PrimeUpdateState()) {
583      *error = ErrorCode::kDownloadStateInitializationError;
584      LOG(ERROR) << "Unable to prime the update state.";
585      return false;
586    }
587
588    if (!OpenCurrentPartition()) {
589      *error = ErrorCode::kInstallDeviceOpenError;
590      return false;
591    }
592
593    if (next_operation_num_ > 0)
594      UpdateOverallProgress(true, "Resuming after ");
595    LOG(INFO) << "Starting to apply update payload operations";
596  }
597
598  while (next_operation_num_ < num_total_operations_) {
599    // Check if we should cancel the current attempt for any reason.
600    // In this case, *error will have already been populated with the reason
601    // why we're canceling.
602    if (download_delegate_ && download_delegate_->ShouldCancel(error))
603      return false;
604
605    // We know there are more operations to perform because we didn't reach the
606    // |num_total_operations_| limit yet.
607    while (next_operation_num_ >= acc_num_operations_[current_partition_]) {
608      CloseCurrentPartition();
609      current_partition_++;
610      if (!OpenCurrentPartition()) {
611        *error = ErrorCode::kInstallDeviceOpenError;
612        return false;
613      }
614    }
615    const size_t partition_operation_num = next_operation_num_ - (
616        current_partition_ ? acc_num_operations_[current_partition_ - 1] : 0);
617
618    const InstallOperation& op =
619        partitions_[current_partition_].operations(partition_operation_num);
620
621    CopyDataToBuffer(&c_bytes, &count, op.data_length());
622
623    // Check whether we received all of the next operation's data payload.
624    if (!CanPerformInstallOperation(op))
625      return true;
626
627    // Validate the operation only if the metadata signature is present.
628    // Otherwise, keep the old behavior. This serves as a knob to disable
629    // the validation logic in case we find some regression after rollout.
630    // NOTE: If hash checks are mandatory and if metadata_signature is empty,
631    // we would have already failed in the ParsePayloadMetadata method and thus
632    // would not even be here, so there is no need to handle that case again.
633    if (!install_plan_->metadata_signature.empty()) {
634      // Note: ValidateOperationHash must be called only after
635      // CanPerformInstallOperation returns true; otherwise we might fail
636      // operations simply because there isn't yet enough data to compute the hash.
637      *error = ValidateOperationHash(op);
638      if (*error != ErrorCode::kSuccess) {
639        if (install_plan_->hash_checks_mandatory) {
640          LOG(ERROR) << "Mandatory operation hash check failed";
641          return false;
642        }
643
644        // For non-mandatory cases, just send a UMA stat.
645        LOG(WARNING) << "Ignoring operation validation errors";
646        *error = ErrorCode::kSuccess;
647      }
648    }
649
650    // Makes sure we unblock exit when this operation completes.
651    ScopedTerminatorExitUnblocker exit_unblocker =
652        ScopedTerminatorExitUnblocker();  // Avoids a compiler unused var bug.
653
654    bool op_result;
655    switch (op.type()) {
656      case InstallOperation::REPLACE:
657      case InstallOperation::REPLACE_BZ:
658      case InstallOperation::REPLACE_XZ:
659        op_result = PerformReplaceOperation(op);
660        break;
661      case InstallOperation::ZERO:
662      case InstallOperation::DISCARD:
663        op_result = PerformZeroOrDiscardOperation(op);
664        break;
665      case InstallOperation::MOVE:
666        op_result = PerformMoveOperation(op);
667        break;
668      case InstallOperation::BSDIFF:
669        op_result = PerformBsdiffOperation(op);
670        break;
671      case InstallOperation::SOURCE_COPY:
672        op_result = PerformSourceCopyOperation(op);
673        break;
674      case InstallOperation::SOURCE_BSDIFF:
675        op_result = PerformSourceBsdiffOperation(op);
676        break;
677      default:
678       op_result = false;
679    }
680    if (!HandleOpResult(op_result, InstallOperationTypeName(op.type()), error))
681      return false;
682
683    next_operation_num_++;
684    UpdateOverallProgress(false, "Completed ");
685    CheckpointUpdateProgress();
686  }
687
688  // In major version 2, we don't add a dummy operation to the payload.
689  if (major_payload_version_ == kBrilloMajorPayloadVersion &&
690      manifest_.has_signatures_offset() && manifest_.has_signatures_size()) {
691    if (manifest_.signatures_offset() != buffer_offset_) {
692      LOG(ERROR) << "Payload signatures offset points to blob offset "
693                 << manifest_.signatures_offset()
694                 << " but signatures are expected at offset "
695                 << buffer_offset_;
696      *error = ErrorCode::kDownloadPayloadVerificationError;
697      return false;
698    }
699    CopyDataToBuffer(&c_bytes, &count, manifest_.signatures_size());
700    // Needs more data to cover entire signature.
701    if (buffer_.size() < manifest_.signatures_size())
702      return true;
703    if (!ExtractSignatureMessage()) {
704      LOG(ERROR) << "Extract payload signature failed.";
705      *error = ErrorCode::kDownloadPayloadVerificationError;
706      return false;
707    }
708    DiscardBuffer(true, 0);
709  }
710
711  return true;
712}
713
714bool DeltaPerformer::IsManifestValid() {
715  return manifest_valid_;
716}
717
718bool DeltaPerformer::ParseManifestPartitions(ErrorCode* error) {
719  if (major_payload_version_ == kBrilloMajorPayloadVersion) {
720    partitions_.clear();
721    for (const PartitionUpdate& partition : manifest_.partitions()) {
722      partitions_.push_back(partition);
723    }
724    manifest_.clear_partitions();
725  } else if (major_payload_version_ == kChromeOSMajorPayloadVersion) {
726    LOG(INFO) << "Converting update information from old format.";
727    PartitionUpdate root_part;
728    root_part.set_partition_name(kLegacyPartitionNameRoot);
729#ifdef __ANDROID__
730    LOG(WARNING) << "Legacy payload major version provided to an Android "
731                    "build. Assuming no post-install. Please use major version "
732                    "2 or newer.";
733    root_part.set_run_postinstall(false);
734#else
735    root_part.set_run_postinstall(true);
736#endif  // __ANDROID__
737    if (manifest_.has_old_rootfs_info()) {
738      *root_part.mutable_old_partition_info() = manifest_.old_rootfs_info();
739      manifest_.clear_old_rootfs_info();
740    }
741    if (manifest_.has_new_rootfs_info()) {
742      *root_part.mutable_new_partition_info() = manifest_.new_rootfs_info();
743      manifest_.clear_new_rootfs_info();
744    }
745    *root_part.mutable_operations() = manifest_.install_operations();
746    manifest_.clear_install_operations();
747    partitions_.push_back(std::move(root_part));
748
749    PartitionUpdate kern_part;
750    kern_part.set_partition_name(kLegacyPartitionNameKernel);
751    kern_part.set_run_postinstall(false);
752    if (manifest_.has_old_kernel_info()) {
753      *kern_part.mutable_old_partition_info() = manifest_.old_kernel_info();
754      manifest_.clear_old_kernel_info();
755    }
756    if (manifest_.has_new_kernel_info()) {
757      *kern_part.mutable_new_partition_info() = manifest_.new_kernel_info();
758      manifest_.clear_new_kernel_info();
759    }
760    *kern_part.mutable_operations() = manifest_.kernel_install_operations();
761    manifest_.clear_kernel_install_operations();
762    partitions_.push_back(std::move(kern_part));
763  }
764
765  // TODO(deymo): Remove this block of code once we switch to optional
766  // source partition verification. The list of partitions in the InstallPlan
767  // is initialized with the expected hashes in payload major version 1,
768  // so we need to check those now if already set. See b/23182225.
769  if (!install_plan_->partitions.empty()) {
770    if (!VerifySourcePartitions()) {
771      *error = ErrorCode::kDownloadStateInitializationError;
772      return false;
773    }
774  }
775
776  // Fill in the InstallPlan::partitions based on the partitions from the
777  // payload.
778  install_plan_->partitions.clear();
779  for (const auto& partition : partitions_) {
780    InstallPlan::Partition install_part;
781    install_part.name = partition.partition_name();
782    install_part.run_postinstall =
783        partition.has_run_postinstall() && partition.run_postinstall();
784
785    if (partition.has_old_partition_info()) {
786      const PartitionInfo& info = partition.old_partition_info();
787      install_part.source_size = info.size();
788      install_part.source_hash.assign(info.hash().begin(), info.hash().end());
789    }
790
791    if (!partition.has_new_partition_info()) {
792      LOG(ERROR) << "Unable to get new partition hash info on partition "
793                 << install_part.name << ".";
794      *error = ErrorCode::kDownloadNewPartitionInfoError;
795      return false;
796    }
797    const PartitionInfo& info = partition.new_partition_info();
798    install_part.target_size = info.size();
799    install_part.target_hash.assign(info.hash().begin(), info.hash().end());
800
801    install_plan_->partitions.push_back(install_part);
802  }
803
804  if (!install_plan_->LoadPartitionsFromSlots(boot_control_)) {
805    LOG(ERROR) << "Unable to determine all the partition devices.";
806    *error = ErrorCode::kInstallDeviceOpenError;
807    return false;
808  }
809  LogPartitionInfo(partitions_);
810  return true;
811}
812
813bool DeltaPerformer::CanPerformInstallOperation(
814    const chromeos_update_engine::InstallOperation& operation) {
815  // Move and source_copy operations don't require any data blob, so they can
816  // always be performed.
817  if (operation.type() == InstallOperation::MOVE ||
818      operation.type() == InstallOperation::SOURCE_COPY)
819    return true;
820
821  // See if we have the entire data blob in the buffer.
822  if (operation.data_offset() < buffer_offset_) {
823    LOG(ERROR) << "The operation's data appears to have been discarded already.";
824    return false;
825  }
826
827  return (operation.data_offset() + operation.data_length() <=
828          buffer_offset_ + buffer_.size());
829}
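// Illustrative example with made-up numbers: with buffer_offset_ == 100 and
// buffer_.size() == 50, an operation with data_offset == 120 and
// data_length == 30 can be performed (120 + 30 <= 150); one with
// data_length == 40 cannot be performed yet; and one with data_offset == 80
// hits the error path above because its data was already discarded.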
830
831bool DeltaPerformer::PerformReplaceOperation(
832    const InstallOperation& operation) {
833  CHECK(operation.type() == InstallOperation::REPLACE ||
834        operation.type() == InstallOperation::REPLACE_BZ ||
835        operation.type() == InstallOperation::REPLACE_XZ);
836
837  // Since we delete data off the beginning of the buffer as we use it,
838  // the data we need should be exactly at the beginning of the buffer.
839  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
840  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
841
842  // Extract the signature message if it's in this operation.
843  if (ExtractSignatureMessageFromOperation(operation)) {
844    // If this is a dummy replace operation, we ignore it after extracting the
845    // signature.
846    DiscardBuffer(true, 0);
847    return true;
848  }
849
850  // Set up the ExtentWriter stack based on the operation type.
851  std::unique_ptr<ExtentWriter> writer =
852    brillo::make_unique_ptr(new ZeroPadExtentWriter(
853      brillo::make_unique_ptr(new DirectExtentWriter())));
854
855  if (operation.type() == InstallOperation::REPLACE_BZ) {
856    writer.reset(new BzipExtentWriter(std::move(writer)));
857  } else if (operation.type() == InstallOperation::REPLACE_XZ) {
858    writer.reset(new XzExtentWriter(std::move(writer)));
859  }
860
861  // Create a vector of extents to pass to the ExtentWriter.
862  vector<Extent> extents;
863  for (int i = 0; i < operation.dst_extents_size(); i++) {
864    extents.push_back(operation.dst_extents(i));
865  }
866
867  TEST_AND_RETURN_FALSE(writer->Init(target_fd_, extents, block_size_));
868  TEST_AND_RETURN_FALSE(writer->Write(buffer_.data(), operation.data_length()));
869  TEST_AND_RETURN_FALSE(writer->End());
870
871  // Update buffer
872  DiscardBuffer(true, buffer_.size());
873  return true;
874}
875
876bool DeltaPerformer::PerformZeroOrDiscardOperation(
877    const InstallOperation& operation) {
878  CHECK(operation.type() == InstallOperation::DISCARD ||
879        operation.type() == InstallOperation::ZERO);
880
881  // These operations have no blob.
882  TEST_AND_RETURN_FALSE(!operation.has_data_offset());
883  TEST_AND_RETURN_FALSE(!operation.has_data_length());
884
885  int request =
886      (operation.type() == InstallOperation::ZERO ? BLKZEROOUT : BLKDISCARD);
887
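  // BLKZEROOUT asks the block device to zero the requested range, whereas
  // BLKDISCARD only tells it the blocks are no longer needed and does not
  // guarantee they read back as zeros. If the ioctl is not supported, we fall
  // back to explicitly writing zeros below.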
888  bool attempt_ioctl = true;
889  brillo::Blob zeros;
890  for (int i = 0; i < operation.dst_extents_size(); i++) {
891    Extent extent = operation.dst_extents(i);
892    const uint64_t start = extent.start_block() * block_size_;
893    const uint64_t length = extent.num_blocks() * block_size_;
894    if (attempt_ioctl) {
895      int result = 0;
896      if (target_fd_->BlkIoctl(request, start, length, &result) && result == 0)
897        continue;
898      attempt_ioctl = false;
899      zeros.resize(16 * block_size_);
900    }
901    // In case of failure, we fall back to writing zeros to the selected region.
902    for (uint64_t offset = 0; offset < length; offset += zeros.size()) {
903      uint64_t chunk_length = min(length - offset,
904                                  static_cast<uint64_t>(zeros.size()));
905      TEST_AND_RETURN_FALSE(
906          utils::PWriteAll(target_fd_, zeros.data(), chunk_length, start + offset));
907    }
908  }
909  return true;
910}
911
912bool DeltaPerformer::PerformMoveOperation(const InstallOperation& operation) {
913  // Calculate buffer size. Note, this function doesn't do a sliding
914  // window to copy in case the source and destination blocks overlap.
915  // If we wanted to do a sliding window, we could program the server
916  // to generate deltas that effectively did a sliding window.
917
918  uint64_t blocks_to_read = 0;
919  for (int i = 0; i < operation.src_extents_size(); i++)
920    blocks_to_read += operation.src_extents(i).num_blocks();
921
922  uint64_t blocks_to_write = 0;
923  for (int i = 0; i < operation.dst_extents_size(); i++)
924    blocks_to_write += operation.dst_extents(i).num_blocks();
925
926  DCHECK_EQ(blocks_to_write, blocks_to_read);
927  brillo::Blob buf(blocks_to_write * block_size_);
928
929  // Read in bytes.
930  ssize_t bytes_read = 0;
931  for (int i = 0; i < operation.src_extents_size(); i++) {
932    ssize_t bytes_read_this_iteration = 0;
933    const Extent& extent = operation.src_extents(i);
934    const size_t bytes = extent.num_blocks() * block_size_;
935    TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole);
936    TEST_AND_RETURN_FALSE(utils::PReadAll(target_fd_,
937                                          &buf[bytes_read],
938                                          bytes,
939                                          extent.start_block() * block_size_,
940                                          &bytes_read_this_iteration));
941    TEST_AND_RETURN_FALSE(
942        bytes_read_this_iteration == static_cast<ssize_t>(bytes));
943    bytes_read += bytes_read_this_iteration;
944  }
945
946  // Write bytes out.
947  ssize_t bytes_written = 0;
948  for (int i = 0; i < operation.dst_extents_size(); i++) {
949    const Extent& extent = operation.dst_extents(i);
950    const size_t bytes = extent.num_blocks() * block_size_;
951    TEST_AND_RETURN_FALSE(extent.start_block() != kSparseHole);
952    TEST_AND_RETURN_FALSE(utils::PWriteAll(target_fd_,
953                                           &buf[bytes_written],
954                                           bytes,
955                                           extent.start_block() * block_size_));
956    bytes_written += bytes;
957  }
958  DCHECK_EQ(bytes_written, bytes_read);
959  DCHECK_EQ(bytes_written, static_cast<ssize_t>(buf.size()));
960  return true;
961}
962
963namespace {
964
965// Takes |extents| and fills an empty vector |blocks| with a block index for
966// each block in |extents|. For example, [(3, 2), (8, 1)] would give [3, 4, 8].
967void ExtentsToBlocks(const RepeatedPtrField<Extent>& extents,
968                     vector<uint64_t>* blocks) {
969  for (Extent ext : extents) {
970    for (uint64_t j = 0; j < ext.num_blocks(); j++)
971      blocks->push_back(ext.start_block() + j);
972  }
973}
974
975// Takes |extents| and returns the number of blocks in those extents.
976uint64_t GetBlockCount(const RepeatedPtrField<Extent>& extents) {
977  uint64_t sum = 0;
978  for (Extent ext : extents) {
979    sum += ext.num_blocks();
980  }
981  return sum;
982}
983
984// Compare |calculated_hash| with the source hash in |operation|; return false
985// and dump both hashes if they don't match.
986bool ValidateSourceHash(const brillo::Blob& calculated_hash,
987                        const InstallOperation& operation) {
988  brillo::Blob expected_source_hash(operation.src_sha256_hash().begin(),
989                                    operation.src_sha256_hash().end());
990  if (calculated_hash != expected_source_hash) {
991    LOG(ERROR) << "Hash verification failed. Expected hash = ";
992    utils::HexDumpVector(expected_source_hash);
993    LOG(ERROR) << "Calculated hash = ";
994    utils::HexDumpVector(calculated_hash);
995    return false;
996  }
997  return true;
998}
999
1000}  // namespace
1001
1002bool DeltaPerformer::PerformSourceCopyOperation(
1003    const InstallOperation& operation) {
1004  if (operation.has_src_length())
1005    TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
1006  if (operation.has_dst_length())
1007    TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
1008
1009  uint64_t blocks_to_read = GetBlockCount(operation.src_extents());
1010  uint64_t blocks_to_write = GetBlockCount(operation.dst_extents());
1011  TEST_AND_RETURN_FALSE(blocks_to_write ==  blocks_to_read);
1012
1013  // Create vectors of all the individual src/dst blocks.
1014  vector<uint64_t> src_blocks;
1015  vector<uint64_t> dst_blocks;
1016  ExtentsToBlocks(operation.src_extents(), &src_blocks);
1017  ExtentsToBlocks(operation.dst_extents(), &dst_blocks);
1018  DCHECK_EQ(src_blocks.size(), blocks_to_read);
1019  DCHECK_EQ(src_blocks.size(), dst_blocks.size());
1020
1021  brillo::Blob buf(block_size_);
1022  ssize_t bytes_read = 0;
1023  HashCalculator source_hasher;
1024  // Read/write one block at a time.
1025  for (uint64_t i = 0; i < blocks_to_read; i++) {
1026    ssize_t bytes_read_this_iteration = 0;
1027    uint64_t src_block = src_blocks[i];
1028    uint64_t dst_block = dst_blocks[i];
1029
1030    // Read in bytes.
1031    TEST_AND_RETURN_FALSE(
1032        utils::PReadAll(source_fd_,
1033                        buf.data(),
1034                        block_size_,
1035                        src_block * block_size_,
1036                        &bytes_read_this_iteration));
1037
1038    // Write bytes out.
1039    TEST_AND_RETURN_FALSE(
1040        utils::PWriteAll(target_fd_,
1041                         buf.data(),
1042                         block_size_,
1043                         dst_block * block_size_));
1044
1045    bytes_read += bytes_read_this_iteration;
1046    TEST_AND_RETURN_FALSE(bytes_read_this_iteration ==
1047                          static_cast<ssize_t>(block_size_));
1048
1049    if (operation.has_src_sha256_hash())
1050      TEST_AND_RETURN_FALSE(source_hasher.Update(buf.data(), buf.size()));
1051  }
1052
1053  if (operation.has_src_sha256_hash()) {
1054    TEST_AND_RETURN_FALSE(source_hasher.Finalize());
1055    TEST_AND_RETURN_FALSE(
1056        ValidateSourceHash(source_hasher.raw_hash(), operation));
1057  }
1058
1059  DCHECK_EQ(bytes_read, static_cast<ssize_t>(blocks_to_read * block_size_));
1060  return true;
1061}
1062
1063bool DeltaPerformer::ExtentsToBsdiffPositionsString(
1064    const RepeatedPtrField<Extent>& extents,
1065    uint64_t block_size,
1066    uint64_t full_length,
1067    string* positions_string) {
1068  string ret;
1069  uint64_t length = 0;
1070  for (int i = 0; i < extents.size(); i++) {
1071    Extent extent = extents.Get(i);
1072    int64_t start = extent.start_block() * block_size;
1073    uint64_t this_length = min(full_length - length,
1074                               extent.num_blocks() * block_size);
1075    ret += base::StringPrintf("%" PRIi64 ":%" PRIu64 ",", start, this_length);
1076    length += this_length;
1077  }
1078  TEST_AND_RETURN_FALSE(length == full_length);
1079  if (!ret.empty())
1080    ret.resize(ret.size() - 1);  // Strip trailing comma off
1081  *positions_string = ret;
1082  return true;
1083}
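// Illustrative example with hypothetical values: extents [(10, 2), (20, 1)]
// with a 4096-byte block size and full_length == 12288 produce
// "40960:8192,81920:4096".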
1084
1085bool DeltaPerformer::PerformBsdiffOperation(const InstallOperation& operation) {
1086  // Since we delete data off the beginning of the buffer as we use it,
1087  // the data we need should be exactly at the beginning of the buffer.
1088  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
1089  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
1090
1091  string input_positions;
1092  TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.src_extents(),
1093                                                       block_size_,
1094                                                       operation.src_length(),
1095                                                       &input_positions));
1096  string output_positions;
1097  TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.dst_extents(),
1098                                                       block_size_,
1099                                                       operation.dst_length(),
1100                                                       &output_positions));
1101
1102  string temp_filename;
1103  TEST_AND_RETURN_FALSE(utils::MakeTempFile("au_patch.XXXXXX",
1104                                            &temp_filename,
1105                                            nullptr));
1106  ScopedPathUnlinker path_unlinker(temp_filename);
1107  {
1108    int fd = open(temp_filename.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0644);
1109    ScopedFdCloser fd_closer(&fd);
1110    TEST_AND_RETURN_FALSE(
1111        utils::WriteAll(fd, buffer_.data(), operation.data_length()));
1112  }
1113
1114  // Update the buffer to release the patch data memory as soon as the patch
1115  // file is written out.
1116  DiscardBuffer(true, buffer_.size());
1117
1118  vector<string> cmd{kBspatchPath, target_path_, target_path_, temp_filename,
1119                     input_positions, output_positions};
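  // The resulting invocation is roughly:
  //   bspatch <old file> <new file> <patch file> <input extents> <output extents>
  // where the extent arguments are the "offset:length,..." strings built by
  // ExtentsToBsdiffPositionsString() above; for plain BSDIFF operations both
  // the old and the new file are the target partition.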
1120
1121  int return_code = 0;
1122  TEST_AND_RETURN_FALSE(
1123      Subprocess::SynchronousExecFlags(cmd, Subprocess::kSearchPath,
1124                                       &return_code, nullptr));
1125  TEST_AND_RETURN_FALSE(return_code == 0);
1126
1127  if (operation.dst_length() % block_size_) {
1128    // Zero out rest of final block.
1129    // TODO(adlr): build this into bspatch; it's more efficient that way.
1130    const Extent& last_extent =
1131        operation.dst_extents(operation.dst_extents_size() - 1);
1132    const uint64_t end_byte =
1133        (last_extent.start_block() + last_extent.num_blocks()) * block_size_;
1134    const uint64_t begin_byte =
1135        end_byte - (block_size_ - operation.dst_length() % block_size_);
1136    brillo::Blob zeros(end_byte - begin_byte);
1137    TEST_AND_RETURN_FALSE(
1138        utils::PWriteAll(target_fd_, zeros.data(), end_byte - begin_byte, begin_byte));
1139  }
1140  return true;
1141}
1142
1143bool DeltaPerformer::PerformSourceBsdiffOperation(
1144    const InstallOperation& operation) {
1145  // Since we delete data off the beginning of the buffer as we use it,
1146  // the data we need should be exactly at the beginning of the buffer.
1147  TEST_AND_RETURN_FALSE(buffer_offset_ == operation.data_offset());
1148  TEST_AND_RETURN_FALSE(buffer_.size() >= operation.data_length());
1149  if (operation.has_src_length())
1150    TEST_AND_RETURN_FALSE(operation.src_length() % block_size_ == 0);
1151  if (operation.has_dst_length())
1152    TEST_AND_RETURN_FALSE(operation.dst_length() % block_size_ == 0);
1153
1154  if (operation.has_src_sha256_hash()) {
1155    HashCalculator source_hasher;
1156    const uint64_t kMaxBlocksToRead = 512;  // 2MB if block size is 4KB
1157    brillo::Blob buf(kMaxBlocksToRead * block_size_);
1158    for (const Extent& extent : operation.src_extents()) {
1159      for (uint64_t i = 0; i < extent.num_blocks(); i += kMaxBlocksToRead) {
1160        uint64_t blocks_to_read =
1161            min(kMaxBlocksToRead, extent.num_blocks() - i);
1162        ssize_t bytes_to_read = blocks_to_read * block_size_;
1163        ssize_t bytes_read_this_iteration = 0;
1164        TEST_AND_RETURN_FALSE(
1165            utils::PReadAll(source_fd_, buf.data(), bytes_to_read,
1166                            (extent.start_block() + i) * block_size_,
1167                            &bytes_read_this_iteration));
1168        TEST_AND_RETURN_FALSE(bytes_read_this_iteration == bytes_to_read);
1169        TEST_AND_RETURN_FALSE(source_hasher.Update(buf.data(), bytes_to_read));
1170      }
1171    }
1172    TEST_AND_RETURN_FALSE(source_hasher.Finalize());
1173    TEST_AND_RETURN_FALSE(
1174        ValidateSourceHash(source_hasher.raw_hash(), operation));
1175  }
1176
1177  string input_positions;
1178  TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.src_extents(),
1179                                                       block_size_,
1180                                                       operation.src_length(),
1181                                                       &input_positions));
1182  string output_positions;
1183  TEST_AND_RETURN_FALSE(ExtentsToBsdiffPositionsString(operation.dst_extents(),
1184                                                       block_size_,
1185                                                       operation.dst_length(),
1186                                                       &output_positions));
1187
1188  string temp_filename;
1189  TEST_AND_RETURN_FALSE(utils::MakeTempFile("au_patch.XXXXXX",
1190                                            &temp_filename,
1191                                            nullptr));
1192  ScopedPathUnlinker path_unlinker(temp_filename);
1193  {
1194    int fd = open(temp_filename.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0644);
1195    ScopedFdCloser fd_closer(&fd);
1196    TEST_AND_RETURN_FALSE(
1197        utils::WriteAll(fd, buffer_.data(), operation.data_length()));
1198  }
1199
1200  // Update the buffer to release the patch data memory as soon as the patch
1201  // file is written out.
1202  DiscardBuffer(true, buffer_.size());
1203
1204  vector<string> cmd{kBspatchPath, source_path_, target_path_, temp_filename,
1205                     input_positions, output_positions};
1206
1207  int return_code = 0;
1208  TEST_AND_RETURN_FALSE(
1209      Subprocess::SynchronousExecFlags(cmd, Subprocess::kSearchPath,
1210                                       &return_code, nullptr));
1211  TEST_AND_RETURN_FALSE(return_code == 0);
1212  return true;
1213}
1214
1215bool DeltaPerformer::ExtractSignatureMessageFromOperation(
1216    const InstallOperation& operation) {
1217  if (operation.type() != InstallOperation::REPLACE ||
1218      !manifest_.has_signatures_offset() ||
1219      manifest_.signatures_offset() != operation.data_offset()) {
1220    return false;
1221  }
1222  TEST_AND_RETURN_FALSE(manifest_.has_signatures_size() &&
1223                        manifest_.signatures_size() == operation.data_length());
1224  TEST_AND_RETURN_FALSE(ExtractSignatureMessage());
1225  return true;
1226}
1227
1228bool DeltaPerformer::ExtractSignatureMessage() {
1229  TEST_AND_RETURN_FALSE(signatures_message_data_.empty());
1230  TEST_AND_RETURN_FALSE(buffer_offset_ == manifest_.signatures_offset());
1231  TEST_AND_RETURN_FALSE(buffer_.size() >= manifest_.signatures_size());
1232  signatures_message_data_.assign(
1233      buffer_.begin(),
1234      buffer_.begin() + manifest_.signatures_size());
1235
1236  // Save the signature blob because if the update is interrupted after the
1237  // download phase we don't go through this path anymore. Some alternatives to
1238  // consider:
1239  //
1240  // 1. On resume, re-download the signature blob from the server and re-verify
1241  // it.
1242  //
1243  // 2. Verify the signature as soon as it's received and don't checkpoint the
1244  // blob and the signed sha-256 context.
1245  LOG_IF(WARNING, !prefs_->SetString(kPrefsUpdateStateSignatureBlob,
1246                                     string(signatures_message_data_.begin(),
1247                                            signatures_message_data_.end())))
1248      << "Unable to store the signature blob.";
1249
1250  LOG(INFO) << "Extracted signature data of size "
1251            << manifest_.signatures_size() << " at "
1252            << manifest_.signatures_offset();
1253  return true;
1254}
1255
1256bool DeltaPerformer::GetPublicKeyFromResponse(base::FilePath *out_tmp_key) {
1257  if (hardware_->IsOfficialBuild() ||
1258      utils::FileExists(public_key_path_.c_str()) ||
1259      install_plan_->public_key_rsa.empty())
1260    return false;
1261
1262  if (!utils::DecodeAndStoreBase64String(install_plan_->public_key_rsa,
1263                                         out_tmp_key))
1264    return false;
1265
1266  return true;
1267}
1268
1269ErrorCode DeltaPerformer::ValidateMetadataSignature(
1270    const brillo::Blob& payload) {
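  // The metadata signature can come from one of two places: a base64-encoded
  // signature in the Omaha response (install_plan_->metadata_signature), or,
  // for kBrilloMajorPayloadVersion payloads, a signature protobuf blob stored
  // right after the metadata in the payload itself. Whichever is present is
  // checked against the hash of the first |metadata_size_| bytes of |payload|.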
1271  if (payload.size() < metadata_size_ + metadata_signature_size_)
1272    return ErrorCode::kDownloadMetadataSignatureError;
1273
1274  brillo::Blob metadata_signature_blob, metadata_signature_protobuf_blob;
1275  if (!install_plan_->metadata_signature.empty()) {
1276    // Convert base64-encoded signature to raw bytes.
1277    if (!brillo::data_encoding::Base64Decode(
1278        install_plan_->metadata_signature, &metadata_signature_blob)) {
1279      LOG(ERROR) << "Unable to decode base64 metadata signature: "
1280                 << install_plan_->metadata_signature;
1281      return ErrorCode::kDownloadMetadataSignatureError;
1282    }
1283  } else if (major_payload_version_ == kBrilloMajorPayloadVersion) {
1284    metadata_signature_protobuf_blob.assign(payload.begin() + metadata_size_,
1285                                            payload.begin() + metadata_size_ +
1286                                            metadata_signature_size_);
1287  }
1288
1289  if (metadata_signature_blob.empty() &&
1290      metadata_signature_protobuf_blob.empty()) {
1291    if (install_plan_->hash_checks_mandatory) {
1292      LOG(ERROR) << "Missing mandatory metadata signature in both Omaha "
1293                 << "response and payload.";
1294      return ErrorCode::kDownloadMetadataSignatureMissingError;
1295    }
1296
1297    LOG(WARNING) << "Cannot validate metadata as the signature is empty";
1298    return ErrorCode::kSuccess;
1299  }
1300
1301  // See if we should use the public RSA key in the Omaha response.
1302  base::FilePath path_to_public_key(public_key_path_);
1303  base::FilePath tmp_key;
1304  if (GetPublicKeyFromResponse(&tmp_key))
1305    path_to_public_key = tmp_key;
1306  ScopedPathUnlinker tmp_key_remover(tmp_key.value());
1307  if (tmp_key.empty())
1308    tmp_key_remover.set_should_remove(false);
1309
1310  LOG(INFO) << "Verifying metadata hash signature using public key: "
1311            << path_to_public_key.value();
1312
1313  HashCalculator metadata_hasher;
1314  metadata_hasher.Update(payload.data(), metadata_size_);
1315  if (!metadata_hasher.Finalize()) {
1316    LOG(ERROR) << "Unable to compute actual hash of manifest";
1317    return ErrorCode::kDownloadMetadataSignatureVerificationError;
1318  }
1319
1320  brillo::Blob calculated_metadata_hash = metadata_hasher.raw_hash();
1321  PayloadVerifier::PadRSA2048SHA256Hash(&calculated_metadata_hash);
1322  if (calculated_metadata_hash.empty()) {
1323    LOG(ERROR) << "Computed actual hash of metadata is empty.";
1324    return ErrorCode::kDownloadMetadataSignatureVerificationError;
1325  }
1326
1327  if (!metadata_signature_blob.empty()) {
1328    brillo::Blob expected_metadata_hash;
1329    if (!PayloadVerifier::GetRawHashFromSignature(metadata_signature_blob,
1330                                                  path_to_public_key.value(),
1331                                                  &expected_metadata_hash)) {
1332      LOG(ERROR) << "Unable to compute expected hash from metadata signature";
1333      return ErrorCode::kDownloadMetadataSignatureError;
1334    }
1335    if (calculated_metadata_hash != expected_metadata_hash) {
1336      LOG(ERROR) << "Manifest hash verification failed. Expected hash = ";
1337      utils::HexDumpVector(expected_metadata_hash);
1338      LOG(ERROR) << "Calculated hash = ";
1339      utils::HexDumpVector(calculated_metadata_hash);
1340      return ErrorCode::kDownloadMetadataSignatureMismatch;
1341    }
1342  } else {
1343    if (!PayloadVerifier::VerifySignature(metadata_signature_protobuf_blob,
1344                                          path_to_public_key.value(),
1345                                          calculated_metadata_hash)) {
1346      LOG(ERROR) << "Manifest hash verification failed.";
1347      return ErrorCode::kDownloadMetadataSignatureMismatch;
1348    }
1349  }
1350
1351  // The autoupdate_CatchBadSignatures test checks for this string in
1352  // log-files. Keep in sync.
1353  LOG(INFO) << "Metadata hash signature matches value in Omaha response.";
1354  return ErrorCode::kSuccess;
1355}
1356
1357ErrorCode DeltaPerformer::ValidateManifest() {
1358  // Perform assorted checks to sanity check the manifest, make sure it
1359  // matches data from other sources, and that it is a supported version.
1360  //
1361  // TODO(garnold) in general, the presence of an old partition hash should be
1362  // the sole indicator for a delta update, as we would generally like update
1363  // payloads to be self-contained and not rely on an Omaha response to tell us
1364  // that. However, since this requires some massive reengineering of the update
1365  // flow (making filesystem copying happen conditionally only *after*
1366  // downloading and parsing of the update manifest), we'll put it off for now.
1367  // See chromium-os:7597 for further discussion.
1368  if (install_plan_->is_full_update) {
1369    if (manifest_.has_old_kernel_info() || manifest_.has_old_rootfs_info()) {
1370      LOG(ERROR) << "Purported full payload contains old partition "
1371                    "hash(es), aborting update";
1372      return ErrorCode::kPayloadMismatchedType;
1373    }
1374
1375    for (const PartitionUpdate& partition : manifest_.partitions()) {
1376      if (partition.has_old_partition_info()) {
1377        LOG(ERROR) << "Purported full payload contains old partition "
1378                      "hash(es), aborting update";
1379        return ErrorCode::kPayloadMismatchedType;
1380      }
1381    }
1382
1383    if (manifest_.minor_version() != kFullPayloadMinorVersion) {
1384      LOG(ERROR) << "Manifest contains minor version "
1385                 << manifest_.minor_version()
1386                 << ", but all full payloads should have version "
1387                 << kFullPayloadMinorVersion << ".";
1388      return ErrorCode::kUnsupportedMinorPayloadVersion;
1389    }
1390  } else {
1391    if (manifest_.minor_version() != supported_minor_version_) {
1392      LOG(ERROR) << "Manifest contains minor version "
1393                 << manifest_.minor_version()
1394                 << ", but the only supported minor version is "
1395                 << supported_minor_version_ << ".";
1396      return ErrorCode::kUnsupportedMinorPayloadVersion;
1397    }
1398  }
1399
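      // Payloads newer than major version 1 keep all per-partition data in the
      // repeated |partitions| field; the legacy top-level kernel/rootfs info
      // and operation lists must not appear.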
1400  if (major_payload_version_ != kChromeOSMajorPayloadVersion) {
1401    if (manifest_.has_old_rootfs_info() ||
1402        manifest_.has_new_rootfs_info() ||
1403        manifest_.has_old_kernel_info() ||
1404        manifest_.has_new_kernel_info() ||
1405        manifest_.install_operations_size() != 0 ||
1406        manifest_.kernel_install_operations_size() != 0) {
1407      LOG(ERROR) << "Manifest contains deprecated fields only supported in "
1408                 << "major payload version 1, but the payload major version is "
1409                 << major_payload_version_;
1410      return ErrorCode::kPayloadMismatchedType;
1411    }
1412  }
1413
1414  // TODO(garnold) we should be adding more and more manifest checks, such as
1415  // partition boundaries, etc. (see chromium-os:37661).
1416
1417  return ErrorCode::kSuccess;
1418}
1419
1420ErrorCode DeltaPerformer::ValidateOperationHash(
1421    const InstallOperation& operation) {
1422  if (!operation.data_sha256_hash().size()) {
1423    if (!operation.data_length()) {
1424      // Operations that do not have any data blob won't have any operation hash
1425      // either. So, these operations are always considered validated since the
1426      // metadata that contains all the non-data-blob portions of the operation
1427      // has already been validated. This is true for both HTTP and HTTPS cases.
1428      return ErrorCode::kSuccess;
1429    }
1430
1431    // No hash is present for an operation that has data blobs. This shouldn't
1432    // normally happen for any client that has this code, because the
1433    // corresponding update should have been produced with the operation
1434    // hashes. So if it happens, it means either we've turned operation hash
1435    // generation off in DeltaDiffGenerator or it's a regression of some sort.
1436    // One caveat though: The last operation is a dummy signature operation
1437    // that doesn't have a hash at the time the manifest is created. So we
1438    // should not complain about that operation. This operation can be
1439    // recognized by the fact that its offset is mentioned in the manifest.
1440    if (manifest_.signatures_offset() &&
1441        manifest_.signatures_offset() == operation.data_offset()) {
1442      LOG(INFO) << "Skipping hash verification for signature operation "
1443                << next_operation_num_ + 1;
1444    } else {
1445      if (install_plan_->hash_checks_mandatory) {
1446        LOG(ERROR) << "Missing mandatory operation hash for operation "
1447                   << next_operation_num_ + 1;
1448        return ErrorCode::kDownloadOperationHashMissingError;
1449      }
1450
1451      LOG(WARNING) << "Cannot validate operation " << next_operation_num_ + 1
1452                   << " as there's no operation hash in manifest";
1453    }
1454    return ErrorCode::kSuccess;
1455  }
1456
1457  brillo::Blob expected_op_hash;
1458  expected_op_hash.assign(operation.data_sha256_hash().data(),
1459                          (operation.data_sha256_hash().data() +
1460                           operation.data_sha256_hash().size()));
1461
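      // Hash exactly |data_length()| bytes of the buffered blob for this
      // operation and compare the digest against the manifest-provided hash.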
1462  HashCalculator operation_hasher;
1463  operation_hasher.Update(buffer_.data(), operation.data_length());
1464  if (!operation_hasher.Finalize()) {
1465    LOG(ERROR) << "Unable to compute actual hash of operation "
1466               << next_operation_num_;
1467    return ErrorCode::kDownloadOperationHashVerificationError;
1468  }
1469
1470  brillo::Blob calculated_op_hash = operation_hasher.raw_hash();
1471  if (calculated_op_hash != expected_op_hash) {
1472    LOG(ERROR) << "Hash verification failed for operation "
1473               << next_operation_num_ << ". Expected hash = ";
1474    utils::HexDumpVector(expected_op_hash);
1475    LOG(ERROR) << "Calculated hash over " << operation.data_length()
1476               << " bytes at offset: " << operation.data_offset() << " = ";
1477    utils::HexDumpVector(calculated_op_hash);
1478    return ErrorCode::kDownloadOperationHashMismatch;
1479  }
1480
1481  return ErrorCode::kSuccess;
1482}
1483
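    // Checks |_condition|; on failure logs the stringified condition and
    // returns |_retval| from the enclosing function. Typical use:
    //   TEST_AND_RETURN_VAL(ErrorCode::kPayloadHashMismatchError,
    //                       payload_hash_data == update_check_response_hash);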
1484#define TEST_AND_RETURN_VAL(_retval, _condition)                \
1485  do {                                                          \
1486    if (!(_condition)) {                                        \
1487      LOG(ERROR) << "VerifyPayload failure: " << #_condition;   \
1488      return _retval;                                           \
1489    }                                                           \
1490  } while (0)
1491
1492ErrorCode DeltaPerformer::VerifyPayload(
1493    const string& update_check_response_hash,
1494    const uint64_t update_check_response_size) {
1495
1496  // See if we should use the public RSA key in the Omaha response.
1497  base::FilePath path_to_public_key(public_key_path_);
1498  base::FilePath tmp_key;
1499  if (GetPublicKeyFromResponse(&tmp_key))
1500    path_to_public_key = tmp_key;
1501  ScopedPathUnlinker tmp_key_remover(tmp_key.value());
1502  if (tmp_key.empty())
1503    tmp_key_remover.set_should_remove(false);
1504
1505  LOG(INFO) << "Verifying payload using public key: "
1506            << path_to_public_key.value();
1507
1508  // Verifies the download size.
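      // The full download must account for the metadata, the (possibly
      // zero-length) metadata signature and every data blob consumed so far.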
1509  TEST_AND_RETURN_VAL(ErrorCode::kPayloadSizeMismatchError,
1510                      update_check_response_size ==
1511                      metadata_size_ + metadata_signature_size_ +
1512                      buffer_offset_);
1513
1514  // Verifies the payload hash.
1515  const string& payload_hash_data = payload_hash_calculator_.hash();
1516  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadVerificationError,
1517                      !payload_hash_data.empty());
1518  TEST_AND_RETURN_VAL(ErrorCode::kPayloadHashMismatchError,
1519                      payload_hash_data == update_check_response_hash);
1520
1521  // Verifies the signed payload hash.
1522  if (!utils::FileExists(path_to_public_key.value().c_str())) {
1523    LOG(WARNING) << "Not verifying signed delta payload -- missing public key.";
1524    return ErrorCode::kSuccess;
1525  }
1526  TEST_AND_RETURN_VAL(ErrorCode::kSignedDeltaPayloadExpectedError,
1527                      !signatures_message_data_.empty());
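      // |signed_hash_calculator_| covers the portion of the payload that the
      // payload signature signs, i.e. everything up to the signature blob.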
1528  brillo::Blob hash_data = signed_hash_calculator_.raw_hash();
1529  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
1530                      PayloadVerifier::PadRSA2048SHA256Hash(&hash_data));
1531  TEST_AND_RETURN_VAL(ErrorCode::kDownloadPayloadPubKeyVerificationError,
1532                      !hash_data.empty());
1533
1534  if (!PayloadVerifier::VerifySignature(
1535      signatures_message_data_, path_to_public_key.value(), hash_data)) {
1536    // The autoupdate_CatchBadSignatures test checks for this string
1537    // in log-files. Keep in sync.
1538    LOG(ERROR) << "Public key verification failed, thus update failed.";
1539    return ErrorCode::kDownloadPayloadPubKeyVerificationError;
1540  }
1541
1542  LOG(INFO) << "Payload hash matches value in payload.";
1543
1544  // At this point, we are guaranteed to have downloaded a full payload, i.e.
1545  // one whose size matches the size mentioned in the Omaha response. If any
1546  // errors happen after this, it's likely a problem with the payload itself or
1547  // the state of the system and not a problem with the URL or network. So,
1548  // indicate that to the download delegate so that AU can back off
1549  // appropriately.
1550  if (download_delegate_)
1551    download_delegate_->DownloadComplete();
1552
1553  return ErrorCode::kSuccess;
1554}
1555
1556namespace {
1557void LogVerifyError(const string& type,
1558                    const string& device,
1559                    uint64_t size,
1560                    const string& local_hash,
1561                    const string& expected_hash) {
1562  LOG(ERROR) << "This is a server-side error due to "
1563             << "mismatched delta update image!";
1564  LOG(ERROR) << "The delta I've been given contains a " << type << " delta "
1565             << "update that must be applied over a " << type << " with "
1566             << "a specific checksum, but the " << type << " we're starting "
1567             << "with doesn't have that checksum! This means that "
1568             << "the delta I've been given doesn't match my existing "
1569             << "system. The " << type << " partition I have has hash: "
1570             << local_hash << " but the update expected me to have "
1571             << expected_hash << " .";
1572  LOG(INFO) << "To get the checksum of the " << type << " partition run this "
1573               "command: dd if=" << device << " bs=1M count=" << size
1574            << " iflag=count_bytes 2>/dev/null | openssl dgst -sha256 -binary "
1575               "| openssl base64";
1576  LOG(INFO) << "To get the checksum of partitions in a bin file, "
1577            << "run: .../src/scripts/sha256_partitions.sh .../file.bin";
1578}
1579
1580string StringForHashBytes(const void* bytes, size_t size) {
1581  return brillo::data_encoding::Base64Encode(bytes, size);
1582}
1583}  // namespace
1584
1585bool DeltaPerformer::VerifySourcePartitions() {
1586  LOG(INFO) << "Verifying source partitions.";
1587  CHECK(manifest_valid_);
1588  CHECK(install_plan_);
1589  if (install_plan_->partitions.size() != partitions_.size()) {
1590    DLOG(ERROR) << "The list of partitions in the InstallPlan doesn't match the "
1591                   "list received in the payload. The InstallPlan has "
1592                << install_plan_->partitions.size()
1593                << " partitions while the payload has " << partitions_.size()
1594                << " partitions.";
1595    return false;
1596  }
1597  for (size_t i = 0; i < partitions_.size(); ++i) {
1598    if (partitions_[i].partition_name() != install_plan_->partitions[i].name) {
1599      DLOG(ERROR) << "The InstallPlan's partition " << i << " is \""
1600                  << install_plan_->partitions[i].name
1601                  << "\" but the payload expects it to be \""
1602                  << partitions_[i].partition_name()
1603                  << "\". This is an error in the DeltaPerformer setup.";
1604      return false;
1605    }
1606    if (!partitions_[i].has_old_partition_info())
1607      continue;
1608    const PartitionInfo& info = partitions_[i].old_partition_info();
1609    const InstallPlan::Partition& plan_part = install_plan_->partitions[i];
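        // The source partition is acceptable only if the install plan supplied
        // a hash of the same length that matches old_partition_info byte for
        // byte.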
1610    bool valid =
1611        !plan_part.source_hash.empty() &&
1612        plan_part.source_hash.size() == info.hash().size() &&
1613        memcmp(plan_part.source_hash.data(),
1614               info.hash().data(),
1615               plan_part.source_hash.size()) == 0;
1616    if (!valid) {
1617      LogVerifyError(partitions_[i].partition_name(),
1618                     plan_part.source_path,
1619                     info.hash().size(),
1620                     StringForHashBytes(plan_part.source_hash.data(),
1621                                        plan_part.source_hash.size()),
1622                     StringForHashBytes(info.hash().data(),
1623                                        info.hash().size()));
1624      return false;
1625    }
1626  }
1627  return true;
1628}
1629
1630void DeltaPerformer::DiscardBuffer(bool do_advance_offset,
1631                                   size_t signed_hash_buffer_size) {
1632  // Update the buffer offset.
1633  if (do_advance_offset)
1634    buffer_offset_ += buffer_.size();
1635
1636  // Hash the content.
1637  payload_hash_calculator_.Update(buffer_.data(), buffer_.size());
1638  signed_hash_calculator_.Update(buffer_.data(), signed_hash_buffer_size);
1639
1640  // Swap content with an empty vector to ensure that all memory is released.
1641  brillo::Blob().swap(buffer_);
1642}
1643
1644bool DeltaPerformer::CanResumeUpdate(PrefsInterface* prefs,
1645                                     string update_check_response_hash) {
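      // A resume is allowed only if the prefs record a valid in-progress
      // operation for this same payload (matching update check hash) and the
      // persisted offset, hash context and metadata size look sane.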
1646  int64_t next_operation = kUpdateStateOperationInvalid;
1647  if (!(prefs->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) &&
1648        next_operation != kUpdateStateOperationInvalid &&
1649        next_operation > 0))
1650    return false;
1651
1652  string interrupted_hash;
1653  if (!(prefs->GetString(kPrefsUpdateCheckResponseHash, &interrupted_hash) &&
1654        !interrupted_hash.empty() &&
1655        interrupted_hash == update_check_response_hash))
1656    return false;
1657
1658  int64_t resumed_update_failures;
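      // Storing the resumed-failure count is optional, but when present it
      // must not exceed the limit.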
1659  if (prefs->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures)
1660      && resumed_update_failures > kMaxResumedUpdateFailures)
1661    return false;
1662
1663  // Sanity check the rest.
1664  int64_t next_data_offset = -1;
1665  if (!(prefs->GetInt64(kPrefsUpdateStateNextDataOffset, &next_data_offset) &&
1666        next_data_offset >= 0))
1667    return false;
1668
1669  string sha256_context;
1670  if (!(prefs->GetString(kPrefsUpdateStateSHA256Context, &sha256_context) &&
1671        !sha256_context.empty()))
1672    return false;
1673
1674  int64_t manifest_metadata_size = 0;
1675  if (!(prefs->GetInt64(kPrefsManifestMetadataSize, &manifest_metadata_size) &&
1676        manifest_metadata_size > 0))
1677    return false;
1678
1679  return true;
1680}
1681
1682bool DeltaPerformer::ResetUpdateProgress(PrefsInterface* prefs, bool quick) {
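      // The next-operation marker is always invalidated; a full (non-quick)
      // reset also clears the hash contexts, offsets, signature blob and
      // failure count persisted for resuming.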
1683  TEST_AND_RETURN_FALSE(prefs->SetInt64(kPrefsUpdateStateNextOperation,
1684                                        kUpdateStateOperationInvalid));
1685  if (!quick) {
1686    prefs->SetString(kPrefsUpdateCheckResponseHash, "");
1687    prefs->SetInt64(kPrefsUpdateStateNextDataOffset, -1);
1688    prefs->SetInt64(kPrefsUpdateStateNextDataLength, 0);
1689    prefs->SetString(kPrefsUpdateStateSHA256Context, "");
1690    prefs->SetString(kPrefsUpdateStateSignedSHA256Context, "");
1691    prefs->SetString(kPrefsUpdateStateSignatureBlob, "");
1692    prefs->SetInt64(kPrefsManifestMetadataSize, -1);
1693    prefs->SetInt64(kPrefsResumedUpdateFailures, 0);
1694  }
1695  return true;
1696}
1697
1698bool DeltaPerformer::CheckpointUpdateProgress() {
1699  Terminator::set_exit_blocked(true);
1700  if (last_updated_buffer_offset_ != buffer_offset_) {
1701    // Resets the progress in case we die in the middle of the state update.
1702    ResetUpdateProgress(prefs_, true);
1703    TEST_AND_RETURN_FALSE(
1704        prefs_->SetString(kPrefsUpdateStateSHA256Context,
1705                          payload_hash_calculator_.GetContext()));
1706    TEST_AND_RETURN_FALSE(
1707        prefs_->SetString(kPrefsUpdateStateSignedSHA256Context,
1708                          signed_hash_calculator_.GetContext()));
1709    TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataOffset,
1710                                           buffer_offset_));
1711    last_updated_buffer_offset_ = buffer_offset_;
1712
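        // Map the global operation index to its partition and the operation's
        // index within that partition using the accumulated per-partition
        // operation counts, then persist that operation's data length.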
1713    if (next_operation_num_ < num_total_operations_) {
1714      size_t partition_index = current_partition_;
1715      while (next_operation_num_ >= acc_num_operations_[partition_index])
1716        partition_index++;
1717      const size_t partition_operation_num = next_operation_num_ - (
1718          partition_index ? acc_num_operations_[partition_index - 1] : 0);
1719      const InstallOperation& op =
1720          partitions_[partition_index].operations(partition_operation_num);
1721      TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataLength,
1722                                             op.data_length()));
1723    } else {
1724      TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextDataLength,
1725                                             0));
1726    }
1727  }
1728  TEST_AND_RETURN_FALSE(prefs_->SetInt64(kPrefsUpdateStateNextOperation,
1729                                         next_operation_num_));
1730  return true;
1731}
1732
1733bool DeltaPerformer::PrimeUpdateState() {
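      // Restore any persisted resume state (next operation, data offset, hash
      // contexts, signature blob and metadata size) so an interrupted update
      // can continue where it left off.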
1734  CHECK(manifest_valid_);
1735  block_size_ = manifest_.block_size();
1736
1737  int64_t next_operation = kUpdateStateOperationInvalid;
1738  if (!prefs_->GetInt64(kPrefsUpdateStateNextOperation, &next_operation) ||
1739      next_operation == kUpdateStateOperationInvalid ||
1740      next_operation <= 0) {
1741    // Initiating a new update, no more state needs to be initialized.
1742    return true;
1743  }
1744  next_operation_num_ = next_operation;
1745
1746  // Resuming an update -- load the rest of the update state.
1747  int64_t next_data_offset = -1;
1748  TEST_AND_RETURN_FALSE(prefs_->GetInt64(kPrefsUpdateStateNextDataOffset,
1749                                         &next_data_offset) &&
1750                        next_data_offset >= 0);
1751  buffer_offset_ = next_data_offset;
1752
1753  // The signed hash context and the signature blob may be empty if the
1754  // interrupted update didn't reach the signature.
1755  string signed_hash_context;
1756  if (prefs_->GetString(kPrefsUpdateStateSignedSHA256Context,
1757                        &signed_hash_context)) {
1758    TEST_AND_RETURN_FALSE(
1759        signed_hash_calculator_.SetContext(signed_hash_context));
1760  }
1761
1762  string signature_blob;
1763  if (prefs_->GetString(kPrefsUpdateStateSignatureBlob, &signature_blob)) {
1764    signatures_message_data_.assign(signature_blob.begin(),
1765                                    signature_blob.end());
1766  }
1767
1768  string hash_context;
1769  TEST_AND_RETURN_FALSE(prefs_->GetString(kPrefsUpdateStateSHA256Context,
1770                                          &hash_context) &&
1771                        payload_hash_calculator_.SetContext(hash_context));
1772
1773  int64_t manifest_metadata_size = 0;
1774  TEST_AND_RETURN_FALSE(prefs_->GetInt64(kPrefsManifestMetadataSize,
1775                                         &manifest_metadata_size) &&
1776                        manifest_metadata_size > 0);
1777  metadata_size_ = manifest_metadata_size;
1778
1779  // Advance the download progress to reflect what doesn't need to be
1780  // re-downloaded.
1781  total_bytes_received_ += buffer_offset_;
1782
1783  // Speculatively count the resume as a failure.
1784  int64_t resumed_update_failures;
1785  if (prefs_->GetInt64(kPrefsResumedUpdateFailures, &resumed_update_failures)) {
1786    resumed_update_failures++;
1787  } else {
1788    resumed_update_failures = 1;
1789  }
1790  prefs_->SetInt64(kPrefsResumedUpdateFailures, resumed_update_failures);
1791  return true;
1792}
1793
1794}  // namespace chromeos_update_engine
1795