delta_diff_generator.cc revision b8ccad0a8ac2632da2788b47d85e62e334bbe652
1// Copyright (c) 2012 The Chromium OS Authors. All rights reserved. 2// Use of this source code is governed by a BSD-style license that can be 3// found in the LICENSE file. 4 5#include "update_engine/payload_generator/delta_diff_generator.h" 6 7#include <errno.h> 8#include <fcntl.h> 9#include <inttypes.h> 10#include <sys/stat.h> 11#include <sys/types.h> 12 13#include <algorithm> 14#include <map> 15#include <memory> 16#include <set> 17#include <string> 18#include <utility> 19#include <vector> 20 21#include <base/files/file_path.h> 22#include <base/files/file_util.h> 23#include <base/logging.h> 24#include <base/strings/stringprintf.h> 25#include <base/strings/string_number_conversions.h> 26#include <base/strings/string_util.h> 27#include <bzlib.h> 28 29#include "update_engine/bzip.h" 30#include "update_engine/delta_performer.h" 31#include "update_engine/extent_ranges.h" 32#include "update_engine/file_writer.h" 33#include "update_engine/omaha_hash_calculator.h" 34#include "update_engine/payload_constants.h" 35#include "update_engine/payload_generator/cycle_breaker.h" 36#include "update_engine/payload_generator/extent_mapper.h" 37#include "update_engine/payload_generator/filesystem_iterator.h" 38#include "update_engine/payload_generator/full_update_generator.h" 39#include "update_engine/payload_generator/graph_types.h" 40#include "update_engine/payload_generator/graph_utils.h" 41#include "update_engine/payload_generator/metadata.h" 42#include "update_engine/payload_generator/payload_signer.h" 43#include "update_engine/payload_generator/topological_sort.h" 44#include "update_engine/payload_verifier.h" 45#include "update_engine/subprocess.h" 46#include "update_engine/update_metadata.pb.h" 47#include "update_engine/utils.h" 48 49using std::make_pair; 50using std::map; 51using std::max; 52using std::min; 53using std::pair; 54using std::set; 55using std::string; 56using std::unique_ptr; 57using std::vector; 58 59namespace { 60 61const uint64_t kVersionNumber = 1; 62const uint64_t kFullUpdateChunkSize = 1024 * 1024; // bytes 63 64const size_t kBlockSize = 4096; // bytes 65const char kEmptyPath[] = ""; 66 67// The maximum destination size allowed for bsdiff. In general, bsdiff should 68// work for arbitrary big files, but the payload generation and payload 69// application requires a significant amount of RAM. We put a hard-limit of 70// 200 MiB that should not affect any released board, but will limit the 71// Chrome binary in ASan builders. 72const off_t kMaxBsdiffDestinationSize = 200 * 1024 * 1024; // bytes 73 74static const char* kInstallOperationTypes[] = { 75 "REPLACE", 76 "REPLACE_BZ", 77 "MOVE", 78 "BSDIFF" 79}; 80 81} // namespace 82 83namespace chromeos_update_engine { 84 85typedef DeltaDiffGenerator::Block Block; 86typedef map<const DeltaArchiveManifest_InstallOperation*, 87 string> OperationNameMap; 88 89// bytes 90const size_t kRootFSPartitionSize = static_cast<size_t>(2) * 1024 * 1024 * 1024; 91 92// Needed for testing purposes, in case we can't use actual filesystem objects. 93// TODO(garnold) (chromium:331965) Replace this hack with a properly injected 94// parameter in form of a mockable abstract class. 95bool (*get_extents_with_chunk_func)(const string&, off_t, off_t, 96 vector<Extent>*) = 97 extent_mapper::ExtentsForFileChunkFibmap; 98 99namespace { 100 101// Stores all the extents of |path| into |extents|. Returns true on success. 
102bool GatherExtents(const string& path, 103 off_t chunk_offset, 104 off_t chunk_size, 105 vector<Extent>* extents) { 106 extents->clear(); 107 TEST_AND_RETURN_FALSE( 108 get_extents_with_chunk_func( 109 path, chunk_offset, chunk_size, extents)); 110 return true; 111} 112 113// For a given regular file which must exist at new_root + path, and 114// may exist at old_root + path, creates a new InstallOperation and 115// adds it to the graph. Also, populates the |blocks| array as 116// necessary, if |blocks| is non-null. Also, writes the data 117// necessary to send the file down to the client into data_fd, which 118// has length *data_file_size. *data_file_size is updated 119// appropriately. If |existing_vertex| is no kInvalidIndex, use that 120// rather than allocating a new vertex. Returns true on success. 121bool DeltaReadFile(Graph* graph, 122 Vertex::Index existing_vertex, 123 vector<Block>* blocks, 124 const string& old_root, 125 const string& new_root, 126 const string& path, // within new_root 127 off_t chunk_offset, 128 off_t chunk_size, 129 int data_fd, 130 off_t* data_file_size) { 131 vector<char> data; 132 DeltaArchiveManifest_InstallOperation operation; 133 134 string old_path = (old_root == kEmptyPath) ? kEmptyPath : 135 old_root + path; 136 137 // If bsdiff breaks again, blacklist the problem file by using: 138 // bsdiff_allowed = (path != "/foo/bar") 139 // 140 // TODO(dgarrett): chromium-os:15274 connect this test to the command line. 141 bool bsdiff_allowed = true; 142 143 if (utils::FileSize(new_root + path) > kMaxBsdiffDestinationSize) 144 bsdiff_allowed = false; 145 146 if (!bsdiff_allowed) 147 LOG(INFO) << "bsdiff blacklisting: " << path; 148 149 TEST_AND_RETURN_FALSE(DeltaDiffGenerator::ReadFileToDiff(old_path, 150 new_root + path, 151 chunk_offset, 152 chunk_size, 153 bsdiff_allowed, 154 &data, 155 &operation, 156 true)); 157 158 // Check if the operation writes nothing. 159 if (operation.dst_extents_size() == 0) { 160 if (operation.type() == DeltaArchiveManifest_InstallOperation_Type_MOVE) { 161 LOG(INFO) << "Empty MOVE operation (" << new_root + path << "), skipping"; 162 return true; 163 } else { 164 LOG(ERROR) << "Empty non-MOVE operation"; 165 return false; 166 } 167 } 168 169 // Write the data 170 if (operation.type() != DeltaArchiveManifest_InstallOperation_Type_MOVE) { 171 operation.set_data_offset(*data_file_size); 172 operation.set_data_length(data.size()); 173 } 174 175 TEST_AND_RETURN_FALSE(utils::WriteAll(data_fd, &data[0], data.size())); 176 *data_file_size += data.size(); 177 178 // Now, insert into graph and blocks vector 179 Vertex::Index vertex = existing_vertex; 180 if (vertex == Vertex::kInvalidIndex) { 181 graph->resize(graph->size() + 1); 182 vertex = graph->size() - 1; 183 } 184 (*graph)[vertex].op = operation; 185 CHECK((*graph)[vertex].op.has_type()); 186 (*graph)[vertex].file_name = path; 187 (*graph)[vertex].chunk_offset = chunk_offset; 188 (*graph)[vertex].chunk_size = chunk_size; 189 190 if (blocks) 191 TEST_AND_RETURN_FALSE(DeltaDiffGenerator::AddInstallOpToBlocksVector( 192 (*graph)[vertex].op, 193 *graph, 194 vertex, 195 blocks)); 196 return true; 197} 198 199// For each regular file within new_root, creates a node in the graph, 200// determines the best way to compress it (REPLACE, REPLACE_BZ, COPY, BSDIFF), 201// and writes any necessary data to the end of data_fd. 
202bool DeltaReadFiles(Graph* graph, 203 vector<Block>* blocks, 204 const string& old_root, 205 const string& new_root, 206 off_t chunk_size, 207 int data_fd, 208 off_t* data_file_size) { 209 set<ino_t> visited_inodes; 210 set<ino_t> visited_src_inodes; 211 for (FilesystemIterator fs_iter(new_root, 212 set<string>{"/lost+found"}); 213 !fs_iter.IsEnd(); fs_iter.Increment()) { 214 // We never diff symlinks (here, we check that dst file is not a symlink). 215 if (!S_ISREG(fs_iter.GetStat().st_mode)) 216 continue; 217 218 // Make sure we visit each inode only once. 219 if (utils::SetContainsKey(visited_inodes, fs_iter.GetStat().st_ino)) 220 continue; 221 visited_inodes.insert(fs_iter.GetStat().st_ino); 222 off_t dst_size = fs_iter.GetFileSize(); 223 if (dst_size == 0) 224 continue; 225 226 LOG(INFO) << "Encoding file " << fs_iter.GetPartialPath(); 227 228 // We can't visit each dst image inode more than once, as that would 229 // duplicate work. Here, we avoid visiting each source image inode 230 // more than once. Technically, we could have multiple operations 231 // that read the same blocks from the source image for diffing, but 232 // we choose not to avoid complexity. Eventually we will move away 233 // from using a graph/cycle detection/etc to generate diffs, and at that 234 // time, it will be easy (non-complex) to have many operations read 235 // from the same source blocks. At that time, this code can die. -adlr 236 bool should_diff_from_source = false; 237 string src_path = old_root + fs_iter.GetPartialPath(); 238 struct stat src_stbuf; 239 // We never diff symlinks (here, we check that src file is not a symlink). 240 if (0 == lstat(src_path.c_str(), &src_stbuf) && 241 S_ISREG(src_stbuf.st_mode)) { 242 should_diff_from_source = !utils::SetContainsKey(visited_src_inodes, 243 src_stbuf.st_ino); 244 visited_src_inodes.insert(src_stbuf.st_ino); 245 } 246 247 off_t size = chunk_size == -1 ? dst_size : chunk_size; 248 off_t step = size; 249 for (off_t offset = 0; offset < dst_size; offset += step) { 250 if (offset + size >= dst_size) { 251 size = -1; // Read through the end of the file. 252 } 253 TEST_AND_RETURN_FALSE(DeltaReadFile(graph, 254 Vertex::kInvalidIndex, 255 blocks, 256 (should_diff_from_source ? 257 old_root : 258 kEmptyPath), 259 new_root, 260 fs_iter.GetPartialPath(), 261 offset, 262 size, 263 data_fd, 264 data_file_size)); 265 } 266 } 267 return true; 268} 269 270// This class allocates non-existent temp blocks, starting from 271// kTempBlockStart. Other code is responsible for converting these 272// temp blocks into real blocks, as the client can't read or write to 273// these blocks. 274class DummyExtentAllocator { 275 public: 276 DummyExtentAllocator() : next_block_(kTempBlockStart) {} 277 vector<Extent> Allocate(const uint64_t block_count) { 278 vector<Extent> ret(1); 279 ret[0].set_start_block(next_block_); 280 ret[0].set_num_blocks(block_count); 281 next_block_ += block_count; 282 return ret; 283 } 284 private: 285 uint64_t next_block_; 286}; 287 288// Reads blocks from image_path that are not yet marked as being written in the 289// blocks array. These blocks that remain are either unchanged files or 290// non-file-data blocks. We compare each of them to the old image, and compress 291// the ones that changed into a single REPLACE_BZ operation. This updates a 292// newly created node in the graph to write these blocks and writes the 293// appropriate blob to blobs_fd. Reads and updates blobs_length. 
294bool ReadUnwrittenBlocks(const vector<Block>& blocks, 295 int blobs_fd, 296 off_t* blobs_length, 297 const string& old_image_path, 298 const string& new_image_path, 299 Vertex* vertex) { 300 vertex->file_name = "<rootfs-non-file-data>"; 301 302 DeltaArchiveManifest_InstallOperation* out_op = &vertex->op; 303 int new_image_fd = open(new_image_path.c_str(), O_RDONLY, 000); 304 TEST_AND_RETURN_FALSE_ERRNO(new_image_fd >= 0); 305 ScopedFdCloser new_image_fd_closer(&new_image_fd); 306 int old_image_fd = open(old_image_path.c_str(), O_RDONLY, 000); 307 TEST_AND_RETURN_FALSE_ERRNO(old_image_fd >= 0); 308 ScopedFdCloser old_image_fd_closer(&old_image_fd); 309 310 string temp_file_path; 311 TEST_AND_RETURN_FALSE(utils::MakeTempFile("CrAU_temp_data.XXXXXX", 312 &temp_file_path, 313 nullptr)); 314 315 FILE* file = fopen(temp_file_path.c_str(), "w"); 316 TEST_AND_RETURN_FALSE(file); 317 int err = BZ_OK; 318 319 BZFILE* bz_file = BZ2_bzWriteOpen(&err, 320 file, 321 9, // max compression 322 0, // verbosity 323 0); // default work factor 324 TEST_AND_RETURN_FALSE(err == BZ_OK); 325 326 vector<Extent> extents; 327 vector<Block>::size_type block_count = 0; 328 329 LOG(INFO) << "Appending unwritten blocks to extents"; 330 for (vector<Block>::size_type i = 0; i < blocks.size(); i++) { 331 if (blocks[i].writer != Vertex::kInvalidIndex) 332 continue; 333 graph_utils::AppendBlockToExtents(&extents, i); 334 block_count++; 335 } 336 337 // Code will handle buffers of any size that's a multiple of kBlockSize, 338 // so we arbitrarily set it to 1024 * kBlockSize. 339 vector<char> new_buf(1024 * kBlockSize); 340 vector<char> old_buf(1024 * kBlockSize); 341 342 LOG(INFO) << "Scanning " << block_count << " unwritten blocks"; 343 vector<Extent> changed_extents; 344 vector<Block>::size_type changed_block_count = 0; 345 vector<Block>::size_type blocks_copied_count = 0; 346 347 // For each extent in extents, write the unchanged blocks into BZ2_bzWrite, 348 // which sends it to an output file. We use the temporary buffers to hold the 349 // old and new data, which may be smaller than the extent, so in that case we 350 // have to loop to get the extent's data (that's the inner while loop). 351 for (const Extent& extent : extents) { 352 vector<Block>::size_type blocks_read = 0; 353 float printed_progress = -1; 354 while (blocks_read < extent.num_blocks()) { 355 const uint64_t copy_first_block = extent.start_block() + blocks_read; 356 const int copy_block_cnt = 357 min(new_buf.size() / kBlockSize, 358 static_cast<vector<char>::size_type>( 359 extent.num_blocks() - blocks_read)); 360 const size_t count = copy_block_cnt * kBlockSize; 361 const off_t offset = copy_first_block * kBlockSize; 362 ssize_t rc = pread(new_image_fd, &new_buf[0], count, offset); 363 TEST_AND_RETURN_FALSE_ERRNO(rc >= 0); 364 TEST_AND_RETURN_FALSE(static_cast<size_t>(rc) == count); 365 366 rc = pread(old_image_fd, &old_buf[0], count, offset); 367 TEST_AND_RETURN_FALSE_ERRNO(rc >= 0); 368 TEST_AND_RETURN_FALSE(static_cast<size_t>(rc) == count); 369 370 // Compare each block in the buffer to its counterpart in the old image 371 // and only compress it if its content has changed. 
372 int buf_offset = 0; 373 for (int i = 0; i < copy_block_cnt; ++i) { 374 int buf_end_offset = buf_offset + kBlockSize; 375 if (!std::equal(new_buf.begin() + buf_offset, 376 new_buf.begin() + buf_end_offset, 377 old_buf.begin() + buf_offset)) { 378 BZ2_bzWrite(&err, bz_file, &new_buf[buf_offset], kBlockSize); 379 TEST_AND_RETURN_FALSE(err == BZ_OK); 380 const uint64_t block_idx = copy_first_block + i; 381 if (blocks[block_idx].reader != Vertex::kInvalidIndex) { 382 graph_utils::AddReadBeforeDep(vertex, blocks[block_idx].reader, 383 block_idx); 384 } 385 graph_utils::AppendBlockToExtents(&changed_extents, block_idx); 386 changed_block_count++; 387 } 388 buf_offset = buf_end_offset; 389 } 390 391 blocks_read += copy_block_cnt; 392 blocks_copied_count += copy_block_cnt; 393 float current_progress = 394 static_cast<float>(blocks_copied_count) / block_count; 395 if (printed_progress + 0.1 < current_progress || 396 blocks_copied_count == block_count) { 397 LOG(INFO) << "progress: " << current_progress; 398 printed_progress = current_progress; 399 } 400 } 401 } 402 BZ2_bzWriteClose(&err, bz_file, 0, nullptr, nullptr); 403 TEST_AND_RETURN_FALSE(err == BZ_OK); 404 bz_file = nullptr; 405 TEST_AND_RETURN_FALSE_ERRNO(0 == fclose(file)); 406 file = nullptr; 407 408 LOG(INFO) << "Compressed " << changed_block_count << " blocks (" 409 << block_count - changed_block_count << " blocks unchanged)"; 410 vector<char> compressed_data; 411 if (changed_block_count > 0) { 412 LOG(INFO) << "Reading compressed data off disk"; 413 TEST_AND_RETURN_FALSE(utils::ReadFile(temp_file_path, &compressed_data)); 414 } 415 TEST_AND_RETURN_FALSE(unlink(temp_file_path.c_str()) == 0); 416 417 // Add node to graph to write these blocks 418 out_op->set_type(DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ); 419 out_op->set_data_offset(*blobs_length); 420 out_op->set_data_length(compressed_data.size()); 421 LOG(INFO) << "Rootfs non-data blocks compressed take up " 422 << compressed_data.size(); 423 *blobs_length += compressed_data.size(); 424 out_op->set_dst_length(kBlockSize * changed_block_count); 425 DeltaDiffGenerator::StoreExtents(changed_extents, 426 out_op->mutable_dst_extents()); 427 428 TEST_AND_RETURN_FALSE(utils::WriteAll(blobs_fd, 429 &compressed_data[0], 430 compressed_data.size())); 431 LOG(INFO) << "Done processing unwritten blocks"; 432 return true; 433} 434 435// Writes the uint64_t passed in in host-endian to the file as big-endian. 436// Returns true on success. 437bool WriteUint64AsBigEndian(FileWriter* writer, const uint64_t value) { 438 uint64_t value_be = htobe64(value); 439 TEST_AND_RETURN_FALSE(writer->Write(&value_be, sizeof(value_be))); 440 return true; 441} 442 443// Adds each operation from |graph| to |out_manifest| in the order specified by 444// |order| while building |out_op_name_map| with operation to name 445// mappings. Adds all |kernel_ops| to |out_manifest|. Filters out no-op 446// operations. 
447void InstallOperationsToManifest( 448 const Graph& graph, 449 const vector<Vertex::Index>& order, 450 const vector<DeltaArchiveManifest_InstallOperation>& kernel_ops, 451 DeltaArchiveManifest* out_manifest, 452 OperationNameMap* out_op_name_map) { 453 for (Vertex::Index vertex_index : order) { 454 const Vertex& vertex = graph[vertex_index]; 455 const DeltaArchiveManifest_InstallOperation& add_op = vertex.op; 456 if (DeltaDiffGenerator::IsNoopOperation(add_op)) { 457 continue; 458 } 459 DeltaArchiveManifest_InstallOperation* op = 460 out_manifest->add_install_operations(); 461 *op = add_op; 462 string name = vertex.file_name; 463 if (vertex.chunk_offset || vertex.chunk_size != -1) { 464 string offset = base::Int64ToString(vertex.chunk_offset); 465 if (vertex.chunk_size != -1) { 466 name += " [" + offset + ", " + 467 base::Int64ToString(vertex.chunk_offset + vertex.chunk_size - 1) + 468 "]"; 469 } else { 470 name += " [" + offset + ", end]"; 471 } 472 } 473 (*out_op_name_map)[op] = name; 474 } 475 for (vector<DeltaArchiveManifest_InstallOperation>::const_iterator it = 476 kernel_ops.begin(); it != kernel_ops.end(); ++it) { 477 const DeltaArchiveManifest_InstallOperation& add_op = *it; 478 if (DeltaDiffGenerator::IsNoopOperation(add_op)) { 479 continue; 480 } 481 DeltaArchiveManifest_InstallOperation* op = 482 out_manifest->add_kernel_install_operations(); 483 *op = add_op; 484 } 485} 486 487void CheckGraph(const Graph& graph) { 488 for (const Vertex& v : graph) { 489 CHECK(v.op.has_type()); 490 } 491} 492 493// Delta compresses a kernel partition |new_kernel_part| with knowledge of the 494// old kernel partition |old_kernel_part|. If |old_kernel_part| is an empty 495// string, generates a full update of the partition. 496bool DeltaCompressKernelPartition( 497 const string& old_kernel_part, 498 const string& new_kernel_part, 499 vector<DeltaArchiveManifest_InstallOperation>* ops, 500 int blobs_fd, 501 off_t* blobs_length) { 502 LOG(INFO) << "Delta compressing kernel partition..."; 503 LOG_IF(INFO, old_kernel_part.empty()) << "Generating full kernel update..."; 504 505 DeltaArchiveManifest_InstallOperation op; 506 vector<char> data; 507 TEST_AND_RETURN_FALSE( 508 DeltaDiffGenerator::ReadFileToDiff(old_kernel_part, 509 new_kernel_part, 510 0, // chunk_offset 511 -1, // chunk_size 512 true, // bsdiff_allowed 513 &data, 514 &op, 515 false)); 516 517 // Check if the operation writes nothing. 518 if (op.dst_extents_size() == 0) { 519 if (op.type() == DeltaArchiveManifest_InstallOperation_Type_MOVE) { 520 LOG(INFO) << "Empty MOVE operation, nothing to do."; 521 return true; 522 } else { 523 LOG(ERROR) << "Empty non-MOVE operation"; 524 return false; 525 } 526 } 527 528 // Write the data. 529 if (op.type() != DeltaArchiveManifest_InstallOperation_Type_MOVE) { 530 op.set_data_offset(*blobs_length); 531 op.set_data_length(data.size()); 532 } 533 534 // Add the new install operation. 535 ops->clear(); 536 ops->push_back(op); 537 538 TEST_AND_RETURN_FALSE(utils::WriteAll(blobs_fd, &data[0], data.size())); 539 *blobs_length += data.size(); 540 541 LOG(INFO) << "Done delta compressing kernel partition: " 542 << kInstallOperationTypes[op.type()]; 543 return true; 544} 545 546struct DeltaObject { 547 DeltaObject(const string& in_name, const int in_type, const off_t in_size) 548 : name(in_name), 549 type(in_type), 550 size(in_size) {} 551 bool operator <(const DeltaObject& object) const { 552 return (size != object.size) ? 
(size < object.size) : (name < object.name); 553 } 554 string name; 555 int type; 556 off_t size; 557}; 558 559void ReportPayloadUsage(const DeltaArchiveManifest& manifest, 560 const int64_t manifest_metadata_size, 561 const OperationNameMap& op_name_map) { 562 vector<DeltaObject> objects; 563 off_t total_size = 0; 564 565 // Rootfs install operations. 566 for (int i = 0; i < manifest.install_operations_size(); ++i) { 567 const DeltaArchiveManifest_InstallOperation& op = 568 manifest.install_operations(i); 569 objects.push_back(DeltaObject(op_name_map.find(&op)->second, 570 op.type(), 571 op.data_length())); 572 total_size += op.data_length(); 573 } 574 575 // Kernel install operations. 576 for (int i = 0; i < manifest.kernel_install_operations_size(); ++i) { 577 const DeltaArchiveManifest_InstallOperation& op = 578 manifest.kernel_install_operations(i); 579 objects.push_back(DeltaObject(base::StringPrintf("<kernel-operation-%d>", 580 i), 581 op.type(), 582 op.data_length())); 583 total_size += op.data_length(); 584 } 585 586 objects.push_back(DeltaObject("<manifest-metadata>", 587 -1, 588 manifest_metadata_size)); 589 total_size += manifest_metadata_size; 590 591 std::sort(objects.begin(), objects.end()); 592 593 static const char kFormatString[] = "%6.2f%% %10jd %-10s %s\n"; 594 for (const DeltaObject& object : objects) { 595 fprintf(stderr, kFormatString, 596 object.size * 100.0 / total_size, 597 static_cast<intmax_t>(object.size), 598 object.type >= 0 ? kInstallOperationTypes[object.type] : "-", 599 object.name.c_str()); 600 } 601 fprintf(stderr, kFormatString, 602 100.0, static_cast<intmax_t>(total_size), "", "<total>"); 603} 604 605// Process a range of blocks from |range_start| to |range_end| in the extent at 606// position |*idx_p| of |extents|. If |do_remove| is true, this range will be 607// removed, which may cause the extent to be trimmed, split or removed entirely. 608// The value of |*idx_p| is updated to point to the next extent to be processed. 609// Returns true iff the next extent to process is a new or updated one. 610bool ProcessExtentBlockRange(vector<Extent>* extents, size_t* idx_p, 611 const bool do_remove, uint64_t range_start, 612 uint64_t range_end) { 613 size_t idx = *idx_p; 614 uint64_t start_block = (*extents)[idx].start_block(); 615 uint64_t num_blocks = (*extents)[idx].num_blocks(); 616 uint64_t range_size = range_end - range_start; 617 618 if (do_remove) { 619 if (range_size == num_blocks) { 620 // Remove the entire extent. 621 extents->erase(extents->begin() + idx); 622 } else if (range_end == num_blocks) { 623 // Trim the end of the extent. 624 (*extents)[idx].set_num_blocks(num_blocks - range_size); 625 idx++; 626 } else if (range_start == 0) { 627 // Trim the head of the extent. 628 (*extents)[idx].set_start_block(start_block + range_size); 629 (*extents)[idx].set_num_blocks(num_blocks - range_size); 630 } else { 631 // Trim the middle, splitting the remainder into two parts. 632 (*extents)[idx].set_num_blocks(range_start); 633 Extent e; 634 e.set_start_block(start_block + range_end); 635 e.set_num_blocks(num_blocks - range_end); 636 idx++; 637 extents->insert(extents->begin() + idx, e); 638 } 639 } else if (range_end == num_blocks) { 640 // Done with this extent. 641 idx++; 642 } else { 643 return false; 644 } 645 646 *idx_p = idx; 647 return true; 648} 649 650// Remove identical corresponding block ranges in |src_extents| and 651// |dst_extents|. Used for preventing moving of blocks onto themselves during 652// MOVE operations. 
The value of |total_bytes| indicates the actual length of 653// content; this may be slightly less than the total size of blocks, in which 654// case the last block is only partly occupied with data. Returns the total 655// number of bytes removed. 656size_t RemoveIdenticalBlockRanges(vector<Extent>* src_extents, 657 vector<Extent>* dst_extents, 658 const size_t total_bytes) { 659 size_t src_idx = 0; 660 size_t dst_idx = 0; 661 uint64_t src_offset = 0, dst_offset = 0; 662 bool new_src = true, new_dst = true; 663 size_t removed_bytes = 0, nonfull_block_bytes; 664 bool do_remove = false; 665 while (src_idx < src_extents->size() && dst_idx < dst_extents->size()) { 666 if (new_src) { 667 src_offset = 0; 668 new_src = false; 669 } 670 if (new_dst) { 671 dst_offset = 0; 672 new_dst = false; 673 } 674 675 do_remove = ((*src_extents)[src_idx].start_block() + src_offset == 676 (*dst_extents)[dst_idx].start_block() + dst_offset); 677 678 uint64_t src_num_blocks = (*src_extents)[src_idx].num_blocks(); 679 uint64_t dst_num_blocks = (*dst_extents)[dst_idx].num_blocks(); 680 uint64_t min_num_blocks = min(src_num_blocks - src_offset, 681 dst_num_blocks - dst_offset); 682 uint64_t prev_src_offset = src_offset; 683 uint64_t prev_dst_offset = dst_offset; 684 src_offset += min_num_blocks; 685 dst_offset += min_num_blocks; 686 687 new_src = ProcessExtentBlockRange(src_extents, &src_idx, do_remove, 688 prev_src_offset, src_offset); 689 new_dst = ProcessExtentBlockRange(dst_extents, &dst_idx, do_remove, 690 prev_dst_offset, dst_offset); 691 if (do_remove) 692 removed_bytes += min_num_blocks * kBlockSize; 693 } 694 695 // If we removed the last block and this block is only partly used by file 696 // content, deduct the unused portion from the total removed byte count. 697 if (do_remove && (nonfull_block_bytes = total_bytes % kBlockSize)) 698 removed_bytes -= kBlockSize - nonfull_block_bytes; 699 700 return removed_bytes; 701} 702 703} // namespace 704 705bool DeltaDiffGenerator::ReadFileToDiff( 706 const string& old_filename, 707 const string& new_filename, 708 off_t chunk_offset, 709 off_t chunk_size, 710 bool bsdiff_allowed, 711 vector<char>* out_data, 712 DeltaArchiveManifest_InstallOperation* out_op, 713 bool gather_extents) { 714 // Read new data in 715 vector<char> new_data; 716 TEST_AND_RETURN_FALSE( 717 utils::ReadFileChunk(new_filename, chunk_offset, chunk_size, &new_data)); 718 719 TEST_AND_RETURN_FALSE(!new_data.empty()); 720 TEST_AND_RETURN_FALSE(chunk_size == -1 || 721 static_cast<off_t>(new_data.size()) <= chunk_size); 722 723 vector<char> new_data_bz; 724 TEST_AND_RETURN_FALSE(BzipCompress(new_data, &new_data_bz)); 725 CHECK(!new_data_bz.empty()); 726 727 vector<char> data; // Data blob that will be written to delta file. 728 729 DeltaArchiveManifest_InstallOperation operation; 730 size_t current_best_size = 0; 731 if (new_data.size() <= new_data_bz.size()) { 732 operation.set_type(DeltaArchiveManifest_InstallOperation_Type_REPLACE); 733 current_best_size = new_data.size(); 734 data = new_data; 735 } else { 736 operation.set_type(DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ); 737 current_best_size = new_data_bz.size(); 738 data = new_data_bz; 739 } 740 741 // Do we have an original file to consider? 742 off_t old_size = 0; 743 bool original = !old_filename.empty(); 744 if (original && (old_size = utils::FileSize(old_filename)) < 0) { 745 // If stat-ing the old file fails, it should be because it doesn't exist. 
746 TEST_AND_RETURN_FALSE(!utils::FileExists(old_filename.c_str())); 747 original = false; 748 } 749 750 vector<char> old_data; 751 if (original) { 752 // Read old data 753 TEST_AND_RETURN_FALSE( 754 utils::ReadFileChunk( 755 old_filename, chunk_offset, chunk_size, &old_data)); 756 if (old_data == new_data) { 757 // No change in data. 758 operation.set_type(DeltaArchiveManifest_InstallOperation_Type_MOVE); 759 current_best_size = 0; 760 data.clear(); 761 } else if (!old_data.empty() && bsdiff_allowed) { 762 // If the source file is considered bsdiff safe (no bsdiff bugs 763 // triggered), see if BSDIFF encoding is smaller. 764 base::FilePath old_chunk; 765 TEST_AND_RETURN_FALSE(base::CreateTemporaryFile(&old_chunk)); 766 ScopedPathUnlinker old_unlinker(old_chunk.value()); 767 TEST_AND_RETURN_FALSE( 768 utils::WriteFile(old_chunk.value().c_str(), 769 &old_data[0], old_data.size())); 770 base::FilePath new_chunk; 771 TEST_AND_RETURN_FALSE(base::CreateTemporaryFile(&new_chunk)); 772 ScopedPathUnlinker new_unlinker(new_chunk.value()); 773 TEST_AND_RETURN_FALSE( 774 utils::WriteFile(new_chunk.value().c_str(), 775 &new_data[0], new_data.size())); 776 777 vector<char> bsdiff_delta; 778 TEST_AND_RETURN_FALSE( 779 BsdiffFiles(old_chunk.value(), new_chunk.value(), &bsdiff_delta)); 780 CHECK_GT(bsdiff_delta.size(), static_cast<vector<char>::size_type>(0)); 781 if (bsdiff_delta.size() < current_best_size) { 782 operation.set_type(DeltaArchiveManifest_InstallOperation_Type_BSDIFF); 783 current_best_size = bsdiff_delta.size(); 784 data = bsdiff_delta; 785 } 786 } 787 } 788 789 // Set parameters of the operations 790 CHECK_EQ(data.size(), current_best_size); 791 792 vector<Extent> src_extents, dst_extents; 793 if (operation.type() == DeltaArchiveManifest_InstallOperation_Type_MOVE || 794 operation.type() == DeltaArchiveManifest_InstallOperation_Type_BSDIFF) { 795 if (gather_extents) { 796 TEST_AND_RETURN_FALSE( 797 GatherExtents(old_filename, 798 chunk_offset, 799 chunk_size, 800 &src_extents)); 801 } else { 802 Extent* src_extent = operation.add_src_extents(); 803 src_extent->set_start_block(0); 804 src_extent->set_num_blocks((old_size + kBlockSize - 1) / kBlockSize); 805 } 806 operation.set_src_length(old_data.size()); 807 } 808 809 if (gather_extents) { 810 TEST_AND_RETURN_FALSE( 811 GatherExtents(new_filename, 812 chunk_offset, 813 chunk_size, 814 &dst_extents)); 815 } else { 816 Extent* dst_extent = operation.add_dst_extents(); 817 dst_extent->set_start_block(0); 818 dst_extent->set_num_blocks((new_data.size() + kBlockSize - 1) / kBlockSize); 819 } 820 operation.set_dst_length(new_data.size()); 821 822 if (gather_extents) { 823 // Remove identical src/dst block ranges in MOVE operations. 824 if (operation.type() == DeltaArchiveManifest_InstallOperation_Type_MOVE) { 825 size_t removed_bytes = RemoveIdenticalBlockRanges( 826 &src_extents, &dst_extents, new_data.size()); 827 828 // Adjust the file length field accordingly. 829 if (removed_bytes) { 830 operation.set_src_length(old_data.size() - removed_bytes); 831 operation.set_dst_length(new_data.size() - removed_bytes); 832 } 833 } 834 835 // Embed extents in the operation. 
836 DeltaDiffGenerator::StoreExtents(src_extents, 837 operation.mutable_src_extents()); 838 DeltaDiffGenerator::StoreExtents(dst_extents, 839 operation.mutable_dst_extents()); 840 } 841 842 out_data->swap(data); 843 *out_op = operation; 844 845 return true; 846} 847 848bool DeltaDiffGenerator::InitializePartitionInfo(bool is_kernel, 849 const string& partition, 850 PartitionInfo* info) { 851 int64_t size = 0; 852 if (is_kernel) { 853 size = utils::FileSize(partition); 854 } else { 855 int block_count = 0, block_size = 0; 856 TEST_AND_RETURN_FALSE(utils::GetFilesystemSize(partition, 857 &block_count, 858 &block_size)); 859 size = static_cast<int64_t>(block_count) * block_size; 860 } 861 TEST_AND_RETURN_FALSE(size > 0); 862 info->set_size(size); 863 OmahaHashCalculator hasher; 864 TEST_AND_RETURN_FALSE(hasher.UpdateFile(partition, size) == size); 865 TEST_AND_RETURN_FALSE(hasher.Finalize()); 866 const vector<char>& hash = hasher.raw_hash(); 867 info->set_hash(hash.data(), hash.size()); 868 LOG(INFO) << partition << ": size=" << size << " hash=" << hasher.hash(); 869 return true; 870} 871 872bool InitializePartitionInfos(const string& old_kernel, 873 const string& new_kernel, 874 const string& old_rootfs, 875 const string& new_rootfs, 876 DeltaArchiveManifest* manifest) { 877 if (!old_kernel.empty()) { 878 TEST_AND_RETURN_FALSE(DeltaDiffGenerator::InitializePartitionInfo( 879 true, 880 old_kernel, 881 manifest->mutable_old_kernel_info())); 882 } 883 TEST_AND_RETURN_FALSE(DeltaDiffGenerator::InitializePartitionInfo( 884 true, 885 new_kernel, 886 manifest->mutable_new_kernel_info())); 887 if (!old_rootfs.empty()) { 888 TEST_AND_RETURN_FALSE(DeltaDiffGenerator::InitializePartitionInfo( 889 false, 890 old_rootfs, 891 manifest->mutable_old_rootfs_info())); 892 } 893 TEST_AND_RETURN_FALSE(DeltaDiffGenerator::InitializePartitionInfo( 894 false, 895 new_rootfs, 896 manifest->mutable_new_rootfs_info())); 897 return true; 898} 899 900namespace { 901 902// Takes a collection (vector or RepeatedPtrField) of Extent and 903// returns a vector of the blocks referenced, in order. 904template<typename T> 905vector<uint64_t> ExpandExtents(const T& extents) { 906 vector<uint64_t> ret; 907 for (size_t i = 0, e = static_cast<size_t>(extents.size()); i != e; ++i) { 908 const Extent extent = graph_utils::GetElement(extents, i); 909 if (extent.start_block() == kSparseHole) { 910 ret.resize(ret.size() + extent.num_blocks(), kSparseHole); 911 } else { 912 for (uint64_t block = extent.start_block(); 913 block < (extent.start_block() + extent.num_blocks()); block++) { 914 ret.push_back(block); 915 } 916 } 917 } 918 return ret; 919} 920 921// Takes a vector of blocks and returns an equivalent vector of Extent 922// objects. 
923vector<Extent> CompressExtents(const vector<uint64_t>& blocks) { 924 vector<Extent> new_extents; 925 for (uint64_t block : blocks) { 926 graph_utils::AppendBlockToExtents(&new_extents, block); 927 } 928 return new_extents; 929} 930 931} // namespace 932 933void DeltaDiffGenerator::SubstituteBlocks( 934 Vertex* vertex, 935 const vector<Extent>& remove_extents, 936 const vector<Extent>& replace_extents) { 937 // First, expand out the blocks that op reads from 938 vector<uint64_t> read_blocks = ExpandExtents(vertex->op.src_extents()); 939 { 940 // Expand remove_extents and replace_extents 941 vector<uint64_t> remove_extents_expanded = 942 ExpandExtents(remove_extents); 943 vector<uint64_t> replace_extents_expanded = 944 ExpandExtents(replace_extents); 945 CHECK_EQ(remove_extents_expanded.size(), replace_extents_expanded.size()); 946 map<uint64_t, uint64_t> conversion; 947 for (vector<uint64_t>::size_type i = 0; 948 i < replace_extents_expanded.size(); i++) { 949 conversion[remove_extents_expanded[i]] = replace_extents_expanded[i]; 950 } 951 utils::ApplyMap(&read_blocks, conversion); 952 for (auto& edge_prop_pair : vertex->out_edges) { 953 vector<uint64_t> write_before_deps_expanded = 954 ExpandExtents(edge_prop_pair.second.write_extents); 955 utils::ApplyMap(&write_before_deps_expanded, conversion); 956 edge_prop_pair.second.write_extents = 957 CompressExtents(write_before_deps_expanded); 958 } 959 } 960 // Convert read_blocks back to extents 961 vertex->op.clear_src_extents(); 962 vector<Extent> new_extents = CompressExtents(read_blocks); 963 DeltaDiffGenerator::StoreExtents(new_extents, 964 vertex->op.mutable_src_extents()); 965} 966 967bool DeltaDiffGenerator::CutEdges(Graph* graph, 968 const set<Edge>& edges, 969 vector<CutEdgeVertexes>* out_cuts) { 970 DummyExtentAllocator scratch_allocator; 971 vector<CutEdgeVertexes> cuts; 972 cuts.reserve(edges.size()); 973 974 uint64_t scratch_blocks_used = 0; 975 for (const Edge& edge : edges) { 976 cuts.resize(cuts.size() + 1); 977 vector<Extent> old_extents = 978 (*graph)[edge.first].out_edges[edge.second].extents; 979 // Choose some scratch space 980 scratch_blocks_used += graph_utils::EdgeWeight(*graph, edge); 981 cuts.back().tmp_extents = 982 scratch_allocator.Allocate(graph_utils::EdgeWeight(*graph, edge)); 983 // create vertex to copy original->scratch 984 cuts.back().new_vertex = graph->size(); 985 graph->resize(graph->size() + 1); 986 cuts.back().old_src = edge.first; 987 cuts.back().old_dst = edge.second; 988 989 EdgeProperties& cut_edge_properties = 990 (*graph)[edge.first].out_edges.find(edge.second)->second; 991 992 // This should never happen, as we should only be cutting edges between 993 // real file nodes, and write-before relationships are created from 994 // a real file node to a temp copy node: 995 CHECK(cut_edge_properties.write_extents.empty()) 996 << "Can't cut edge that has write-before relationship."; 997 998 // make node depend on the copy operation 999 (*graph)[edge.first].out_edges.insert(make_pair(graph->size() - 1, 1000 cut_edge_properties)); 1001 1002 // Set src/dst extents and other proto variables for copy operation 1003 graph->back().op.set_type(DeltaArchiveManifest_InstallOperation_Type_MOVE); 1004 DeltaDiffGenerator::StoreExtents( 1005 cut_edge_properties.extents, 1006 graph->back().op.mutable_src_extents()); 1007 DeltaDiffGenerator::StoreExtents(cuts.back().tmp_extents, 1008 graph->back().op.mutable_dst_extents()); 1009 graph->back().op.set_src_length( 1010 graph_utils::EdgeWeight(*graph, edge) * kBlockSize); 
1011 graph->back().op.set_dst_length(graph->back().op.src_length()); 1012 1013 // make the dest node read from the scratch space 1014 DeltaDiffGenerator::SubstituteBlocks( 1015 &((*graph)[edge.second]), 1016 (*graph)[edge.first].out_edges[edge.second].extents, 1017 cuts.back().tmp_extents); 1018 1019 // delete the old edge 1020 CHECK_EQ(static_cast<Graph::size_type>(1), 1021 (*graph)[edge.first].out_edges.erase(edge.second)); 1022 1023 // Add an edge from dst to copy operation 1024 EdgeProperties write_before_edge_properties; 1025 write_before_edge_properties.write_extents = cuts.back().tmp_extents; 1026 (*graph)[edge.second].out_edges.insert( 1027 make_pair(graph->size() - 1, write_before_edge_properties)); 1028 } 1029 out_cuts->swap(cuts); 1030 return true; 1031} 1032 1033// Stores all Extents in 'extents' into 'out'. 1034void DeltaDiffGenerator::StoreExtents( 1035 const vector<Extent>& extents, 1036 google::protobuf::RepeatedPtrField<Extent>* out) { 1037 for (const Extent& extent : extents) { 1038 Extent* new_extent = out->Add(); 1039 *new_extent = extent; 1040 } 1041} 1042 1043// Creates all the edges for the graph. Writers of a block point to 1044// readers of the same block. This is because for an edge A->B, B 1045// must complete before A executes. 1046void DeltaDiffGenerator::CreateEdges(Graph* graph, 1047 const vector<Block>& blocks) { 1048 for (vector<Block>::size_type i = 0; i < blocks.size(); i++) { 1049 // Blocks with both a reader and writer get an edge 1050 if (blocks[i].reader == Vertex::kInvalidIndex || 1051 blocks[i].writer == Vertex::kInvalidIndex) 1052 continue; 1053 // Don't have a node depend on itself 1054 if (blocks[i].reader == blocks[i].writer) 1055 continue; 1056 // See if there's already an edge we can add onto 1057 Vertex::EdgeMap::iterator edge_it = 1058 (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader); 1059 if (edge_it == (*graph)[blocks[i].writer].out_edges.end()) { 1060 // No existing edge. Create one 1061 (*graph)[blocks[i].writer].out_edges.insert( 1062 make_pair(blocks[i].reader, EdgeProperties())); 1063 edge_it = (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader); 1064 CHECK(edge_it != (*graph)[blocks[i].writer].out_edges.end()); 1065 } 1066 graph_utils::AppendBlockToExtents(&edge_it->second.extents, i); 1067 } 1068} 1069 1070namespace { 1071 1072class SortCutsByTopoOrderLess { 1073 public: 1074 explicit SortCutsByTopoOrderLess( 1075 const vector<vector<Vertex::Index>::size_type>& table) 1076 : table_(table) {} 1077 bool operator()(const CutEdgeVertexes& a, const CutEdgeVertexes& b) { 1078 return table_[a.old_dst] < table_[b.old_dst]; 1079 } 1080 private: 1081 const vector<vector<Vertex::Index>::size_type>& table_; 1082}; 1083 1084} // namespace 1085 1086void DeltaDiffGenerator::GenerateReverseTopoOrderMap( 1087 const vector<Vertex::Index>& op_indexes, 1088 vector<vector<Vertex::Index>::size_type>* reverse_op_indexes) { 1089 vector<vector<Vertex::Index>::size_type> table(op_indexes.size()); 1090 for (vector<Vertex::Index>::size_type i = 0, e = op_indexes.size(); 1091 i != e; ++i) { 1092 Vertex::Index node = op_indexes[i]; 1093 if (table.size() < (node + 1)) { 1094 table.resize(node + 1); 1095 } 1096 table[node] = i; 1097 } 1098 reverse_op_indexes->swap(table); 1099} 1100 1101void DeltaDiffGenerator::SortCutsByTopoOrder( 1102 const vector<Vertex::Index>& op_indexes, 1103 vector<CutEdgeVertexes>* cuts) { 1104 // first, make a reverse lookup table. 
1105 vector<vector<Vertex::Index>::size_type> table; 1106 GenerateReverseTopoOrderMap(op_indexes, &table); 1107 SortCutsByTopoOrderLess less(table); 1108 sort(cuts->begin(), cuts->end(), less); 1109} 1110 1111void DeltaDiffGenerator::MoveFullOpsToBack(Graph* graph, 1112 vector<Vertex::Index>* op_indexes) { 1113 vector<Vertex::Index> ret; 1114 vector<Vertex::Index> full_ops; 1115 ret.reserve(op_indexes->size()); 1116 for (vector<Vertex::Index>::size_type i = 0, e = op_indexes->size(); i != e; 1117 ++i) { 1118 DeltaArchiveManifest_InstallOperation_Type type = 1119 (*graph)[(*op_indexes)[i]].op.type(); 1120 if (type == DeltaArchiveManifest_InstallOperation_Type_REPLACE || 1121 type == DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ) { 1122 full_ops.push_back((*op_indexes)[i]); 1123 } else { 1124 ret.push_back((*op_indexes)[i]); 1125 } 1126 } 1127 LOG(INFO) << "Stats: " << full_ops.size() << " full ops out of " 1128 << (full_ops.size() + ret.size()) << " total ops."; 1129 ret.insert(ret.end(), full_ops.begin(), full_ops.end()); 1130 op_indexes->swap(ret); 1131} 1132 1133namespace { 1134 1135template<typename T> 1136bool TempBlocksExistInExtents(const T& extents) { 1137 for (int i = 0, e = extents.size(); i < e; ++i) { 1138 Extent extent = graph_utils::GetElement(extents, i); 1139 uint64_t start = extent.start_block(); 1140 uint64_t num = extent.num_blocks(); 1141 if (start == kSparseHole) 1142 continue; 1143 if (start >= kTempBlockStart || 1144 (start + num) >= kTempBlockStart) { 1145 LOG(ERROR) << "temp block!"; 1146 LOG(ERROR) << "start: " << start << ", num: " << num; 1147 LOG(ERROR) << "kTempBlockStart: " << kTempBlockStart; 1148 LOG(ERROR) << "returning true"; 1149 return true; 1150 } 1151 // check for wrap-around, which would be a bug: 1152 CHECK(start <= (start + num)); 1153 } 1154 return false; 1155} 1156 1157// Converts the cuts, which must all have the same |old_dst| member, 1158// to full. It does this by converting the |old_dst| to REPLACE or 1159// REPLACE_BZ, dropping all incoming edges to |old_dst|, and marking 1160// all temp nodes invalid. 1161bool ConvertCutsToFull( 1162 Graph* graph, 1163 const string& new_root, 1164 int data_fd, 1165 off_t* data_file_size, 1166 vector<Vertex::Index>* op_indexes, 1167 vector<vector<Vertex::Index>::size_type>* reverse_op_indexes, 1168 const vector<CutEdgeVertexes>& cuts) { 1169 CHECK(!cuts.empty()); 1170 set<Vertex::Index> deleted_nodes; 1171 for (const CutEdgeVertexes& cut : cuts) { 1172 TEST_AND_RETURN_FALSE(DeltaDiffGenerator::ConvertCutToFullOp( 1173 graph, 1174 cut, 1175 new_root, 1176 data_fd, 1177 data_file_size)); 1178 deleted_nodes.insert(cut.new_vertex); 1179 } 1180 deleted_nodes.insert(cuts[0].old_dst); 1181 1182 vector<Vertex::Index> new_op_indexes; 1183 new_op_indexes.reserve(op_indexes->size()); 1184 for (Vertex::Index vertex_index : *op_indexes) { 1185 if (utils::SetContainsKey(deleted_nodes, vertex_index)) 1186 continue; 1187 new_op_indexes.push_back(vertex_index); 1188 } 1189 new_op_indexes.push_back(cuts[0].old_dst); 1190 op_indexes->swap(new_op_indexes); 1191 DeltaDiffGenerator::GenerateReverseTopoOrderMap(*op_indexes, 1192 reverse_op_indexes); 1193 return true; 1194} 1195 1196// Tries to assign temp blocks for a collection of cuts, all of which share 1197// the same old_dst member. If temp blocks can't be found, old_dst will be 1198// converted to a REPLACE or REPLACE_BZ operation. Returns true on success, 1199// which can happen even if blocks are converted to full. Returns false 1200// on exceptional error cases. 
1201bool AssignBlockForAdjoiningCuts( 1202 Graph* graph, 1203 const string& new_root, 1204 int data_fd, 1205 off_t* data_file_size, 1206 vector<Vertex::Index>* op_indexes, 1207 vector<vector<Vertex::Index>::size_type>* reverse_op_indexes, 1208 const vector<CutEdgeVertexes>& cuts) { 1209 CHECK(!cuts.empty()); 1210 const Vertex::Index old_dst = cuts[0].old_dst; 1211 // Calculate # of blocks needed 1212 uint64_t blocks_needed = 0; 1213 vector<uint64_t> cuts_blocks_needed(cuts.size()); 1214 for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) { 1215 uint64_t cut_blocks_needed = 0; 1216 for (const Extent& extent : cuts[i].tmp_extents) { 1217 cut_blocks_needed += extent.num_blocks(); 1218 } 1219 blocks_needed += cut_blocks_needed; 1220 cuts_blocks_needed[i] = cut_blocks_needed; 1221 } 1222 1223 // Find enough blocks 1224 ExtentRanges scratch_ranges; 1225 // Each block that's supplying temp blocks and the corresponding blocks: 1226 typedef vector<pair<Vertex::Index, ExtentRanges>> SupplierVector; 1227 SupplierVector block_suppliers; 1228 uint64_t scratch_blocks_found = 0; 1229 for (vector<Vertex::Index>::size_type i = (*reverse_op_indexes)[old_dst] + 1, 1230 e = op_indexes->size(); i < e; ++i) { 1231 Vertex::Index test_node = (*op_indexes)[i]; 1232 if (!(*graph)[test_node].valid) 1233 continue; 1234 // See if this node has sufficient blocks 1235 ExtentRanges ranges; 1236 ranges.AddRepeatedExtents((*graph)[test_node].op.dst_extents()); 1237 ranges.SubtractExtent(ExtentForRange( 1238 kTempBlockStart, kSparseHole - kTempBlockStart)); 1239 ranges.SubtractRepeatedExtents((*graph)[test_node].op.src_extents()); 1240 // For now, for simplicity, subtract out all blocks in read-before 1241 // dependencies. 1242 for (Vertex::EdgeMap::const_iterator edge_i = 1243 (*graph)[test_node].out_edges.begin(), 1244 edge_e = (*graph)[test_node].out_edges.end(); 1245 edge_i != edge_e; ++edge_i) { 1246 ranges.SubtractExtents(edge_i->second.extents); 1247 } 1248 if (ranges.blocks() == 0) 1249 continue; 1250 1251 if (ranges.blocks() + scratch_blocks_found > blocks_needed) { 1252 // trim down ranges 1253 vector<Extent> new_ranges = ranges.GetExtentsForBlockCount( 1254 blocks_needed - scratch_blocks_found); 1255 ranges = ExtentRanges(); 1256 ranges.AddExtents(new_ranges); 1257 } 1258 scratch_ranges.AddRanges(ranges); 1259 block_suppliers.push_back(make_pair(test_node, ranges)); 1260 scratch_blocks_found += ranges.blocks(); 1261 if (scratch_ranges.blocks() >= blocks_needed) 1262 break; 1263 } 1264 if (scratch_ranges.blocks() < blocks_needed) { 1265 LOG(INFO) << "Unable to find sufficient scratch"; 1266 TEST_AND_RETURN_FALSE(ConvertCutsToFull(graph, 1267 new_root, 1268 data_fd, 1269 data_file_size, 1270 op_indexes, 1271 reverse_op_indexes, 1272 cuts)); 1273 return true; 1274 } 1275 // Use the scratch we found 1276 TEST_AND_RETURN_FALSE(scratch_ranges.blocks() == scratch_blocks_found); 1277 1278 // Make all the suppliers depend on this node 1279 for (const auto& index_range_pair : block_suppliers) { 1280 graph_utils::AddReadBeforeDepExtents( 1281 &(*graph)[index_range_pair.first], 1282 old_dst, 1283 index_range_pair.second.GetExtentsForBlockCount( 1284 index_range_pair.second.blocks())); 1285 } 1286 1287 // Replace temp blocks in each cut 1288 for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) { 1289 const CutEdgeVertexes& cut = cuts[i]; 1290 vector<Extent> real_extents = 1291 scratch_ranges.GetExtentsForBlockCount(cuts_blocks_needed[i]); 1292 scratch_ranges.SubtractExtents(real_extents); 1293 
1294 // Fix the old dest node w/ the real blocks 1295 DeltaDiffGenerator::SubstituteBlocks(&(*graph)[old_dst], 1296 cut.tmp_extents, 1297 real_extents); 1298 1299 // Fix the new node w/ the real blocks. Since the new node is just a 1300 // copy operation, we can replace all the dest extents w/ the real 1301 // blocks. 1302 DeltaArchiveManifest_InstallOperation *op = &(*graph)[cut.new_vertex].op; 1303 op->clear_dst_extents(); 1304 DeltaDiffGenerator::StoreExtents(real_extents, op->mutable_dst_extents()); 1305 } 1306 return true; 1307} 1308 1309} // namespace 1310 1311// Returns true if |op| is a no-op operation that doesn't do any useful work 1312// (e.g., a move operation that copies blocks onto themselves). 1313bool DeltaDiffGenerator::IsNoopOperation( 1314 const DeltaArchiveManifest_InstallOperation& op) { 1315 return (op.type() == DeltaArchiveManifest_InstallOperation_Type_MOVE && 1316 ExpandExtents(op.src_extents()) == ExpandExtents(op.dst_extents())); 1317} 1318 1319bool DeltaDiffGenerator::AssignTempBlocks( 1320 Graph* graph, 1321 const string& new_root, 1322 int data_fd, 1323 off_t* data_file_size, 1324 vector<Vertex::Index>* op_indexes, 1325 vector<vector<Vertex::Index>::size_type>* reverse_op_indexes, 1326 const vector<CutEdgeVertexes>& cuts) { 1327 CHECK(!cuts.empty()); 1328 1329 // group of cuts w/ the same old_dst: 1330 vector<CutEdgeVertexes> cuts_group; 1331 1332 for (vector<CutEdgeVertexes>::size_type i = cuts.size() - 1, e = 0; 1333 true ; --i) { 1334 LOG(INFO) << "Fixing temp blocks in cut " << i 1335 << ": old dst: " << cuts[i].old_dst << " new vertex: " 1336 << cuts[i].new_vertex << " path: " 1337 << (*graph)[cuts[i].old_dst].file_name; 1338 1339 if (cuts_group.empty() || (cuts_group[0].old_dst == cuts[i].old_dst)) { 1340 cuts_group.push_back(cuts[i]); 1341 } else { 1342 CHECK(!cuts_group.empty()); 1343 TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph, 1344 new_root, 1345 data_fd, 1346 data_file_size, 1347 op_indexes, 1348 reverse_op_indexes, 1349 cuts_group)); 1350 cuts_group.clear(); 1351 cuts_group.push_back(cuts[i]); 1352 } 1353 1354 if (i == e) { 1355 // break out of for() loop 1356 break; 1357 } 1358 } 1359 CHECK(!cuts_group.empty()); 1360 TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph, 1361 new_root, 1362 data_fd, 1363 data_file_size, 1364 op_indexes, 1365 reverse_op_indexes, 1366 cuts_group)); 1367 return true; 1368} 1369 1370bool DeltaDiffGenerator::NoTempBlocksRemain(const Graph& graph) { 1371 size_t idx = 0; 1372 for (Graph::const_iterator it = graph.begin(), e = graph.end(); it != e; 1373 ++it, ++idx) { 1374 if (!it->valid) 1375 continue; 1376 const DeltaArchiveManifest_InstallOperation& op = it->op; 1377 if (TempBlocksExistInExtents(op.dst_extents()) || 1378 TempBlocksExistInExtents(op.src_extents())) { 1379 LOG(INFO) << "bad extents in node " << idx; 1380 LOG(INFO) << "so yeah"; 1381 return false; 1382 } 1383 1384 // Check out-edges: 1385 for (const auto& edge_prop_pair : it->out_edges) { 1386 if (TempBlocksExistInExtents(edge_prop_pair.second.extents) || 1387 TempBlocksExistInExtents(edge_prop_pair.second.write_extents)) { 1388 LOG(INFO) << "bad out edge in node " << idx; 1389 LOG(INFO) << "so yeah"; 1390 return false; 1391 } 1392 } 1393 } 1394 return true; 1395} 1396 1397bool DeltaDiffGenerator::ReorderDataBlobs( 1398 DeltaArchiveManifest* manifest, 1399 const string& data_blobs_path, 1400 const string& new_data_blobs_path) { 1401 int in_fd = open(data_blobs_path.c_str(), O_RDONLY, 0); 1402 TEST_AND_RETURN_FALSE_ERRNO(in_fd >= 0); 1403 
ScopedFdCloser in_fd_closer(&in_fd); 1404 1405 DirectFileWriter writer; 1406 TEST_AND_RETURN_FALSE( 1407 writer.Open(new_data_blobs_path.c_str(), 1408 O_WRONLY | O_TRUNC | O_CREAT, 1409 0644) == 0); 1410 ScopedFileWriterCloser writer_closer(&writer); 1411 uint64_t out_file_size = 0; 1412 1413 for (int i = 0; i < (manifest->install_operations_size() + 1414 manifest->kernel_install_operations_size()); i++) { 1415 DeltaArchiveManifest_InstallOperation* op = nullptr; 1416 if (i < manifest->install_operations_size()) { 1417 op = manifest->mutable_install_operations(i); 1418 } else { 1419 op = manifest->mutable_kernel_install_operations( 1420 i - manifest->install_operations_size()); 1421 } 1422 if (!op->has_data_offset()) 1423 continue; 1424 CHECK(op->has_data_length()); 1425 vector<char> buf(op->data_length()); 1426 ssize_t rc = pread(in_fd, &buf[0], buf.size(), op->data_offset()); 1427 TEST_AND_RETURN_FALSE(rc == static_cast<ssize_t>(buf.size())); 1428 1429 // Add the hash of the data blobs for this operation 1430 TEST_AND_RETURN_FALSE(AddOperationHash(op, buf)); 1431 1432 op->set_data_offset(out_file_size); 1433 TEST_AND_RETURN_FALSE(writer.Write(&buf[0], buf.size())); 1434 out_file_size += buf.size(); 1435 } 1436 return true; 1437} 1438 1439bool DeltaDiffGenerator::AddOperationHash( 1440 DeltaArchiveManifest_InstallOperation* op, 1441 const vector<char>& buf) { 1442 OmahaHashCalculator hasher; 1443 1444 TEST_AND_RETURN_FALSE(hasher.Update(&buf[0], buf.size())); 1445 TEST_AND_RETURN_FALSE(hasher.Finalize()); 1446 1447 const vector<char>& hash = hasher.raw_hash(); 1448 op->set_data_sha256_hash(hash.data(), hash.size()); 1449 return true; 1450} 1451 1452bool DeltaDiffGenerator::ConvertCutToFullOp(Graph* graph, 1453 const CutEdgeVertexes& cut, 1454 const string& new_root, 1455 int data_fd, 1456 off_t* data_file_size) { 1457 // Drop all incoming edges, keep all outgoing edges 1458 1459 // Keep all outgoing edges 1460 if ((*graph)[cut.old_dst].op.type() != 1461 DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ && 1462 (*graph)[cut.old_dst].op.type() != 1463 DeltaArchiveManifest_InstallOperation_Type_REPLACE) { 1464 Vertex::EdgeMap out_edges = (*graph)[cut.old_dst].out_edges; 1465 graph_utils::DropWriteBeforeDeps(&out_edges); 1466 1467 TEST_AND_RETURN_FALSE(DeltaReadFile(graph, 1468 cut.old_dst, 1469 nullptr, 1470 kEmptyPath, 1471 new_root, 1472 (*graph)[cut.old_dst].file_name, 1473 (*graph)[cut.old_dst].chunk_offset, 1474 (*graph)[cut.old_dst].chunk_size, 1475 data_fd, 1476 data_file_size)); 1477 1478 (*graph)[cut.old_dst].out_edges = out_edges; 1479 1480 // Right now we don't have doubly-linked edges, so we have to scan 1481 // the whole graph. 
1482 graph_utils::DropIncomingEdgesTo(graph, cut.old_dst); 1483 } 1484 1485 // Delete temp node 1486 (*graph)[cut.old_src].out_edges.erase(cut.new_vertex); 1487 CHECK((*graph)[cut.old_dst].out_edges.find(cut.new_vertex) == 1488 (*graph)[cut.old_dst].out_edges.end()); 1489 (*graph)[cut.new_vertex].valid = false; 1490 LOG(INFO) << "marked node invalid: " << cut.new_vertex; 1491 return true; 1492} 1493 1494bool DeltaDiffGenerator::ConvertGraphToDag(Graph* graph, 1495 const string& new_root, 1496 int fd, 1497 off_t* data_file_size, 1498 vector<Vertex::Index>* final_order, 1499 Vertex::Index scratch_vertex) { 1500 CycleBreaker cycle_breaker; 1501 LOG(INFO) << "Finding cycles..."; 1502 set<Edge> cut_edges; 1503 cycle_breaker.BreakCycles(*graph, &cut_edges); 1504 LOG(INFO) << "done finding cycles"; 1505 CheckGraph(*graph); 1506 1507 // Calculate number of scratch blocks needed 1508 1509 LOG(INFO) << "Cutting cycles..."; 1510 vector<CutEdgeVertexes> cuts; 1511 TEST_AND_RETURN_FALSE(CutEdges(graph, cut_edges, &cuts)); 1512 LOG(INFO) << "done cutting cycles"; 1513 LOG(INFO) << "There are " << cuts.size() << " cuts."; 1514 CheckGraph(*graph); 1515 1516 LOG(INFO) << "Creating initial topological order..."; 1517 TopologicalSort(*graph, final_order); 1518 LOG(INFO) << "done with initial topo order"; 1519 CheckGraph(*graph); 1520 1521 LOG(INFO) << "Moving full ops to the back"; 1522 MoveFullOpsToBack(graph, final_order); 1523 LOG(INFO) << "done moving full ops to back"; 1524 1525 vector<vector<Vertex::Index>::size_type> inverse_final_order; 1526 GenerateReverseTopoOrderMap(*final_order, &inverse_final_order); 1527 1528 SortCutsByTopoOrder(*final_order, &cuts); 1529 1530 if (!cuts.empty()) 1531 TEST_AND_RETURN_FALSE(AssignTempBlocks(graph, 1532 new_root, 1533 fd, 1534 data_file_size, 1535 final_order, 1536 &inverse_final_order, 1537 cuts)); 1538 LOG(INFO) << "Making sure all temp blocks have been allocated"; 1539 1540 // Remove the scratch node, if any 1541 if (scratch_vertex != Vertex::kInvalidIndex) { 1542 final_order->erase(final_order->begin() + 1543 inverse_final_order[scratch_vertex]); 1544 (*graph)[scratch_vertex].valid = false; 1545 GenerateReverseTopoOrderMap(*final_order, &inverse_final_order); 1546 } 1547 1548 graph_utils::DumpGraph(*graph); 1549 CHECK(NoTempBlocksRemain(*graph)); 1550 LOG(INFO) << "done making sure all temp blocks are allocated"; 1551 return true; 1552} 1553 1554void DeltaDiffGenerator::CreateScratchNode(uint64_t start_block, 1555 uint64_t num_blocks, 1556 Vertex* vertex) { 1557 vertex->file_name = "<scratch>"; 1558 vertex->op.set_type(DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ); 1559 vertex->op.set_data_offset(0); 1560 vertex->op.set_data_length(0); 1561 Extent* extent = vertex->op.add_dst_extents(); 1562 extent->set_start_block(start_block); 1563 extent->set_num_blocks(num_blocks); 1564} 1565 1566bool DeltaDiffGenerator::GenerateDeltaUpdateFile( 1567 const string& old_root, 1568 const string& old_image, 1569 const string& new_root, 1570 const string& new_image, 1571 const string& old_kernel_part, 1572 const string& new_kernel_part, 1573 const string& output_path, 1574 const string& private_key_path, 1575 off_t chunk_size, 1576 size_t rootfs_partition_size, 1577 uint64_t minor_version, 1578 const ImageInfo* old_image_info, 1579 const ImageInfo* new_image_info, 1580 uint64_t* metadata_size) { 1581 TEST_AND_RETURN_FALSE(chunk_size == -1 || chunk_size % kBlockSize == 0); 1582 int old_image_block_count = 0, old_image_block_size = 0; 1583 int new_image_block_count = 0, 
new_image_block_size = 0; 1584 TEST_AND_RETURN_FALSE(utils::GetFilesystemSize(new_image, 1585 &new_image_block_count, 1586 &new_image_block_size)); 1587 if (!old_image.empty()) { 1588 TEST_AND_RETURN_FALSE(utils::GetFilesystemSize(old_image, 1589 &old_image_block_count, 1590 &old_image_block_size)); 1591 TEST_AND_RETURN_FALSE(old_image_block_size == new_image_block_size); 1592 LOG_IF(WARNING, old_image_block_count != new_image_block_count) 1593 << "Old and new images have different block counts."; 1594 1595 // If new_image_info is present, old_image_info must be present. 1596 TEST_AND_RETURN_FALSE(!old_image_info == !new_image_info); 1597 } else { 1598 // old_image_info must not be present for a full update. 1599 TEST_AND_RETURN_FALSE(!old_image_info); 1600 } 1601 1602 // Sanity checks for the partition size. 1603 TEST_AND_RETURN_FALSE(rootfs_partition_size % kBlockSize == 0); 1604 size_t fs_size = static_cast<size_t>(new_image_block_size) * 1605 new_image_block_count; 1606 LOG(INFO) << "Rootfs partition size: " << rootfs_partition_size; 1607 LOG(INFO) << "Actual filesystem size: " << fs_size; 1608 TEST_AND_RETURN_FALSE(rootfs_partition_size >= fs_size); 1609 1610 // Sanity check kernel partition arg 1611 TEST_AND_RETURN_FALSE(utils::FileSize(new_kernel_part) >= 0); 1612 1613 vector<Block> blocks(max(old_image_block_count, new_image_block_count)); 1614 LOG(INFO) << "Invalid block index: " << Vertex::kInvalidIndex; 1615 LOG(INFO) << "Block count: " << blocks.size(); 1616 for (vector<Block>::size_type i = 0; i < blocks.size(); i++) { 1617 CHECK(blocks[i].reader == Vertex::kInvalidIndex); 1618 CHECK(blocks[i].writer == Vertex::kInvalidIndex); 1619 } 1620 Graph graph; 1621 CheckGraph(graph); 1622 1623 const string kTempFileTemplate("CrAU_temp_data.XXXXXX"); 1624 string temp_file_path; 1625 unique_ptr<ScopedPathUnlinker> temp_file_unlinker; 1626 off_t data_file_size = 0; 1627 1628 LOG(INFO) << "Reading files..."; 1629 1630 // Create empty protobuf Manifest object 1631 DeltaArchiveManifest manifest; 1632 1633 vector<DeltaArchiveManifest_InstallOperation> kernel_ops; 1634 1635 vector<Vertex::Index> final_order; 1636 Vertex::Index scratch_vertex = Vertex::kInvalidIndex; 1637 { 1638 int fd; 1639 TEST_AND_RETURN_FALSE( 1640 utils::MakeTempFile(kTempFileTemplate, &temp_file_path, &fd)); 1641 temp_file_unlinker.reset(new ScopedPathUnlinker(temp_file_path)); 1642 TEST_AND_RETURN_FALSE(fd >= 0); 1643 ScopedFdCloser fd_closer(&fd); 1644 if (!old_image.empty()) { 1645 // Delta update 1646 1647 // Set the minor version for this payload. 
1648 LOG(INFO) << "Adding Delta Minor Version."; 1649 manifest.set_minor_version(minor_version); 1650 1651 TEST_AND_RETURN_FALSE(DeltaReadFiles(&graph, 1652 &blocks, 1653 old_root, 1654 new_root, 1655 chunk_size, 1656 fd, 1657 &data_file_size)); 1658 LOG(INFO) << "done reading normal files"; 1659 CheckGraph(graph); 1660 1661 LOG(INFO) << "Starting metadata processing"; 1662 TEST_AND_RETURN_FALSE(Metadata::DeltaReadMetadata(&graph, 1663 &blocks, 1664 old_image, 1665 new_image, 1666 fd, 1667 &data_file_size)); 1668 LOG(INFO) << "Done metadata processing"; 1669 CheckGraph(graph); 1670 1671 graph.resize(graph.size() + 1); 1672 TEST_AND_RETURN_FALSE(ReadUnwrittenBlocks(blocks, 1673 fd, 1674 &data_file_size, 1675 old_image, 1676 new_image, 1677 &graph.back())); 1678 if (graph.back().op.data_length() == 0) { 1679 LOG(INFO) << "No unwritten blocks to write, omitting operation"; 1680 graph.pop_back(); 1681 } 1682 1683 // Final scratch block (if there's space) 1684 if (blocks.size() < (rootfs_partition_size / kBlockSize)) { 1685 scratch_vertex = graph.size(); 1686 graph.resize(graph.size() + 1); 1687 CreateScratchNode(blocks.size(), 1688 (rootfs_partition_size / kBlockSize) - blocks.size(), 1689 &graph.back()); 1690 } 1691 1692 // Read kernel partition 1693 TEST_AND_RETURN_FALSE(DeltaCompressKernelPartition(old_kernel_part, 1694 new_kernel_part, 1695 &kernel_ops, 1696 fd, 1697 &data_file_size)); 1698 1699 LOG(INFO) << "done reading kernel"; 1700 CheckGraph(graph); 1701 1702 LOG(INFO) << "Creating edges..."; 1703 CreateEdges(&graph, blocks); 1704 LOG(INFO) << "Done creating edges"; 1705 CheckGraph(graph); 1706 1707 TEST_AND_RETURN_FALSE(ConvertGraphToDag(&graph, 1708 new_root, 1709 fd, 1710 &data_file_size, 1711 &final_order, 1712 scratch_vertex)); 1713 } else { 1714 // Full update 1715 off_t new_image_size = 1716 static_cast<off_t>(new_image_block_count) * new_image_block_size; 1717 TEST_AND_RETURN_FALSE(FullUpdateGenerator::Run(&graph, 1718 new_kernel_part, 1719 new_image, 1720 new_image_size, 1721 fd, 1722 &data_file_size, 1723 kFullUpdateChunkSize, 1724 kBlockSize, 1725 &kernel_ops, 1726 &final_order)); 1727 1728 // Set the minor version for this payload. 1729 LOG(INFO) << "Adding Full Minor Version."; 1730 manifest.set_minor_version(DeltaPerformer::kFullPayloadMinorVersion); 1731 } 1732 } 1733 1734 if (old_image_info) 1735 *(manifest.mutable_old_image_info()) = *old_image_info; 1736 1737 if (new_image_info) 1738 *(manifest.mutable_new_image_info()) = *new_image_info; 1739 1740 OperationNameMap op_name_map; 1741 CheckGraph(graph); 1742 InstallOperationsToManifest(graph, 1743 final_order, 1744 kernel_ops, 1745 &manifest, 1746 &op_name_map); 1747 CheckGraph(graph); 1748 manifest.set_block_size(kBlockSize); 1749 1750 // Reorder the data blobs with the newly ordered manifest 1751 string ordered_blobs_path; 1752 TEST_AND_RETURN_FALSE(utils::MakeTempFile( 1753 "CrAU_temp_data.ordered.XXXXXX", 1754 &ordered_blobs_path, 1755 nullptr)); 1756 ScopedPathUnlinker ordered_blobs_unlinker(ordered_blobs_path); 1757 TEST_AND_RETURN_FALSE(ReorderDataBlobs(&manifest, 1758 temp_file_path, 1759 ordered_blobs_path)); 1760 temp_file_unlinker.reset(); 1761 1762 // Check that install op blobs are in order. 1763 uint64_t next_blob_offset = 0; 1764 { 1765 for (int i = 0; i < (manifest.install_operations_size() + 1766 manifest.kernel_install_operations_size()); i++) { 1767 DeltaArchiveManifest_InstallOperation* op = 1768 i < manifest.install_operations_size() ? 
  // Check that install op blobs are in order.
  uint64_t next_blob_offset = 0;
  for (int i = 0; i < (manifest.install_operations_size() +
                       manifest.kernel_install_operations_size()); i++) {
    DeltaArchiveManifest_InstallOperation* op =
        i < manifest.install_operations_size() ?
        manifest.mutable_install_operations(i) :
        manifest.mutable_kernel_install_operations(
            i - manifest.install_operations_size());
    if (op->has_data_offset()) {
      if (op->data_offset() != next_blob_offset) {
        LOG(FATAL) << "bad blob offset! " << op->data_offset() << " != "
                   << next_blob_offset;
      }
      next_blob_offset += op->data_length();
    }
  }

  // Signatures appear at the end of the blobs. Note the offset in the
  // manifest.
  if (!private_key_path.empty()) {
    uint64_t signature_blob_length = 0;
    TEST_AND_RETURN_FALSE(
        PayloadSigner::SignatureBlobLength(vector<string>(1, private_key_path),
                                           &signature_blob_length));
    AddSignatureOp(next_blob_offset, signature_blob_length, &manifest);
  }

  TEST_AND_RETURN_FALSE(InitializePartitionInfos(old_kernel_part,
                                                 new_kernel_part,
                                                 old_image,
                                                 new_image,
                                                 &manifest));

  // Serialize the protobuf.
  string serialized_manifest;

  CheckGraph(graph);
  TEST_AND_RETURN_FALSE(manifest.AppendToString(&serialized_manifest));
  CheckGraph(graph);

  LOG(INFO) << "Writing final delta file header...";
  DirectFileWriter writer;
  TEST_AND_RETURN_FALSE_ERRNO(writer.Open(output_path.c_str(),
                                          O_WRONLY | O_CREAT | O_TRUNC,
                                          0644) == 0);
  ScopedFileWriterCloser writer_closer(&writer);

  // Write the magic.
  TEST_AND_RETURN_FALSE(writer.Write(kDeltaMagic, strlen(kDeltaMagic)));

  // Write the version number.
  TEST_AND_RETURN_FALSE(WriteUint64AsBigEndian(&writer, kVersionNumber));

  // Write the protobuf length.
  TEST_AND_RETURN_FALSE(WriteUint64AsBigEndian(&writer,
                                               serialized_manifest.size()));

  // Write the protobuf.
  LOG(INFO) << "Writing final delta file protobuf... "
            << serialized_manifest.size();
  TEST_AND_RETURN_FALSE(writer.Write(serialized_manifest.data(),
                                     serialized_manifest.size()));

  // Append the data blobs.
  LOG(INFO) << "Writing final delta file data blobs...";
  int blobs_fd = open(ordered_blobs_path.c_str(), O_RDONLY, 0);
  ScopedFdCloser blobs_fd_closer(&blobs_fd);
  TEST_AND_RETURN_FALSE(blobs_fd >= 0);
  for (;;) {
    char buf[kBlockSize];
    ssize_t rc = read(blobs_fd, buf, sizeof(buf));
    if (rc == 0) {
      // EOF
      break;
    }
    TEST_AND_RETURN_FALSE_ERRNO(rc > 0);
    TEST_AND_RETURN_FALSE(writer.Write(buf, rc));
  }

  // Write the signature blob.
  if (!private_key_path.empty()) {
    LOG(INFO) << "Signing the update...";
    vector<char> signature_blob;
    TEST_AND_RETURN_FALSE(PayloadSigner::SignPayload(
        output_path,
        vector<string>(1, private_key_path),
        &signature_blob));
    TEST_AND_RETURN_FALSE(writer.Write(&signature_blob[0],
                                       signature_blob.size()));
  }

  *metadata_size =
      strlen(kDeltaMagic) + 2 * sizeof(uint64_t) + serialized_manifest.size();
  ReportPayloadUsage(manifest, *metadata_size, op_name_map);

  LOG(INFO) << "All done. Successfully created delta file with "
            << "metadata size = " << *metadata_size;
  return true;
}
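
// For reference, the payload written above has the following layout. The
// metadata, whose size is reported through |metadata_size|, covers
// everything up to and including the manifest:
//
//   +---------------------------------+
//   | kDeltaMagic                     |
//   | version (uint64_t, big-endian)  |
//   | manifest size (uint64_t, BE)    |
//   | serialized manifest             |
//   | data blobs, in manifest order   |
//   | signature blob (when signed)    |
//   +---------------------------------+
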
// Runs the bsdiff tool on two files and returns the resulting delta in
// |out|. Returns true on success.
bool DeltaDiffGenerator::BsdiffFiles(const string& old_file,
                                     const string& new_file,
                                     vector<char>* out) {
  const string kPatchFile = "delta.patchXXXXXX";
  string patch_file_path;

  TEST_AND_RETURN_FALSE(
      utils::MakeTempFile(kPatchFile, &patch_file_path, nullptr));
  // Unlink the patch file on every exit path, not just on success.
  ScopedPathUnlinker patch_file_unlinker(patch_file_path);

  vector<string> cmd;
  cmd.push_back(kBsdiffPath);
  cmd.push_back(old_file);
  cmd.push_back(new_file);
  cmd.push_back(patch_file_path);

  int rc = 1;
  TEST_AND_RETURN_FALSE(Subprocess::SynchronousExec(cmd, &rc, nullptr));
  TEST_AND_RETURN_FALSE(rc == 0);
  TEST_AND_RETURN_FALSE(utils::ReadFile(patch_file_path, out));
  return true;
}

// The |blocks| vector contains a reader and writer for each block on the
// filesystem that's being in-place updated. We populate the reader/writer
// fields of |blocks| by calling this function.
// For each block in |operation| that is read or written, find that block
// in |blocks| and set the reader/writer field to the vertex passed.
// |graph| is not strictly necessary, but useful for printing out
// error messages.
bool DeltaDiffGenerator::AddInstallOpToBlocksVector(
    const DeltaArchiveManifest_InstallOperation& operation,
    const Graph& graph,
    Vertex::Index vertex,
    vector<Block>* blocks) {
  // Operations that write nothing should have been dropped already.
  TEST_AND_RETURN_FALSE(operation.dst_extents_size() > 0);

  enum BlockField { READER = 0, WRITER, BLOCK_FIELD_COUNT };
  for (int field = READER; field < BLOCK_FIELD_COUNT; field++) {
    const int extents_size =
        (field == READER) ? operation.src_extents_size() :
        operation.dst_extents_size();
    const char* past_participle = (field == READER) ? "read" : "written";
    const google::protobuf::RepeatedPtrField<Extent>& extents =
        (field == READER) ? operation.src_extents() : operation.dst_extents();
    Vertex::Index Block::*access_type =
        (field == READER) ? &Block::reader : &Block::writer;

    for (int i = 0; i < extents_size; i++) {
      const Extent& extent = extents.Get(i);
      if (extent.start_block() == kSparseHole) {
        // Hole in a sparse file; skip.
        continue;
      }
      for (uint64_t block = extent.start_block();
           block < (extent.start_block() + extent.num_blocks()); block++) {
        if ((*blocks)[block].*access_type != Vertex::kInvalidIndex) {
          LOG(FATAL) << "Block " << block << " is already "
                     << past_participle << " by "
                     << (*blocks)[block].*access_type << "("
                     << graph[(*blocks)[block].*access_type].file_name
                     << ") and also " << vertex << "("
                     << graph[vertex].file_name << ")";
        }
        (*blocks)[block].*access_type = vertex;
      }
    }
  }
  return true;
}
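
// Illustrative example for AddSignatureOp() below (hypothetical numbers): a
// 256-byte signature blob at offset 1000 yields a kernel REPLACE op with
// data_offset = 1000, data_length = 256, and a single dst extent of
// (256 + 4095) / 4096 = 1 block starting at kSparseHole, so clients that
// predate signature support harmlessly "write" the blob into a sparse hole.
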
" << manifest->has_signatures_offset(); 1946 // Add a dummy op at the end to appease older clients 1947 DeltaArchiveManifest_InstallOperation* dummy_op = 1948 manifest->add_kernel_install_operations(); 1949 dummy_op->set_type(DeltaArchiveManifest_InstallOperation_Type_REPLACE); 1950 dummy_op->set_data_offset(signature_blob_offset); 1951 manifest->set_signatures_offset(signature_blob_offset); 1952 dummy_op->set_data_length(signature_blob_length); 1953 manifest->set_signatures_size(signature_blob_length); 1954 Extent* dummy_extent = dummy_op->add_dst_extents(); 1955 // Tell the dummy op to write this data to a big sparse hole 1956 dummy_extent->set_start_block(kSparseHole); 1957 dummy_extent->set_num_blocks((signature_blob_length + kBlockSize - 1) / 1958 kBlockSize); 1959} 1960 1961const char* const kBsdiffPath = "bsdiff"; 1962 1963}; // namespace chromeos_update_engine 1964