// inplace_generator_unittest.cc revision 8cc502dacbccdab96824d42287f230ce04004784
// Copyright 2015 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "update_engine/payload_generator/inplace_generator.h"

#include <map>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include <base/format_macros.h>
#include <base/logging.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
#include <gtest/gtest.h>

#include "update_engine/payload_generator/cycle_breaker.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/payload_generator/graph_types.h"
#include "update_engine/payload_generator/graph_utils.h"
#include "update_engine/test_utils.h"
#include "update_engine/utils.h"

using std::map;
using std::set;
using std::string;
using std::stringstream;
using std::vector;

namespace chromeos_update_engine {

using Block = InplaceGenerator::Block;

namespace {

// Short aliases for the verbose protobuf-generated operation type constants.
#define OP_BSDIFF DeltaArchiveManifest_InstallOperation_Type_BSDIFF
#define OP_MOVE DeltaArchiveManifest_InstallOperation_Type_MOVE
#define OP_REPLACE DeltaArchiveManifest_InstallOperation_Type_REPLACE
#define OP_REPLACE_BZ DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ

// Fills |out| with an operation of the given |type| named |path|, reading
// from |src_extents| and writing to |dst_extents|.
void GenVertex(Vertex* out,
               const vector<Extent>& src_extents,
               const vector<Extent>& dst_extents,
               const string& path,
               DeltaArchiveManifest_InstallOperation_Type type) {
  out->aop.op.set_type(type);
  out->aop.name = path;
  StoreExtents(src_extents, out->aop.op.mutable_src_extents());
  StoreExtents(dst_extents, out->aop.op.mutable_dst_extents());
}

// Returns a vector holding the single extent [start_block, start_block +
// num_blocks).
vector<Extent> VectOfExt(uint64_t start_block, uint64_t num_blocks) {
  return vector<Extent>(1, ExtentForRange(start_block, num_blocks));
}

// Returns an edge carrying a read dependency on |extents|.
EdgeProperties EdgeWithReadDep(const vector<Extent>& extents) {
  EdgeProperties ret;
  ret.extents = extents;
  return ret;
}

// Returns an edge carrying a write dependency on |extents|.
EdgeProperties EdgeWithWriteDep(const vector<Extent>& extents) {
  EdgeProperties ret;
  ret.write_extents = extents;
  return ret;
}

// Logs the contents of |vect| (debugging helper).
template<typename T>
void DumpVect(const vector<T>& vect) {
  stringstream ss(stringstream::out);
  for (typename vector<T>::const_iterator it = vect.begin(), e = vect.end();
       it != e; ++it) {
    ss << *it << ", ";
  }
  LOG(INFO) << "{" << ss.str() << "}";
}

// Appends the extent [start, start + length) to |vect|.
void AppendExtent(vector<Extent>* vect, uint64_t start, uint64_t length) {
  vect->resize(vect->size() + 1);
  vect->back().set_start_block(start);
  vect->back().set_num_blocks(length);
}

// Appends the extent [start, start + length) to |op|'s source extents.
void OpAppendExtent(DeltaArchiveManifest_InstallOperation* op,
                    uint64_t start,
                    uint64_t length) {
  Extent* extent = op->add_src_extents();
  extent->set_start_block(start);
  extent->set_num_blocks(length);
}

}  // namespace

class InplaceGeneratorTest : public ::testing::Test {
 protected:
  // Initialize |blob_path_|, |blob_file_size_| and |blob_fd_| variables
  // with a new blob file. The file is closed and removed automatically when
  // the test finishes.
  void CreateBlobFile() {
    // blob_fd_closer_ takes a pointer to blob_fd_. Make sure we destroy a
    // previous instance before overriding blob_fd_.
    blob_fd_closer_.reset();
    EXPECT_TRUE(utils::MakeTempFile(
        "InplaceGenerator_blob_file.XXXXXX", &blob_path_, &blob_fd_));
    blob_path_unlinker_.reset(new ScopedPathUnlinker(blob_path_));
    blob_fd_closer_.reset(new ScopedFdCloser(&blob_fd_));
    blob_file_size_ = 0;
    EXPECT_GE(blob_fd_, 0);
    blob_file_.reset(new BlobFileWriter(blob_fd_, &blob_file_size_));
  }

  // Blob file name, file descriptor and file size used to store operation
  // blobs.
  string blob_path_;
  int blob_fd_{-1};
  off_t blob_file_size_{0};
  std::unique_ptr<BlobFileWriter> blob_file_;
  std::unique_ptr<ScopedPathUnlinker> blob_path_unlinker_;
  std::unique_ptr<ScopedFdCloser> blob_fd_closer_;
};

TEST_F(InplaceGeneratorTest, BlockDefaultValues) {
  // Tests that a Block is initialized with the default values as a
  // Vertex::kInvalidIndex. This is required by the delta generators.
  Block block;
  EXPECT_EQ(Vertex::kInvalidIndex, block.reader);
  EXPECT_EQ(Vertex::kInvalidIndex, block.writer);
}

TEST_F(InplaceGeneratorTest, SubstituteBlocksTest) {
  // Blocks 3, 4, 5 and 7 are replaced by 10, 11, 13 and 14 respectively;
  // SubstituteBlocks must rewrite the op's src extents accordingly, leaving
  // untouched blocks (and the sparse hole) alone.
  vector<Extent> remove_blocks;
  AppendExtent(&remove_blocks, 3, 3);
  AppendExtent(&remove_blocks, 7, 1);
  vector<Extent> replace_blocks;
  AppendExtent(&replace_blocks, 10, 2);
  AppendExtent(&replace_blocks, 13, 2);
  Vertex vertex;
  DeltaArchiveManifest_InstallOperation& op = vertex.aop.op;
  OpAppendExtent(&op, 4, 3);
  OpAppendExtent(&op, kSparseHole, 4);  // Sparse hole in file
  OpAppendExtent(&op, 3, 1);
  OpAppendExtent(&op, 7, 3);

  InplaceGenerator::SubstituteBlocks(&vertex, remove_blocks, replace_blocks);

  EXPECT_EQ(7, op.src_extents_size());
  EXPECT_EQ(11, op.src_extents(0).start_block());
  EXPECT_EQ(1, op.src_extents(0).num_blocks());
  EXPECT_EQ(13, op.src_extents(1).start_block());
  EXPECT_EQ(1, op.src_extents(1).num_blocks());
  EXPECT_EQ(6, op.src_extents(2).start_block());
  EXPECT_EQ(1, op.src_extents(2).num_blocks());
  EXPECT_EQ(kSparseHole, op.src_extents(3).start_block());
  EXPECT_EQ(4, op.src_extents(3).num_blocks());
  EXPECT_EQ(10, op.src_extents(4).start_block());
  EXPECT_EQ(1, op.src_extents(4).num_blocks());
  EXPECT_EQ(14, op.src_extents(5).start_block());
  EXPECT_EQ(1, op.src_extents(5).num_blocks());
  EXPECT_EQ(8, op.src_extents(6).start_block());
  EXPECT_EQ(2, op.src_extents(6).num_blocks());
}

TEST_F(InplaceGeneratorTest, CutEdgesTest) {
  // Builds a two-node graph with a read-after-write cycle (node 0 reads what
  // node 1 writes and vice versa), breaks the cycle and checks that CutEdges
  // inserts a temp-block MOVE node and rewires extents/edges as expected.
  Graph graph;
  vector<Block> blocks(9);

  // Create nodes in graph
  {
    graph.resize(graph.size() + 1);
    graph.back().aop.op.set_type(OP_MOVE);
    // Reads from blocks 3, 5, 7
    vector<Extent> extents;
    AppendBlockToExtents(&extents, 3);
    AppendBlockToExtents(&extents, 5);
    AppendBlockToExtents(&extents, 7);
    StoreExtents(extents, graph.back().aop.op.mutable_src_extents());
    blocks[3].reader = graph.size() - 1;
    blocks[5].reader = graph.size() - 1;
    blocks[7].reader = graph.size() - 1;

    // Writes to blocks 1, 2, 4
    extents.clear();
    AppendBlockToExtents(&extents, 1);
    AppendBlockToExtents(&extents, 2);
    AppendBlockToExtents(&extents, 4);
    StoreExtents(extents, graph.back().aop.op.mutable_dst_extents());
    blocks[1].writer = graph.size() - 1;
    blocks[2].writer = graph.size() - 1;
    blocks[4].writer = graph.size() - 1;
  }
  {
    graph.resize(graph.size() + 1);
    graph.back().aop.op.set_type(OP_MOVE);
    // Reads from blocks 1, 2, 4
    vector<Extent> extents;
    AppendBlockToExtents(&extents, 1);
    AppendBlockToExtents(&extents, 2);
    AppendBlockToExtents(&extents, 4);
    StoreExtents(extents, graph.back().aop.op.mutable_src_extents());
    blocks[1].reader = graph.size() - 1;
    blocks[2].reader = graph.size() - 1;
    blocks[4].reader = graph.size() - 1;

    // Writes to blocks 3, 5, 6
    extents.clear();
    AppendBlockToExtents(&extents, 3);
    AppendBlockToExtents(&extents, 5);
    AppendBlockToExtents(&extents, 6);
    StoreExtents(extents, graph.back().aop.op.mutable_dst_extents());
    blocks[3].writer = graph.size() - 1;
    blocks[5].writer = graph.size() - 1;
    blocks[6].writer = graph.size() - 1;
  }

  // Create edges
  InplaceGenerator::CreateEdges(&graph, blocks);

  // Find cycles
  CycleBreaker cycle_breaker;
  set<Edge> cut_edges;
  cycle_breaker.BreakCycles(graph, &cut_edges);

  EXPECT_EQ(1, cut_edges.size());
  EXPECT_TRUE(cut_edges.end() != cut_edges.find(
      std::pair<Vertex::Index, Vertex::Index>(1, 0)));

  vector<CutEdgeVertexes> cuts;
  EXPECT_TRUE(InplaceGenerator::CutEdges(&graph, cut_edges, &cuts));

  EXPECT_EQ(3, graph.size());

  // Check new node in graph:
  EXPECT_EQ(OP_MOVE, graph.back().aop.op.type());
  EXPECT_EQ(2, graph.back().aop.op.src_extents_size());
  EXPECT_EQ(1, graph.back().aop.op.dst_extents_size());
  EXPECT_EQ(kTempBlockStart, graph.back().aop.op.dst_extents(0).start_block());
  EXPECT_EQ(2, graph.back().aop.op.dst_extents(0).num_blocks());
  EXPECT_TRUE(graph.back().out_edges.empty());

  // Check that old node reads from new blocks
  EXPECT_EQ(2, graph[0].aop.op.src_extents_size());
  EXPECT_EQ(kTempBlockStart, graph[0].aop.op.src_extents(0).start_block());
  EXPECT_EQ(2, graph[0].aop.op.src_extents(0).num_blocks());
  EXPECT_EQ(7, graph[0].aop.op.src_extents(1).start_block());
  EXPECT_EQ(1, graph[0].aop.op.src_extents(1).num_blocks());

  // And that the old dst extents haven't changed
  EXPECT_EQ(2, graph[0].aop.op.dst_extents_size());
  EXPECT_EQ(1, graph[0].aop.op.dst_extents(0).start_block());
  EXPECT_EQ(2, graph[0].aop.op.dst_extents(0).num_blocks());
  EXPECT_EQ(4, graph[0].aop.op.dst_extents(1).start_block());
  EXPECT_EQ(1, graph[0].aop.op.dst_extents(1).num_blocks());

  // Ensure it only depends on the next node and the new temp node
  EXPECT_EQ(2, graph[0].out_edges.size());
  EXPECT_TRUE(graph[0].out_edges.end() != graph[0].out_edges.find(1));
  EXPECT_TRUE(graph[0].out_edges.end() != graph[0].out_edges.find(graph.size() -
                                                                  1));

  // Check second node has unchanged extents
  EXPECT_EQ(2, graph[1].aop.op.src_extents_size());
  EXPECT_EQ(1, graph[1].aop.op.src_extents(0).start_block());
  EXPECT_EQ(2, graph[1].aop.op.src_extents(0).num_blocks());
  EXPECT_EQ(4, graph[1].aop.op.src_extents(1).start_block());
  EXPECT_EQ(1, graph[1].aop.op.src_extents(1).num_blocks());

  EXPECT_EQ(2, graph[1].aop.op.dst_extents_size());
  EXPECT_EQ(3, graph[1].aop.op.dst_extents(0).start_block());
  EXPECT_EQ(1, graph[1].aop.op.dst_extents(0).num_blocks());
  EXPECT_EQ(5, graph[1].aop.op.dst_extents(1).start_block());
  EXPECT_EQ(2, graph[1].aop.op.dst_extents(1).num_blocks());

  // Ensure it only depends on the next node
  EXPECT_EQ(1, graph[1].out_edges.size());
  EXPECT_TRUE(graph[1].out_edges.end() != graph[1].out_edges.find(2));
}

TEST_F(InplaceGeneratorTest, AssignTempBlocksReuseTest) {
  // Sets up pre-cut cycles whose new temp-block vertexes (6 and 7) should be
  // substituted by real scratch blocks, invalidating the temp vertexes.
  Graph graph(9);

  const vector<Extent> empt;
  uint64_t tmp = kTempBlockStart;
  const string kFilename = "/foo";

  vector<CutEdgeVertexes> cuts;
  cuts.resize(3);

  // Simple broken loop:
  GenVertex(&graph[0], VectOfExt(0, 1), VectOfExt(1, 1), "", OP_MOVE);
  GenVertex(&graph[1], VectOfExt(tmp, 1), VectOfExt(0, 1), "", OP_MOVE);
  GenVertex(&graph[2], VectOfExt(1, 1), VectOfExt(tmp, 1), "", OP_MOVE);
  // Corresponding edges:
  graph[0].out_edges[2] = EdgeWithReadDep(VectOfExt(1, 1));
  graph[1].out_edges[2] = EdgeWithWriteDep(VectOfExt(tmp, 1));
  graph[1].out_edges[0] = EdgeWithReadDep(VectOfExt(0, 1));
  // Store the cut:
  cuts[0].old_dst = 1;
  cuts[0].old_src = 0;
  cuts[0].new_vertex = 2;
  cuts[0].tmp_extents = VectOfExt(tmp, 1);
  tmp++;

  // Slightly more complex pair of loops:
  GenVertex(&graph[3], VectOfExt(4, 2), VectOfExt(2, 2), "", OP_MOVE);
  GenVertex(&graph[4], VectOfExt(6, 1), VectOfExt(7, 1), "", OP_MOVE);
  GenVertex(&graph[5], VectOfExt(tmp, 3), VectOfExt(4, 3), kFilename, OP_MOVE);
  GenVertex(&graph[6], VectOfExt(2, 2), VectOfExt(tmp, 2), "", OP_MOVE);
  GenVertex(&graph[7], VectOfExt(7, 1), VectOfExt(tmp + 2, 1), "", OP_MOVE);
  // Corresponding edges:
  graph[3].out_edges[6] = EdgeWithReadDep(VectOfExt(2, 2));
  graph[4].out_edges[7] = EdgeWithReadDep(VectOfExt(7, 1));
  graph[5].out_edges[6] = EdgeWithWriteDep(VectOfExt(tmp, 2));
  graph[5].out_edges[7] = EdgeWithWriteDep(VectOfExt(tmp + 2, 1));
  graph[5].out_edges[3] = EdgeWithReadDep(VectOfExt(4, 2));
  graph[5].out_edges[4] = EdgeWithReadDep(VectOfExt(6, 1));
  // Store the cuts:
  cuts[1].old_dst = 5;
  cuts[1].old_src = 3;
  cuts[1].new_vertex = 6;
  cuts[1].tmp_extents = VectOfExt(tmp, 2);
  cuts[2].old_dst = 5;
  cuts[2].old_src = 4;
  cuts[2].new_vertex = 7;
  cuts[2].tmp_extents = VectOfExt(tmp + 2, 1);

  // Supplier of temp block:
  GenVertex(&graph[8], empt, VectOfExt(8, 1), "", OP_REPLACE);

  // Specify the final order:
  vector<Vertex::Index> op_indexes;
  op_indexes.push_back(2);
  op_indexes.push_back(0);
  op_indexes.push_back(1);
  op_indexes.push_back(6);
  op_indexes.push_back(3);
  op_indexes.push_back(7);
  op_indexes.push_back(4);
  op_indexes.push_back(5);
  op_indexes.push_back(8);

  vector<vector<Vertex::Index>::size_type> reverse_op_indexes;
  InplaceGenerator::GenerateReverseTopoOrderMap(op_indexes,
                                                &reverse_op_indexes);

  CreateBlobFile();
  EXPECT_TRUE(InplaceGenerator::AssignTempBlocks(&graph,
                                                 "/dev/zero",
                                                 blob_file_.get(),
                                                 &op_indexes,
                                                 &reverse_op_indexes,
                                                 cuts));
  EXPECT_FALSE(graph[6].valid);
  EXPECT_FALSE(graph[7].valid);
  EXPECT_EQ(1, graph[1].aop.op.src_extents_size());
  EXPECT_EQ(2, graph[1].aop.op.src_extents(0).start_block());
  EXPECT_EQ(1, graph[1].aop.op.src_extents(0).num_blocks());
  EXPECT_EQ(OP_REPLACE_BZ, graph[5].aop.op.type());
}

TEST_F(InplaceGeneratorTest, MoveAndSortFullOpsToBackTest) {
  // Full ops (REPLACE/REPLACE_BZ) must be moved after the non-full ops
  // (BSDIFF/MOVE), preserving the relative order within each group.
  Graph graph(4);
  graph[0].aop.name = "A";
  graph[0].aop.op.set_type(OP_REPLACE);
  graph[1].aop.name = "B";
  graph[1].aop.op.set_type(OP_BSDIFF);
  graph[2].aop.name = "C";
  graph[2].aop.op.set_type(OP_REPLACE_BZ);
  graph[3].aop.name = "D";
  graph[3].aop.op.set_type(OP_MOVE);

  vector<Vertex::Index> vect(graph.size());

  for (vector<Vertex::Index>::size_type i = 0; i < vect.size(); ++i) {
    vect[i] = i;
  }
  InplaceGenerator::MoveAndSortFullOpsToBack(&graph, &vect);
  EXPECT_EQ(vect.size(), graph.size());
  EXPECT_EQ(graph[vect[0]].aop.name, "B");
  EXPECT_EQ(graph[vect[1]].aop.name, "D");
  EXPECT_EQ(graph[vect[2]].aop.name, "A");
  EXPECT_EQ(graph[vect[3]].aop.name, "C");
}

TEST_F(InplaceGeneratorTest, AssignTempBlocksTest) {
  Graph graph(9);
  const vector<Extent> empt;  // empty
  const string kFilename = "/foo";

  // Some scratch space:
  GenVertex(&graph[0], empt, VectOfExt(200, 1), "", OP_REPLACE);
  GenVertex(&graph[1], empt, VectOfExt(210, 10), "", OP_REPLACE);
  GenVertex(&graph[2], empt, VectOfExt(220, 1), "", OP_REPLACE);

  // A cycle that requires 10 blocks to break:
  GenVertex(&graph[3], VectOfExt(10, 11), VectOfExt(0, 9), "", OP_BSDIFF);
  graph[3].out_edges[4] = EdgeWithReadDep(VectOfExt(0, 9));
  GenVertex(&graph[4], VectOfExt(0, 9), VectOfExt(10, 11), "", OP_BSDIFF);
  graph[4].out_edges[3] = EdgeWithReadDep(VectOfExt(10, 11));

  // A cycle that requires 9 blocks to break:
  GenVertex(&graph[5], VectOfExt(40, 11), VectOfExt(30, 10), "", OP_BSDIFF);
  graph[5].out_edges[6] = EdgeWithReadDep(VectOfExt(30, 10));
  GenVertex(&graph[6], VectOfExt(30, 10), VectOfExt(40, 11), "", OP_BSDIFF);
  graph[6].out_edges[5] = EdgeWithReadDep(VectOfExt(40, 11));

  // A cycle that requires 40 blocks to break (which is too many):
  GenVertex(&graph[7],
            VectOfExt(120, 50),
            VectOfExt(60, 40),
            "",
            OP_BSDIFF);
  graph[7].out_edges[8] = EdgeWithReadDep(VectOfExt(60, 40));
  GenVertex(&graph[8],
            VectOfExt(60, 40),
            VectOfExt(120, 50),
            kFilename,
            OP_BSDIFF);
  graph[8].out_edges[7] = EdgeWithReadDep(VectOfExt(120, 50));

  graph_utils::DumpGraph(graph);

  vector<Vertex::Index> final_order;

  CreateBlobFile();
  EXPECT_TRUE(InplaceGenerator::ConvertGraphToDag(&graph,
                                                  "/dev/zero",
                                                  blob_file_.get(),
                                                  &final_order,
                                                  Vertex::kInvalidIndex));

  // Expected result: the two small cycles get broken via MOVE nodes (9 and
  // 10) into scratch at block 60; the too-large cycle is resolved by
  // converting graph[8] into a full REPLACE_BZ operation.
  Graph expected_graph(12);
  GenVertex(&expected_graph[0], empt, VectOfExt(200, 1), "", OP_REPLACE);
  GenVertex(&expected_graph[1], empt, VectOfExt(210, 10), "", OP_REPLACE);
  GenVertex(&expected_graph[2], empt, VectOfExt(220, 1), "", OP_REPLACE);
  GenVertex(&expected_graph[3],
            VectOfExt(10, 11),
            VectOfExt(0, 9),
            "",
            OP_BSDIFF);
  expected_graph[3].out_edges[9] = EdgeWithReadDep(VectOfExt(0, 9));
  GenVertex(&expected_graph[4],
            VectOfExt(60, 9),
            VectOfExt(10, 11),
            "",
            OP_BSDIFF);
  expected_graph[4].out_edges[3] = EdgeWithReadDep(VectOfExt(10, 11));
  expected_graph[4].out_edges[9] = EdgeWithWriteDep(VectOfExt(60, 9));
  GenVertex(&expected_graph[5],
            VectOfExt(40, 11),
            VectOfExt(30, 10),
            "",
            OP_BSDIFF);
  expected_graph[5].out_edges[10] = EdgeWithReadDep(VectOfExt(30, 10));

  GenVertex(&expected_graph[6],
            VectOfExt(60, 10),
            VectOfExt(40, 11),
            "",
            OP_BSDIFF);
  expected_graph[6].out_edges[5] = EdgeWithReadDep(VectOfExt(40, 11));
  expected_graph[6].out_edges[10] = EdgeWithWriteDep(VectOfExt(60, 10));

  GenVertex(&expected_graph[7],
            VectOfExt(120, 50),
            VectOfExt(60, 40),
            "",
            OP_BSDIFF);
  expected_graph[7].out_edges[6] = EdgeWithReadDep(VectOfExt(60, 10));

  GenVertex(&expected_graph[8], empt, VectOfExt(0, 50), "/foo", OP_REPLACE_BZ);
  expected_graph[8].out_edges[7] = EdgeWithReadDep(VectOfExt(120, 50));

  GenVertex(&expected_graph[9],
            VectOfExt(0, 9),
            VectOfExt(60, 9),
            "",
            OP_MOVE);

  GenVertex(&expected_graph[10],
            VectOfExt(30, 10),
            VectOfExt(60, 10),
            "",
            OP_MOVE);
  expected_graph[10].out_edges[4] = EdgeWithReadDep(VectOfExt(60, 9));

  EXPECT_EQ(12, graph.size());
  EXPECT_FALSE(graph.back().valid);
  for (Graph::size_type i = 0; i < graph.size() - 1; i++) {
    EXPECT_TRUE(graph[i].out_edges == expected_graph[i].out_edges);
    if (i == 8) {
      // special case
    } else {
      // EXPECT_TRUE(graph[i] == expected_graph[i]) << "i = " << i;
    }
  }
}

TEST_F(InplaceGeneratorTest, CreateScratchNodeTest) {
  // A scratch node is a zero-length REPLACE_BZ covering the given block
  // range.
  Vertex vertex;
  InplaceGenerator::CreateScratchNode(12, 34, &vertex);
  EXPECT_EQ(OP_REPLACE_BZ, vertex.aop.op.type());
  EXPECT_EQ(0, vertex.aop.op.data_offset());
  EXPECT_EQ(0, vertex.aop.op.data_length());
  EXPECT_EQ(1, vertex.aop.op.dst_extents_size());
  EXPECT_EQ(12, vertex.aop.op.dst_extents(0).start_block());
  EXPECT_EQ(34, vertex.aop.op.dst_extents(0).num_blocks());
}

TEST_F(InplaceGeneratorTest, ApplyMapTest) {
  // Values present as keys in |value_map| are replaced; others (including
  // values that only appear as mapped results, like 5 -> 10 here) are left
  // untouched.
  vector<uint64_t> collection = {1, 2, 3, 4, 6};
  vector<uint64_t> expected_values = {1, 2, 5, 4, 8};
  map<uint64_t, uint64_t> value_map;
  value_map[3] = 5;
  value_map[6] = 8;
  value_map[5] = 10;

  InplaceGenerator::ApplyMap(&collection, value_map);
  EXPECT_EQ(expected_values, collection);
}

// We can't produce MOVE operations with a source or destination in the block 0.
// This test checks that the cycle breaker procedure doesn't produce such
// operations.
TEST_F(InplaceGeneratorTest, ResolveReadAfterWriteDependenciesAvoidMoveToZero) {
  size_t block_size = 4096;
  size_t num_blocks = 4;
  vector<AnnotatedOperation> aops;

  // Create a REPLACE_BZ for block 0, and a circular dependency among all other
  // blocks. This situation would prefer to issue a MOVE to scratch space and
  // the only available block is 0.
  aops.emplace_back();
  aops.back().name = base::StringPrintf("<bz-block-0>");
  aops.back().op.set_type(OP_REPLACE_BZ);
  StoreExtents({ExtentForRange(0, 1)}, aops.back().op.mutable_dst_extents());

  for (size_t i = 1; i < num_blocks; i++) {
    AnnotatedOperation aop;
    aop.name = base::StringPrintf("<op-%" PRIuS ">", i);
    aop.op.set_type(OP_BSDIFF);
    StoreExtents({ExtentForRange(1 + i % (num_blocks - 1), 1)},
                 aop.op.mutable_src_extents());
    StoreExtents({ExtentForRange(i, 1)}, aop.op.mutable_dst_extents());
    aops.push_back(aop);
  }

  PartitionConfig part(PartitionName::kRootfs);
  part.path = "/dev/zero";
  part.size = num_blocks * block_size;

  CreateBlobFile();

  // We run two tests here. The first one without enough blocks for the scratch
  // space, forcing it to create a new full operation, and the second case with
  // one extra block in the partition that can be used for the move operation.
  for (const auto part_blocks : vector<uint64_t>{num_blocks, num_blocks + 1}) {
    SCOPED_TRACE(base::StringPrintf("Using partition_blocs=%" PRIu64,
                                    part_blocks));
    vector<AnnotatedOperation> result_aops = aops;
    EXPECT_TRUE(InplaceGenerator::ResolveReadAfterWriteDependencies(
        part, part_blocks * block_size, block_size, blob_file_.get(),
        &result_aops));

    size_t full_ops = 0;
    for (const auto& aop : result_aops) {
      if (aop.op.type() == OP_REPLACE || aop.op.type() == OP_REPLACE_BZ)
        full_ops++;

      if (aop.op.type() != OP_MOVE)
        continue;
      for (const Extent& extent : aop.op.src_extents()) {
        EXPECT_NE(0, extent.start_block()) << "On src extents for aop: " << aop;
      }
      for (const Extent& extent : aop.op.dst_extents()) {
        EXPECT_NE(0, extent.start_block()) << "On dst extents for aop: " << aop;
      }
    }

    // If there's extra space in the partition, it should not use a new full
    // operation for it.
    EXPECT_EQ(part_blocks == num_blocks ? 2 : 1, full_ops);

    if (HasNonfatalFailure()) {
      LOG(INFO) << "Result operation list:";
      for (const auto& aop : result_aops) {
        LOG(INFO) << aop;
      }
    }
  }
}

}  // namespace chromeos_update_engine