inplace_generator_unittest.cc revision 896fdbaa1cc9a422ac36ef455d02c3cef1f27816
// Copyright 2015 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "update_engine/payload_generator/inplace_generator.h"

#include <map>
#include <set>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

#include <base/format_macros.h>
#include <base/logging.h>
#include <base/strings/string_util.h>
#include <base/strings/stringprintf.h>
#include <gtest/gtest.h>

#include "update_engine/payload_generator/cycle_breaker.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/payload_generator/graph_types.h"
#include "update_engine/payload_generator/graph_utils.h"
#include "update_engine/test_utils.h"
#include "update_engine/utils.h"

using std::map;
using std::set;
using std::string;
using std::stringstream;
using std::vector;

namespace chromeos_update_engine {

using Block = InplaceGenerator::Block;

namespace {

// Shorthand aliases for the protobuf-generated InstallOperation type enum
// values used throughout these tests.
#define OP_BSDIFF DeltaArchiveManifest_InstallOperation_Type_BSDIFF
#define OP_MOVE DeltaArchiveManifest_InstallOperation_Type_MOVE
#define OP_REPLACE DeltaArchiveManifest_InstallOperation_Type_REPLACE
#define OP_REPLACE_BZ DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ

// Fills |out| with an operation of the given |type| named |path|, reading
// from |src_extents| and writing to |dst_extents|.
void GenVertex(Vertex* out,
               const vector<Extent>& src_extents,
               const vector<Extent>& dst_extents,
               const string& path,
               DeltaArchiveManifest_InstallOperation_Type type) {
  out->aop.op.set_type(type);
  out->aop.name = path;
  StoreExtents(src_extents, out->aop.op.mutable_src_extents());
  StoreExtents(dst_extents, out->aop.op.mutable_dst_extents());
}

// Returns a vector holding the single extent [start_block, start_block +
// num_blocks).
vector<Extent> VectOfExt(uint64_t start_block, uint64_t num_blocks) {
  return vector<Extent>(1, ExtentForRange(start_block, num_blocks));
}

// Returns an edge carrying a read dependency on |extents|.
EdgeProperties EdgeWithReadDep(const vector<Extent>& extents) {
  EdgeProperties ret;
  ret.extents = extents;
  return ret;
}

// Returns an edge carrying a write dependency on |extents|.
EdgeProperties EdgeWithWriteDep(const vector<Extent>& extents) {
  EdgeProperties ret;
  ret.write_extents = extents;
  return ret;
}

// Debugging helper: logs the contents of |vect| as "{a, b, c, }".
template<typename T>
void DumpVect(const vector<T>& vect) {
  stringstream ss(stringstream::out);
  for (typename vector<T>::const_iterator it = vect.begin(), e = vect.end();
       it != e; ++it) {
    ss << *it << ", ";
  }
  LOG(INFO) << "{" << ss.str() << "}";
}

// Appends the extent [start, start + length) to |vect|.
void AppendExtent(vector<Extent>* vect, uint64_t start, uint64_t length) {
  vect->resize(vect->size() + 1);
  vect->back().set_start_block(start);
  vect->back().set_num_blocks(length);
}

// Appends the extent [start, start + length) to |op|'s src_extents.
void OpAppendExtent(DeltaArchiveManifest_InstallOperation* op,
                    uint64_t start,
                    uint64_t length) {
  Extent* extent = op->add_src_extents();
  extent->set_start_block(start);
  extent->set_num_blocks(length);
}

}  // namespace

class InplaceGeneratorTest : public ::testing::Test {
 protected:
  // Initialize |blob_path_|, |blob_file_size_| and |blob_fd_| variables
  // with a new blob file. The file is closed and removed automatically when
  // the test finishes.
  void CreateBlobFile() {
    // blob_fd_closer_ takes a pointer to blob_fd_. Make sure we destroy a
    // previous instance before overriding blob_fd_.
    blob_fd_closer_.reset();
    EXPECT_TRUE(utils::MakeTempFile(
        "InplaceGenerator_blob_file.XXXXXX", &blob_path_, &blob_fd_));
    blob_path_unlinker_.reset(new ScopedPathUnlinker(blob_path_));
    blob_fd_closer_.reset(new ScopedFdCloser(&blob_fd_));
    blob_file_size_ = 0;
    EXPECT_GE(blob_fd_, 0);
  }

  // Blob file name, file descriptor and file size used to store operation
  // blobs.
  string blob_path_;
  int blob_fd_{-1};
  off_t blob_file_size_{0};
  std::unique_ptr<ScopedPathUnlinker> blob_path_unlinker_;
  std::unique_ptr<ScopedFdCloser> blob_fd_closer_;
};

TEST_F(InplaceGeneratorTest, BlockDefaultValues) {
  // Tests that a Block is initialized with the default values as a
  // Vertex::kInvalidIndex. This is required by the delta generators.
  Block block;
  EXPECT_EQ(Vertex::kInvalidIndex, block.reader);
  EXPECT_EQ(Vertex::kInvalidIndex, block.writer);
}

TEST_F(InplaceGeneratorTest, SubstituteBlocksTest) {
  // Blocks 3-5 and 7 in the operation's src_extents should be remapped to
  // blocks 10-11 and 13-14 respectively; other blocks stay untouched.
  vector<Extent> remove_blocks;
  AppendExtent(&remove_blocks, 3, 3);
  AppendExtent(&remove_blocks, 7, 1);
  vector<Extent> replace_blocks;
  AppendExtent(&replace_blocks, 10, 2);
  AppendExtent(&replace_blocks, 13, 2);
  Vertex vertex;
  DeltaArchiveManifest_InstallOperation& op = vertex.aop.op;
  OpAppendExtent(&op, 4, 3);
  OpAppendExtent(&op, kSparseHole, 4);  // Sparse hole in file
  OpAppendExtent(&op, 3, 1);
  OpAppendExtent(&op, 7, 3);

  InplaceGenerator::SubstituteBlocks(&vertex, remove_blocks, replace_blocks);

  // The substitution splits extents at remap boundaries, so the extent list
  // grows from 4 to 7 entries; the sparse hole passes through unchanged.
  EXPECT_EQ(7, op.src_extents_size());
  EXPECT_EQ(11, op.src_extents(0).start_block());
  EXPECT_EQ(1, op.src_extents(0).num_blocks());
  EXPECT_EQ(13, op.src_extents(1).start_block());
  EXPECT_EQ(1, op.src_extents(1).num_blocks());
  EXPECT_EQ(6, op.src_extents(2).start_block());
  EXPECT_EQ(1, op.src_extents(2).num_blocks());
  EXPECT_EQ(kSparseHole, op.src_extents(3).start_block());
  EXPECT_EQ(4, op.src_extents(3).num_blocks());
  EXPECT_EQ(10, op.src_extents(4).start_block());
  EXPECT_EQ(1, op.src_extents(4).num_blocks());
  EXPECT_EQ(14, op.src_extents(5).start_block());
  EXPECT_EQ(1, op.src_extents(5).num_blocks());
  EXPECT_EQ(8, op.src_extents(6).start_block());
  EXPECT_EQ(2, op.src_extents(6).num_blocks());
}

TEST_F(InplaceGeneratorTest, CutEdgesTest) {
  // Builds a two-node graph whose nodes read each other's written blocks
  // (a read-after-write cycle), then verifies CutEdges() inserts a temp-block
  // MOVE node to break the cycle.
  Graph graph;
  vector<Block> blocks(9);

  // Create nodes in graph
  {
    graph.resize(graph.size() + 1);
    graph.back().aop.op.set_type(OP_MOVE);
    // Reads from blocks 3, 5, 7
    vector<Extent> extents;
    AppendBlockToExtents(&extents, 3);
    AppendBlockToExtents(&extents, 5);
    AppendBlockToExtents(&extents, 7);
    StoreExtents(extents, graph.back().aop.op.mutable_src_extents());
    blocks[3].reader = graph.size() - 1;
    blocks[5].reader = graph.size() - 1;
    blocks[7].reader = graph.size() - 1;

    // Writes to blocks 1, 2, 4
    extents.clear();
    AppendBlockToExtents(&extents, 1);
    AppendBlockToExtents(&extents, 2);
    AppendBlockToExtents(&extents, 4);
    StoreExtents(extents, graph.back().aop.op.mutable_dst_extents());
    blocks[1].writer = graph.size() - 1;
    blocks[2].writer = graph.size() - 1;
    blocks[4].writer = graph.size() - 1;
  }
  {
    graph.resize(graph.size() + 1);
    graph.back().aop.op.set_type(OP_MOVE);
    // Reads from blocks 1, 2, 4
    vector<Extent> extents;
    AppendBlockToExtents(&extents, 1);
    AppendBlockToExtents(&extents, 2);
    AppendBlockToExtents(&extents, 4);
    StoreExtents(extents, graph.back().aop.op.mutable_src_extents());
    blocks[1].reader = graph.size() - 1;
    blocks[2].reader = graph.size() - 1;
    blocks[4].reader = graph.size() - 1;

    // Writes to blocks 3, 5, 6
    extents.clear();
    AppendBlockToExtents(&extents, 3);
    AppendBlockToExtents(&extents, 5);
    AppendBlockToExtents(&extents, 6);
    StoreExtents(extents, graph.back().aop.op.mutable_dst_extents());
    blocks[3].writer = graph.size() - 1;
    blocks[5].writer = graph.size() - 1;
    blocks[6].writer = graph.size() - 1;
  }

  // Create edges
  InplaceGenerator::CreateEdges(&graph, blocks);

  // Find cycles
  CycleBreaker cycle_breaker;
  set<Edge> cut_edges;
  cycle_breaker.BreakCycles(graph, &cut_edges);

  // Exactly the edge 1 -> 0 must have been selected for cutting.
  EXPECT_EQ(1, cut_edges.size());
  EXPECT_TRUE(cut_edges.end() != cut_edges.find(
      std::pair<Vertex::Index, Vertex::Index>(1, 0)));

  vector<CutEdgeVertexes> cuts;
  EXPECT_TRUE(InplaceGenerator::CutEdges(&graph, cut_edges, &cuts));

  // Cutting the edge appends one new node (the temp-block MOVE).
  EXPECT_EQ(3, graph.size());

  // Check new node in graph:
  EXPECT_EQ(OP_MOVE, graph.back().aop.op.type());
  EXPECT_EQ(2, graph.back().aop.op.src_extents_size());
  EXPECT_EQ(1, graph.back().aop.op.dst_extents_size());
  EXPECT_EQ(kTempBlockStart, graph.back().aop.op.dst_extents(0).start_block());
  EXPECT_EQ(2, graph.back().aop.op.dst_extents(0).num_blocks());
  EXPECT_TRUE(graph.back().out_edges.empty());

  // Check that old node reads from new blocks
  EXPECT_EQ(2, graph[0].aop.op.src_extents_size());
  EXPECT_EQ(kTempBlockStart, graph[0].aop.op.src_extents(0).start_block());
  EXPECT_EQ(2, graph[0].aop.op.src_extents(0).num_blocks());
  EXPECT_EQ(7, graph[0].aop.op.src_extents(1).start_block());
  EXPECT_EQ(1, graph[0].aop.op.src_extents(1).num_blocks());

  // And that the old dst extents haven't changed
  EXPECT_EQ(2, graph[0].aop.op.dst_extents_size());
  EXPECT_EQ(1, graph[0].aop.op.dst_extents(0).start_block());
  EXPECT_EQ(2, graph[0].aop.op.dst_extents(0).num_blocks());
  EXPECT_EQ(4, graph[0].aop.op.dst_extents(1).start_block());
  EXPECT_EQ(1, graph[0].aop.op.dst_extents(1).num_blocks());

  // Ensure it only depends on the next node and the new temp node
  EXPECT_EQ(2, graph[0].out_edges.size());
  EXPECT_TRUE(graph[0].out_edges.end() != graph[0].out_edges.find(1));
  EXPECT_TRUE(graph[0].out_edges.end() != graph[0].out_edges.find(graph.size() -
                                                                  1));

  // Check second node has unchanged extents
  EXPECT_EQ(2, graph[1].aop.op.src_extents_size());
  EXPECT_EQ(1, graph[1].aop.op.src_extents(0).start_block());
  EXPECT_EQ(2, graph[1].aop.op.src_extents(0).num_blocks());
  EXPECT_EQ(4, graph[1].aop.op.src_extents(1).start_block());
  EXPECT_EQ(1, graph[1].aop.op.src_extents(1).num_blocks());

  EXPECT_EQ(2, graph[1].aop.op.dst_extents_size());
  EXPECT_EQ(3, graph[1].aop.op.dst_extents(0).start_block());
  EXPECT_EQ(1, graph[1].aop.op.dst_extents(0).num_blocks());
  EXPECT_EQ(5, graph[1].aop.op.dst_extents(1).start_block());
  EXPECT_EQ(2, graph[1].aop.op.dst_extents(1).num_blocks());

  // Ensure it only depends on the next node
  EXPECT_EQ(1, graph[1].out_edges.size());
  EXPECT_TRUE(graph[1].out_edges.end() != graph[1].out_edges.find(2));
}

TEST_F(InplaceGeneratorTest, AssignTempBlocksReuseTest) {
  // Builds a graph with three pre-made cuts (temp-block placeholders) and
  // checks AssignTempBlocks() resolves them: two cut nodes get folded back
  // (marked invalid), extents are remapped, and the cut whose source is a
  // named file (graph[5]) is converted to a full REPLACE_BZ operation.
  Graph graph(9);

  const vector<Extent> empt;
  uint64_t tmp = kTempBlockStart;
  const string kFilename = "/foo";

  vector<CutEdgeVertexes> cuts;
  cuts.resize(3);

  // Simple broken loop:
  GenVertex(&graph[0], VectOfExt(0, 1), VectOfExt(1, 1), "", OP_MOVE);
  GenVertex(&graph[1], VectOfExt(tmp, 1), VectOfExt(0, 1), "", OP_MOVE);
  GenVertex(&graph[2], VectOfExt(1, 1), VectOfExt(tmp, 1), "", OP_MOVE);
  // Corresponding edges:
  graph[0].out_edges[2] = EdgeWithReadDep(VectOfExt(1, 1));
  graph[1].out_edges[2] = EdgeWithWriteDep(VectOfExt(tmp, 1));
  graph[1].out_edges[0] = EdgeWithReadDep(VectOfExt(0, 1));
  // Store the cut:
  cuts[0].old_dst = 1;
  cuts[0].old_src = 0;
  cuts[0].new_vertex = 2;
  cuts[0].tmp_extents = VectOfExt(tmp, 1);
  tmp++;

  // Slightly more complex pair of loops:
  GenVertex(&graph[3], VectOfExt(4, 2), VectOfExt(2, 2), "", OP_MOVE);
  GenVertex(&graph[4], VectOfExt(6, 1), VectOfExt(7, 1), "", OP_MOVE);
  GenVertex(&graph[5], VectOfExt(tmp, 3), VectOfExt(4, 3), kFilename, OP_MOVE);
  GenVertex(&graph[6], VectOfExt(2, 2), VectOfExt(tmp, 2), "", OP_MOVE);
  GenVertex(&graph[7], VectOfExt(7, 1), VectOfExt(tmp + 2, 1), "", OP_MOVE);
  // Corresponding edges:
  graph[3].out_edges[6] = EdgeWithReadDep(VectOfExt(2, 2));
  graph[4].out_edges[7] = EdgeWithReadDep(VectOfExt(7, 1));
  graph[5].out_edges[6] = EdgeWithWriteDep(VectOfExt(tmp, 2));
  graph[5].out_edges[7] = EdgeWithWriteDep(VectOfExt(tmp + 2, 1));
  graph[5].out_edges[3] = EdgeWithReadDep(VectOfExt(4, 2));
  graph[5].out_edges[4] = EdgeWithReadDep(VectOfExt(6, 1));
  // Store the cuts:
  cuts[1].old_dst = 5;
  cuts[1].old_src = 3;
  cuts[1].new_vertex = 6;
  cuts[1].tmp_extents = VectOfExt(tmp, 2);
  cuts[2].old_dst = 5;
  cuts[2].old_src = 4;
  cuts[2].new_vertex = 7;
  cuts[2].tmp_extents = VectOfExt(tmp + 2, 1);

  // Supplier of temp block:
  GenVertex(&graph[8], empt, VectOfExt(8, 1), "", OP_REPLACE);

  // Specify the final order:
  vector<Vertex::Index> op_indexes;
  op_indexes.push_back(2);
  op_indexes.push_back(0);
  op_indexes.push_back(1);
  op_indexes.push_back(6);
  op_indexes.push_back(3);
  op_indexes.push_back(7);
  op_indexes.push_back(4);
  op_indexes.push_back(5);
  op_indexes.push_back(8);

  vector<vector<Vertex::Index>::size_type> reverse_op_indexes;
  InplaceGenerator::GenerateReverseTopoOrderMap(op_indexes,
                                                &reverse_op_indexes);

  CreateBlobFile();
  EXPECT_TRUE(InplaceGenerator::AssignTempBlocks(&graph,
                                                 "/dev/zero",
                                                 blob_fd_,
                                                 &blob_file_size_,
                                                 &op_indexes,
                                                 &reverse_op_indexes,
                                                 cuts));
  // The two cut nodes for graph[5]'s cuts were folded back into the graph.
  EXPECT_FALSE(graph[6].valid);
  EXPECT_FALSE(graph[7].valid);
  EXPECT_EQ(1, graph[1].aop.op.src_extents_size());
  EXPECT_EQ(2, graph[1].aop.op.src_extents(0).start_block());
  EXPECT_EQ(1, graph[1].aop.op.src_extents(0).num_blocks());
  EXPECT_EQ(OP_REPLACE_BZ, graph[5].aop.op.type());
}

TEST_F(InplaceGeneratorTest, MoveAndSortFullOpsToBackTest) {
  // Full operations (REPLACE/REPLACE_BZ: A and C) must be moved after the
  // non-full ones (BSDIFF/MOVE: B and D), preserving relative order.
  Graph graph(4);
  graph[0].aop.name = "A";
  graph[0].aop.op.set_type(OP_REPLACE);
  graph[1].aop.name = "B";
  graph[1].aop.op.set_type(OP_BSDIFF);
  graph[2].aop.name = "C";
  graph[2].aop.op.set_type(OP_REPLACE_BZ);
  graph[3].aop.name = "D";
  graph[3].aop.op.set_type(OP_MOVE);

  vector<Vertex::Index> vect(graph.size());

  for (vector<Vertex::Index>::size_type i = 0; i < vect.size(); ++i) {
    vect[i] = i;
  }
  InplaceGenerator::MoveAndSortFullOpsToBack(&graph, &vect);
  EXPECT_EQ(vect.size(), graph.size());
  EXPECT_EQ(graph[vect[0]].aop.name, "B");
  EXPECT_EQ(graph[vect[1]].aop.name, "D");
  EXPECT_EQ(graph[vect[2]].aop.name, "A");
  EXPECT_EQ(graph[vect[3]].aop.name, "C");
}

TEST_F(InplaceGeneratorTest, AssignTempBlocksTest) {
  // End-to-end check of ConvertGraphToDag(): three cycles of different sizes.
  // The two small cycles fit in the scratch space and are broken with MOVE
  // nodes; the 40-block cycle is too big, so its named-file node (graph[8])
  // is converted to a full REPLACE_BZ operation instead.
  Graph graph(9);
  const vector<Extent> empt;  // empty
  const string kFilename = "/foo";

  // Some scratch space:
  GenVertex(&graph[0], empt, VectOfExt(200, 1), "", OP_REPLACE);
  GenVertex(&graph[1], empt, VectOfExt(210, 10), "", OP_REPLACE);
  GenVertex(&graph[2], empt, VectOfExt(220, 1), "", OP_REPLACE);

  // A cycle that requires 10 blocks to break:
  GenVertex(&graph[3], VectOfExt(10, 11), VectOfExt(0, 9), "", OP_BSDIFF);
  graph[3].out_edges[4] = EdgeWithReadDep(VectOfExt(0, 9));
  GenVertex(&graph[4], VectOfExt(0, 9), VectOfExt(10, 11), "", OP_BSDIFF);
  graph[4].out_edges[3] = EdgeWithReadDep(VectOfExt(10, 11));

  // A cycle that requires 9 blocks to break:
  GenVertex(&graph[5], VectOfExt(40, 11), VectOfExt(30, 10), "", OP_BSDIFF);
  graph[5].out_edges[6] = EdgeWithReadDep(VectOfExt(30, 10));
  GenVertex(&graph[6], VectOfExt(30, 10), VectOfExt(40, 11), "", OP_BSDIFF);
  graph[6].out_edges[5] = EdgeWithReadDep(VectOfExt(40, 11));

  // A cycle that requires 40 blocks to break (which is too many):
  GenVertex(&graph[7],
            VectOfExt(120, 50),
            VectOfExt(60, 40),
            "",
            OP_BSDIFF);
  graph[7].out_edges[8] = EdgeWithReadDep(VectOfExt(60, 40));
  GenVertex(&graph[8],
            VectOfExt(60, 40),
            VectOfExt(120, 50),
            kFilename,
            OP_BSDIFF);
  graph[8].out_edges[7] = EdgeWithReadDep(VectOfExt(120, 50));

  graph_utils::DumpGraph(graph);

  vector<Vertex::Index> final_order;

  CreateBlobFile();
  EXPECT_TRUE(InplaceGenerator::ConvertGraphToDag(&graph,
                                                  "/dev/zero",
                                                  blob_fd_,
                                                  &blob_file_size_,
                                                  &final_order,
                                                  Vertex::kInvalidIndex));

  // Build the graph we expect the conversion to produce: nodes 9 and 10 are
  // the inserted MOVE-to-scratch nodes; node 8 became a REPLACE_BZ.
  Graph expected_graph(12);
  GenVertex(&expected_graph[0], empt, VectOfExt(200, 1), "", OP_REPLACE);
  GenVertex(&expected_graph[1], empt, VectOfExt(210, 10), "", OP_REPLACE);
  GenVertex(&expected_graph[2], empt, VectOfExt(220, 1), "", OP_REPLACE);
  GenVertex(&expected_graph[3],
            VectOfExt(10, 11),
            VectOfExt(0, 9),
            "",
            OP_BSDIFF);
  expected_graph[3].out_edges[9] = EdgeWithReadDep(VectOfExt(0, 9));
  GenVertex(&expected_graph[4],
            VectOfExt(60, 9),
            VectOfExt(10, 11),
            "",
            OP_BSDIFF);
  expected_graph[4].out_edges[3] = EdgeWithReadDep(VectOfExt(10, 11));
  expected_graph[4].out_edges[9] = EdgeWithWriteDep(VectOfExt(60, 9));
  GenVertex(&expected_graph[5],
            VectOfExt(40, 11),
            VectOfExt(30, 10),
            "",
            OP_BSDIFF);
  expected_graph[5].out_edges[10] = EdgeWithReadDep(VectOfExt(30, 10));

  GenVertex(&expected_graph[6],
            VectOfExt(60, 10),
            VectOfExt(40, 11),
            "",
            OP_BSDIFF);
  expected_graph[6].out_edges[5] = EdgeWithReadDep(VectOfExt(40, 11));
  expected_graph[6].out_edges[10] = EdgeWithWriteDep(VectOfExt(60, 10));

  GenVertex(&expected_graph[7],
            VectOfExt(120, 50),
            VectOfExt(60, 40),
            "",
            OP_BSDIFF);
  expected_graph[7].out_edges[6] = EdgeWithReadDep(VectOfExt(60, 10));

  GenVertex(&expected_graph[8], empt, VectOfExt(0, 50), "/foo", OP_REPLACE_BZ);
  expected_graph[8].out_edges[7] = EdgeWithReadDep(VectOfExt(120, 50));

  GenVertex(&expected_graph[9],
            VectOfExt(0, 9),
            VectOfExt(60, 9),
            "",
            OP_MOVE);

  GenVertex(&expected_graph[10],
            VectOfExt(30, 10),
            VectOfExt(60, 10),
            "",
            OP_MOVE);
  expected_graph[10].out_edges[4] = EdgeWithReadDep(VectOfExt(60, 9));

  EXPECT_EQ(12, graph.size());
  EXPECT_FALSE(graph.back().valid);
  for (Graph::size_type i = 0; i < graph.size() - 1; i++) {
    EXPECT_TRUE(graph[i].out_edges == expected_graph[i].out_edges);
    if (i == 8) {
      // special case
    } else {
      // EXPECT_TRUE(graph[i] == expected_graph[i]) << "i = " << i;
    }
  }
}

TEST_F(InplaceGeneratorTest, CreateScratchNodeTest) {
  // A scratch node must be a zero-length REPLACE_BZ writing the requested
  // block range.
  Vertex vertex;
  InplaceGenerator::CreateScratchNode(12, 34, &vertex);
  EXPECT_EQ(OP_REPLACE_BZ, vertex.aop.op.type());
  EXPECT_EQ(0, vertex.aop.op.data_offset());
  EXPECT_EQ(0, vertex.aop.op.data_length());
  EXPECT_EQ(1, vertex.aop.op.dst_extents_size());
  EXPECT_EQ(12, vertex.aop.op.dst_extents(0).start_block());
  EXPECT_EQ(34, vertex.aop.op.dst_extents(0).num_blocks());
}

TEST_F(InplaceGeneratorTest, ApplyMapTest) {
  // Values present in the map are substituted; values absent from the map
  // (1, 2, 4) and map entries not in the collection (5) are ignored.
  vector<uint64_t> collection = {1, 2, 3, 4, 6};
  vector<uint64_t> expected_values = {1, 2, 5, 4, 8};
  map<uint64_t, uint64_t> value_map;
  value_map[3] = 5;
  value_map[6] = 8;
  value_map[5] = 10;

  InplaceGenerator::ApplyMap(&collection, value_map);
  EXPECT_EQ(expected_values, collection);
}

// We can't produce MOVE operations with a source or destination in the block 0.
// This test checks that the cycle breaker procedure doesn't produce such
// operations.
TEST_F(InplaceGeneratorTest, ResolveReadAfterWriteDependenciesAvoidMoveToZero) {
  size_t block_size = 4096;
  size_t num_blocks = 4;
  vector<AnnotatedOperation> aops;

  // Create a REPLACE_BZ for block 0, and a circular dependency among all other
  // blocks. This situation would prefer to issue a MOVE to scratch space and
  // the only available block is 0.
  aops.emplace_back();
  aops.back().name = base::StringPrintf("<bz-block-0>");
  aops.back().op.set_type(OP_REPLACE_BZ);
  StoreExtents({ExtentForRange(0, 1)}, aops.back().op.mutable_dst_extents());

  for (size_t i = 1; i < num_blocks; i++) {
    AnnotatedOperation aop;
    aop.name = base::StringPrintf("<op-%" PRIuS ">", i);
    aop.op.set_type(OP_BSDIFF);
    // Each op reads the next block (wrapping around within [1, num_blocks)),
    // forming a single cycle over blocks 1..num_blocks-1.
    StoreExtents({ExtentForRange(1 + i % (num_blocks - 1), 1)},
                 aop.op.mutable_src_extents());
    StoreExtents({ExtentForRange(i, 1)}, aop.op.mutable_dst_extents());
    aops.push_back(aop);
  }

  PartitionConfig part(PartitionName::kRootfs);
  part.path = "/dev/zero";
  part.size = num_blocks * block_size;

  CreateBlobFile();

  // We run two cases here. The first one without enough blocks for the scratch
  // space, forcing it to create a new full operation, and the second case with
  // one extra block in the partition that can be used for the move operation.
  for (const auto part_blocks : vector<uint64_t>{num_blocks, num_blocks + 1}) {
    // NOTE(review): "partition_blocs" in the trace message below is a typo for
    // "partition_blocks"; it is a runtime string, left unchanged here.
    SCOPED_TRACE(base::StringPrintf("Using partition_blocs=%" PRIu64,
                                    part_blocks));
    vector<AnnotatedOperation> result_aops = aops;
    EXPECT_TRUE(InplaceGenerator::ResolveReadAfterWriteDependencies(
        part, part_blocks * block_size, block_size, blob_fd_, &blob_file_size_,
        &result_aops));

    size_t full_ops = 0;
    for (const auto& aop : result_aops) {
      if (aop.op.type() == OP_REPLACE || aop.op.type() == OP_REPLACE_BZ)
        full_ops++;

      if (aop.op.type() != OP_MOVE)
        continue;
      // No MOVE may ever touch block 0.
      for (const Extent& extent : aop.op.src_extents()) {
        EXPECT_NE(0, extent.start_block()) << "On src extents for aop: " << aop;
      }
      for (const Extent& extent : aop.op.dst_extents()) {
        EXPECT_NE(0, extent.start_block()) << "On dst extents for aop: " << aop;
      }
    }

    // If there's extra space in the partition, it should not use a new full
    // operation for it.
    EXPECT_EQ(part_blocks == num_blocks ? 2 : 1, full_ops);

    if (HasNonfatalFailure()) {
      LOG(INFO) << "Result operation list:";
      for (const auto& aop : result_aops) {
        LOG(INFO) << aop;
      }
    }
  }
}

}  // namespace chromeos_update_engine