inplace_generator.cc revision aea4c1cea20dda7ae7e85fc8924a2d784f70d806
//
// Copyright (C) 2015 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#include "update_engine/payload_generator/inplace_generator.h"

#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "update_engine/payload_constants.h"
#include "update_engine/payload_generator/cycle_breaker.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/delta_diff_utils.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/payload_generator/graph_types.h"
#include "update_engine/payload_generator/graph_utils.h"
#include "update_engine/payload_generator/topological_sort.h"
#include "update_engine/update_metadata.pb.h"
#include "update_engine/utils.h"

using std::make_pair;
using std::map;
using std::pair;
using std::set;
using std::string;
using std::vector;

namespace chromeos_update_engine {

using Block = InplaceGenerator::Block;

namespace {

// This class allocates non-existent temp blocks, starting from
// kTempBlockStart. Other code is responsible for converting these
// temp blocks into real blocks, as the client can't read or write to
// these blocks.
class DummyExtentAllocator {
 public:
  vector<Extent> Allocate(const uint64_t block_count) {
    vector<Extent> ret(1);
    ret[0].set_start_block(next_block_);
    ret[0].set_num_blocks(block_count);
    next_block_ += block_count;
    return ret;
  }

 private:
  uint64_t next_block_{kTempBlockStart};
};

// Takes a vector of blocks and returns an equivalent vector of Extent
// objects.
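// For example, assuming AppendBlockToExtents() coalesces blocks that are
// contiguous with the previous extent, the block list {10, 11, 12, 20}
// would compress to the extents {(10, 3), (20, 1)}.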
vector<Extent> CompressExtents(const vector<uint64_t>& blocks) {
  vector<Extent> new_extents;
  for (uint64_t block : blocks) {
    AppendBlockToExtents(&new_extents, block);
  }
  return new_extents;
}

// Helper class to compare two operations by the start block of the first
// Extent in their destination extents, given the indices of the operations
// in the graph.
class IndexedInstallOperationsDstComparator {
 public:
  explicit IndexedInstallOperationsDstComparator(Graph* graph)
    : graph_(graph) {}

  // Compares the operations at vertices |a| and |b| of |graph_|.
  bool operator()(size_t a, size_t b) const {
    return diff_utils::CompareAopsByDestination((*graph_)[a].aop,
                                                (*graph_)[b].aop);
  }

 private:
  const Graph* const graph_;
};

}  // namespace

void InplaceGenerator::CheckGraph(const Graph& graph) {
  for (const Vertex& v : graph) {
    CHECK(v.aop.op.has_type());
  }
}

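// Rewrites the source extents of |vertex| (and the write-before extents of
// its out-edges) so that every block listed in |remove_extents| is replaced
// by the corresponding block in |replace_extents|.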
void InplaceGenerator::SubstituteBlocks(
    Vertex* vertex,
    const vector<Extent>& remove_extents,
    const vector<Extent>& replace_extents) {
  // First, expand out the blocks that op reads from
  vector<uint64_t> read_blocks =
      ExpandExtents(vertex->aop.op.src_extents());
  {
    // Expand remove_extents and replace_extents
    vector<uint64_t> remove_extents_expanded = ExpandExtents(remove_extents);
    vector<uint64_t> replace_extents_expanded = ExpandExtents(replace_extents);
    CHECK_EQ(remove_extents_expanded.size(), replace_extents_expanded.size());
    map<uint64_t, uint64_t> conversion;
    for (vector<uint64_t>::size_type i = 0;
         i < replace_extents_expanded.size(); i++) {
      conversion[remove_extents_expanded[i]] = replace_extents_expanded[i];
    }
    ApplyMap(&read_blocks, conversion);
    for (auto& edge_prop_pair : vertex->out_edges) {
      vector<uint64_t> write_before_deps_expanded = ExpandExtents(
          edge_prop_pair.second.write_extents);
      ApplyMap(&write_before_deps_expanded, conversion);
      edge_prop_pair.second.write_extents =
          CompressExtents(write_before_deps_expanded);
    }
  }
  // Convert read_blocks back to extents
  vertex->aop.op.clear_src_extents();
  vector<Extent> new_extents = CompressExtents(read_blocks);
  StoreExtents(new_extents, vertex->aop.op.mutable_src_extents());
}

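// For each edge in |edges|, appends a MOVE vertex that copies the blocks
// carried by the edge into freshly allocated temp blocks, rewrites the
// reading vertex to read from those temp blocks instead, removes the
// original edge, and records the cut in |out_cuts|.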
bool InplaceGenerator::CutEdges(Graph* graph,
                                const set<Edge>& edges,
                                vector<CutEdgeVertexes>* out_cuts) {
  DummyExtentAllocator scratch_allocator;
  vector<CutEdgeVertexes> cuts;
  cuts.reserve(edges.size());

  uint64_t scratch_blocks_used = 0;
  for (const Edge& edge : edges) {
    cuts.resize(cuts.size() + 1);
    vector<Extent> old_extents =
        (*graph)[edge.first].out_edges[edge.second].extents;
    // Choose some scratch space
    scratch_blocks_used += graph_utils::EdgeWeight(*graph, edge);
    cuts.back().tmp_extents =
        scratch_allocator.Allocate(graph_utils::EdgeWeight(*graph, edge));
    // create vertex to copy original->scratch
    cuts.back().new_vertex = graph->size();
    graph->emplace_back();
    cuts.back().old_src = edge.first;
    cuts.back().old_dst = edge.second;

    EdgeProperties& cut_edge_properties =
        (*graph)[edge.first].out_edges.find(edge.second)->second;

    // This should never happen, as we should only be cutting edges between
    // real file nodes, and write-before relationships are created from
    // a real file node to a temp copy node:
    CHECK(cut_edge_properties.write_extents.empty())
        << "Can't cut edge that has write-before relationship.";

    // make node depend on the copy operation
    (*graph)[edge.first].out_edges.insert(make_pair(graph->size() - 1,
                                                    cut_edge_properties));

    // Set src/dst extents and other proto variables for copy operation
    graph->back().aop.op.set_type(InstallOperation::MOVE);
    StoreExtents(cut_edge_properties.extents,
                 graph->back().aop.op.mutable_src_extents());
    StoreExtents(cuts.back().tmp_extents,
                 graph->back().aop.op.mutable_dst_extents());
    graph->back().aop.op.set_src_length(
        graph_utils::EdgeWeight(*graph, edge) * kBlockSize);
    graph->back().aop.op.set_dst_length(graph->back().aop.op.src_length());

    // make the dest node read from the scratch space
    SubstituteBlocks(
        &((*graph)[edge.second]),
        (*graph)[edge.first].out_edges[edge.second].extents,
        cuts.back().tmp_extents);

    // delete the old edge
    CHECK_EQ(static_cast<Graph::size_type>(1),
             (*graph)[edge.first].out_edges.erase(edge.second));

    // Add an edge from dst to copy operation
    EdgeProperties write_before_edge_properties;
    write_before_edge_properties.write_extents = cuts.back().tmp_extents;
    (*graph)[edge.second].out_edges.insert(
        make_pair(graph->size() - 1, write_before_edge_properties));
  }
  out_cuts->swap(cuts);
  return true;
}

// Creates all the edges for the graph. Writers of a block point to
// readers of the same block. This is because for an edge A->B, B
// must complete before A executes.
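// For example, if vertex 3 writes block 17 and vertex 5 reads block 17, an
// edge 3->5 is created (with block 17 recorded in its extents), meaning
// vertex 5 must run before vertex 3 overwrites the block.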
void InplaceGenerator::CreateEdges(
    Graph* graph,
    const vector<Block>& blocks) {
  for (vector<Block>::size_type i = 0;
       i < blocks.size(); i++) {
    // Blocks with both a reader and writer get an edge
    if (blocks[i].reader == Vertex::kInvalidIndex ||
        blocks[i].writer == Vertex::kInvalidIndex)
      continue;
    // Don't have a node depend on itself
    if (blocks[i].reader == blocks[i].writer)
      continue;
    // See if there's already an edge we can add onto
    Vertex::EdgeMap::iterator edge_it =
        (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
    if (edge_it == (*graph)[blocks[i].writer].out_edges.end()) {
      // No existing edge. Create one
      (*graph)[blocks[i].writer].out_edges.insert(
          make_pair(blocks[i].reader, EdgeProperties()));
      edge_it = (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
      CHECK(edge_it != (*graph)[blocks[i].writer].out_edges.end());
    }
    AppendBlockToExtents(&edge_it->second.extents, i);
  }
}

namespace {

class SortCutsByTopoOrderLess {
 public:
  explicit SortCutsByTopoOrderLess(
      const vector<vector<Vertex::Index>::size_type>& table)
      : table_(table) {}
  bool operator()(const CutEdgeVertexes& a, const CutEdgeVertexes& b) {
    return table_[a.old_dst] < table_[b.old_dst];
  }
 private:
  const vector<vector<Vertex::Index>::size_type>& table_;
};

}  // namespace

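// Builds a reverse lookup table mapping each vertex index to its position in
// |op_indexes| (the current topological order). For example, if |op_indexes|
// is {3, 0, 2}, the resulting table satisfies table[3] == 0, table[0] == 1
// and table[2] == 2.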
void InplaceGenerator::GenerateReverseTopoOrderMap(
    const vector<Vertex::Index>& op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes) {
  vector<vector<Vertex::Index>::size_type> table(op_indexes.size());
  for (vector<Vertex::Index>::size_type i = 0, e = op_indexes.size();
       i != e; ++i) {
    Vertex::Index node = op_indexes[i];
    if (table.size() < (node + 1)) {
      table.resize(node + 1);
    }
    table[node] = i;
  }
  reverse_op_indexes->swap(table);
}

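// Sorts |cuts| so that cuts whose |old_dst| vertex appears earlier in the
// topological order given by |op_indexes| come first.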
void InplaceGenerator::SortCutsByTopoOrder(
    const vector<Vertex::Index>& op_indexes,
    vector<CutEdgeVertexes>* cuts) {
  // first, make a reverse lookup table.
  vector<vector<Vertex::Index>::size_type> table;
  GenerateReverseTopoOrderMap(op_indexes, &table);
  SortCutsByTopoOrderLess less(table);
  sort(cuts->begin(), cuts->end(), less);
}

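// Reorders |op_indexes| so that all full (REPLACE/REPLACE_BZ) operations are
// moved to the back, sorted by their destination extents; the relative order
// of the remaining operations is preserved.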
void InplaceGenerator::MoveAndSortFullOpsToBack(
    Graph* graph,
    vector<Vertex::Index>* op_indexes) {
  vector<Vertex::Index> ret;
  vector<Vertex::Index> full_ops;
  ret.reserve(op_indexes->size());
  for (auto op_index : *op_indexes) {
    InstallOperation_Type type = (*graph)[op_index].aop.op.type();
    if (type == InstallOperation::REPLACE ||
        type == InstallOperation::REPLACE_BZ) {
      full_ops.push_back(op_index);
    } else {
      ret.push_back(op_index);
    }
  }
  LOG(INFO) << "Stats: " << full_ops.size() << " full ops out of "
            << (full_ops.size() + ret.size()) << " total ops.";
  // Sort full ops according to their dst_extents.
  sort(full_ops.begin(), full_ops.end(),
       IndexedInstallOperationsDstComparator(graph));
  ret.insert(ret.end(), full_ops.begin(), full_ops.end());
  op_indexes->swap(ret);
}

namespace {

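// Returns true if any extent in |extents| touches the temp block range that
// starts at kTempBlockStart, logging the offending extent.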
template<typename T>
bool TempBlocksExistInExtents(const T& extents) {
  for (int i = 0, e = extents.size(); i < e; ++i) {
    Extent extent = GetElement(extents, i);
    uint64_t start = extent.start_block();
    uint64_t num = extent.num_blocks();
    if (start >= kTempBlockStart || (start + num) >= kTempBlockStart) {
      LOG(ERROR) << "temp block!";
      LOG(ERROR) << "start: " << start << ", num: " << num;
      LOG(ERROR) << "kTempBlockStart: " << kTempBlockStart;
      LOG(ERROR) << "returning true";
      return true;
    }
    // check for wrap-around, which would be a bug:
    CHECK(start <= (start + num));
  }
  return false;
}

// Converts the cuts, which must all have the same |old_dst| member,
// to full. It does this by converting the |old_dst| to REPLACE or
// REPLACE_BZ, dropping all incoming edges to |old_dst|, and marking
// all temp nodes invalid.
bool ConvertCutsToFull(
    Graph* graph,
    const string& new_part,
    BlobFileWriter* blob_file,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());
  set<Vertex::Index> deleted_nodes;
  for (const CutEdgeVertexes& cut : cuts) {
    TEST_AND_RETURN_FALSE(InplaceGenerator::ConvertCutToFullOp(
        graph,
        cut,
        new_part,
        blob_file));
    deleted_nodes.insert(cut.new_vertex);
  }
  deleted_nodes.insert(cuts[0].old_dst);

  vector<Vertex::Index> new_op_indexes;
  new_op_indexes.reserve(op_indexes->size());
  for (Vertex::Index vertex_index : *op_indexes) {
    if (utils::SetContainsKey(deleted_nodes, vertex_index))
      continue;
    new_op_indexes.push_back(vertex_index);
  }
  new_op_indexes.push_back(cuts[0].old_dst);
  op_indexes->swap(new_op_indexes);
  InplaceGenerator::GenerateReverseTopoOrderMap(*op_indexes,
                                                reverse_op_indexes);
  return true;
}

// Tries to assign temp blocks for a collection of cuts, all of which share
// the same old_dst member. If temp blocks can't be found, old_dst will be
// converted to a REPLACE or REPLACE_BZ operation. Returns true on success,
// which can happen even if blocks are converted to full. Returns false
// on exceptional error cases.
bool AssignBlockForAdjoiningCuts(
    Graph* graph,
    const string& new_part,
    BlobFileWriter* blob_file,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());
  const Vertex::Index old_dst = cuts[0].old_dst;
  // Calculate # of blocks needed
  uint64_t blocks_needed = 0;
  vector<uint64_t> cuts_blocks_needed(cuts.size());
  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
    uint64_t cut_blocks_needed = 0;
    for (const Extent& extent : cuts[i].tmp_extents) {
      cut_blocks_needed += extent.num_blocks();
    }
    blocks_needed += cut_blocks_needed;
    cuts_blocks_needed[i] = cut_blocks_needed;
  }

  // Find enough blocks
  ExtentRanges scratch_ranges;
  // Each node that's supplying temp blocks and the corresponding blocks:
  typedef vector<pair<Vertex::Index, ExtentRanges>> SupplierVector;
  SupplierVector block_suppliers;
  uint64_t scratch_blocks_found = 0;
  for (vector<Vertex::Index>::size_type i = (*reverse_op_indexes)[old_dst] + 1,
           e = op_indexes->size(); i < e; ++i) {
    Vertex::Index test_node = (*op_indexes)[i];
    if (!(*graph)[test_node].valid)
      continue;
    // See if this node has sufficient blocks
    ExtentRanges ranges;
    ranges.AddRepeatedExtents((*graph)[test_node].aop.op.dst_extents());
    ranges.SubtractExtent(ExtentForRange(
        kTempBlockStart, kSparseHole - kTempBlockStart));
    ranges.SubtractRepeatedExtents((*graph)[test_node].aop.op.src_extents());
    // For now, for simplicity, subtract out all blocks in read-before
    // dependencies.
    for (Vertex::EdgeMap::const_iterator edge_i =
             (*graph)[test_node].out_edges.begin(),
             edge_e = (*graph)[test_node].out_edges.end();
         edge_i != edge_e; ++edge_i) {
      ranges.SubtractExtents(edge_i->second.extents);
    }

    // Prevent using block 0 as scratch space due to crbug.com/480751.
    if (ranges.ContainsBlock(0)) {
      LOG(INFO) << "Removing block 0 from the selected scratch range in vertex "
                << i;
      ranges.SubtractBlock(0);
    }

    if (ranges.blocks() == 0)
      continue;

    if (ranges.blocks() + scratch_blocks_found > blocks_needed) {
      // trim down ranges
      vector<Extent> new_ranges = ranges.GetExtentsForBlockCount(
          blocks_needed - scratch_blocks_found);
      ranges = ExtentRanges();
      ranges.AddExtents(new_ranges);
    }
    scratch_ranges.AddRanges(ranges);
    block_suppliers.push_back(make_pair(test_node, ranges));
    scratch_blocks_found += ranges.blocks();
    if (scratch_ranges.blocks() >= blocks_needed)
      break;
  }
  if (scratch_ranges.blocks() < blocks_needed) {
    LOG(INFO) << "Unable to find sufficient scratch";
    TEST_AND_RETURN_FALSE(ConvertCutsToFull(graph,
                                            new_part,
                                            blob_file,
                                            op_indexes,
                                            reverse_op_indexes,
                                            cuts));
    return true;
  }
  // Use the scratch we found
  TEST_AND_RETURN_FALSE(scratch_ranges.blocks() == scratch_blocks_found);

  // Make all the suppliers depend on this node
  for (const auto& index_range_pair : block_suppliers) {
    graph_utils::AddReadBeforeDepExtents(
        &(*graph)[index_range_pair.first],
        old_dst,
        index_range_pair.second.GetExtentsForBlockCount(
            index_range_pair.second.blocks()));
  }

  // Replace temp blocks in each cut
  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
    const CutEdgeVertexes& cut = cuts[i];
    vector<Extent> real_extents =
        scratch_ranges.GetExtentsForBlockCount(cuts_blocks_needed[i]);
    scratch_ranges.SubtractExtents(real_extents);

    // Fix the old dest node w/ the real blocks
    InplaceGenerator::SubstituteBlocks(&(*graph)[old_dst],
                                       cut.tmp_extents,
                                       real_extents);

    // Fix the new node w/ the real blocks. Since the new node is just a
    // copy operation, we can replace all the dest extents w/ the real
    // blocks.
    InstallOperation* op = &(*graph)[cut.new_vertex].aop.op;
    op->clear_dst_extents();
    StoreExtents(real_extents, op->mutable_dst_extents());
  }
  return true;
}

}  // namespace

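// Walks |cuts| from the back of the topological order to the front, grouping
// adjacent cuts that share the same |old_dst| vertex and assigning real
// scratch blocks to each group in turn (falling back to converting the
// destination to a full operation when no scratch can be found).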
bool InplaceGenerator::AssignTempBlocks(
    Graph* graph,
    const string& new_part,
    BlobFileWriter* blob_file,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());

  // group of cuts w/ the same old_dst:
  vector<CutEdgeVertexes> cuts_group;

  for (vector<CutEdgeVertexes>::size_type i = cuts.size() - 1, e = 0;
       true ; --i) {
    LOG(INFO) << "Fixing temp blocks in cut " << i
              << ": old dst: " << cuts[i].old_dst << " new vertex: "
              << cuts[i].new_vertex << " path: "
              << (*graph)[cuts[i].old_dst].aop.name;

    if (cuts_group.empty() || (cuts_group[0].old_dst == cuts[i].old_dst)) {
      cuts_group.push_back(cuts[i]);
    } else {
      CHECK(!cuts_group.empty());
      TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
                                                        new_part,
                                                        blob_file,
                                                        op_indexes,
                                                        reverse_op_indexes,
                                                        cuts_group));
      cuts_group.clear();
      cuts_group.push_back(cuts[i]);
    }

    if (i == e) {
      // break out of for() loop
      break;
    }
  }
  CHECK(!cuts_group.empty());
  TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
                                                    new_part,
                                                    blob_file,
                                                    op_indexes,
                                                    reverse_op_indexes,
                                                    cuts_group));
  return true;
}

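// Returns true only if no valid vertex (and none of its out-edges) still
// references a temp block in its source or destination extents.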
bool InplaceGenerator::NoTempBlocksRemain(const Graph& graph) {
  size_t idx = 0;
  for (Graph::const_iterator it = graph.begin(), e = graph.end(); it != e;
       ++it, ++idx) {
    if (!it->valid)
      continue;
    const InstallOperation& op = it->aop.op;
    if (TempBlocksExistInExtents(op.dst_extents()) ||
        TempBlocksExistInExtents(op.src_extents())) {
      LOG(INFO) << "bad extents in node " << idx;
      LOG(INFO) << "so yeah";
      return false;
    }

    // Check out-edges:
    for (const auto& edge_prop_pair : it->out_edges) {
      if (TempBlocksExistInExtents(edge_prop_pair.second.extents) ||
          TempBlocksExistInExtents(edge_prop_pair.second.write_extents)) {
        LOG(INFO) << "bad out edge in node " << idx;
        LOG(INFO) << "so yeah";
        return false;
      }
    }
  }
  return true;
}

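// Replaces the destination vertex of |cut| with a full (REPLACE or
// REPLACE_BZ) operation read from |new_part|, drops its incoming edges, and
// marks the temporary copy vertex created for the cut as invalid.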
bool InplaceGenerator::ConvertCutToFullOp(Graph* graph,
                                          const CutEdgeVertexes& cut,
                                          const string& new_part,
                                          BlobFileWriter* blob_file) {
  // Drop all incoming edges, keep all outgoing edges

  // Keep all outgoing edges
  if ((*graph)[cut.old_dst].aop.op.type() != InstallOperation::REPLACE_BZ &&
      (*graph)[cut.old_dst].aop.op.type() != InstallOperation::REPLACE) {
    Vertex::EdgeMap out_edges = (*graph)[cut.old_dst].out_edges;
    graph_utils::DropWriteBeforeDeps(&out_edges);

    // Replace the operation with a REPLACE or REPLACE_BZ to generate the same
    // |new_extents| list of blocks and update the graph.
    vector<AnnotatedOperation> new_aop;
    vector<Extent> new_extents;
    ExtentsToVector((*graph)[cut.old_dst].aop.op.dst_extents(),
                    &new_extents);
    TEST_AND_RETURN_FALSE(diff_utils::DeltaReadFile(
        &new_aop,
        "",  // old_part
        new_part,
        vector<Extent>(),  // old_extents
        new_extents,
        (*graph)[cut.old_dst].aop.name,
        -1,  // chunk_blocks, forces to have a single operation.
        blob_file,
        false));  // src_ops_allowed
    TEST_AND_RETURN_FALSE(new_aop.size() == 1);
    TEST_AND_RETURN_FALSE(AddInstallOpToGraph(
      graph, cut.old_dst, nullptr, new_aop.front().op, new_aop.front().name));

    (*graph)[cut.old_dst].out_edges = out_edges;

    // Right now we don't have doubly-linked edges, so we have to scan
    // the whole graph.
    graph_utils::DropIncomingEdgesTo(graph, cut.old_dst);
  }

  // Delete temp node
  (*graph)[cut.old_src].out_edges.erase(cut.new_vertex);
  CHECK((*graph)[cut.old_dst].out_edges.find(cut.new_vertex) ==
        (*graph)[cut.old_dst].out_edges.end());
  (*graph)[cut.new_vertex].valid = false;
  LOG(INFO) << "marked node invalid: " << cut.new_vertex;
  return true;
}

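// Breaks cycles in the graph, cuts the offending edges, topologically sorts
// the result, moves full operations to the back, and assigns real scratch
// blocks to every temp extent introduced by the cuts. On success,
// |final_order| holds a valid in-place application order.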
bool InplaceGenerator::ConvertGraphToDag(Graph* graph,
                                         const string& new_part,
                                         BlobFileWriter* blob_file,
                                         vector<Vertex::Index>* final_order,
                                         Vertex::Index scratch_vertex) {
  CycleBreaker cycle_breaker;
  LOG(INFO) << "Finding cycles...";
  set<Edge> cut_edges;
  cycle_breaker.BreakCycles(*graph, &cut_edges);
  LOG(INFO) << "done finding cycles";
  CheckGraph(*graph);

  LOG(INFO) << "Cutting cycles...";
  vector<CutEdgeVertexes> cuts;
  TEST_AND_RETURN_FALSE(CutEdges(graph, cut_edges, &cuts));
  LOG(INFO) << "done cutting cycles";
  LOG(INFO) << "There are " << cuts.size() << " cuts.";
  CheckGraph(*graph);

  LOG(INFO) << "Creating initial topological order...";
  TopologicalSort(*graph, final_order);
  LOG(INFO) << "done with initial topo order";
  CheckGraph(*graph);

  LOG(INFO) << "Moving full ops to the back";
  MoveAndSortFullOpsToBack(graph, final_order);
  LOG(INFO) << "done moving full ops to back";

  vector<vector<Vertex::Index>::size_type> inverse_final_order;
  GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);

  SortCutsByTopoOrder(*final_order, &cuts);

  if (!cuts.empty())
    TEST_AND_RETURN_FALSE(AssignTempBlocks(graph,
                                           new_part,
                                           blob_file,
                                           final_order,
                                           &inverse_final_order,
                                           cuts));
  LOG(INFO) << "Making sure all temp blocks have been allocated";

  // Remove the scratch node, if any
  if (scratch_vertex != Vertex::kInvalidIndex) {
    final_order->erase(final_order->begin() +
                       inverse_final_order[scratch_vertex]);
    (*graph)[scratch_vertex].valid = false;
    GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);
  }

  graph_utils::DumpGraph(*graph);
  CHECK(NoTempBlocksRemain(*graph));
  LOG(INFO) << "done making sure all temp blocks are allocated";
  return true;
}

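// Fills in |vertex| with a zero-length REPLACE_BZ operation whose destination
// extent covers [start_block, start_block + num_blocks), which lets that
// unused range serve as scratch space.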
void InplaceGenerator::CreateScratchNode(uint64_t start_block,
                                         uint64_t num_blocks,
                                         Vertex* vertex) {
  vertex->aop.name = "<scratch>";
  vertex->aop.op.set_type(InstallOperation::REPLACE_BZ);
  vertex->aop.op.set_data_offset(0);
  vertex->aop.op.set_data_length(0);
  Extent* extent = vertex->aop.op.add_dst_extents();
  extent->set_start_block(start_block);
  extent->set_num_blocks(num_blocks);
}

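// Records |vertex| as the reader of every block in |operation|'s source
// extents and as the writer of every block in its destination extents,
// aborting (LOG(FATAL)) if a block already has a reader or writer assigned.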
bool InplaceGenerator::AddInstallOpToBlocksVector(
    const InstallOperation& operation,
    const Graph& graph,
    Vertex::Index vertex,
    vector<Block>* blocks) {
  // Every operation must write to at least one block.
  TEST_AND_RETURN_FALSE(operation.dst_extents_size() > 0);

  enum BlockField { READER = 0, WRITER, BLOCK_FIELD_COUNT };
  for (int field = READER; field < BLOCK_FIELD_COUNT; field++) {
    const int extents_size =
        (field == READER) ? operation.src_extents_size() :
        operation.dst_extents_size();
    const char* past_participle = (field == READER) ? "read" : "written";
    const google::protobuf::RepeatedPtrField<Extent>& extents =
        (field == READER) ? operation.src_extents() : operation.dst_extents();
    Vertex::Index Block::*access_type = (field == READER) ?
        &Block::reader : &Block::writer;

    for (int i = 0; i < extents_size; i++) {
      const Extent& extent = extents.Get(i);
      for (uint64_t block = extent.start_block();
           block < (extent.start_block() + extent.num_blocks()); block++) {
        if ((*blocks)[block].*access_type != Vertex::kInvalidIndex) {
          LOG(FATAL) << "Block " << block << " is already "
                     << past_participle << " by "
                     << (*blocks)[block].*access_type << "("
                     << graph[(*blocks)[block].*access_type].aop.name
                     << ") and also " << vertex << "("
                     << graph[vertex].aop.name << ")";
        }
        (*blocks)[block].*access_type = vertex;
      }
    }
  }
  return true;
}

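// Stores |operation| in |existing_vertex| (or in a newly appended vertex when
// |existing_vertex| is Vertex::kInvalidIndex) and, if |blocks| is provided,
// updates the per-block reader/writer table accordingly.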
bool InplaceGenerator::AddInstallOpToGraph(Graph* graph,
                                           Vertex::Index existing_vertex,
                                           vector<Block>* blocks,
                                           const InstallOperation operation,
                                           const string& op_name) {
  Vertex::Index vertex = existing_vertex;
  if (vertex == Vertex::kInvalidIndex) {
    graph->emplace_back();
    vertex = graph->size() - 1;
  }
  (*graph)[vertex].aop.op = operation;
  CHECK((*graph)[vertex].aop.op.has_type());
  (*graph)[vertex].aop.name = op_name;

  if (blocks)
    TEST_AND_RETURN_FALSE(InplaceGenerator::AddInstallOpToBlocksVector(
        (*graph)[vertex].aop.op,
        *graph,
        vertex,
        blocks));
  return true;
}

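// Remaps every element of |collection| through |the_map|; elements without a
// mapping are left unchanged.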
void InplaceGenerator::ApplyMap(vector<uint64_t>* collection,
                                const map<uint64_t, uint64_t>& the_map) {
  for (uint64_t& elem : *collection) {
    const auto& map_it = the_map.find(elem);
    if (map_it != the_map.end())
      elem = map_it->second;
  }
}

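// Builds a dependency graph from |aops|, adds a scratch node for any unused
// space at the end of the partition, creates the read-after-write edges,
// converts the graph to a DAG and rewrites |aops| in the resulting
// topological order.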
bool InplaceGenerator::ResolveReadAfterWriteDependencies(
    const PartitionConfig& new_part,
    uint64_t partition_size,
    size_t block_size,
    BlobFileWriter* blob_file,
    vector<AnnotatedOperation>* aops) {
  // Convert the operations to the graph.
  Graph graph;
  CheckGraph(graph);
  vector<Block> blocks(new_part.size / block_size);
  for (const auto& aop : *aops) {
    AddInstallOpToGraph(
        &graph, Vertex::kInvalidIndex, &blocks, aop.op, aop.name);
  }
  CheckGraph(graph);

  // Final scratch block (if there's space)
  Vertex::Index scratch_vertex = Vertex::kInvalidIndex;
  if (blocks.size() < (partition_size / block_size)) {
    scratch_vertex = graph.size();
    graph.emplace_back();
    size_t scratch_blocks = (partition_size / block_size) - blocks.size();
    LOG(INFO) << "Added " << scratch_blocks << " scratch space blocks.";
    CreateScratchNode(blocks.size(), scratch_blocks, &graph.back());
  }
  CheckGraph(graph);

  LOG(INFO) << "Creating edges...";
  CreateEdges(&graph, blocks);
  LOG(INFO) << "Done creating edges";
  CheckGraph(graph);

  vector<Vertex::Index> final_order;
  TEST_AND_RETURN_FALSE(ConvertGraphToDag(
      &graph,
      new_part.path,
      blob_file,
      &final_order,
      scratch_vertex));

  // Copy operations over to the |aops| vector in the final_order generated by
  // the topological sort.
  aops->clear();
  aops->reserve(final_order.size());
  for (const Vertex::Index vertex_index : final_order) {
    const Vertex& vertex = graph[vertex_index];
    aops->push_back(vertex.aop);
  }
  return true;
}

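// Generates the in-place delta operations for a single partition and then
// reorders them so that no operation reads a block that an earlier operation
// has already overwritten.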
bool InplaceGenerator::GenerateOperationsForPartition(
    const PartitionConfig& old_part,
    const PartitionConfig& new_part,
    uint64_t partition_size,
    size_t block_size,
    ssize_t hard_chunk_blocks,
    size_t soft_chunk_blocks,
    BlobFileWriter* blob_file,
    vector<AnnotatedOperation>* aops) {
  const string part_name = PartitionNameString(new_part.name);
  LOG(INFO) << "Delta compressing " << part_name << " partition...";
  TEST_AND_RETURN_FALSE(
      diff_utils::DeltaReadPartition(aops,
                                     old_part,
                                     new_part,
                                     hard_chunk_blocks,
                                     soft_chunk_blocks,
                                     blob_file,
                                     false));  // src_ops_allowed
  LOG(INFO) << "Done reading " << part_name;

  TEST_AND_RETURN_FALSE(
      ResolveReadAfterWriteDependencies(new_part,
                                        partition_size,
                                        block_size,
                                        blob_file,
                                        aops));
  LOG(INFO) << "Done reordering " << part_name;
  return true;
}

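// Entry point of the in-place payload generator: computes the chunk sizes in
// blocks and generates the reordered operation lists for the rootfs and
// kernel partitions.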
bool InplaceGenerator::GenerateOperations(
    const PayloadGenerationConfig& config,
    BlobFileWriter* blob_file,
    vector<AnnotatedOperation>* rootfs_ops,
    vector<AnnotatedOperation>* kernel_ops) {
  ssize_t hard_chunk_blocks = (config.hard_chunk_size == -1 ? -1 :
                               config.hard_chunk_size / config.block_size);
  size_t soft_chunk_blocks = config.soft_chunk_size / config.block_size;

  TEST_AND_RETURN_FALSE(GenerateOperationsForPartition(
      config.source.rootfs,
      config.target.rootfs,
      config.rootfs_partition_size,
      config.block_size,
      hard_chunk_blocks,
      soft_chunk_blocks,
      blob_file,
      rootfs_ops));

  TEST_AND_RETURN_FALSE(GenerateOperationsForPartition(
      config.source.kernel,
      config.target.kernel,
      config.target.kernel.size,  // kernel "filesystem" is the whole partition.
      config.block_size,
      hard_chunk_blocks,
      soft_chunk_blocks,
      blob_file,
      kernel_ops));

  return true;
}

}  // namespace chromeos_update_engine