inplace_generator.cc revision 896fdbaa1cc9a422ac36ef455d02c3cef1f27816
// Copyright 2015 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "update_engine/payload_generator/inplace_generator.h"

#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "update_engine/payload_constants.h"
#include "update_engine/payload_generator/cycle_breaker.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/delta_diff_utils.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/payload_generator/graph_types.h"
#include "update_engine/payload_generator/graph_utils.h"
#include "update_engine/payload_generator/topological_sort.h"
#include "update_engine/update_metadata.pb.h"
#include "update_engine/utils.h"

using std::make_pair;
using std::map;
using std::pair;
using std::set;
using std::string;
using std::vector;

namespace chromeos_update_engine {

using Block = InplaceGenerator::Block;

namespace {

// This class allocates non-existent temp blocks, starting from
// kTempBlockStart. Other code is responsible for converting these
// temp blocks into real blocks, as the client can't read or write to
// these blocks.
class DummyExtentAllocator {
 public:
  vector<Extent> Allocate(const uint64_t block_count) {
    vector<Extent> ret(1);
    ret[0].set_start_block(next_block_);
    ret[0].set_num_blocks(block_count);
    next_block_ += block_count;
    return ret;
  }

 private:
  uint64_t next_block_{kTempBlockStart};
};

// Takes a vector of blocks and returns an equivalent vector of Extent
// objects.
vector<Extent> CompressExtents(const vector<uint64_t>& blocks) {
  vector<Extent> new_extents;
  for (uint64_t block : blocks) {
    AppendBlockToExtents(&new_extents, block);
  }
  return new_extents;
}

// Helper class to compare two operations by start block of the first Extent in
// their destination extents given the index of the operations in the graph.
class IndexedInstallOperationsDstComparator {
 public:
  explicit IndexedInstallOperationsDstComparator(Graph* graph)
    : graph_(graph) {}

  // Compares the operations at vertex indices |a| and |b| of |graph_|.
  bool operator()(size_t a, size_t b) const {
    return diff_utils::CompareAopsByDestination((*graph_)[a].aop,
                                                (*graph_)[b].aop);
  }

 private:
  const Graph* const graph_;
};

}  // namespace

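// Verifies that every vertex in |graph| has an operation with its type set.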
void InplaceGenerator::CheckGraph(const Graph& graph) {
  for (const Vertex& v : graph) {
    CHECK(v.aop.op.has_type());
  }
}

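// Rewrites |vertex| so that any read of a block listed in |remove_extents|
// becomes a read of the corresponding block in |replace_extents|. The
// write-before dependencies on the vertex's outgoing edges are remapped the
// same way.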
void InplaceGenerator::SubstituteBlocks(
    Vertex* vertex,
    const vector<Extent>& remove_extents,
    const vector<Extent>& replace_extents) {
  // First, expand out the blocks that op reads from
  vector<uint64_t> read_blocks =
      ExpandExtents(vertex->aop.op.src_extents());
  {
    // Expand remove_extents and replace_extents
    vector<uint64_t> remove_extents_expanded = ExpandExtents(remove_extents);
    vector<uint64_t> replace_extents_expanded = ExpandExtents(replace_extents);
    CHECK_EQ(remove_extents_expanded.size(), replace_extents_expanded.size());
    map<uint64_t, uint64_t> conversion;
    for (vector<uint64_t>::size_type i = 0;
         i < replace_extents_expanded.size(); i++) {
      conversion[remove_extents_expanded[i]] = replace_extents_expanded[i];
    }
    ApplyMap(&read_blocks, conversion);
    for (auto& edge_prop_pair : vertex->out_edges) {
      vector<uint64_t> write_before_deps_expanded = ExpandExtents(
          edge_prop_pair.second.write_extents);
      ApplyMap(&write_before_deps_expanded, conversion);
      edge_prop_pair.second.write_extents =
          CompressExtents(write_before_deps_expanded);
    }
  }
  // Convert read_blocks back to extents
  vertex->aop.op.clear_src_extents();
  vector<Extent> new_extents = CompressExtents(read_blocks);
  StoreExtents(new_extents, vertex->aop.op.mutable_src_extents());
}

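// Cuts each edge in |edges| by inserting a temp-copy vertex: a MOVE operation
// that copies the contended blocks into temporary scratch blocks. The source
// vertex keeps an edge to the new copy vertex, the destination vertex is
// rewritten to read from the scratch blocks instead, and a write-before edge
// from the destination to the copy vertex is added. Information about each
// cut is appended to |out_cuts|.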
bool InplaceGenerator::CutEdges(Graph* graph,
                                const set<Edge>& edges,
                                vector<CutEdgeVertexes>* out_cuts) {
  DummyExtentAllocator scratch_allocator;
  vector<CutEdgeVertexes> cuts;
  cuts.reserve(edges.size());

  uint64_t scratch_blocks_used = 0;
  for (const Edge& edge : edges) {
    cuts.resize(cuts.size() + 1);
    vector<Extent> old_extents =
        (*graph)[edge.first].out_edges[edge.second].extents;
    // Choose some scratch space
    scratch_blocks_used += graph_utils::EdgeWeight(*graph, edge);
    cuts.back().tmp_extents =
        scratch_allocator.Allocate(graph_utils::EdgeWeight(*graph, edge));
    // create vertex to copy original->scratch
    cuts.back().new_vertex = graph->size();
    graph->emplace_back();
    cuts.back().old_src = edge.first;
    cuts.back().old_dst = edge.second;

    EdgeProperties& cut_edge_properties =
        (*graph)[edge.first].out_edges.find(edge.second)->second;

    // This should never happen, as we should only be cutting edges between
    // real file nodes, and write-before relationships are created from
    // a real file node to a temp copy node:
    CHECK(cut_edge_properties.write_extents.empty())
        << "Can't cut edge that has write-before relationship.";

    // make node depend on the copy operation
    (*graph)[edge.first].out_edges.insert(make_pair(graph->size() - 1,
                                                    cut_edge_properties));

    // Set src/dst extents and other proto variables for copy operation
    graph->back().aop.op.set_type(
        DeltaArchiveManifest_InstallOperation_Type_MOVE);
    StoreExtents(cut_edge_properties.extents,
                 graph->back().aop.op.mutable_src_extents());
    StoreExtents(cuts.back().tmp_extents,
                 graph->back().aop.op.mutable_dst_extents());
    graph->back().aop.op.set_src_length(
        graph_utils::EdgeWeight(*graph, edge) * kBlockSize);
    graph->back().aop.op.set_dst_length(graph->back().aop.op.src_length());

    // make the dest node read from the scratch space
    SubstituteBlocks(
        &((*graph)[edge.second]),
        (*graph)[edge.first].out_edges[edge.second].extents,
        cuts.back().tmp_extents);

    // delete the old edge
    CHECK_EQ(static_cast<Graph::size_type>(1),
             (*graph)[edge.first].out_edges.erase(edge.second));

    // Add an edge from dst to copy operation
    EdgeProperties write_before_edge_properties;
    write_before_edge_properties.write_extents = cuts.back().tmp_extents;
    (*graph)[edge.second].out_edges.insert(
        make_pair(graph->size() - 1, write_before_edge_properties));
  }
  out_cuts->swap(cuts);
  return true;
}

// Creates all the edges for the graph. Writers of a block point to
// readers of the same block. This is because for an edge A->B, B
// must complete before A executes.
void InplaceGenerator::CreateEdges(
    Graph* graph,
    const vector<Block>& blocks) {
  for (vector<Block>::size_type i = 0;
       i < blocks.size(); i++) {
    // Blocks with both a reader and writer get an edge
    if (blocks[i].reader == Vertex::kInvalidIndex ||
        blocks[i].writer == Vertex::kInvalidIndex)
      continue;
    // Don't have a node depend on itself
    if (blocks[i].reader == blocks[i].writer)
      continue;
    // See if there's already an edge we can add onto
    Vertex::EdgeMap::iterator edge_it =
        (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
    if (edge_it == (*graph)[blocks[i].writer].out_edges.end()) {
      // No existing edge. Create one
      (*graph)[blocks[i].writer].out_edges.insert(
          make_pair(blocks[i].reader, EdgeProperties()));
      edge_it = (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
      CHECK(edge_it != (*graph)[blocks[i].writer].out_edges.end());
    }
    AppendBlockToExtents(&edge_it->second.extents, i);
  }
}

namespace {

class SortCutsByTopoOrderLess {
 public:
  explicit SortCutsByTopoOrderLess(
      const vector<vector<Vertex::Index>::size_type>& table)
      : table_(table) {}
  bool operator()(const CutEdgeVertexes& a, const CutEdgeVertexes& b) {
    return table_[a.old_dst] < table_[b.old_dst];
  }
 private:
  const vector<vector<Vertex::Index>::size_type>& table_;
};

}  // namespace

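// Builds the inverse of |op_indexes|: for each vertex index, the position it
// occupies in the topological order.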
void InplaceGenerator::GenerateReverseTopoOrderMap(
    const vector<Vertex::Index>& op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes) {
  vector<vector<Vertex::Index>::size_type> table(op_indexes.size());
  for (vector<Vertex::Index>::size_type i = 0, e = op_indexes.size();
       i != e; ++i) {
    Vertex::Index node = op_indexes[i];
    if (table.size() < (node + 1)) {
      table.resize(node + 1);
    }
    table[node] = i;
  }
  reverse_op_indexes->swap(table);
}

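// Sorts |cuts| so that cuts whose |old_dst| vertex comes earlier in the
// topological order |op_indexes| appear first.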
void InplaceGenerator::SortCutsByTopoOrder(
    const vector<Vertex::Index>& op_indexes,
    vector<CutEdgeVertexes>* cuts) {
  // first, make a reverse lookup table.
  vector<vector<Vertex::Index>::size_type> table;
  GenerateReverseTopoOrderMap(op_indexes, &table);
  SortCutsByTopoOrderLess less(table);
  sort(cuts->begin(), cuts->end(), less);
}

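// Moves all REPLACE/REPLACE_BZ (full) operations to the back of |op_indexes|,
// sorted by their destination extents. Full operations read nothing from the
// old partition, so they can safely run last.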
void InplaceGenerator::MoveAndSortFullOpsToBack(
    Graph* graph,
    vector<Vertex::Index>* op_indexes) {
  vector<Vertex::Index> ret;
  vector<Vertex::Index> full_ops;
  ret.reserve(op_indexes->size());
  for (auto op_index : *op_indexes) {
    DeltaArchiveManifest_InstallOperation_Type type =
        (*graph)[op_index].aop.op.type();
    if (type == DeltaArchiveManifest_InstallOperation_Type_REPLACE ||
        type == DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ) {
      full_ops.push_back(op_index);
    } else {
      ret.push_back(op_index);
    }
  }
  LOG(INFO) << "Stats: " << full_ops.size() << " full ops out of "
            << (full_ops.size() + ret.size()) << " total ops.";
  // Sort full ops according to their dst_extents.
  sort(full_ops.begin(), full_ops.end(),
       IndexedInstallOperationsDstComparator(graph));
  ret.insert(ret.end(), full_ops.begin(), full_ops.end());
  op_indexes->swap(ret);
}

namespace {

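// Returns true if any extent in |extents| touches a block at or beyond
// kTempBlockStart, i.e. a temp block that was never remapped to a real block.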
template<typename T>
bool TempBlocksExistInExtents(const T& extents) {
  for (int i = 0, e = extents.size(); i < e; ++i) {
    Extent extent = GetElement(extents, i);
    uint64_t start = extent.start_block();
    uint64_t num = extent.num_blocks();
    if (start >= kTempBlockStart || (start + num) >= kTempBlockStart) {
      LOG(ERROR) << "Found temp block: start: " << start << ", num: " << num
                 << ", kTempBlockStart: " << kTempBlockStart;
      return true;
    }
    // check for wrap-around, which would be a bug:
    CHECK(start <= (start + num));
  }
  return false;
}

// Converts the cuts, which must all have the same |old_dst| member,
// to full. It does this by converting the |old_dst| to REPLACE or
// REPLACE_BZ, dropping all incoming edges to |old_dst|, and marking
// all temp nodes invalid.
bool ConvertCutsToFull(
    Graph* graph,
    const string& new_part,
    int data_fd,
    off_t* data_file_size,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());
  set<Vertex::Index> deleted_nodes;
  for (const CutEdgeVertexes& cut : cuts) {
    TEST_AND_RETURN_FALSE(InplaceGenerator::ConvertCutToFullOp(
        graph,
        cut,
        new_part,
        data_fd,
        data_file_size));
    deleted_nodes.insert(cut.new_vertex);
  }
  deleted_nodes.insert(cuts[0].old_dst);

  vector<Vertex::Index> new_op_indexes;
  new_op_indexes.reserve(op_indexes->size());
  for (Vertex::Index vertex_index : *op_indexes) {
    if (utils::SetContainsKey(deleted_nodes, vertex_index))
      continue;
    new_op_indexes.push_back(vertex_index);
  }
  new_op_indexes.push_back(cuts[0].old_dst);
  op_indexes->swap(new_op_indexes);
  InplaceGenerator::GenerateReverseTopoOrderMap(*op_indexes,
                                                reverse_op_indexes);
  return true;
}

// Tries to assign temp blocks for a collection of cuts, all of which share
// the same old_dst member. If temp blocks can't be found, old_dst will be
// converted to a REPLACE or REPLACE_BZ operation. Returns true on success,
// which can happen even if blocks are converted to full. Returns false
// on exceptional error cases.
bool AssignBlockForAdjoiningCuts(
    Graph* graph,
    const string& new_part,
    int data_fd,
    off_t* data_file_size,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());
  const Vertex::Index old_dst = cuts[0].old_dst;
  // Calculate # of blocks needed
  uint64_t blocks_needed = 0;
  vector<uint64_t> cuts_blocks_needed(cuts.size());
  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
    uint64_t cut_blocks_needed = 0;
    for (const Extent& extent : cuts[i].tmp_extents) {
      cut_blocks_needed += extent.num_blocks();
    }
    blocks_needed += cut_blocks_needed;
    cuts_blocks_needed[i] = cut_blocks_needed;
  }

  // Find enough blocks
  ExtentRanges scratch_ranges;
  // Each vertex that's supplying temp blocks and the corresponding blocks:
  typedef vector<pair<Vertex::Index, ExtentRanges>> SupplierVector;
  SupplierVector block_suppliers;
  uint64_t scratch_blocks_found = 0;
  for (vector<Vertex::Index>::size_type i = (*reverse_op_indexes)[old_dst] + 1,
           e = op_indexes->size(); i < e; ++i) {
    Vertex::Index test_node = (*op_indexes)[i];
    if (!(*graph)[test_node].valid)
      continue;
    // See if this node has sufficient blocks
    ExtentRanges ranges;
    ranges.AddRepeatedExtents((*graph)[test_node].aop.op.dst_extents());
    ranges.SubtractExtent(ExtentForRange(
        kTempBlockStart, kSparseHole - kTempBlockStart));
    ranges.SubtractRepeatedExtents((*graph)[test_node].aop.op.src_extents());
    // For now, for simplicity, subtract out all blocks in read-before
    // dependencies.
    for (Vertex::EdgeMap::const_iterator edge_i =
             (*graph)[test_node].out_edges.begin(),
             edge_e = (*graph)[test_node].out_edges.end();
         edge_i != edge_e; ++edge_i) {
      ranges.SubtractExtents(edge_i->second.extents);
    }

    // Prevent using the block 0 as scratch space due to crbug.com/480751.
    if (ranges.ContainsBlock(0)) {
      LOG(INFO) << "Removing block 0 from the selected scratch range in vertex "
                << i;
      ranges.SubtractBlock(0);
    }

    if (ranges.blocks() == 0)
      continue;

    if (ranges.blocks() + scratch_blocks_found > blocks_needed) {
      // trim down ranges
      vector<Extent> new_ranges = ranges.GetExtentsForBlockCount(
          blocks_needed - scratch_blocks_found);
      ranges = ExtentRanges();
      ranges.AddExtents(new_ranges);
    }
    scratch_ranges.AddRanges(ranges);
    block_suppliers.push_back(make_pair(test_node, ranges));
    scratch_blocks_found += ranges.blocks();
    if (scratch_ranges.blocks() >= blocks_needed)
      break;
  }
  if (scratch_ranges.blocks() < blocks_needed) {
    LOG(INFO) << "Unable to find sufficient scratch";
    TEST_AND_RETURN_FALSE(ConvertCutsToFull(graph,
                                            new_part,
                                            data_fd,
                                            data_file_size,
                                            op_indexes,
                                            reverse_op_indexes,
                                            cuts));
    return true;
  }
  // Use the scratch we found
  TEST_AND_RETURN_FALSE(scratch_ranges.blocks() == scratch_blocks_found);

  // Make all the suppliers depend on this node
  for (const auto& index_range_pair : block_suppliers) {
    graph_utils::AddReadBeforeDepExtents(
        &(*graph)[index_range_pair.first],
        old_dst,
        index_range_pair.second.GetExtentsForBlockCount(
            index_range_pair.second.blocks()));
  }

  // Replace temp blocks in each cut
  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
    const CutEdgeVertexes& cut = cuts[i];
    vector<Extent> real_extents =
        scratch_ranges.GetExtentsForBlockCount(cuts_blocks_needed[i]);
    scratch_ranges.SubtractExtents(real_extents);

    // Fix the old dest node w/ the real blocks
    InplaceGenerator::SubstituteBlocks(&(*graph)[old_dst],
                                       cut.tmp_extents,
                                       real_extents);

    // Fix the new node w/ the real blocks. Since the new node is just a
    // copy operation, we can replace all the dest extents w/ the real
    // blocks.
    DeltaArchiveManifest_InstallOperation* op =
        &(*graph)[cut.new_vertex].aop.op;
    op->clear_dst_extents();
    StoreExtents(real_extents, op->mutable_dst_extents());
  }
  return true;
}

}  // namespace

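// Processes |cuts| from the back of the topological order to the front,
// grouping cuts that share the same |old_dst| vertex and assigning real
// scratch blocks to each group via AssignBlockForAdjoiningCuts().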
bool InplaceGenerator::AssignTempBlocks(
    Graph* graph,
    const string& new_part,
    int data_fd,
    off_t* data_file_size,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());

  // group of cuts w/ the same old_dst:
  vector<CutEdgeVertexes> cuts_group;

  for (vector<CutEdgeVertexes>::size_type i = cuts.size() - 1, e = 0;
       true; --i) {
    LOG(INFO) << "Fixing temp blocks in cut " << i
              << ": old dst: " << cuts[i].old_dst << " new vertex: "
              << cuts[i].new_vertex << " path: "
              << (*graph)[cuts[i].old_dst].aop.name;

    if (cuts_group.empty() || (cuts_group[0].old_dst == cuts[i].old_dst)) {
      cuts_group.push_back(cuts[i]);
    } else {
      CHECK(!cuts_group.empty());
      TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
                                                        new_part,
                                                        data_fd,
                                                        data_file_size,
                                                        op_indexes,
                                                        reverse_op_indexes,
                                                        cuts_group));
      cuts_group.clear();
      cuts_group.push_back(cuts[i]);
    }

    if (i == e) {
      // break out of for() loop
      break;
    }
  }
  CHECK(!cuts_group.empty());
  TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
                                                    new_part,
                                                    data_fd,
                                                    data_file_size,
                                                    op_indexes,
                                                    reverse_op_indexes,
                                                    cuts_group));
  return true;
}

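// Returns true if no valid vertex in |graph| still references a temp block,
// either in its operation's extents or in the extents of its outgoing edges.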
bool InplaceGenerator::NoTempBlocksRemain(const Graph& graph) {
  size_t idx = 0;
  for (Graph::const_iterator it = graph.begin(), e = graph.end(); it != e;
       ++it, ++idx) {
    if (!it->valid)
      continue;
    const DeltaArchiveManifest_InstallOperation& op = it->aop.op;
    if (TempBlocksExistInExtents(op.dst_extents()) ||
        TempBlocksExistInExtents(op.src_extents())) {
      LOG(INFO) << "Temp blocks remain in the extents of node " << idx;
      return false;
    }

    // Check out-edges:
    for (const auto& edge_prop_pair : it->out_edges) {
      if (TempBlocksExistInExtents(edge_prop_pair.second.extents) ||
          TempBlocksExistInExtents(edge_prop_pair.second.write_extents)) {
        LOG(INFO) << "Temp blocks remain in an out edge of node " << idx;
        return false;
      }
    }
  }
  return true;
}

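// If the |old_dst| vertex of |cut| is not already a full operation, converts
// it to a REPLACE or REPLACE_BZ that reads its data from |new_part| and drops
// all incoming edges to it. The temp-copy vertex created by the cut is always
// marked invalid.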
bool InplaceGenerator::ConvertCutToFullOp(Graph* graph,
                                          const CutEdgeVertexes& cut,
                                          const string& new_part,
                                          int data_fd,
                                          off_t* data_file_size) {
  // Drop all incoming edges, keep all outgoing edges

  // Keep all outgoing edges
  if ((*graph)[cut.old_dst].aop.op.type() !=
      DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ &&
      (*graph)[cut.old_dst].aop.op.type() !=
      DeltaArchiveManifest_InstallOperation_Type_REPLACE) {
    Vertex::EdgeMap out_edges = (*graph)[cut.old_dst].out_edges;
    graph_utils::DropWriteBeforeDeps(&out_edges);

    // Replace the operation with a REPLACE or REPLACE_BZ to generate the same
    // |new_extents| list of blocks and update the graph.
    vector<AnnotatedOperation> new_aop;
    vector<Extent> new_extents;
    ExtentsToVector((*graph)[cut.old_dst].aop.op.dst_extents(),
                    &new_extents);
    TEST_AND_RETURN_FALSE(diff_utils::DeltaReadFile(
        &new_aop,
        "",  // old_part
        new_part,
        vector<Extent>(),  // old_extents
        new_extents,
        (*graph)[cut.old_dst].aop.name,
        -1,  // chunk_blocks, forces to have a single operation.
        data_fd,
        data_file_size,
        false));  // src_ops_allowed
    TEST_AND_RETURN_FALSE(new_aop.size() == 1);
    TEST_AND_RETURN_FALSE(AddInstallOpToGraph(
      graph, cut.old_dst, nullptr, new_aop.front().op, new_aop.front().name));

    (*graph)[cut.old_dst].out_edges = out_edges;

    // Right now we don't have doubly-linked edges, so we have to scan
    // the whole graph.
    graph_utils::DropIncomingEdgesTo(graph, cut.old_dst);
  }

  // Delete temp node
  (*graph)[cut.old_src].out_edges.erase(cut.new_vertex);
  CHECK((*graph)[cut.old_dst].out_edges.find(cut.new_vertex) ==
        (*graph)[cut.old_dst].out_edges.end());
  (*graph)[cut.new_vertex].valid = false;
  LOG(INFO) << "marked node invalid: " << cut.new_vertex;
  return true;
}

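// Breaks the cycles in |graph|, cuts the offending edges, topologically sorts
// the result, and assigns real blocks to every temp block introduced by the
// cuts. On success, |final_order| holds the order in which the operations
// should be applied.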
bool InplaceGenerator::ConvertGraphToDag(Graph* graph,
                                         const string& new_part,
                                         int fd,
                                         off_t* data_file_size,
                                         vector<Vertex::Index>* final_order,
                                         Vertex::Index scratch_vertex) {
  CycleBreaker cycle_breaker;
  LOG(INFO) << "Finding cycles...";
  set<Edge> cut_edges;
  cycle_breaker.BreakCycles(*graph, &cut_edges);
  LOG(INFO) << "done finding cycles";
  CheckGraph(*graph);

  // Calculate number of scratch blocks needed

  LOG(INFO) << "Cutting cycles...";
  vector<CutEdgeVertexes> cuts;
  TEST_AND_RETURN_FALSE(CutEdges(graph, cut_edges, &cuts));
  LOG(INFO) << "done cutting cycles";
  LOG(INFO) << "There are " << cuts.size() << " cuts.";
  CheckGraph(*graph);

  LOG(INFO) << "Creating initial topological order...";
  TopologicalSort(*graph, final_order);
  LOG(INFO) << "done with initial topo order";
  CheckGraph(*graph);

  LOG(INFO) << "Moving full ops to the back";
  MoveAndSortFullOpsToBack(graph, final_order);
  LOG(INFO) << "done moving full ops to back";

  vector<vector<Vertex::Index>::size_type> inverse_final_order;
  GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);

  SortCutsByTopoOrder(*final_order, &cuts);

  if (!cuts.empty())
    TEST_AND_RETURN_FALSE(AssignTempBlocks(graph,
                                           new_part,
                                           fd,
                                           data_file_size,
                                           final_order,
                                           &inverse_final_order,
                                           cuts));
  LOG(INFO) << "Making sure all temp blocks have been allocated";

  // Remove the scratch node, if any
  if (scratch_vertex != Vertex::kInvalidIndex) {
    final_order->erase(final_order->begin() +
                       inverse_final_order[scratch_vertex]);
    (*graph)[scratch_vertex].valid = false;
    GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);
  }

  graph_utils::DumpGraph(*graph);
  CHECK(NoTempBlocksRemain(*graph));
  LOG(INFO) << "done making sure all temp blocks are allocated";
  return true;
}

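// Fills |vertex| with a placeholder REPLACE_BZ operation (with no data) whose
// destination extent reserves the blocks
// [start_block, start_block + num_blocks) as scratch space.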
void InplaceGenerator::CreateScratchNode(uint64_t start_block,
                                         uint64_t num_blocks,
                                         Vertex* vertex) {
  vertex->aop.name = "<scratch>";
  vertex->aop.op.set_type(
      DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ);
  vertex->aop.op.set_data_offset(0);
  vertex->aop.op.set_data_length(0);
  Extent* extent = vertex->aop.op.add_dst_extents();
  extent->set_start_block(start_block);
  extent->set_num_blocks(num_blocks);
}

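// Records |vertex| as the reader of every block in |operation|'s src extents
// and as the writer of every block in its dst extents. Aborts with LOG(FATAL)
// if a block already has a reader or writer registered.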
bool InplaceGenerator::AddInstallOpToBlocksVector(
    const DeltaArchiveManifest_InstallOperation& operation,
    const Graph& graph,
    Vertex::Index vertex,
    vector<Block>* blocks) {
  // Sanity check: the operation must write at least one block.
  TEST_AND_RETURN_FALSE(operation.dst_extents_size() > 0);

  enum BlockField { READER = 0, WRITER, BLOCK_FIELD_COUNT };
  for (int field = READER; field < BLOCK_FIELD_COUNT; field++) {
    const int extents_size =
        (field == READER) ? operation.src_extents_size() :
        operation.dst_extents_size();
    const char* past_participle = (field == READER) ? "read" : "written";
    const google::protobuf::RepeatedPtrField<Extent>& extents =
        (field == READER) ? operation.src_extents() : operation.dst_extents();
    Vertex::Index Block::*access_type = (field == READER) ?
        &Block::reader : &Block::writer;

    for (int i = 0; i < extents_size; i++) {
      const Extent& extent = extents.Get(i);
      for (uint64_t block = extent.start_block();
           block < (extent.start_block() + extent.num_blocks()); block++) {
        if ((*blocks)[block].*access_type != Vertex::kInvalidIndex) {
          LOG(FATAL) << "Block " << block << " is already "
                     << past_participle << " by "
                     << (*blocks)[block].*access_type << "("
                     << graph[(*blocks)[block].*access_type].aop.name
                     << ") and also " << vertex << "("
                     << graph[vertex].aop.name << ")";
        }
        (*blocks)[block].*access_type = vertex;
      }
    }
  }
  return true;
}

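// Stores |operation| and |op_name| in vertex |existing_vertex| of |graph|, or
// in a newly appended vertex when |existing_vertex| is Vertex::kInvalidIndex.
// When |blocks| is not null, the operation's reads and writes are also
// registered there via AddInstallOpToBlocksVector().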
bool InplaceGenerator::AddInstallOpToGraph(
    Graph* graph,
    Vertex::Index existing_vertex,
    vector<Block>* blocks,
    const DeltaArchiveManifest_InstallOperation operation,
    const string& op_name) {
  Vertex::Index vertex = existing_vertex;
  if (vertex == Vertex::kInvalidIndex) {
    graph->emplace_back();
    vertex = graph->size() - 1;
  }
  (*graph)[vertex].aop.op = operation;
  CHECK((*graph)[vertex].aop.op.has_type());
  (*graph)[vertex].aop.name = op_name;

  if (blocks)
    TEST_AND_RETURN_FALSE(InplaceGenerator::AddInstallOpToBlocksVector(
        (*graph)[vertex].aop.op,
        *graph,
        vertex,
        blocks));
  return true;
}

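// Replaces each element of |collection| that appears as a key in |the_map|
// with its mapped value; elements not present in the map are left untouched.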
void InplaceGenerator::ApplyMap(vector<uint64_t>* collection,
                                const map<uint64_t, uint64_t>& the_map) {
  for (uint64_t& elem : *collection) {
    const auto& map_it = the_map.find(elem);
    if (map_it != the_map.end())
      elem = map_it->second;
  }
}

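// Converts |aops| into a dependency graph, adds a scratch node covering any
// unused blocks at the end of the partition, breaks read-after-write cycles,
// and rewrites |aops| in an order that can be applied in place on the device.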
bool InplaceGenerator::ResolveReadAfterWriteDependencies(
    const PartitionConfig& new_part,
    uint64_t partition_size,
    size_t block_size,
    int data_file_fd,
    off_t* data_file_size,
    vector<AnnotatedOperation>* aops) {
  // Convert the operations to the graph.
  Graph graph;
  CheckGraph(graph);
  vector<Block> blocks(new_part.size / block_size);
  for (const auto& aop : *aops) {
    AddInstallOpToGraph(
        &graph, Vertex::kInvalidIndex, &blocks, aop.op, aop.name);
  }
  CheckGraph(graph);

  // Final scratch block (if there's space)
  Vertex::Index scratch_vertex = Vertex::kInvalidIndex;
  if (blocks.size() < (partition_size / block_size)) {
    scratch_vertex = graph.size();
    graph.emplace_back();
    size_t scratch_blocks = (partition_size / block_size) - blocks.size();
    LOG(INFO) << "Added " << scratch_blocks << " scratch space blocks.";
    CreateScratchNode(blocks.size(), scratch_blocks, &graph.back());
  }
  CheckGraph(graph);

  LOG(INFO) << "Creating edges...";
  CreateEdges(&graph, blocks);
  LOG(INFO) << "Done creating edges";
  CheckGraph(graph);

  vector<Vertex::Index> final_order;
  TEST_AND_RETURN_FALSE(ConvertGraphToDag(
      &graph,
      new_part.path,
      data_file_fd,
      data_file_size,
      &final_order,
      scratch_vertex));

  // Copy operations over to the |aops| vector in the final_order generated by
  // the topological sort.
  aops->clear();
  aops->reserve(final_order.size());
  for (const Vertex::Index vertex_index : final_order) {
    const Vertex& vertex = graph[vertex_index];
    aops->push_back(vertex.aop);
  }
  return true;
}

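// Generates the in-place operations for a single partition: first computes
// the delta between |old_part| and |new_part|, then reorders the resulting
// operations so that they are safe to apply in place.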
bool InplaceGenerator::GenerateOperationsForPartition(
    const PartitionConfig& old_part,
    const PartitionConfig& new_part,
    uint64_t partition_size,
    size_t block_size,
    off_t chunk_blocks,
    int data_file_fd,
    off_t* data_file_size,
    vector<AnnotatedOperation>* aops) {
  const string part_name = PartitionNameString(new_part.name);
  LOG(INFO) << "Delta compressing " << part_name << " partition...";
  TEST_AND_RETURN_FALSE(
      diff_utils::DeltaReadPartition(aops,
                                     old_part,
                                     new_part,
                                     chunk_blocks,
                                     data_file_fd,
                                     data_file_size,
                                     false));  // src_ops_allowed
  LOG(INFO) << "Done reading " << part_name;

  TEST_AND_RETURN_FALSE(
      ResolveReadAfterWriteDependencies(new_part,
                                        partition_size,
                                        block_size,
                                        data_file_fd,
                                        data_file_size,
                                        aops));
  LOG(INFO) << "Done reordering " << part_name;
  return true;
}

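// Entry point of the in-place generation strategy: produces the rootfs and
// kernel operation lists for |config|, one partition at a time.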
bool InplaceGenerator::GenerateOperations(
    const PayloadGenerationConfig& config,
    int data_file_fd,
    off_t* data_file_size,
    vector<AnnotatedOperation>* rootfs_ops,
    vector<AnnotatedOperation>* kernel_ops) {
  off_t chunk_blocks = (config.chunk_size == -1 ? -1 :
                        config.chunk_size / config.block_size);

  TEST_AND_RETURN_FALSE(GenerateOperationsForPartition(
      config.source.rootfs,
      config.target.rootfs,
      config.rootfs_partition_size,
      config.block_size,
      chunk_blocks,
      data_file_fd,
      data_file_size,
      rootfs_ops));

  TEST_AND_RETURN_FALSE(GenerateOperationsForPartition(
      config.source.kernel,
      config.target.kernel,
      config.target.kernel.size,  // kernel "filesystem" is the whole partition.
      config.block_size,
      chunk_blocks,
      data_file_fd,
      data_file_size,
      kernel_ops));

  return true;
}

}  // namespace chromeos_update_engine