inplace_generator.cc revision b42b98db059a12c44110588fc0b3d5f82d32a2f8
// Copyright 2015 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "update_engine/payload_generator/inplace_generator.h"

#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "update_engine/payload_constants.h"
#include "update_engine/payload_generator/cycle_breaker.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/delta_diff_utils.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/payload_generator/graph_types.h"
#include "update_engine/payload_generator/graph_utils.h"
#include "update_engine/payload_generator/topological_sort.h"
#include "update_engine/update_metadata.pb.h"
#include "update_engine/utils.h"

using std::make_pair;
using std::map;
using std::pair;
using std::set;
using std::string;
using std::unique_ptr;
using std::vector;

namespace chromeos_update_engine {

using Block = InplaceGenerator::Block;

// This class allocates non-existent temp blocks, starting from
// kTempBlockStart. Other code is responsible for converting these
// temp blocks into real blocks, as the client can't read or write to
// these blocks.
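//
// Illustrative behavior (values are hypothetical): starting with
// next_block_ == kTempBlockStart, Allocate(3) returns the single extent
// [kTempBlockStart, kTempBlockStart + 2], and a following Allocate(2)
// returns [kTempBlockStart + 3, kTempBlockStart + 4], so successive callers
// always receive disjoint ranges above kTempBlockStart.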
class DummyExtentAllocator {
 public:
  vector<Extent> Allocate(const uint64_t block_count) {
    vector<Extent> ret(1);
    ret[0].set_start_block(next_block_);
    ret[0].set_num_blocks(block_count);
    next_block_ += block_count;
    return ret;
  }

 private:
  uint64_t next_block_{kTempBlockStart};
};

// Takes a vector of blocks and returns an equivalent vector of Extent
// objects.
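// For example, the block list {10, 11, 12, 20} compresses to the two extents
// (start_block=10, num_blocks=3) and (start_block=20, num_blocks=1).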
vector<Extent> CompressExtents(const vector<uint64_t>& blocks) {
  vector<Extent> new_extents;
  for (uint64_t block : blocks) {
    AppendBlockToExtents(&new_extents, block);
  }
  return new_extents;
}

void InplaceGenerator::CheckGraph(const Graph& graph) {
  for (const Vertex& v : graph) {
    CHECK(v.op.has_type());
  }
}

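// Rewrites |vertex|'s reads so that any block listed in |remove_extents| is
// replaced by the corresponding block (by position) in |replace_extents|.
// For example, if |remove_extents| covers blocks {5, 6} and |replace_extents|
// covers blocks {100, 101}, a src extent reading block 6 is rewritten to read
// block 101. Write-before dependency extents on out-edges are remapped the
// same way.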
void InplaceGenerator::SubstituteBlocks(
    Vertex* vertex,
    const vector<Extent>& remove_extents,
    const vector<Extent>& replace_extents) {
  // First, expand out the blocks that the op reads from.
  vector<uint64_t> read_blocks =
      ExpandExtents(vertex->op.src_extents());
  {
    // Expand remove_extents and replace_extents.
    vector<uint64_t> remove_extents_expanded = ExpandExtents(remove_extents);
    vector<uint64_t> replace_extents_expanded = ExpandExtents(replace_extents);
    CHECK_EQ(remove_extents_expanded.size(), replace_extents_expanded.size());
    map<uint64_t, uint64_t> conversion;
    for (vector<uint64_t>::size_type i = 0;
         i < replace_extents_expanded.size(); i++) {
      conversion[remove_extents_expanded[i]] = replace_extents_expanded[i];
    }
    ApplyMap(&read_blocks, conversion);
    for (auto& edge_prop_pair : vertex->out_edges) {
      vector<uint64_t> write_before_deps_expanded = ExpandExtents(
          edge_prop_pair.second.write_extents);
      ApplyMap(&write_before_deps_expanded, conversion);
      edge_prop_pair.second.write_extents =
          CompressExtents(write_before_deps_expanded);
    }
  }
  // Convert read_blocks back to extents.
  vertex->op.clear_src_extents();
  vector<Extent> new_extents = CompressExtents(read_blocks);
  StoreExtents(new_extents, vertex->op.mutable_src_extents());
}

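// Cuts each edge in |edges| by inserting a temp-copy node. For a cut edge
// A->B carrying read-before extents E, a new MOVE node N copies E into temp
// blocks T, A gains an edge to N (so the copy completes before A writes), B
// is rewritten to read T instead of E, and B gains a write-before edge to N.
// The resulting cut descriptions are returned through |out_cuts|.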
bool InplaceGenerator::CutEdges(Graph* graph,
                                const set<Edge>& edges,
                                vector<CutEdgeVertexes>* out_cuts) {
  DummyExtentAllocator scratch_allocator;
  vector<CutEdgeVertexes> cuts;
  cuts.reserve(edges.size());

  uint64_t scratch_blocks_used = 0;
  for (const Edge& edge : edges) {
    cuts.resize(cuts.size() + 1);
    vector<Extent> old_extents =
        (*graph)[edge.first].out_edges[edge.second].extents;
    // Choose some scratch space.
    scratch_blocks_used += graph_utils::EdgeWeight(*graph, edge);
    cuts.back().tmp_extents =
        scratch_allocator.Allocate(graph_utils::EdgeWeight(*graph, edge));
    // Create a vertex to copy original->scratch.
    cuts.back().new_vertex = graph->size();
    graph->emplace_back();
    cuts.back().old_src = edge.first;
    cuts.back().old_dst = edge.second;

    EdgeProperties& cut_edge_properties =
        (*graph)[edge.first].out_edges.find(edge.second)->second;

    // This should never happen, as we should only be cutting edges between
    // real file nodes, and write-before relationships are created from
    // a real file node to a temp copy node:
    CHECK(cut_edge_properties.write_extents.empty())
        << "Can't cut edge that has write-before relationship.";

    // Make the source node depend on the copy operation.
    (*graph)[edge.first].out_edges.insert(make_pair(graph->size() - 1,
                                                    cut_edge_properties));

    // Set src/dst extents and other proto variables for the copy operation.
    graph->back().op.set_type(DeltaArchiveManifest_InstallOperation_Type_MOVE);
    StoreExtents(cut_edge_properties.extents,
                 graph->back().op.mutable_src_extents());
    StoreExtents(cuts.back().tmp_extents,
                 graph->back().op.mutable_dst_extents());
    graph->back().op.set_src_length(
        graph_utils::EdgeWeight(*graph, edge) * kBlockSize);
    graph->back().op.set_dst_length(graph->back().op.src_length());

    // Make the dest node read from the scratch space.
    SubstituteBlocks(
        &((*graph)[edge.second]),
        (*graph)[edge.first].out_edges[edge.second].extents,
        cuts.back().tmp_extents);

    // Delete the old edge.
    CHECK_EQ(static_cast<Graph::size_type>(1),
             (*graph)[edge.first].out_edges.erase(edge.second));

    // Add an edge from dst to the copy operation.
    EdgeProperties write_before_edge_properties;
    write_before_edge_properties.write_extents = cuts.back().tmp_extents;
    (*graph)[edge.second].out_edges.insert(
        make_pair(graph->size() - 1, write_before_edge_properties));
  }
  out_cuts->swap(cuts);
  return true;
}

// Creates all the edges for the graph. Writers of a block point to
// readers of the same block: for an edge A->B, B must complete before
// A executes, so each reader of a block runs before that block's
// writer overwrites it.
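// For example, if vertex 3 writes block 7 and vertex 5 reads block 7, the
// edge 3->5 is created with block 7 in its extents, forcing the read in
// vertex 5 to happen before the write in vertex 3.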
void InplaceGenerator::CreateEdges(
    Graph* graph,
    const vector<Block>& blocks) {
  for (vector<Block>::size_type i = 0;
       i < blocks.size(); i++) {
    // Blocks with both a reader and writer get an edge.
    if (blocks[i].reader == Vertex::kInvalidIndex ||
        blocks[i].writer == Vertex::kInvalidIndex)
      continue;
    // Don't have a node depend on itself.
    if (blocks[i].reader == blocks[i].writer)
      continue;
    // See if there's already an edge we can add onto.
    Vertex::EdgeMap::iterator edge_it =
        (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
    if (edge_it == (*graph)[blocks[i].writer].out_edges.end()) {
      // No existing edge. Create one.
      (*graph)[blocks[i].writer].out_edges.insert(
          make_pair(blocks[i].reader, EdgeProperties()));
      edge_it = (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
      CHECK(edge_it != (*graph)[blocks[i].writer].out_edges.end());
    }
    AppendBlockToExtents(&edge_it->second.extents, i);
  }
}

namespace {

class SortCutsByTopoOrderLess {
 public:
  explicit SortCutsByTopoOrderLess(
      const vector<vector<Vertex::Index>::size_type>& table)
      : table_(table) {}
  bool operator()(const CutEdgeVertexes& a, const CutEdgeVertexes& b) {
    return table_[a.old_dst] < table_[b.old_dst];
  }
 private:
  const vector<vector<Vertex::Index>::size_type>& table_;
};

}  // namespace

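// Builds a map from vertex index to its position in the topological order
// |op_indexes|. Illustrative example (hypothetical values): if |op_indexes|
// is {4, 2, 7}, the resulting table satisfies table[4] == 0, table[2] == 1,
// and table[7] == 2.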
void InplaceGenerator::GenerateReverseTopoOrderMap(
    const vector<Vertex::Index>& op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes) {
  vector<vector<Vertex::Index>::size_type> table(op_indexes.size());
  for (vector<Vertex::Index>::size_type i = 0, e = op_indexes.size();
       i != e; ++i) {
    Vertex::Index node = op_indexes[i];
    if (table.size() < (node + 1)) {
      table.resize(node + 1);
    }
    table[node] = i;
  }
  reverse_op_indexes->swap(table);
}

void InplaceGenerator::SortCutsByTopoOrder(
    const vector<Vertex::Index>& op_indexes,
    vector<CutEdgeVertexes>* cuts) {
  // First, make a reverse lookup table.
  vector<vector<Vertex::Index>::size_type> table;
  GenerateReverseTopoOrderMap(op_indexes, &table);
  SortCutsByTopoOrderLess less(table);
  sort(cuts->begin(), cuts->end(), less);
}

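// Moves all REPLACE and REPLACE_BZ ("full") operations to the end of
// |op_indexes|. Full operations read nothing from the old partition, so they
// never depend on old data and can safely run last.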
void InplaceGenerator::MoveFullOpsToBack(Graph* graph,
                                         vector<Vertex::Index>* op_indexes) {
  vector<Vertex::Index> ret;
  vector<Vertex::Index> full_ops;
  ret.reserve(op_indexes->size());
  for (vector<Vertex::Index>::size_type i = 0, e = op_indexes->size(); i != e;
       ++i) {
    DeltaArchiveManifest_InstallOperation_Type type =
        (*graph)[(*op_indexes)[i]].op.type();
    if (type == DeltaArchiveManifest_InstallOperation_Type_REPLACE ||
        type == DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ) {
      full_ops.push_back((*op_indexes)[i]);
    } else {
      ret.push_back((*op_indexes)[i]);
    }
  }
  LOG(INFO) << "Stats: " << full_ops.size() << " full ops out of "
            << (full_ops.size() + ret.size()) << " total ops.";
  ret.insert(ret.end(), full_ops.begin(), full_ops.end());
  op_indexes->swap(ret);
}

namespace {

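// Returns true if any extent in |extents| reaches into the temp-block range
// that starts at kTempBlockStart.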
template<typename T>
bool TempBlocksExistInExtents(const T& extents) {
  for (int i = 0, e = extents.size(); i < e; ++i) {
    Extent extent = GetElement(extents, i);
    uint64_t start = extent.start_block();
    uint64_t num = extent.num_blocks();
    if (start >= kTempBlockStart || (start + num) >= kTempBlockStart) {
      LOG(ERROR) << "Found temp block: start: " << start << ", num: " << num
                 << ", kTempBlockStart: " << kTempBlockStart;
      return true;
    }
    // Check for wrap-around, which would be a bug:
    CHECK(start <= (start + num));
  }
  return false;
}

// Converts the cuts, which must all have the same |old_dst| member,
// to full. It does this by converting the |old_dst| to REPLACE or
// REPLACE_BZ, dropping all incoming edges to |old_dst|, and marking
// all temp nodes invalid.
bool ConvertCutsToFull(
    Graph* graph,
    const string& new_part,
    int data_fd,
    off_t* data_file_size,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());
  set<Vertex::Index> deleted_nodes;
  for (const CutEdgeVertexes& cut : cuts) {
    TEST_AND_RETURN_FALSE(InplaceGenerator::ConvertCutToFullOp(
        graph,
        cut,
        new_part,
        data_fd,
        data_file_size));
    deleted_nodes.insert(cut.new_vertex);
  }
  deleted_nodes.insert(cuts[0].old_dst);

  vector<Vertex::Index> new_op_indexes;
  new_op_indexes.reserve(op_indexes->size());
  for (Vertex::Index vertex_index : *op_indexes) {
    if (utils::SetContainsKey(deleted_nodes, vertex_index))
      continue;
    new_op_indexes.push_back(vertex_index);
  }
  new_op_indexes.push_back(cuts[0].old_dst);
  op_indexes->swap(new_op_indexes);
  InplaceGenerator::GenerateReverseTopoOrderMap(*op_indexes,
                                                reverse_op_indexes);
  return true;
}

// Tries to assign temp blocks for a collection of cuts, all of which share
// the same old_dst member. If temp blocks can't be found, old_dst will be
// converted to a REPLACE or REPLACE_BZ operation. Returns true on success,
// which can happen even if blocks are converted to full. Returns false
// on exceptional error cases.
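//
// Illustrative example (hypothetical numbers): if the cuts for old_dst need
// 10 temp blocks in total, the search below walks the nodes that come after
// old_dst in topological order and collects blocks they write but neither
// read nor hand out via read-before dependencies, e.g. 6 blocks from one
// node and 4 from another. Each supplier then gets a read-before dependency
// on old_dst so it cannot overwrite the scratch blocks until old_dst has run.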
bool AssignBlockForAdjoiningCuts(
    Graph* graph,
    const string& new_part,
    int data_fd,
    off_t* data_file_size,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());
  const Vertex::Index old_dst = cuts[0].old_dst;
  // Calculate the number of blocks needed.
  uint64_t blocks_needed = 0;
  vector<uint64_t> cuts_blocks_needed(cuts.size());
  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
    uint64_t cut_blocks_needed = 0;
    for (const Extent& extent : cuts[i].tmp_extents) {
      cut_blocks_needed += extent.num_blocks();
    }
    blocks_needed += cut_blocks_needed;
    cuts_blocks_needed[i] = cut_blocks_needed;
  }

  // Find enough blocks.
  ExtentRanges scratch_ranges;
  // Each node that's supplying temp blocks and the corresponding blocks:
  typedef vector<pair<Vertex::Index, ExtentRanges>> SupplierVector;
  SupplierVector block_suppliers;
  uint64_t scratch_blocks_found = 0;
  for (vector<Vertex::Index>::size_type i = (*reverse_op_indexes)[old_dst] + 1,
           e = op_indexes->size(); i < e; ++i) {
    Vertex::Index test_node = (*op_indexes)[i];
    if (!(*graph)[test_node].valid)
      continue;
    // See if this node has sufficient blocks.
    ExtentRanges ranges;
    ranges.AddRepeatedExtents((*graph)[test_node].op.dst_extents());
    ranges.SubtractExtent(ExtentForRange(
        kTempBlockStart, kSparseHole - kTempBlockStart));
    ranges.SubtractRepeatedExtents((*graph)[test_node].op.src_extents());
    // For now, for simplicity, subtract out all blocks in read-before
    // dependencies.
    for (Vertex::EdgeMap::const_iterator edge_i =
             (*graph)[test_node].out_edges.begin(),
             edge_e = (*graph)[test_node].out_edges.end();
         edge_i != edge_e; ++edge_i) {
      ranges.SubtractExtents(edge_i->second.extents);
    }
    if (ranges.blocks() == 0)
      continue;

    if (ranges.blocks() + scratch_blocks_found > blocks_needed) {
      // Trim down the ranges.
      vector<Extent> new_ranges = ranges.GetExtentsForBlockCount(
          blocks_needed - scratch_blocks_found);
      ranges = ExtentRanges();
      ranges.AddExtents(new_ranges);
    }
    scratch_ranges.AddRanges(ranges);
    block_suppliers.push_back(make_pair(test_node, ranges));
    scratch_blocks_found += ranges.blocks();
    if (scratch_ranges.blocks() >= blocks_needed)
      break;
  }
  if (scratch_ranges.blocks() < blocks_needed) {
    LOG(INFO) << "Unable to find sufficient scratch";
    TEST_AND_RETURN_FALSE(ConvertCutsToFull(graph,
                                            new_part,
                                            data_fd,
                                            data_file_size,
                                            op_indexes,
                                            reverse_op_indexes,
                                            cuts));
    return true;
  }
  // Use the scratch we found.
  TEST_AND_RETURN_FALSE(scratch_ranges.blocks() == scratch_blocks_found);

  // Make all the suppliers depend on this node.
  for (const auto& index_range_pair : block_suppliers) {
    graph_utils::AddReadBeforeDepExtents(
        &(*graph)[index_range_pair.first],
        old_dst,
        index_range_pair.second.GetExtentsForBlockCount(
            index_range_pair.second.blocks()));
  }

  // Replace temp blocks in each cut.
  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
    const CutEdgeVertexes& cut = cuts[i];
    vector<Extent> real_extents =
        scratch_ranges.GetExtentsForBlockCount(cuts_blocks_needed[i]);
    scratch_ranges.SubtractExtents(real_extents);

    // Fix the old dest node with the real blocks.
    InplaceGenerator::SubstituteBlocks(&(*graph)[old_dst],
                                       cut.tmp_extents,
                                       real_extents);

    // Fix the new node with the real blocks. Since the new node is just a
    // copy operation, we can replace all the dest extents with the real
    // blocks.
    DeltaArchiveManifest_InstallOperation* op = &(*graph)[cut.new_vertex].op;
    op->clear_dst_extents();
    StoreExtents(real_extents, op->mutable_dst_extents());
  }
  return true;
}

}  // namespace

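// Assigns real blocks to the temp blocks of every cut. Walks |cuts| from back
// to front, batching runs of cuts that share the same |old_dst|, and hands
// each batch to AssignBlockForAdjoiningCuts(). |cuts| is expected to be
// sorted by the topological order of |old_dst| (see SortCutsByTopoOrder()),
// so equal |old_dst| values are adjacent.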
bool InplaceGenerator::AssignTempBlocks(
    Graph* graph,
    const string& new_part,
    int data_fd,
    off_t* data_file_size,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());

  // Group of cuts with the same old_dst:
  vector<CutEdgeVertexes> cuts_group;

  for (vector<CutEdgeVertexes>::size_type i = cuts.size() - 1, e = 0;
       true; --i) {
    LOG(INFO) << "Fixing temp blocks in cut " << i
              << ": old dst: " << cuts[i].old_dst << " new vertex: "
              << cuts[i].new_vertex << " path: "
              << (*graph)[cuts[i].old_dst].file_name;

    if (cuts_group.empty() || (cuts_group[0].old_dst == cuts[i].old_dst)) {
      cuts_group.push_back(cuts[i]);
    } else {
      CHECK(!cuts_group.empty());
      TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
                                                        new_part,
                                                        data_fd,
                                                        data_file_size,
                                                        op_indexes,
                                                        reverse_op_indexes,
                                                        cuts_group));
      cuts_group.clear();
      cuts_group.push_back(cuts[i]);
    }

    if (i == e) {
      // Break out of the for() loop.
      break;
    }
  }
  CHECK(!cuts_group.empty());
  TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
                                                    new_part,
                                                    data_fd,
                                                    data_file_size,
                                                    op_indexes,
                                                    reverse_op_indexes,
                                                    cuts_group));
  return true;
}

bool InplaceGenerator::NoTempBlocksRemain(const Graph& graph) {
  size_t idx = 0;
  for (Graph::const_iterator it = graph.begin(), e = graph.end(); it != e;
       ++it, ++idx) {
    if (!it->valid)
      continue;
    const DeltaArchiveManifest_InstallOperation& op = it->op;
    if (TempBlocksExistInExtents(op.dst_extents()) ||
        TempBlocksExistInExtents(op.src_extents())) {
      LOG(INFO) << "Bad extents in node " << idx;
      return false;
    }

    // Check out-edges:
    for (const auto& edge_prop_pair : it->out_edges) {
      if (TempBlocksExistInExtents(edge_prop_pair.second.extents) ||
          TempBlocksExistInExtents(edge_prop_pair.second.write_extents)) {
        LOG(INFO) << "Bad out-edge in node " << idx;
        return false;
      }
    }
  }
  return true;
}

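// Converts the operation at |cut.old_dst| into a full (REPLACE or REPLACE_BZ)
// operation that reads its data from |new_part|, drops the incoming edges of
// |cut.old_dst| (outgoing edges are kept), and marks the temp copy node
// |cut.new_vertex| invalid.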
bool InplaceGenerator::ConvertCutToFullOp(Graph* graph,
                                          const CutEdgeVertexes& cut,
                                          const string& new_part,
                                          int data_fd,
                                          off_t* data_file_size) {
  // Drop all incoming edges, but keep all outgoing edges.
  if ((*graph)[cut.old_dst].op.type() !=
      DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ &&
      (*graph)[cut.old_dst].op.type() !=
      DeltaArchiveManifest_InstallOperation_Type_REPLACE) {
    Vertex::EdgeMap out_edges = (*graph)[cut.old_dst].out_edges;
    graph_utils::DropWriteBeforeDeps(&out_edges);

    // Replace the operation with a REPLACE or REPLACE_BZ that generates the
    // same |new_extents| list of blocks, and update the graph.
    vector<AnnotatedOperation> new_aop;
    vector<Extent> new_extents;
    ExtentsToVector((*graph)[cut.old_dst].op.dst_extents(),
                    &new_extents);
    TEST_AND_RETURN_FALSE(diff_utils::DeltaReadFile(
        &new_aop,
        "",  // old_part
        new_part,
        vector<Extent>(),  // old_extents
        new_extents,
        (*graph)[cut.old_dst].file_name,
        -1,  // chunk_blocks, forces to have a single operation.
        data_fd,
        data_file_size,
        false));  // src_ops_allowed
    TEST_AND_RETURN_FALSE(new_aop.size() == 1);
    TEST_AND_RETURN_FALSE(AddInstallOpToGraph(
        graph, cut.old_dst, nullptr, new_aop.front().op, new_aop.front().name));

    (*graph)[cut.old_dst].out_edges = out_edges;

    // Right now we don't have doubly-linked edges, so we have to scan
    // the whole graph.
    graph_utils::DropIncomingEdgesTo(graph, cut.old_dst);
  }

  // Delete the temp node.
  (*graph)[cut.old_src].out_edges.erase(cut.new_vertex);
  CHECK((*graph)[cut.old_dst].out_edges.find(cut.new_vertex) ==
        (*graph)[cut.old_dst].out_edges.end());
  (*graph)[cut.new_vertex].valid = false;
  LOG(INFO) << "Marked node invalid: " << cut.new_vertex;
  return true;
}

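// Turns the dependency graph into a DAG that can be executed in place:
// break cycles, cut the chosen edges into temp-block copies, topologically
// sort, push full operations to the back, assign real blocks to the temp
// blocks, and finally drop the scratch node, if one was added.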
bool InplaceGenerator::ConvertGraphToDag(Graph* graph,
                                         const string& new_part,
                                         int fd,
                                         off_t* data_file_size,
                                         vector<Vertex::Index>* final_order,
                                         Vertex::Index scratch_vertex) {
  CycleBreaker cycle_breaker;
  LOG(INFO) << "Finding cycles...";
  set<Edge> cut_edges;
  cycle_breaker.BreakCycles(*graph, &cut_edges);
  LOG(INFO) << "done finding cycles";
  CheckGraph(*graph);

  LOG(INFO) << "Cutting cycles...";
  vector<CutEdgeVertexes> cuts;
  TEST_AND_RETURN_FALSE(CutEdges(graph, cut_edges, &cuts));
  LOG(INFO) << "done cutting cycles";
  LOG(INFO) << "There are " << cuts.size() << " cuts.";
  CheckGraph(*graph);

  LOG(INFO) << "Creating initial topological order...";
  TopologicalSort(*graph, final_order);
  LOG(INFO) << "done with initial topo order";
  CheckGraph(*graph);

  LOG(INFO) << "Moving full ops to the back";
  MoveFullOpsToBack(graph, final_order);
  LOG(INFO) << "done moving full ops to back";

  vector<vector<Vertex::Index>::size_type> inverse_final_order;
  GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);

  SortCutsByTopoOrder(*final_order, &cuts);

  if (!cuts.empty())
    TEST_AND_RETURN_FALSE(AssignTempBlocks(graph,
                                           new_part,
                                           fd,
                                           data_file_size,
                                           final_order,
                                           &inverse_final_order,
                                           cuts));
  LOG(INFO) << "Making sure all temp blocks have been allocated";

  // Remove the scratch node, if any.
  if (scratch_vertex != Vertex::kInvalidIndex) {
    final_order->erase(final_order->begin() +
                       inverse_final_order[scratch_vertex]);
    (*graph)[scratch_vertex].valid = false;
    GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);
  }

  graph_utils::DumpGraph(*graph);
  CHECK(NoTempBlocksRemain(*graph));
  LOG(INFO) << "done making sure all temp blocks are allocated";
  return true;
}

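// Sets up |vertex| as a scratch node covering the
// [start_block, start_block + num_blocks) region of the partition. The node
// claims those blocks as its dst_extents so they can serve as scratch space;
// it is dropped from the final operation list in ConvertGraphToDag().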
void InplaceGenerator::CreateScratchNode(uint64_t start_block,
                                         uint64_t num_blocks,
                                         Vertex* vertex) {
  vertex->file_name = "<scratch>";
  vertex->op.set_type(DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ);
  vertex->op.set_data_offset(0);
  vertex->op.set_data_length(0);
  Extent* extent = vertex->op.add_dst_extents();
  extent->set_start_block(start_block);
  extent->set_num_blocks(num_blocks);
}

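// Records |operation|'s reads and writes in |blocks|, one Vertex::Index per
// block. Each block may have at most one reader and at most one writer in
// the in-place graph; a conflict is a fatal error.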
bool InplaceGenerator::AddInstallOpToBlocksVector(
    const DeltaArchiveManifest_InstallOperation& operation,
    const Graph& graph,
    Vertex::Index vertex,
    vector<Block>* blocks) {
  // Every install operation must write at least one block.
  TEST_AND_RETURN_FALSE(operation.dst_extents_size() > 0);

  enum BlockField { READER = 0, WRITER, BLOCK_FIELD_COUNT };
  for (int field = READER; field < BLOCK_FIELD_COUNT; field++) {
    const int extents_size =
        (field == READER) ? operation.src_extents_size() :
        operation.dst_extents_size();
    const char* past_participle = (field == READER) ? "read" : "written";
    const google::protobuf::RepeatedPtrField<Extent>& extents =
        (field == READER) ? operation.src_extents() : operation.dst_extents();
    Vertex::Index Block::*access_type = (field == READER) ?
        &Block::reader : &Block::writer;

    for (int i = 0; i < extents_size; i++) {
      const Extent& extent = extents.Get(i);
      for (uint64_t block = extent.start_block();
           block < (extent.start_block() + extent.num_blocks()); block++) {
        if ((*blocks)[block].*access_type != Vertex::kInvalidIndex) {
          LOG(FATAL) << "Block " << block << " is already "
                     << past_participle << " by "
                     << (*blocks)[block].*access_type << "("
                     << graph[(*blocks)[block].*access_type].file_name
                     << ") and also " << vertex << "("
                     << graph[vertex].file_name << ")";
        }
        (*blocks)[block].*access_type = vertex;
      }
    }
  }
  return true;
}

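// Stores |operation| in the graph at |existing_vertex|, or appends a new
// vertex when |existing_vertex| is Vertex::kInvalidIndex. If |blocks| is
// non-null, also records the operation's reads and writes there.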
bool InplaceGenerator::AddInstallOpToGraph(
    Graph* graph,
    Vertex::Index existing_vertex,
    vector<Block>* blocks,
    const DeltaArchiveManifest_InstallOperation operation,
    const string& op_name) {
  Vertex::Index vertex = existing_vertex;
  if (vertex == Vertex::kInvalidIndex) {
    graph->emplace_back();
    vertex = graph->size() - 1;
  }
  (*graph)[vertex].op = operation;
  CHECK((*graph)[vertex].op.has_type());
  (*graph)[vertex].file_name = op_name;

  if (blocks)
    TEST_AND_RETURN_FALSE(InplaceGenerator::AddInstallOpToBlocksVector(
        (*graph)[vertex].op,
        *graph,
        vertex,
        blocks));
  return true;
}

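// Rewrites every element of |collection| that appears as a key in |the_map|
// with its mapped value; elements not in the map are left untouched. For
// example, applying the map {2 -> 10} to {1, 2, 3} yields {1, 10, 3}.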
void InplaceGenerator::ApplyMap(vector<uint64_t>* collection,
                                const map<uint64_t, uint64_t>& the_map) {
  for (uint64_t& elem : *collection) {
    const auto& map_it = the_map.find(elem);
    if (map_it != the_map.end())
      elem = map_it->second;
  }
}

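// Generates the in-place rootfs and kernel operations: delta-read the rootfs
// (skipping block 0 so it can be applied last), build the dependency graph,
// add a scratch node if the partition has spare space, delta-read the
// kernel, create the read-before edges, convert the graph to a DAG, and emit
// the operations in the final topological order.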
bool InplaceGenerator::GenerateOperations(
    const PayloadGenerationConfig& config,
    int data_file_fd,
    off_t* data_file_size,
    vector<AnnotatedOperation>* rootfs_ops,
    vector<AnnotatedOperation>* kernel_ops) {
  off_t chunk_blocks = (config.chunk_size == -1 ? -1 :
                        config.chunk_size / config.block_size);

  // Temporary list of operations used to construct the dependency graph.
  vector<AnnotatedOperation> aops;
  TEST_AND_RETURN_FALSE(
      diff_utils::DeltaReadPartition(&aops,
                                     config.source.rootfs,
                                     config.target.rootfs,
                                     chunk_blocks,
                                     data_file_fd,
                                     data_file_size,
                                     true,  // skip_block_0
                                     false));  // src_ops_allowed
  // Convert the rootfs operations to the graph.
  Graph graph;
  CheckGraph(graph);
  vector<Block> blocks(config.target.rootfs.size / config.block_size);
  for (const auto& aop : aops) {
    AddInstallOpToGraph(
        &graph, Vertex::kInvalidIndex, &blocks, aop.op, aop.name);
  }
  LOG(INFO) << "done reading normal files";
  CheckGraph(graph);

  // Add a final scratch node (if there's space).
  Vertex::Index scratch_vertex = Vertex::kInvalidIndex;
  if (blocks.size() < (config.rootfs_partition_size / kBlockSize)) {
    scratch_vertex = graph.size();
    graph.emplace_back();
    CreateScratchNode(
        blocks.size(),
        (config.rootfs_partition_size / kBlockSize) - blocks.size(),
        &graph.back());
  }

  // Read the kernel partition.
  LOG(INFO) << "Delta compressing kernel partition...";
  // It is safe not to skip block 0 here: the cycle-breaking algorithm is not
  // run on this list of operations, and we expect no cycles in it.
  TEST_AND_RETURN_FALSE(
      diff_utils::DeltaReadPartition(kernel_ops,
                                     config.source.kernel,
                                     config.target.kernel,
                                     chunk_blocks,
                                     data_file_fd,
                                     data_file_size,
                                     false,  // skip_block_0
                                     false));  // src_ops_allowed
  LOG(INFO) << "done reading kernel";
  CheckGraph(graph);

  LOG(INFO) << "Creating edges...";
  CreateEdges(&graph, blocks);
  LOG(INFO) << "Done creating edges";
  CheckGraph(graph);

  vector<Vertex::Index> final_order;
  TEST_AND_RETURN_FALSE(ConvertGraphToDag(
      &graph,
      config.target.rootfs.path,
      data_file_fd,
      data_file_size,
      &final_order,
      scratch_vertex));

  // Copy the operations over to rootfs_ops in the final_order generated by
  // the topological sort.
  rootfs_ops->clear();
  for (const Vertex::Index vertex_index : final_order) {
    const Vertex& vertex = graph[vertex_index];
    rootfs_ops->emplace_back();
    rootfs_ops->back().op = vertex.op;
    rootfs_ops->back().name = vertex.file_name;
  }

  // Re-add the operation for block 0.
  TEST_AND_RETURN_FALSE(diff_utils::DeltaReadFile(
      rootfs_ops,
      config.source.rootfs.path,
      config.target.rootfs.path,
      vector<Extent>{ExtentForRange(0, 1)},
      vector<Extent>{ExtentForRange(0, 1)},
      "<block-0>",  // operation name
      -1,  // chunk_blocks
      data_file_fd,
      data_file_size,
      false));  // src_ops_allowed

  return true;
}

}  // namespace chromeos_update_engine