inplace_generator.cc revision 14158570d3995008dc93a628004118b87a6bca01
// Copyright 2015 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "update_engine/payload_generator/inplace_generator.h"

#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "update_engine/payload_constants.h"
#include "update_engine/payload_generator/cycle_breaker.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/delta_diff_utils.h"
#include "update_engine/payload_generator/ext2_filesystem.h"
#include "update_engine/payload_generator/extent_ranges.h"
#include "update_engine/payload_generator/graph_types.h"
#include "update_engine/payload_generator/graph_utils.h"
#include "update_engine/payload_generator/topological_sort.h"
#include "update_engine/update_metadata.pb.h"
#include "update_engine/utils.h"

using std::make_pair;
using std::map;
using std::pair;
using std::set;
using std::string;
using std::unique_ptr;
using std::vector;

namespace chromeos_update_engine {

using Block = InplaceGenerator::Block;

// This class allocates non-existent temp blocks, starting from
// kTempBlockStart. Other code is responsible for converting these
// temp blocks into real blocks, as the client can't read or write to
// these blocks.
class DummyExtentAllocator {
 public:
  vector<Extent> Allocate(const uint64_t block_count) {
    vector<Extent> ret(1);
    ret[0].set_start_block(next_block_);
    ret[0].set_num_blocks(block_count);
    next_block_ += block_count;
    return ret;
  }

 private:
  uint64_t next_block_{kTempBlockStart};
};
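
// A minimal usage sketch (illustrative comment only; the values shown are
// hypothetical):
//
//   DummyExtentAllocator allocator;
//   vector<Extent> a = allocator.Allocate(3);
//   // a holds one extent: {start_block: kTempBlockStart, num_blocks: 3}
//   vector<Extent> b = allocator.Allocate(2);
//   // b holds one extent: {start_block: kTempBlockStart + 3, num_blocks: 2}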

// Takes a vector of blocks and returns an equivalent vector of Extent
// objects.
vector<Extent> CompressExtents(const vector<uint64_t>& blocks) {
  vector<Extent> new_extents;
  for (uint64_t block : blocks) {
    AppendBlockToExtents(&new_extents, block);
  }
  return new_extents;
}
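
// For example (illustrative comment only), runs of consecutive blocks
// collapse into single extents:
//
//   CompressExtents({5, 6, 7, 10})
//   // == {start_block: 5, num_blocks: 3}, {start_block: 10, num_blocks: 1}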

void InplaceGenerator::CheckGraph(const Graph& graph) {
  for (const Vertex& v : graph) {
    CHECK(v.op.has_type());
  }
}

void InplaceGenerator::SubstituteBlocks(
    Vertex* vertex,
    const vector<Extent>& remove_extents,
    const vector<Extent>& replace_extents) {
  // First, expand out the blocks that op reads from
  vector<uint64_t> read_blocks =
      ExpandExtents(vertex->op.src_extents());
  {
    // Expand remove_extents and replace_extents
    vector<uint64_t> remove_extents_expanded = ExpandExtents(remove_extents);
    vector<uint64_t> replace_extents_expanded = ExpandExtents(replace_extents);
    CHECK_EQ(remove_extents_expanded.size(), replace_extents_expanded.size());
    map<uint64_t, uint64_t> conversion;
    for (vector<uint64_t>::size_type i = 0;
         i < replace_extents_expanded.size(); i++) {
      conversion[remove_extents_expanded[i]] = replace_extents_expanded[i];
    }
    ApplyMap(&read_blocks, conversion);
    for (auto& edge_prop_pair : vertex->out_edges) {
      vector<uint64_t> write_before_deps_expanded = ExpandExtents(
          edge_prop_pair.second.write_extents);
      ApplyMap(&write_before_deps_expanded, conversion);
      edge_prop_pair.second.write_extents =
          CompressExtents(write_before_deps_expanded);
    }
  }
  // Convert read_blocks back to extents
  vertex->op.clear_src_extents();
  vector<Extent> new_extents = CompressExtents(read_blocks);
  StoreExtents(new_extents, vertex->op.mutable_src_extents());
}
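
// A worked example (illustrative comment only): if |vertex| reads blocks
// {10, 11}, remove_extents is [{start: 10, num: 2}], and replace_extents is
// [{start: kTempBlockStart, num: 2}], the block-level map becomes
// {10 -> kTempBlockStart, 11 -> kTempBlockStart + 1}. The vertex's
// src_extents, and the write-before extents on each of its out-edges, are
// rewritten through that map to point at the temp blocks.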

bool InplaceGenerator::CutEdges(Graph* graph,
                                const set<Edge>& edges,
                                vector<CutEdgeVertexes>* out_cuts) {
  DummyExtentAllocator scratch_allocator;
  vector<CutEdgeVertexes> cuts;
  cuts.reserve(edges.size());

  uint64_t scratch_blocks_used = 0;
  for (const Edge& edge : edges) {
    cuts.resize(cuts.size() + 1);
    vector<Extent> old_extents =
        (*graph)[edge.first].out_edges[edge.second].extents;
    // Choose some scratch space
    scratch_blocks_used += graph_utils::EdgeWeight(*graph, edge);
    cuts.back().tmp_extents =
        scratch_allocator.Allocate(graph_utils::EdgeWeight(*graph, edge));
    // create vertex to copy original->scratch
    cuts.back().new_vertex = graph->size();
    graph->emplace_back();
    cuts.back().old_src = edge.first;
    cuts.back().old_dst = edge.second;

    EdgeProperties& cut_edge_properties =
        (*graph)[edge.first].out_edges.find(edge.second)->second;

    // This should never happen, as we should only be cutting edges between
    // real file nodes, and write-before relationships are created from
    // a real file node to a temp copy node:
    CHECK(cut_edge_properties.write_extents.empty())
        << "Can't cut edge that has write-before relationship.";

    // make node depend on the copy operation
    (*graph)[edge.first].out_edges.insert(make_pair(graph->size() - 1,
                                                    cut_edge_properties));

    // Set src/dst extents and other proto variables for copy operation
    graph->back().op.set_type(DeltaArchiveManifest_InstallOperation_Type_MOVE);
    StoreExtents(cut_edge_properties.extents,
                 graph->back().op.mutable_src_extents());
    StoreExtents(cuts.back().tmp_extents,
                 graph->back().op.mutable_dst_extents());
    graph->back().op.set_src_length(
        graph_utils::EdgeWeight(*graph, edge) * kBlockSize);
    graph->back().op.set_dst_length(graph->back().op.src_length());

    // make the dest node read from the scratch space
    SubstituteBlocks(
        &((*graph)[edge.second]),
        (*graph)[edge.first].out_edges[edge.second].extents,
        cuts.back().tmp_extents);

    // delete the old edge
    CHECK_EQ(static_cast<Graph::size_type>(1),
             (*graph)[edge.first].out_edges.erase(edge.second));

    // Add an edge from dst to copy operation
    EdgeProperties write_before_edge_properties;
    write_before_edge_properties.write_extents = cuts.back().tmp_extents;
    (*graph)[edge.second].out_edges.insert(
        make_pair(graph->size() - 1, write_before_edge_properties));
  }
  out_cuts->swap(cuts);
  return true;
}
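
// A worked example (illustrative comment only): take a cut edge A->B, which
// means A overwrites blocks that B still needs to read (B must run before
// A). CutEdges() appends a MOVE vertex N that copies those blocks to temp
// space, then: (1) A gains edge A->N, so the copy happens before A
// overwrites the originals; (2) B's src_extents are rewritten to read the
// temp blocks; (3) B gains a write-before edge B->N, so the copy lands
// before B reads it; and (4) the original A->B edge is erased, breaking the
// cycle at the cost of temp blocks that AssignTempBlocks() must later map
// back to real scratch space.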

// Creates all the edges for the graph. Writers of a block point to
// readers of the same block. This is because for an edge A->B, B
// must complete before A executes.
void InplaceGenerator::CreateEdges(
    Graph* graph,
    const vector<Block>& blocks) {
  for (vector<Block>::size_type i = 0;
       i < blocks.size(); i++) {
    // Blocks with both a reader and writer get an edge
    if (blocks[i].reader == Vertex::kInvalidIndex ||
        blocks[i].writer == Vertex::kInvalidIndex)
      continue;
    // Don't have a node depend on itself
    if (blocks[i].reader == blocks[i].writer)
      continue;
    // See if there's already an edge we can add onto
    Vertex::EdgeMap::iterator edge_it =
        (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
    if (edge_it == (*graph)[blocks[i].writer].out_edges.end()) {
      // No existing edge. Create one
      (*graph)[blocks[i].writer].out_edges.insert(
          make_pair(blocks[i].reader, EdgeProperties()));
      edge_it = (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
      CHECK(edge_it != (*graph)[blocks[i].writer].out_edges.end());
    }
    AppendBlockToExtents(&edge_it->second.extents, i);
  }
}
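
// For example (illustrative comment only): if block 42 is written by vertex
// 7 and read by vertex 3, the loop above creates (or extends) the edge
// 7 -> 3 and appends block 42 to its extents, recording that vertex 3 must
// consume the old contents of block 42 before vertex 7 overwrites them.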

namespace {

class SortCutsByTopoOrderLess {
 public:
  explicit SortCutsByTopoOrderLess(
      const vector<vector<Vertex::Index>::size_type>& table)
      : table_(table) {}
  bool operator()(const CutEdgeVertexes& a, const CutEdgeVertexes& b) {
    return table_[a.old_dst] < table_[b.old_dst];
  }

 private:
  const vector<vector<Vertex::Index>::size_type>& table_;
};

}  // namespace

void InplaceGenerator::GenerateReverseTopoOrderMap(
    const vector<Vertex::Index>& op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes) {
  vector<vector<Vertex::Index>::size_type> table(op_indexes.size());
  for (vector<Vertex::Index>::size_type i = 0, e = op_indexes.size();
       i != e; ++i) {
    Vertex::Index node = op_indexes[i];
    if (table.size() < (node + 1)) {
      table.resize(node + 1);
    }
    table[node] = i;
  }
  reverse_op_indexes->swap(table);
}
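
// For example (illustrative comment only): given op_indexes = {4, 0, 2},
// the resulting table maps vertex index -> topological position, so
// table[4] == 0, table[0] == 1, and table[2] == 2 (slots for vertices not
// present in op_indexes are left value-initialized).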

void InplaceGenerator::SortCutsByTopoOrder(
    const vector<Vertex::Index>& op_indexes,
    vector<CutEdgeVertexes>* cuts) {
  // first, make a reverse lookup table.
  vector<vector<Vertex::Index>::size_type> table;
  GenerateReverseTopoOrderMap(op_indexes, &table);
  SortCutsByTopoOrderLess less(table);
  sort(cuts->begin(), cuts->end(), less);
}

void InplaceGenerator::MoveFullOpsToBack(Graph* graph,
                                         vector<Vertex::Index>* op_indexes) {
  vector<Vertex::Index> ret;
  vector<Vertex::Index> full_ops;
  ret.reserve(op_indexes->size());
  for (vector<Vertex::Index>::size_type i = 0, e = op_indexes->size(); i != e;
       ++i) {
    DeltaArchiveManifest_InstallOperation_Type type =
        (*graph)[(*op_indexes)[i]].op.type();
    if (type == DeltaArchiveManifest_InstallOperation_Type_REPLACE ||
        type == DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ) {
      full_ops.push_back((*op_indexes)[i]);
    } else {
      ret.push_back((*op_indexes)[i]);
    }
  }
  LOG(INFO) << "Stats: " << full_ops.size() << " full ops out of "
            << (full_ops.size() + ret.size()) << " total ops.";
  ret.insert(ret.end(), full_ops.begin(), full_ops.end());
  op_indexes->swap(ret);
}
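
// An illustrative note (comment only): this is a stable partition, so, for
// example, {MOVE_a, REPLACE_b, BSDIFF_c, REPLACE_BZ_d} reorders to
// {MOVE_a, BSDIFF_c, REPLACE_b, REPLACE_BZ_d}. Since full operations read
// nothing from the old partition, and AssignTempBlocks() draws scratch from
// nodes that come after a cut in this order, pushing them to the back
// plausibly maximizes the supply of usable scratch blocks.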

namespace {

template<typename T>
bool TempBlocksExistInExtents(const T& extents) {
  for (int i = 0, e = extents.size(); i < e; ++i) {
    Extent extent = GetElement(extents, i);
    uint64_t start = extent.start_block();
    uint64_t num = extent.num_blocks();
    if (start >= kTempBlockStart || (start + num) >= kTempBlockStart) {
      LOG(ERROR) << "Found temp block: start: " << start << ", num: " << num
                 << ", kTempBlockStart: " << kTempBlockStart;
      return true;
    }
    // check for wrap-around, which would be a bug:
    CHECK(start <= (start + num));
  }
  return false;
}

// Converts the cuts, which must all have the same |old_dst| member,
// to full. It does this by converting the |old_dst| to REPLACE or
// REPLACE_BZ, dropping all incoming edges to |old_dst|, and marking
// all temp nodes invalid.
bool ConvertCutsToFull(
    Graph* graph,
    const string& new_part,
    int data_fd,
    off_t* data_file_size,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());
  set<Vertex::Index> deleted_nodes;
  for (const CutEdgeVertexes& cut : cuts) {
    TEST_AND_RETURN_FALSE(InplaceGenerator::ConvertCutToFullOp(
        graph,
        cut,
        new_part,
        data_fd,
        data_file_size));
    deleted_nodes.insert(cut.new_vertex);
  }
  deleted_nodes.insert(cuts[0].old_dst);

  vector<Vertex::Index> new_op_indexes;
  new_op_indexes.reserve(op_indexes->size());
  for (Vertex::Index vertex_index : *op_indexes) {
    if (utils::SetContainsKey(deleted_nodes, vertex_index))
      continue;
    new_op_indexes.push_back(vertex_index);
  }
  new_op_indexes.push_back(cuts[0].old_dst);
  op_indexes->swap(new_op_indexes);
  InplaceGenerator::GenerateReverseTopoOrderMap(*op_indexes,
                                                reverse_op_indexes);
  return true;
}

// Tries to assign temp blocks for a collection of cuts, all of which share
// the same old_dst member. If temp blocks can't be found, old_dst will be
// converted to a REPLACE or REPLACE_BZ operation. Returns true on success,
// which can happen even if blocks are converted to full. Returns false
// on exceptional error cases.
bool AssignBlockForAdjoiningCuts(
    Graph* graph,
    const string& new_part,
    int data_fd,
    off_t* data_file_size,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());
  const Vertex::Index old_dst = cuts[0].old_dst;
  // Calculate # of blocks needed
  uint64_t blocks_needed = 0;
  vector<uint64_t> cuts_blocks_needed(cuts.size());
  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
    uint64_t cut_blocks_needed = 0;
    for (const Extent& extent : cuts[i].tmp_extents) {
      cut_blocks_needed += extent.num_blocks();
    }
    blocks_needed += cut_blocks_needed;
    cuts_blocks_needed[i] = cut_blocks_needed;
  }

  // Find enough blocks
  ExtentRanges scratch_ranges;
  // Each block that's supplying temp blocks and the corresponding blocks:
  typedef vector<pair<Vertex::Index, ExtentRanges>> SupplierVector;
  SupplierVector block_suppliers;
  uint64_t scratch_blocks_found = 0;
  for (vector<Vertex::Index>::size_type i = (*reverse_op_indexes)[old_dst] + 1,
           e = op_indexes->size(); i < e; ++i) {
    Vertex::Index test_node = (*op_indexes)[i];
    if (!(*graph)[test_node].valid)
      continue;
    // See if this node has sufficient blocks
    ExtentRanges ranges;
    ranges.AddRepeatedExtents((*graph)[test_node].op.dst_extents());
    ranges.SubtractExtent(ExtentForRange(
        kTempBlockStart, kSparseHole - kTempBlockStart));
    ranges.SubtractRepeatedExtents((*graph)[test_node].op.src_extents());
    // For now, for simplicity, subtract out all blocks in read-before
    // dependencies.
    for (Vertex::EdgeMap::const_iterator edge_i =
             (*graph)[test_node].out_edges.begin(),
             edge_e = (*graph)[test_node].out_edges.end();
         edge_i != edge_e; ++edge_i) {
      ranges.SubtractExtents(edge_i->second.extents);
    }
    if (ranges.blocks() == 0)
      continue;

    if (ranges.blocks() + scratch_blocks_found > blocks_needed) {
      // trim down ranges
      vector<Extent> new_ranges = ranges.GetExtentsForBlockCount(
          blocks_needed - scratch_blocks_found);
      ranges = ExtentRanges();
      ranges.AddExtents(new_ranges);
    }
    scratch_ranges.AddRanges(ranges);
    block_suppliers.push_back(make_pair(test_node, ranges));
    scratch_blocks_found += ranges.blocks();
    if (scratch_ranges.blocks() >= blocks_needed)
      break;
  }
  if (scratch_ranges.blocks() < blocks_needed) {
    LOG(INFO) << "Unable to find sufficient scratch";
    TEST_AND_RETURN_FALSE(ConvertCutsToFull(graph,
                                            new_part,
                                            data_fd,
                                            data_file_size,
                                            op_indexes,
                                            reverse_op_indexes,
                                            cuts));
    return true;
  }
  // Use the scratch we found
  TEST_AND_RETURN_FALSE(scratch_ranges.blocks() == scratch_blocks_found);

  // Make all the suppliers depend on this node
  for (const auto& index_range_pair : block_suppliers) {
    graph_utils::AddReadBeforeDepExtents(
        &(*graph)[index_range_pair.first],
        old_dst,
        index_range_pair.second.GetExtentsForBlockCount(
            index_range_pair.second.blocks()));
  }

  // Replace temp blocks in each cut
  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
    const CutEdgeVertexes& cut = cuts[i];
    vector<Extent> real_extents =
        scratch_ranges.GetExtentsForBlockCount(cuts_blocks_needed[i]);
    scratch_ranges.SubtractExtents(real_extents);

    // Fix the old dest node w/ the real blocks
    InplaceGenerator::SubstituteBlocks(&(*graph)[old_dst],
                                       cut.tmp_extents,
                                       real_extents);

    // Fix the new node w/ the real blocks. Since the new node is just a
    // copy operation, we can replace all the dest extents w/ the real
    // blocks.
    DeltaArchiveManifest_InstallOperation* op = &(*graph)[cut.new_vertex].op;
    op->clear_dst_extents();
    StoreExtents(real_extents, op->mutable_dst_extents());
  }
  return true;
}

}  // namespace

bool InplaceGenerator::AssignTempBlocks(
    Graph* graph,
    const string& new_part,
    int data_fd,
    off_t* data_file_size,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());

  // group of cuts w/ the same old_dst:
  vector<CutEdgeVertexes> cuts_group;

  for (vector<CutEdgeVertexes>::size_type i = cuts.size() - 1, e = 0;
       true; --i) {
    LOG(INFO) << "Fixing temp blocks in cut " << i
              << ": old dst: " << cuts[i].old_dst << " new vertex: "
              << cuts[i].new_vertex << " path: "
              << (*graph)[cuts[i].old_dst].file_name;

    if (cuts_group.empty() || (cuts_group[0].old_dst == cuts[i].old_dst)) {
      cuts_group.push_back(cuts[i]);
    } else {
      CHECK(!cuts_group.empty());
      TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
                                                        new_part,
                                                        data_fd,
                                                        data_file_size,
                                                        op_indexes,
                                                        reverse_op_indexes,
                                                        cuts_group));
      cuts_group.clear();
      cuts_group.push_back(cuts[i]);
    }

    if (i == e) {
      // break out of for() loop
      break;
    }
  }
  CHECK(!cuts_group.empty());
  TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
                                                    new_part,
                                                    data_fd,
                                                    data_file_size,
                                                    op_indexes,
                                                    reverse_op_indexes,
                                                    cuts_group));
  return true;
}
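
// An illustrative walk-through (comment only): the cuts arrive sorted by
// the topological position of old_dst, and the loop scans them from back to
// front, so cuts with old_dst = {X, X, Y, Z} are processed as the groups
// {Z}, then {Y}, then {X, X}, each group handed to
// AssignBlockForAdjoiningCuts() in a single call (the final group is
// flushed by the trailing call above).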

bool InplaceGenerator::NoTempBlocksRemain(const Graph& graph) {
  size_t idx = 0;
  for (Graph::const_iterator it = graph.begin(), e = graph.end(); it != e;
       ++it, ++idx) {
    if (!it->valid)
      continue;
    const DeltaArchiveManifest_InstallOperation& op = it->op;
    if (TempBlocksExistInExtents(op.dst_extents()) ||
        TempBlocksExistInExtents(op.src_extents())) {
      LOG(INFO) << "bad extents in node " << idx;
      return false;
    }

    // Check out-edges:
    for (const auto& edge_prop_pair : it->out_edges) {
      if (TempBlocksExistInExtents(edge_prop_pair.second.extents) ||
          TempBlocksExistInExtents(edge_prop_pair.second.write_extents)) {
        LOG(INFO) << "bad out edge in node " << idx;
        return false;
      }
    }
  }
  return true;
}

bool InplaceGenerator::ConvertCutToFullOp(Graph* graph,
                                          const CutEdgeVertexes& cut,
                                          const string& new_part,
                                          int data_fd,
                                          off_t* data_file_size) {
  // Drop all incoming edges to |cut.old_dst|, but keep all of its outgoing
  // edges.
  if ((*graph)[cut.old_dst].op.type() !=
      DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ &&
      (*graph)[cut.old_dst].op.type() !=
      DeltaArchiveManifest_InstallOperation_Type_REPLACE) {
    Vertex::EdgeMap out_edges = (*graph)[cut.old_dst].out_edges;
    graph_utils::DropWriteBeforeDeps(&out_edges);

    // Replace the operation with a REPLACE or REPLACE_BZ to generate the same
    // |new_extents| list of blocks and update the graph.
    vector<AnnotatedOperation> new_aop;
    vector<Extent> new_extents;
    ExtentsToVector((*graph)[cut.old_dst].op.dst_extents(),
                    &new_extents);
    TEST_AND_RETURN_FALSE(diff_utils::DeltaReadFile(
        &new_aop,
        "",  // old_part
        new_part,
        vector<Extent>(),  // old_extents
        new_extents,
        (*graph)[cut.old_dst].file_name,
        -1,  // chunk_blocks, forces to have a single operation.
        data_fd,
        data_file_size,
        false));  // src_ops_allowed
    TEST_AND_RETURN_FALSE(new_aop.size() == 1);
    TEST_AND_RETURN_FALSE(AddInstallOpToGraph(
        graph, cut.old_dst, nullptr, new_aop.front().op, new_aop.front().name));

    (*graph)[cut.old_dst].out_edges = out_edges;

    // Right now we don't have doubly-linked edges, so we have to scan
    // the whole graph.
    graph_utils::DropIncomingEdgesTo(graph, cut.old_dst);
  }

  // Delete temp node
  (*graph)[cut.old_src].out_edges.erase(cut.new_vertex);
  CHECK((*graph)[cut.old_dst].out_edges.find(cut.new_vertex) ==
        (*graph)[cut.old_dst].out_edges.end());
  (*graph)[cut.new_vertex].valid = false;
  LOG(INFO) << "marked node invalid: " << cut.new_vertex;
  return true;
}

bool InplaceGenerator::ConvertGraphToDag(Graph* graph,
                                         const string& new_part,
                                         int fd,
                                         off_t* data_file_size,
                                         vector<Vertex::Index>* final_order,
                                         Vertex::Index scratch_vertex) {
  CycleBreaker cycle_breaker;
  LOG(INFO) << "Finding cycles...";
  set<Edge> cut_edges;
  cycle_breaker.BreakCycles(*graph, &cut_edges);
  LOG(INFO) << "done finding cycles";
  CheckGraph(*graph);

  // Calculate number of scratch blocks needed

  LOG(INFO) << "Cutting cycles...";
  vector<CutEdgeVertexes> cuts;
  TEST_AND_RETURN_FALSE(CutEdges(graph, cut_edges, &cuts));
  LOG(INFO) << "done cutting cycles";
  LOG(INFO) << "There are " << cuts.size() << " cuts.";
  CheckGraph(*graph);

  LOG(INFO) << "Creating initial topological order...";
  TopologicalSort(*graph, final_order);
  LOG(INFO) << "done with initial topo order";
  CheckGraph(*graph);

  LOG(INFO) << "Moving full ops to the back";
  MoveFullOpsToBack(graph, final_order);
  LOG(INFO) << "done moving full ops to back";

  vector<vector<Vertex::Index>::size_type> inverse_final_order;
  GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);

  SortCutsByTopoOrder(*final_order, &cuts);

  if (!cuts.empty())
    TEST_AND_RETURN_FALSE(AssignTempBlocks(graph,
                                           new_part,
                                           fd,
                                           data_file_size,
                                           final_order,
                                           &inverse_final_order,
                                           cuts));
  LOG(INFO) << "Making sure all temp blocks have been allocated";

  // Remove the scratch node, if any
  if (scratch_vertex != Vertex::kInvalidIndex) {
    final_order->erase(final_order->begin() +
                       inverse_final_order[scratch_vertex]);
    (*graph)[scratch_vertex].valid = false;
    GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);
  }

  graph_utils::DumpGraph(*graph);
  CHECK(NoTempBlocksRemain(*graph));
  LOG(INFO) << "done making sure all temp blocks are allocated";
  return true;
}

void InplaceGenerator::CreateScratchNode(uint64_t start_block,
                                         uint64_t num_blocks,
                                         Vertex* vertex) {
  vertex->file_name = "<scratch>";
  vertex->op.set_type(DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ);
  vertex->op.set_data_offset(0);
  vertex->op.set_data_length(0);
  Extent* extent = vertex->op.add_dst_extents();
  extent->set_start_block(start_block);
  extent->set_num_blocks(num_blocks);
}
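
// An illustrative note (comment only): the scratch node is a zero-length
// REPLACE_BZ whose dst_extents cover the unused tail of the partition, as
// set up by GenerateOperations() when the filesystem is smaller than the
// partition. It carries no data; it exists so that the spare blocks count
// as writable supply when AssignTempBlocks() hunts for scratch, and it is
// removed from the final order again in ConvertGraphToDag().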

bool InplaceGenerator::AddInstallOpToBlocksVector(
    const DeltaArchiveManifest_InstallOperation& operation,
    const Graph& graph,
    Vertex::Index vertex,
    vector<Block>* blocks) {
  // The operation must write to at least one block.
  TEST_AND_RETURN_FALSE(operation.dst_extents_size() > 0);

  enum BlockField { READER = 0, WRITER, BLOCK_FIELD_COUNT };
  for (int field = READER; field < BLOCK_FIELD_COUNT; field++) {
    const int extents_size =
        (field == READER) ? operation.src_extents_size() :
        operation.dst_extents_size();
    const char* past_participle = (field == READER) ? "read" : "written";
    const google::protobuf::RepeatedPtrField<Extent>& extents =
        (field == READER) ? operation.src_extents() : operation.dst_extents();
    Vertex::Index Block::*access_type = (field == READER) ?
        &Block::reader : &Block::writer;

    for (int i = 0; i < extents_size; i++) {
      const Extent& extent = extents.Get(i);
      for (uint64_t block = extent.start_block();
           block < (extent.start_block() + extent.num_blocks()); block++) {
        if ((*blocks)[block].*access_type != Vertex::kInvalidIndex) {
          LOG(FATAL) << "Block " << block << " is already "
                     << past_participle << " by "
                     << (*blocks)[block].*access_type << "("
                     << graph[(*blocks)[block].*access_type].file_name
                     << ") and also " << vertex << "("
                     << graph[vertex].file_name << ")";
        }
        (*blocks)[block].*access_type = vertex;
      }
    }
  }
  return true;
}

bool InplaceGenerator::AddInstallOpToGraph(
    Graph* graph,
    Vertex::Index existing_vertex,
    vector<Block>* blocks,
    const DeltaArchiveManifest_InstallOperation operation,
    const string& op_name) {
  Vertex::Index vertex = existing_vertex;
  if (vertex == Vertex::kInvalidIndex) {
    graph->emplace_back();
    vertex = graph->size() - 1;
  }
  (*graph)[vertex].op = operation;
  CHECK((*graph)[vertex].op.has_type());
  (*graph)[vertex].file_name = op_name;

  if (blocks)
    TEST_AND_RETURN_FALSE(InplaceGenerator::AddInstallOpToBlocksVector(
        (*graph)[vertex].op,
        *graph,
        vertex,
        blocks));
  return true;
}

void InplaceGenerator::ApplyMap(vector<uint64_t>* collection,
                                const map<uint64_t, uint64_t>& the_map) {
  for (uint64_t& elem : *collection) {
    const auto map_it = the_map.find(elem);
    if (map_it != the_map.end())
      elem = map_it->second;
  }
}
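
// For example (illustrative comment only): with *collection == {3, 5, 7}
// and the_map == {{5, 100}}, ApplyMap() rewrites the collection in place to
// {3, 100, 7}; elements with no mapping are left untouched.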

bool InplaceGenerator::GenerateOperations(
    const PayloadGenerationConfig& config,
    int data_file_fd,
    off_t* data_file_size,
    vector<AnnotatedOperation>* rootfs_ops,
    vector<AnnotatedOperation>* kernel_ops) {
  unique_ptr<Ext2Filesystem> old_fs = Ext2Filesystem::CreateFromFile(
      config.source.rootfs.path);
  unique_ptr<Ext2Filesystem> new_fs = Ext2Filesystem::CreateFromFile(
      config.target.rootfs.path);

  off_t chunk_blocks = (config.chunk_size == -1 ? -1 :
                        config.chunk_size / config.block_size);

  // Temporary list of operations used to construct the dependency graph.
  vector<AnnotatedOperation> aops;
  TEST_AND_RETURN_FALSE(
      diff_utils::DeltaReadFilesystem(&aops,
                                      config.source.rootfs.path,
                                      config.target.rootfs.path,
                                      old_fs.get(),
                                      new_fs.get(),
                                      chunk_blocks,
                                      data_file_fd,
                                      data_file_size,
                                      true,  // skip_block_0
                                      false));  // src_ops_allowed
  // Convert the rootfs operations to the graph.
  Graph graph;
  CheckGraph(graph);
  vector<Block> blocks(config.target.rootfs.size / config.block_size);
  for (const auto& aop : aops) {
    AddInstallOpToGraph(
        &graph, Vertex::kInvalidIndex, &blocks, aop.op, aop.name);
  }
  LOG(INFO) << "done reading normal files";
  CheckGraph(graph);

  // Final scratch block (if there's space)
  Vertex::Index scratch_vertex = Vertex::kInvalidIndex;
  if (blocks.size() < (config.rootfs_partition_size / kBlockSize)) {
    scratch_vertex = graph.size();
    graph.emplace_back();
    CreateScratchNode(
        blocks.size(),
        (config.rootfs_partition_size / kBlockSize) - blocks.size(),
        &graph.back());
  }

  // Read kernel partition
  TEST_AND_RETURN_FALSE(diff_utils::DeltaCompressKernelPartition(
      config.source.kernel.path,
      config.target.kernel.path,
      config.source.kernel.size,
      config.target.kernel.size,
      config.block_size,
      kernel_ops,
      data_file_fd,
      data_file_size,
      false));  // src_ops_allowed
  LOG(INFO) << "done reading kernel";
  CheckGraph(graph);

  LOG(INFO) << "Creating edges...";
  CreateEdges(&graph, blocks);
  LOG(INFO) << "Done creating edges";
  CheckGraph(graph);

  vector<Vertex::Index> final_order;
  TEST_AND_RETURN_FALSE(ConvertGraphToDag(
      &graph,
      config.target.rootfs.path,
      data_file_fd,
      data_file_size,
      &final_order,
      scratch_vertex));

  // Copy the operations over to rootfs_ops in the final order generated by
  // the topological sort.
  rootfs_ops->clear();
  for (const Vertex::Index vertex_index : final_order) {
    const Vertex& vertex = graph[vertex_index];
    rootfs_ops->emplace_back();
    rootfs_ops->back().op = vertex.op;
    rootfs_ops->back().name = vertex.file_name;
  }

  // Re-add the operation for block 0.
  TEST_AND_RETURN_FALSE(diff_utils::DeltaReadFile(
      rootfs_ops,
      config.source.rootfs.path,
      config.target.rootfs.path,
      vector<Extent>{ExtentForRange(0, 1)},
      vector<Extent>{ExtentForRange(0, 1)},
      "<block-0>",  // operation name
      -1,  // chunk_blocks
      data_file_fd,
      data_file_size,
      false));  // src_ops_allowed

  return true;
}
811
812};  // namespace chromeos_update_engine
813