inplace_generator.cc revision 703022b71fc6a89796f2f97448b1a419007a52ca
// Copyright 2015 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "update_engine/payload_generator/inplace_generator.h"

#include <algorithm>
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "update_engine/extent_ranges.h"
#include "update_engine/payload_constants.h"
#include "update_engine/payload_generator/cycle_breaker.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/graph_types.h"
#include "update_engine/payload_generator/graph_utils.h"
#include "update_engine/payload_generator/metadata.h"
#include "update_engine/payload_generator/topological_sort.h"
#include "update_engine/update_metadata.pb.h"
#include "update_engine/utils.h"

using std::make_pair;
using std::map;
using std::pair;
using std::set;
using std::string;
using std::vector;

namespace chromeos_update_engine {

using Block = DeltaDiffGenerator::Block;

// This class allocates non-existent temp blocks, starting from
// kTempBlockStart. Other code is responsible for converting these
// temp blocks into real blocks, as the client can't read or write to
// these blocks.
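//
// For example (illustrative only): a first call Allocate(10) returns the
// single extent {start_block: kTempBlockStart, num_blocks: 10}; a second
// call Allocate(5) returns {start_block: kTempBlockStart + 10,
// num_blocks: 5}.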
class DummyExtentAllocator {
 public:
  vector<Extent> Allocate(const uint64_t block_count) {
    vector<Extent> ret(1);
    ret[0].set_start_block(next_block_);
    ret[0].set_num_blocks(block_count);
    next_block_ += block_count;
    return ret;
  }

 private:
  uint64_t next_block_{kTempBlockStart};
};

// Takes a vector of blocks and returns an equivalent vector of Extent
// objects.
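//
// For example (illustrative only): CompressExtents({10, 11, 12, 20})
// yields two extents, {start_block: 10, num_blocks: 3} and
// {start_block: 20, num_blocks: 1}, because AppendBlockToExtents merges
// each block into the preceding extent when it is contiguous with it.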
vector<Extent> CompressExtents(const vector<uint64_t>& blocks) {
  vector<Extent> new_extents;
  for (uint64_t block : blocks) {
    graph_utils::AppendBlockToExtents(&new_extents, block);
  }
  return new_extents;
}

void InplaceGenerator::CheckGraph(const Graph& graph) {
  for (const Vertex& v : graph) {
    CHECK(v.op.has_type());
  }
}

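// Rewrites |vertex| so that any reference to the blocks in
// |remove_extents| refers to the corresponding blocks in
// |replace_extents| instead. This covers both the blocks the operation
// reads (src_extents) and the write-before dependency extents on its
// out-edges. |remove_extents| and |replace_extents| must cover the same
// number of blocks.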
void InplaceGenerator::SubstituteBlocks(
    Vertex* vertex,
    const vector<Extent>& remove_extents,
    const vector<Extent>& replace_extents) {
  // First, expand out the blocks that op reads from
  vector<uint64_t> read_blocks =
      DeltaDiffGenerator::ExpandExtents(vertex->op.src_extents());
  {
    // Expand remove_extents and replace_extents
    vector<uint64_t> remove_extents_expanded =
        DeltaDiffGenerator::ExpandExtents(remove_extents);
    vector<uint64_t> replace_extents_expanded =
        DeltaDiffGenerator::ExpandExtents(replace_extents);
    CHECK_EQ(remove_extents_expanded.size(), replace_extents_expanded.size());
    map<uint64_t, uint64_t> conversion;
    for (vector<uint64_t>::size_type i = 0;
         i < replace_extents_expanded.size(); i++) {
      conversion[remove_extents_expanded[i]] = replace_extents_expanded[i];
    }
    utils::ApplyMap(&read_blocks, conversion);
    for (auto& edge_prop_pair : vertex->out_edges) {
      vector<uint64_t> write_before_deps_expanded =
          DeltaDiffGenerator::ExpandExtents(
              edge_prop_pair.second.write_extents);
      utils::ApplyMap(&write_before_deps_expanded, conversion);
      edge_prop_pair.second.write_extents =
          CompressExtents(write_before_deps_expanded);
    }
  }
  // Convert read_blocks back to extents
  vertex->op.clear_src_extents();
  vector<Extent> new_extents = CompressExtents(read_blocks);
  DeltaDiffGenerator::StoreExtents(new_extents,
                                   vertex->op.mutable_src_extents());
}

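// Cuts each edge in |edges| by routing its data through temp blocks:
//  1. Appends a new vertex with a MOVE operation that copies the edge's
//     extents into freshly allocated temp extents.
//  2. Gives the old source an edge to the new vertex carrying the old
//     edge's properties, so the copy happens before the source
//     overwrites the original blocks.
//  3. Rewrites the old destination (via SubstituteBlocks) to read from
//     the temp extents, and deletes the original edge.
//  4. Adds a write-before edge from the old destination to the new
//     vertex, so the copy completes before the destination reads the
//     temp extents.
// Each cut is recorded in |out_cuts|.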
bool InplaceGenerator::CutEdges(Graph* graph,
                                const set<Edge>& edges,
                                vector<CutEdgeVertexes>* out_cuts) {
  DummyExtentAllocator scratch_allocator;
  vector<CutEdgeVertexes> cuts;
  cuts.reserve(edges.size());

  uint64_t scratch_blocks_used = 0;
  for (const Edge& edge : edges) {
    cuts.resize(cuts.size() + 1);
    vector<Extent> old_extents =
        (*graph)[edge.first].out_edges[edge.second].extents;
    // Choose some scratch space.
    scratch_blocks_used += graph_utils::EdgeWeight(*graph, edge);
    cuts.back().tmp_extents =
        scratch_allocator.Allocate(graph_utils::EdgeWeight(*graph, edge));
    // Create a vertex to copy original->scratch.
    cuts.back().new_vertex = graph->size();
    graph->emplace_back();
    cuts.back().old_src = edge.first;
    cuts.back().old_dst = edge.second;

    EdgeProperties& cut_edge_properties =
        (*graph)[edge.first].out_edges.find(edge.second)->second;

    // This should never happen, as we should only be cutting edges between
    // real file nodes, and write-before relationships are created from
    // a real file node to a temp copy node:
    CHECK(cut_edge_properties.write_extents.empty())
        << "Can't cut edge that has write-before relationship.";

    // Make the source node depend on the copy operation.
    (*graph)[edge.first].out_edges.insert(make_pair(graph->size() - 1,
                                                    cut_edge_properties));

    // Set src/dst extents and other proto variables for the copy operation.
    graph->back().op.set_type(DeltaArchiveManifest_InstallOperation_Type_MOVE);
    DeltaDiffGenerator::StoreExtents(
        cut_edge_properties.extents,
        graph->back().op.mutable_src_extents());
    DeltaDiffGenerator::StoreExtents(cuts.back().tmp_extents,
                                     graph->back().op.mutable_dst_extents());
    graph->back().op.set_src_length(
        graph_utils::EdgeWeight(*graph, edge) * kBlockSize);
    graph->back().op.set_dst_length(graph->back().op.src_length());

    // Make the dest node read from the scratch space.
    SubstituteBlocks(
        &((*graph)[edge.second]),
        (*graph)[edge.first].out_edges[edge.second].extents,
        cuts.back().tmp_extents);

    // Delete the old edge.
    CHECK_EQ(static_cast<Graph::size_type>(1),
             (*graph)[edge.first].out_edges.erase(edge.second));

    // Add a write-before edge from dst to the copy operation.
    EdgeProperties write_before_edge_properties;
    write_before_edge_properties.write_extents = cuts.back().tmp_extents;
    (*graph)[edge.second].out_edges.insert(
        make_pair(graph->size() - 1, write_before_edge_properties));
  }
  out_cuts->swap(cuts);
  return true;
}


// Creates all the edges for the graph. Writers of a block point to
// readers of the same block. This is because for an edge A->B, B
// must complete before A executes.
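// For example (illustrative only): if vertex 2 writes block 7 and
// vertex 5 reads block 7, an edge 2->5 is created (or extended) with
// block 7 appended to its extents, so vertex 5 reads the old contents
// of block 7 before vertex 2 overwrites it.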
void InplaceGenerator::CreateEdges(
    Graph* graph,
    const vector<DeltaDiffGenerator::Block>& blocks) {
  for (vector<DeltaDiffGenerator::Block>::size_type i = 0;
       i < blocks.size(); i++) {
    // Blocks with both a reader and writer get an edge
    if (blocks[i].reader == Vertex::kInvalidIndex ||
        blocks[i].writer == Vertex::kInvalidIndex)
      continue;
    // Don't have a node depend on itself
    if (blocks[i].reader == blocks[i].writer)
      continue;
    // See if there's already an edge we can add onto
    Vertex::EdgeMap::iterator edge_it =
        (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
    if (edge_it == (*graph)[blocks[i].writer].out_edges.end()) {
      // No existing edge. Create one
      (*graph)[blocks[i].writer].out_edges.insert(
          make_pair(blocks[i].reader, EdgeProperties()));
      edge_it = (*graph)[blocks[i].writer].out_edges.find(blocks[i].reader);
      CHECK(edge_it != (*graph)[blocks[i].writer].out_edges.end());
    }
    graph_utils::AppendBlockToExtents(&edge_it->second.extents, i);
  }
}

namespace {

class SortCutsByTopoOrderLess {
 public:
  explicit SortCutsByTopoOrderLess(
      const vector<vector<Vertex::Index>::size_type>& table)
      : table_(table) {}
  bool operator()(const CutEdgeVertexes& a, const CutEdgeVertexes& b) {
    return table_[a.old_dst] < table_[b.old_dst];
  }
 private:
  const vector<vector<Vertex::Index>::size_type>& table_;
};

}  // namespace

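// Builds a reverse lookup table mapping a vertex index to its position
// in |op_indexes|. For example (illustrative only): if |op_indexes| is
// {4, 0, 3, 1, 2}, the resulting table satisfies table[4] == 0,
// table[0] == 1, table[3] == 2, table[1] == 3, and table[2] == 4.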
void InplaceGenerator::GenerateReverseTopoOrderMap(
    const vector<Vertex::Index>& op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes) {
  vector<vector<Vertex::Index>::size_type> table(op_indexes.size());
  for (vector<Vertex::Index>::size_type i = 0, e = op_indexes.size();
       i != e; ++i) {
    Vertex::Index node = op_indexes[i];
    if (table.size() < (node + 1)) {
      table.resize(node + 1);
    }
    table[node] = i;
  }
  reverse_op_indexes->swap(table);
}

void InplaceGenerator::SortCutsByTopoOrder(
    const vector<Vertex::Index>& op_indexes,
    vector<CutEdgeVertexes>* cuts) {
  // First, make a reverse lookup table.
  vector<vector<Vertex::Index>::size_type> table;
  GenerateReverseTopoOrderMap(op_indexes, &table);
  SortCutsByTopoOrderLess less(table);
  sort(cuts->begin(), cuts->end(), less);
}

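// Moves REPLACE and REPLACE_BZ (full) operations to the end of
// |op_indexes|, preserving the relative order within each group. Full
// operations read nothing from the old partition, so deferring them
// leaves more blocks available as scratch for the operations that run
// before them.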
void InplaceGenerator::MoveFullOpsToBack(Graph* graph,
                                         vector<Vertex::Index>* op_indexes) {
  vector<Vertex::Index> ret;
  vector<Vertex::Index> full_ops;
  ret.reserve(op_indexes->size());
  for (vector<Vertex::Index>::size_type i = 0, e = op_indexes->size(); i != e;
       ++i) {
    DeltaArchiveManifest_InstallOperation_Type type =
        (*graph)[(*op_indexes)[i]].op.type();
    if (type == DeltaArchiveManifest_InstallOperation_Type_REPLACE ||
        type == DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ) {
      full_ops.push_back((*op_indexes)[i]);
    } else {
      ret.push_back((*op_indexes)[i]);
    }
  }
  LOG(INFO) << "Stats: " << full_ops.size() << " full ops out of "
            << (full_ops.size() + ret.size()) << " total ops.";
  ret.insert(ret.end(), full_ops.begin(), full_ops.end());
  op_indexes->swap(ret);
}

namespace {

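// Returns true if any extent in |extents| touches the temp block range
// (blocks at or above kTempBlockStart), which would mean temp blocks
// were never remapped to real blocks. Sparse holes are skipped.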
template<typename T>
bool TempBlocksExistInExtents(const T& extents) {
  for (int i = 0, e = extents.size(); i < e; ++i) {
    Extent extent = graph_utils::GetElement(extents, i);
    uint64_t start = extent.start_block();
    uint64_t num = extent.num_blocks();
    if (start == kSparseHole)
      continue;
    if (start >= kTempBlockStart || (start + num) >= kTempBlockStart) {
      LOG(ERROR) << "Found temp block: start: " << start << ", num: " << num
                 << ", kTempBlockStart: " << kTempBlockStart;
      return true;
    }
    // Check for wrap-around, which would be a bug:
    CHECK(start <= (start + num));
  }
  return false;
}

// Converts the cuts, which must all have the same |old_dst| member,
// to full operations. It does this by converting the |old_dst| to a
// REPLACE or REPLACE_BZ operation, dropping all incoming edges to
// |old_dst|, and marking all temp nodes invalid.
bool ConvertCutsToFull(
    Graph* graph,
    const string& new_root,
    int data_fd,
    off_t* data_file_size,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());
  set<Vertex::Index> deleted_nodes;
  for (const CutEdgeVertexes& cut : cuts) {
    TEST_AND_RETURN_FALSE(InplaceGenerator::ConvertCutToFullOp(
        graph,
        cut,
        new_root,
        data_fd,
        data_file_size));
    deleted_nodes.insert(cut.new_vertex);
  }
  deleted_nodes.insert(cuts[0].old_dst);

  vector<Vertex::Index> new_op_indexes;
  new_op_indexes.reserve(op_indexes->size());
  for (Vertex::Index vertex_index : *op_indexes) {
    if (utils::SetContainsKey(deleted_nodes, vertex_index))
      continue;
    new_op_indexes.push_back(vertex_index);
  }
  new_op_indexes.push_back(cuts[0].old_dst);
  op_indexes->swap(new_op_indexes);
  InplaceGenerator::GenerateReverseTopoOrderMap(*op_indexes,
                                                reverse_op_indexes);
  return true;
}

// Tries to assign temp blocks for a collection of cuts, all of which share
// the same old_dst member. If temp blocks can't be found, old_dst will be
// converted to a REPLACE or REPLACE_BZ operation. Returns true on success,
// which can happen even if blocks are converted to full. Returns false
// on exceptional error cases.
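// The search for scratch walks the operations that come after old_dst in
// the topological order: blocks that such an operation will write
// (dst_extents) but does not itself read can be borrowed as scratch,
// since they hold no needed data until that operation runs.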
bool AssignBlockForAdjoiningCuts(
    Graph* graph,
    const string& new_root,
    int data_fd,
    off_t* data_file_size,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());
  const Vertex::Index old_dst = cuts[0].old_dst;
  // Calculate # of blocks needed.
  uint64_t blocks_needed = 0;
  vector<uint64_t> cuts_blocks_needed(cuts.size());
  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
    uint64_t cut_blocks_needed = 0;
    for (const Extent& extent : cuts[i].tmp_extents) {
      cut_blocks_needed += extent.num_blocks();
    }
    blocks_needed += cut_blocks_needed;
    cuts_blocks_needed[i] = cut_blocks_needed;
  }

  // Find enough blocks.
  ExtentRanges scratch_ranges;
  // Each node that's supplying temp blocks and the corresponding blocks:
  typedef vector<pair<Vertex::Index, ExtentRanges>> SupplierVector;
  SupplierVector block_suppliers;
  uint64_t scratch_blocks_found = 0;
  for (vector<Vertex::Index>::size_type i = (*reverse_op_indexes)[old_dst] + 1,
           e = op_indexes->size(); i < e; ++i) {
    Vertex::Index test_node = (*op_indexes)[i];
    if (!(*graph)[test_node].valid)
      continue;
    // See if this node has sufficient blocks.
    ExtentRanges ranges;
    ranges.AddRepeatedExtents((*graph)[test_node].op.dst_extents());
    ranges.SubtractExtent(ExtentForRange(
        kTempBlockStart, kSparseHole - kTempBlockStart));
    ranges.SubtractRepeatedExtents((*graph)[test_node].op.src_extents());
    // For now, for simplicity, subtract out all blocks in read-before
    // dependencies.
    for (Vertex::EdgeMap::const_iterator edge_i =
             (*graph)[test_node].out_edges.begin(),
             edge_e = (*graph)[test_node].out_edges.end();
         edge_i != edge_e; ++edge_i) {
      ranges.SubtractExtents(edge_i->second.extents);
    }
    if (ranges.blocks() == 0)
      continue;

    if (ranges.blocks() + scratch_blocks_found > blocks_needed) {
      // Trim down ranges.
      vector<Extent> new_ranges = ranges.GetExtentsForBlockCount(
          blocks_needed - scratch_blocks_found);
      ranges = ExtentRanges();
      ranges.AddExtents(new_ranges);
    }
    scratch_ranges.AddRanges(ranges);
    block_suppliers.push_back(make_pair(test_node, ranges));
    scratch_blocks_found += ranges.blocks();
    if (scratch_ranges.blocks() >= blocks_needed)
      break;
  }
  if (scratch_ranges.blocks() < blocks_needed) {
    LOG(INFO) << "Unable to find sufficient scratch";
    TEST_AND_RETURN_FALSE(ConvertCutsToFull(graph,
                                            new_root,
                                            data_fd,
                                            data_file_size,
                                            op_indexes,
                                            reverse_op_indexes,
                                            cuts));
    return true;
  }
  // Use the scratch we found.
  TEST_AND_RETURN_FALSE(scratch_ranges.blocks() == scratch_blocks_found);

  // Make all the suppliers depend on this node.
  for (const auto& index_range_pair : block_suppliers) {
    graph_utils::AddReadBeforeDepExtents(
        &(*graph)[index_range_pair.first],
        old_dst,
        index_range_pair.second.GetExtentsForBlockCount(
            index_range_pair.second.blocks()));
  }

  // Replace temp blocks in each cut.
  for (vector<CutEdgeVertexes>::size_type i = 0; i < cuts.size(); ++i) {
    const CutEdgeVertexes& cut = cuts[i];
    vector<Extent> real_extents =
        scratch_ranges.GetExtentsForBlockCount(cuts_blocks_needed[i]);
    scratch_ranges.SubtractExtents(real_extents);

    // Fix the old dest node w/ the real blocks.
    InplaceGenerator::SubstituteBlocks(&(*graph)[old_dst],
                                       cut.tmp_extents,
                                       real_extents);

    // Fix the new node w/ the real blocks. Since the new node is just a
    // copy operation, we can replace all the dest extents w/ the real
    // blocks.
    DeltaArchiveManifest_InstallOperation* op = &(*graph)[cut.new_vertex].op;
    op->clear_dst_extents();
    DeltaDiffGenerator::StoreExtents(real_extents, op->mutable_dst_extents());
  }
  return true;
}

}  // namespace

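// Walks |cuts| (which must be sorted by topological order of old_dst) in
// reverse, grouping together adjoining cuts that share the same old_dst
// and assigning real blocks to each group via
// AssignBlockForAdjoiningCuts.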
bool InplaceGenerator::AssignTempBlocks(
    Graph* graph,
    const string& new_root,
    int data_fd,
    off_t* data_file_size,
    vector<Vertex::Index>* op_indexes,
    vector<vector<Vertex::Index>::size_type>* reverse_op_indexes,
    const vector<CutEdgeVertexes>& cuts) {
  CHECK(!cuts.empty());

  // Group of cuts w/ the same old_dst:
  vector<CutEdgeVertexes> cuts_group;

  // Iterate over the cuts in reverse order. |i| is unsigned, so the loop
  // terminates via the explicit break below once |i| reaches |e|.
  for (vector<CutEdgeVertexes>::size_type i = cuts.size() - 1, e = 0;
       true; --i) {
    LOG(INFO) << "Fixing temp blocks in cut " << i
              << ": old dst: " << cuts[i].old_dst << " new vertex: "
              << cuts[i].new_vertex << " path: "
              << (*graph)[cuts[i].old_dst].file_name;

    if (cuts_group.empty() || (cuts_group[0].old_dst == cuts[i].old_dst)) {
      cuts_group.push_back(cuts[i]);
    } else {
      CHECK(!cuts_group.empty());
      TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
                                                        new_root,
                                                        data_fd,
                                                        data_file_size,
                                                        op_indexes,
                                                        reverse_op_indexes,
                                                        cuts_group));
      cuts_group.clear();
      cuts_group.push_back(cuts[i]);
    }

    if (i == e) {
      // Break out of the for() loop.
      break;
    }
  }
  CHECK(!cuts_group.empty());
  TEST_AND_RETURN_FALSE(AssignBlockForAdjoiningCuts(graph,
                                                    new_root,
                                                    data_fd,
                                                    data_file_size,
                                                    op_indexes,
                                                    reverse_op_indexes,
                                                    cuts_group));
  return true;
}

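// Returns true if no operation or edge of any valid vertex in |graph|
// still refers to temp blocks.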
bool InplaceGenerator::NoTempBlocksRemain(const Graph& graph) {
  size_t idx = 0;
  for (Graph::const_iterator it = graph.begin(), e = graph.end(); it != e;
       ++it, ++idx) {
    if (!it->valid)
      continue;
    const DeltaArchiveManifest_InstallOperation& op = it->op;
    if (TempBlocksExistInExtents(op.dst_extents()) ||
        TempBlocksExistInExtents(op.src_extents())) {
      LOG(INFO) << "Found temp blocks in extents of node " << idx;
      return false;
    }

    // Check out-edges:
    for (const auto& edge_prop_pair : it->out_edges) {
      if (TempBlocksExistInExtents(edge_prop_pair.second.extents) ||
          TempBlocksExistInExtents(edge_prop_pair.second.write_extents)) {
        LOG(INFO) << "Found temp blocks in out-edges of node " << idx;
        return false;
      }
    }
  }
  return true;
}

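// Converts the destination of |cut| to a full (REPLACE or REPLACE_BZ)
// operation read from |new_root|, drops all incoming edges to it while
// keeping its outgoing edges (minus write-before dependencies), and
// marks the cut's temp copy node invalid.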
bool InplaceGenerator::ConvertCutToFullOp(Graph* graph,
                                          const CutEdgeVertexes& cut,
                                          const string& new_root,
                                          int data_fd,
                                          off_t* data_file_size) {
  // Drop all incoming edges, but keep all outgoing edges.
  if ((*graph)[cut.old_dst].op.type() !=
      DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ &&
      (*graph)[cut.old_dst].op.type() !=
      DeltaArchiveManifest_InstallOperation_Type_REPLACE) {
    Vertex::EdgeMap out_edges = (*graph)[cut.old_dst].out_edges;
    graph_utils::DropWriteBeforeDeps(&out_edges);

    TEST_AND_RETURN_FALSE(DeltaDiffGenerator::DeltaReadFile(
        graph,
        cut.old_dst,
        nullptr,
        kEmptyPath,
        new_root,
        (*graph)[cut.old_dst].file_name,
        (*graph)[cut.old_dst].chunk_offset,
        (*graph)[cut.old_dst].chunk_size,
        data_fd,
        data_file_size,
        false));  // src_ops_allowed

    (*graph)[cut.old_dst].out_edges = out_edges;

    // Right now we don't have doubly-linked edges, so we have to scan
    // the whole graph.
    graph_utils::DropIncomingEdgesTo(graph, cut.old_dst);
  }

  // Delete the temp node.
  (*graph)[cut.old_src].out_edges.erase(cut.new_vertex);
  CHECK((*graph)[cut.old_dst].out_edges.find(cut.new_vertex) ==
        (*graph)[cut.old_dst].out_edges.end());
  (*graph)[cut.new_vertex].valid = false;
  LOG(INFO) << "marked node invalid: " << cut.new_vertex;
  return true;
}

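// Converts |graph| into a DAG suitable for in-place application:
//  1. Breaks cycles (CycleBreaker) and cuts the offending edges.
//  2. Topologically sorts the graph and moves full operations to the
//     back of the order.
//  3. Assigns real scratch blocks to the temp blocks introduced by the
//     cuts, falling back to full operations where no scratch is found.
//  4. Removes the scratch node, if one was supplied, and verifies that
//     no temp blocks remain.
// On success, |final_order| holds the order in which the operations
// should be applied.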
bool InplaceGenerator::ConvertGraphToDag(Graph* graph,
                                         const string& new_root,
                                         int fd,
                                         off_t* data_file_size,
                                         vector<Vertex::Index>* final_order,
                                         Vertex::Index scratch_vertex) {
  CycleBreaker cycle_breaker;
  LOG(INFO) << "Finding cycles...";
  set<Edge> cut_edges;
  cycle_breaker.BreakCycles(*graph, &cut_edges);
  LOG(INFO) << "done finding cycles";
  CheckGraph(*graph);

  LOG(INFO) << "Cutting cycles...";
  vector<CutEdgeVertexes> cuts;
  TEST_AND_RETURN_FALSE(CutEdges(graph, cut_edges, &cuts));
  LOG(INFO) << "done cutting cycles";
  LOG(INFO) << "There are " << cuts.size() << " cuts.";
  CheckGraph(*graph);

  LOG(INFO) << "Creating initial topological order...";
  TopologicalSort(*graph, final_order);
  LOG(INFO) << "done with initial topo order";
  CheckGraph(*graph);

  LOG(INFO) << "Moving full ops to the back";
  MoveFullOpsToBack(graph, final_order);
  LOG(INFO) << "done moving full ops to back";

  vector<vector<Vertex::Index>::size_type> inverse_final_order;
  GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);

  SortCutsByTopoOrder(*final_order, &cuts);

  if (!cuts.empty())
    TEST_AND_RETURN_FALSE(AssignTempBlocks(graph,
                                           new_root,
                                           fd,
                                           data_file_size,
                                           final_order,
                                           &inverse_final_order,
                                           cuts));
  LOG(INFO) << "Making sure all temp blocks have been allocated";

  // Remove the scratch node, if any.
  if (scratch_vertex != Vertex::kInvalidIndex) {
    final_order->erase(final_order->begin() +
                       inverse_final_order[scratch_vertex]);
    (*graph)[scratch_vertex].valid = false;
    GenerateReverseTopoOrderMap(*final_order, &inverse_final_order);
  }

  graph_utils::DumpGraph(*graph);
  CHECK(NoTempBlocksRemain(*graph));
  LOG(INFO) << "done making sure all temp blocks are allocated";
  return true;
}

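// Initializes |vertex| as a scratch node: a REPLACE_BZ operation with no
// data whose dst extent covers blocks [start_block, start_block +
// num_blocks). Covering those otherwise-unused blocks lets the
// block-assignment pass hand them out as scratch.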
void InplaceGenerator::CreateScratchNode(uint64_t start_block,
                                         uint64_t num_blocks,
                                         Vertex* vertex) {
  vertex->file_name = "<scratch>";
  vertex->op.set_type(DeltaArchiveManifest_InstallOperation_Type_REPLACE_BZ);
  vertex->op.set_data_offset(0);
  vertex->op.set_data_length(0);
  Extent* extent = vertex->op.add_dst_extents();
  extent->set_start_block(start_block);
  extent->set_num_blocks(num_blocks);
}

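// Records |vertex| as the reader of every block in |operation|'s
// src_extents and the writer of every block in its dst_extents,
// LOG(FATAL)-ing if a block already has a different reader or writer.
// The pointer-to-member |access_type| selects which Block field (reader
// or writer) is updated on each pass of the outer loop.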
bool InplaceGenerator::AddInstallOpToBlocksVector(
    const DeltaArchiveManifest_InstallOperation& operation,
    const Graph& graph,
    Vertex::Index vertex,
    vector<DeltaDiffGenerator::Block>* blocks) {
  // Sanity check: an install operation must write to at least one block.
  TEST_AND_RETURN_FALSE(operation.dst_extents_size() > 0);

  enum BlockField { READER = 0, WRITER, BLOCK_FIELD_COUNT };
  for (int field = READER; field < BLOCK_FIELD_COUNT; field++) {
    const int extents_size =
        (field == READER) ? operation.src_extents_size() :
        operation.dst_extents_size();
    const char* past_participle = (field == READER) ? "read" : "written";
    const google::protobuf::RepeatedPtrField<Extent>& extents =
        (field == READER) ? operation.src_extents() : operation.dst_extents();
    Vertex::Index DeltaDiffGenerator::Block::*access_type =
        (field == READER) ? &DeltaDiffGenerator::Block::reader
            : &DeltaDiffGenerator::Block::writer;

    for (int i = 0; i < extents_size; i++) {
      const Extent& extent = extents.Get(i);
      if (extent.start_block() == kSparseHole) {
        // Hole in sparse file; skip.
        continue;
      }
      for (uint64_t block = extent.start_block();
           block < (extent.start_block() + extent.num_blocks()); block++) {
        if ((*blocks)[block].*access_type != Vertex::kInvalidIndex) {
          LOG(FATAL) << "Block " << block << " is already "
                     << past_participle << " by "
                     << (*blocks)[block].*access_type << "("
                     << graph[(*blocks)[block].*access_type].file_name
                     << ") and also " << vertex << "("
                     << graph[vertex].file_name << ")";
        }
        (*blocks)[block].*access_type = vertex;
      }
    }
  }
  return true;
}

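// Top-level entry point for in-place payload generation. Builds the
// dependency graph from file diffs, filesystem metadata, and unwritten
// blocks; diffs the kernel partition separately; then converts the graph
// to a DAG and emits the rootfs operations in topologically sorted
// order.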
bool InplaceGenerator::GenerateOperations(
    const PayloadGenerationConfig& config,
    int data_file_fd,
    off_t* data_file_size,
    vector<AnnotatedOperation>* rootfs_ops,
    vector<AnnotatedOperation>* kernel_ops) {
  Graph graph;
  CheckGraph(graph);
  vector<Block> blocks(config.target.rootfs_size / config.block_size);

  TEST_AND_RETURN_FALSE(
      DeltaDiffGenerator::DeltaReadFiles(&graph,
                                         &blocks,
                                         config.source.rootfs_mountpt,
                                         config.target.rootfs_mountpt,
                                         config.chunk_size,
                                         data_file_fd,
                                         data_file_size,
                                         false));  // src_ops_allowed
  LOG(INFO) << "done reading normal files";
  CheckGraph(graph);

  LOG(INFO) << "Starting metadata processing";
  TEST_AND_RETURN_FALSE(Metadata::DeltaReadMetadata(
      &graph,
      &blocks,
      config.source.rootfs_part,
      config.target.rootfs_part,
      data_file_fd,
      data_file_size));
  LOG(INFO) << "Done metadata processing";
  CheckGraph(graph);

  graph.emplace_back();
  TEST_AND_RETURN_FALSE(
      DeltaDiffGenerator::ReadUnwrittenBlocks(blocks,
                                              data_file_fd,
                                              data_file_size,
                                              config.source.rootfs_part,
                                              config.target.rootfs_part,
                                              &graph.back()));
  if (graph.back().op.data_length() == 0) {
    LOG(INFO) << "No unwritten blocks to write, omitting operation";
    graph.pop_back();
  }

  // Final scratch block (if there's space).
  Vertex::Index scratch_vertex = Vertex::kInvalidIndex;
  if (blocks.size() < (config.rootfs_partition_size / kBlockSize)) {
    scratch_vertex = graph.size();
    graph.emplace_back();
    CreateScratchNode(
        blocks.size(),
        (config.rootfs_partition_size / kBlockSize) - blocks.size(),
        &graph.back());
  }

  // Read kernel partition.
  TEST_AND_RETURN_FALSE(
      DeltaDiffGenerator::DeltaCompressKernelPartition(
          config.source.kernel_part,
          config.target.kernel_part,
          kernel_ops,
          data_file_fd,
          data_file_size,
          false));  // src_ops_allowed

  LOG(INFO) << "done reading kernel";
  CheckGraph(graph);

  LOG(INFO) << "Creating edges...";
  CreateEdges(&graph, blocks);
  LOG(INFO) << "Done creating edges";
  CheckGraph(graph);

  vector<Vertex::Index> final_order;
  TEST_AND_RETURN_FALSE(ConvertGraphToDag(
      &graph,
      config.target.rootfs_mountpt,
      data_file_fd,
      data_file_size,
      &final_order,
      scratch_vertex));

  // Copy operations over to rootfs_ops in the final order generated by
  // the topological sort.
  rootfs_ops->clear();
  for (const Vertex::Index vertex_index : final_order) {
    const Vertex& vertex = graph[vertex_index];
    rootfs_ops->emplace_back();
    rootfs_ops->back().op = vertex.op;
    rootfs_ops->back().SetNameFromFileAndChunk(
        vertex.file_name, vertex.chunk_offset, vertex.chunk_size);
  }

  return true;
}

}  // namespace chromeos_update_engine