inplace_generator.h revision aea4c1cea20dda7ae7e85fc8924a2d784f70d806
//
// Copyright (C) 2015 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

#ifndef UPDATE_ENGINE_PAYLOAD_GENERATOR_INPLACE_GENERATOR_H_
#define UPDATE_ENGINE_PAYLOAD_GENERATOR_INPLACE_GENERATOR_H_

#include <map>
#include <set>
#include <string>
#include <vector>

#include "update_engine/payload_generator/blob_file_writer.h"
#include "update_engine/payload_generator/delta_diff_generator.h"
#include "update_engine/payload_generator/graph_types.h"
#include "update_engine/payload_generator/operations_generator.h"

// InplaceGenerator contains all functionality related to the inplace algorithm
// for generating update payloads. These are the functions used when delta minor
// version is 1.

namespace chromeos_update_engine {

// This struct stores all relevant info for an edge that is cut between
// nodes old_src -> old_dst by creating new vertex new_vertex. The new
// relationship is:
// old_src -(read before)-> new_vertex <-(write before)- old_dst
// new_vertex is a MOVE operation that moves some existing blocks into
// temp space. The temp extents are, by necessity, stored in new_vertex
// (as dst extents) and old_dst (as src extents), but they are also broken
// out into tmp_extents, as the nodes themselves may contain many more
// extents.
struct CutEdgeVertexes {
  Vertex::Index new_vertex;
  Vertex::Index old_src;
  Vertex::Index old_dst;
  std::vector<Extent> tmp_extents;
};

class InplaceGenerator : public OperationsGenerator {
 public:
  // Represents a disk block on the install partition.
  struct Block {
    // During install, each block on the install partition will be written
    // and some may be read (in all likelihood, many will be read).
    // The reading and writing will be performed by InstallOperations,
    // each of which has a corresponding vertex in a graph.
    // A Block object tells which vertex will read or write this block
    // at install time.
    // Generally, there will be a vector of Block objects whose length
    // is the number of blocks on the install partition.
    Block() : reader(Vertex::kInvalidIndex), writer(Vertex::kInvalidIndex) {}
    Vertex::Index reader;
    Vertex::Index writer;
  };
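  //
  // Illustrative sketch (hypothetical caller; partition_blocks and i are
  // made-up names, not part of this interface): after
  // AddInstallOpToBlocksVector() has populated one Block per filesystem
  // block, CreateEdges() links the writer of each block to its reader.
  //
  //   std::vector<Block> blocks(partition_blocks);
  //   // ... fill via AddInstallOpToBlocksVector() for every operation ...
  //   if (blocks[i].writer != Vertex::kInvalidIndex &&
  //       blocks[i].reader != Vertex::kInvalidIndex) {
  //     // Block i is both written and read, so CreateEdges() will add an
  //     // edge from blocks[i].writer to blocks[i].reader.
  //   }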

  InplaceGenerator() = default;

  // Checks that all the operations in the graph have a type assigned.
  static void CheckGraph(const Graph& graph);

  // Modifies blocks read by 'op' so that any blocks referred to by
  // 'remove_extents' are replaced with blocks from 'replace_extents'.
  // 'remove_extents' and 'replace_extents' must cover the same number of
  // blocks. Blocks will be substituted in the order listed in the vectors.
  // E.g. if 'op' reads blocks 1, 2, 3, 4, 5, 6, 7, 8, 'remove_extents'
  // contains blocks 6, 2, 3, 5 and 'replace_extents' contains
  // 12, 13, 14, 15, then 'op' will be changed to read from:
  // 1, 13, 14, 4, 15, 12, 7, 8
  static void SubstituteBlocks(Vertex* vertex,
                               const std::vector<Extent>& remove_extents,
                               const std::vector<Extent>& replace_extents);

  // Cuts 'edges' from 'graph' according to the AU algorithm. This means
  // for each edge A->B, remove the dependency that B occurs before A.
  // Do this by creating a new operation X that copies from the blocks
  // specified by the edge's properties to temp space T. Modify B to read
  // from T rather than the blocks in the edge. Modify A to depend on X,
  // but not on B. The temp space is allocated from the temp block range
  // (see NoTempBlocksRemain()). Returns true on success.
  static bool CutEdges(Graph* graph,
                       const std::set<Edge>& edges,
                       std::vector<CutEdgeVertexes>* out_cuts);

  // Creates all the edges for the graph. Writers of a block point to
  // readers of the same block. This is because for an edge A->B, B
  // must complete before A executes.
  static void CreateEdges(Graph* graph,
                          const std::vector<Block>& blocks);

  // Takes |op_indexes|, which is effectively a mapping from order in
  // which the op is performed -> graph vertex index, and produces the
  // reverse: a mapping from graph vertex index -> op_indexes index.
  static void GenerateReverseTopoOrderMap(
      const std::vector<Vertex::Index>& op_indexes,
      std::vector<std::vector<Vertex::Index>::size_type>* reverse_op_indexes);
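  //
  // Example for GenerateReverseTopoOrderMap() above (illustrative only):
  // with op_indexes = {2, 0, 1}, meaning vertex 2 is performed first, then
  // vertex 0, then vertex 1, the output satisfies
  // (*reverse_op_indexes)[2] == 0, (*reverse_op_indexes)[0] == 1 and
  // (*reverse_op_indexes)[1] == 2.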

  // Sorts the vector |cuts| by its |cuts[].old_dst| member. Order is
  // determined by the order of elements in |op_indexes|.
  static void SortCutsByTopoOrder(
      const std::vector<Vertex::Index>& op_indexes,
      std::vector<CutEdgeVertexes>* cuts);

  // Given a |graph| and a topologically sorted order |op_indexes|, alters
  // |op_indexes| to move all the full operations to the end of the vector.
  // Full operations should not be depended on, so this is safe.
  static void MoveAndSortFullOpsToBack(Graph* graph,
                                       std::vector<Vertex::Index>* op_indexes);

  // Returns true iff there are no extents in the graph that refer to temp
  // blocks. Temp blocks are in the range [kTempBlockStart, kSparseHole).
  static bool NoTempBlocksRemain(const Graph& graph);

  // Takes a |graph|, which has edges that must be cut, as listed in
  // |cuts|. Cuts the edges. Maintains a list of the order in which the
  // operations will be performed (in |op_indexes|) and the reverse (in
  // |reverse_op_indexes|). Cutting edges requires scratch space, and if
  // insufficient scratch is found, the file is re-read and will be sent
  // down (either as REPLACE or REPLACE_BZ). Returns true on success.
  static bool AssignTempBlocks(
      Graph* graph,
      const std::string& new_part,
      BlobFileWriter* blob_file,
      std::vector<Vertex::Index>* op_indexes,
      std::vector<std::vector<Vertex::Index>::size_type>* reverse_op_indexes,
      const std::vector<CutEdgeVertexes>& cuts);

  // Handles allocation of temp blocks to a cut edge by converting the
  // dest node to a full op. This removes the need for temp blocks, but
  // comes at the cost of a worse compression ratio.
  // For example, say we have A->B->A. It would first be cut to form:
  // A->B->N<-A, where N copies blocks to temp space. If there are no
  // temp blocks, this function can be called to convert it to the form:
  // A->B. Now, A is a full operation.
  static bool ConvertCutToFullOp(Graph* graph,
                                 const CutEdgeVertexes& cut,
                                 const std::string& new_part,
                                 BlobFileWriter* blob_file);

  // Takes a |graph| that is not yet a DAG (it represents the files just
  // read from disk) and converts it into a DAG by breaking all cycles and
  // finding temp space to resolve the broken edges.
  // The final order of the nodes is given in |final_order|.
  // Some files may need to be re-read from disk, which is why |new_part|
  // and |blob_file| are passed.
  // If |scratch_vertex| is not kInvalidIndex, removes it from
  // |final_order| before returning.
  // Returns true on success.
  static bool ConvertGraphToDag(Graph* graph,
                                const std::string& new_part,
                                BlobFileWriter* blob_file,
                                std::vector<Vertex::Index>* final_order,
                                Vertex::Index scratch_vertex);

  // Creates a dummy REPLACE_BZ node in the given |vertex|. This can be used
  // to provide scratch space. The node writes |num_blocks| blocks starting
  // at |start_block|. The node should be marked invalid before writing all
  // nodes to the output file.
  static void CreateScratchNode(uint64_t start_block,
                                uint64_t num_blocks,
                                Vertex* vertex);
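  //
  // Illustrative sketch (hypothetical names partition_blocks and
  // num_scratch_blocks; not part of this interface): a caller could reserve
  // the tail of the partition as scratch space and later add the resulting
  // vertex to the graph.
  //
  //   Vertex scratch_vertex;
  //   CreateScratchNode(partition_blocks - num_scratch_blocks,
  //                     num_scratch_blocks, &scratch_vertex);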

  // The |blocks| vector contains a reader and writer for each block on the
  // filesystem that's being in-place updated. We populate the reader/writer
  // fields of |blocks| by calling this function.
  // For each block in |operation| that is read or written, find that block
  // in |blocks| and set the reader/writer field to the passed |vertex|.
  // |graph| is not strictly necessary, but useful for printing out
  // error messages.
  static bool AddInstallOpToBlocksVector(const InstallOperation& operation,
                                         const Graph& graph,
                                         Vertex::Index vertex,
                                         std::vector<Block>* blocks);

  // Adds a vertex (if |existing_vertex| is Vertex::kInvalidIndex) or updates
  // the |existing_vertex| with the passed |operation|.
  // This method also registers the vertex as the reader or writer of the
  // blocks involved in the operation, updating the |blocks| vector. The
  // |op_name| associated with the Vertex is used for logging purposes.
  static bool AddInstallOpToGraph(Graph* graph,
                                  Vertex::Index existing_vertex,
                                  std::vector<Block>* blocks,
                                  const InstallOperation operation,
                                  const std::string& op_name);

  // Applies the transformation stored in |the_map| to the |collection|
  // vector, replacing each map key found in |collection| with its associated
  // value in |the_map|.
  static void ApplyMap(std::vector<uint64_t>* collection,
                       const std::map<uint64_t, uint64_t>& the_map);
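  //
  // Example (illustrative only): with collection = {1, 5, 9} and
  // the_map = {{5, 42}}, ApplyMap(&collection, the_map) leaves collection
  // as {1, 42, 9}.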

  // Resolves all read-after-write dependencies in the operation list |aops|.
  // The operations in |aops| are such that they generate the desired
  // |new_part| if applied while always reading from the original image. This
  // function reorders the operations and generates new operations when
  // needed to make these operations produce the same |new_part| result when
  // applied in-place. Any new operations write their blobs through
  // |blob_file| as needed.
  // On success, stores the new operations in |aops| in the right order and
  // returns true.
  static bool ResolveReadAfterWriteDependencies(
      const PartitionConfig& new_part,
      uint64_t partition_size,
      size_t block_size,
      BlobFileWriter* blob_file,
      std::vector<AnnotatedOperation>* aops);

  // Generates the list of operations to update in-place from the partition
  // |old_part| to |new_part|. The |partition_size| should be at least
  // |new_part.size|, and any extra space there can be used as scratch space.
  // The operations generated will not write more than |hard_chunk_blocks|
  // blocks, while |soft_chunk_blocks| is the preferred chunk size. Any new
  // operation blobs are written through |blob_file|.
  // On success, stores the new operations in |aops| and returns true.
  static bool GenerateOperationsForPartition(
      const PartitionConfig& old_part,
      const PartitionConfig& new_part,
      uint64_t partition_size,
      size_t block_size,
      ssize_t hard_chunk_blocks,
      size_t soft_chunk_blocks,
      BlobFileWriter* blob_file,
      std::vector<AnnotatedOperation>* aops);
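  //
  // Illustrative caller sketch (hypothetical variable names; not taken from
  // this header):
  //
  //   std::vector<AnnotatedOperation> aops;
  //   if (!InplaceGenerator::GenerateOperationsForPartition(
  //           old_part, new_part, partition_size, block_size,
  //           hard_chunk_blocks, soft_chunk_blocks, blob_file, &aops)) {
  //     return false;
  //   }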

  // Generates the update payload operations for the kernel and rootfs using
  // only operations that read from the target and/or write to the target,
  // hence applying the payload "in-place" in the target partition. This
  // method assumes that the contents of the source image are pre-copied to
  // the target partition, up to the size of the source image. Use this
  // method to generate a delta update with the minor version
  // kInPlaceMinorPayloadVersion.
  // The rootfs operations are stored in |rootfs_ops| and the kernel
  // operations in |kernel_ops|, in the order in which they should be
  // applied. All the offsets in the operations reference the data written
  // through |blob_file|.
  bool GenerateOperations(
      const PayloadGenerationConfig& config,
      BlobFileWriter* blob_file,
      std::vector<AnnotatedOperation>* rootfs_ops,
      std::vector<AnnotatedOperation>* kernel_ops) override;

 private:
  DISALLOW_COPY_AND_ASSIGN(InplaceGenerator);
};

}  // namespace chromeos_update_engine

#endif  // UPDATE_ENGINE_PAYLOAD_GENERATOR_INPLACE_GENERATOR_H_