sparse_img.py revision e9b619108dbe0c358ba6c14e27c92a623a7c1059
# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import bisect
import os
import struct
from hashlib import sha1

import rangelib


class SparseImage(object):
  """Wraps a sparse image file into an image object.

  Wraps a sparse image file (and optional file map and clobbered_blocks) into
  an image object suitable for passing to BlockImageDiff. file_map contains
  the mapping between files and their blocks. clobbered_blocks contains the
  set of blocks that should always be written to the target regardless of
  the old contents (i.e. copied rather than patched). clobbered_blocks should
  be in the form of a string like "0" or "0 1-5 8".
  """

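  # A minimal usage sketch (hypothetical file names; BlockImageDiff is
  # defined in the accompanying blockimgdiff.py):
  #
  #   simg = SparseImage("system.img", file_map_fn="system.map",
  #                      clobbered_blocks="0 1-5 8")
  #   print(simg.TotalSha1())       # SHA-1 over the care_map data
  #   blobs = simg.ReadRangeSet(simg.care_map)
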
  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None):
    self.simg_f = f = open(simg_fn, "rb")

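    # The 28-byte sparse image header, little-endian, as laid out in
    # libsparse's sparse_format.h: magic (4 bytes), major_version (2),
    # minor_version (2), file_hdr_sz (2), chunk_hdr_sz (2), blk_sz (4),
    # total_blks (4), total_chunks (4) and image_checksum (4, unused here).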
    header_bin = f.read(28)
    header = struct.unpack("<I4H4I", header_bin)

    magic = header[0]
    major_version = header[1]
    minor_version = header[2]
    file_hdr_sz = header[3]
    chunk_hdr_sz = header[4]
    self.blocksize = blk_sz = header[5]
    self.total_blocks = total_blks = header[6]
    total_chunks = header[7]

    if magic != 0xED26FF3A:
      raise ValueError("Magic should be 0xED26FF3A but is 0x%08X" % (magic,))
    if major_version != 1 or minor_version != 0:
      raise ValueError("I know about version 1.0, but this is version %u.%u" %
                       (major_version, minor_version))
    if file_hdr_sz != 28:
      raise ValueError("File header size was expected to be 28, but is %u." %
                       (file_hdr_sz,))
    if chunk_hdr_sz != 12:
      raise ValueError("Chunk header size was expected to be 12, but is %u." %
                       (chunk_hdr_sz,))

    print("Total of %u %u-byte output blocks in %u input chunks."
          % (total_blks, blk_sz, total_chunks))

    pos = 0   # in blocks
    care_data = []
    self.offset_map = offset_map = []
    self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)

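    # Each chunk starts with a 12-byte header, little-endian: chunk_type
    # (2 bytes), a reserved field (2, ignored below), chunk_sz (4, in output
    # blocks) and total_sz (4, bytes including this header). The known chunk
    # types are 0xCAC1 (raw), 0xCAC2 (fill), 0xCAC3 (don't care) and
    # 0xCAC4 (CRC32).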
    for _ in range(total_chunks):
      header_bin = f.read(12)
      header = struct.unpack("<2H2I", header_bin)
      chunk_type = header[0]
      chunk_sz = header[2]
      total_sz = header[3]
      data_sz = total_sz - 12

      if chunk_type == 0xCAC1:
        if data_sz != (chunk_sz * blk_sz):
          raise ValueError(
              "Raw chunk input size (%u) does not match output size (%u)" %
              (data_sz, chunk_sz * blk_sz))
        else:
          care_data.append(pos)
          care_data.append(pos + chunk_sz)
          offset_map.append((pos, chunk_sz, f.tell(), None))
          pos += chunk_sz
          f.seek(data_sz, os.SEEK_CUR)

      elif chunk_type == 0xCAC2:
        fill_data = f.read(4)
        care_data.append(pos)
        care_data.append(pos + chunk_sz)
        offset_map.append((pos, chunk_sz, None, fill_data))
        pos += chunk_sz

      elif chunk_type == 0xCAC3:
        if data_sz != 0:
          raise ValueError("Don't care chunk input size is non-zero (%u)" %
                           (data_sz,))
        else:
          pos += chunk_sz

      elif chunk_type == 0xCAC4:
        raise ValueError("CRC32 chunks are not supported")

      else:
        raise ValueError("Unknown chunk type 0x%04X not supported" %
                         (chunk_type,))

    self.care_map = rangelib.RangeSet(care_data)
    self.offset_index = [i[0] for i in offset_map]

    # Bug: 20881595
    # Introduce extended blocks as a workaround for the bug. dm-verity may
    # touch blocks that are not in the care_map due to block device
    # read-ahead. It will fail if such blocks contain non-zeroes. We zero out
    # the extended blocks explicitly to avoid dm-verity failures. 512 blocks
    # are the maximum read-ahead we configure for dm-verity block devices.
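    # For example (hypothetical numbers): with a care_map of "0-99" and 150
    # total blocks, extend(512) grows the set to "0-611", intersecting with
    # all_blocks clips it to "0-149", and subtracting the care_map leaves
    # "100-149" as the extended blocks.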
    extended = self.care_map.extend(512)
    all_blocks = rangelib.RangeSet(data=(0, self.total_blocks))
    extended = extended.intersect(all_blocks).subtract(self.care_map)
    self.extended = extended

    if file_map_fn:
      self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks)
    else:
      self.file_map = {"__DATA": self.care_map}

  def ReadRangeSet(self, ranges):
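    """Return the data in 'ranges' as a list of blobs."""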
    return list(self._GetRangeData(ranges))

  def TotalSha1(self, include_clobbered_blocks=False):
    """Return the SHA-1 hash of all data in the 'care' regions.

    If include_clobbered_blocks is True, the hash also covers the
    clobbered_blocks."""
    ranges = self.care_map
    if not include_clobbered_blocks:
      ranges = ranges.subtract(self.clobbered_blocks)
    h = sha1()
    for d in self._GetRangeData(ranges):
      h.update(d)
    return h.hexdigest()

  def _GetRangeData(self, ranges):
    """Generator that produces all the image data in 'ranges'. The number of
    individual pieces returned is arbitrary (and in particular is not
    necessarily equal to the number of ranges in 'ranges').

    This generator is stateful -- it depends on the open file object
    contained in this SparseImage, so you should not try to run two
    instances of this generator on the same object simultaneously."""

    f = self.simg_f
    for s, e in ranges:
      to_read = e - s
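      # self.offset_index holds the sorted start block of every chunk, so
      # bisect_right(...) - 1 picks the last chunk starting at or before s,
      # i.e. the chunk that contains block s.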
      idx = bisect.bisect_right(self.offset_index, s) - 1
      chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]

      # For the first chunk we may be starting partway through it.
      remain = chunk_len - (s - chunk_start)
      this_read = min(remain, to_read)
      if filepos is not None:
        p = filepos + ((s - chunk_start) * self.blocksize)
        f.seek(p, os.SEEK_SET)
        yield f.read(this_read * self.blocksize)
      else:
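        # A fill chunk stores a single 4-byte pattern; repeating it
        # blocksize >> 2 times yields one block, so this synthesizes
        # this_read blocks of output without touching the file.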
        yield fill_data * (this_read * (self.blocksize >> 2))
      to_read -= this_read

      while to_read > 0:
        # Continue with following chunks if this range spans multiple chunks.
        idx += 1
        chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
        this_read = min(chunk_len, to_read)
        if filepos is not None:
          f.seek(filepos, os.SEEK_SET)
          yield f.read(this_read * self.blocksize)
        else:
          yield fill_data * (this_read * (self.blocksize >> 2))
        to_read -= this_read

  def LoadFileBlockMap(self, fn, clobbered_blocks):
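    """Load the file-to-block map from 'fn'.

    Each line of the map names one file and its blocks: "<name> <ranges>",
    where <ranges> uses the same text form as clobbered_blocks (e.g.
    "0 1-5 8"). Care-map blocks not claimed by any file are classified
    below as __ZERO, __NONZERO or __COPY."""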
    remaining = self.care_map
    self.file_map = out = {}

    with open(fn) as f:
      for line in f:
        fn, ranges = line.split(None, 1)
        ranges = rangelib.RangeSet.parse(ranges)
        out[fn] = ranges
        assert ranges.size() == ranges.intersect(remaining).size()

        # Currently we assume that blocks in clobbered_blocks are not part of
        # any file.
        assert not clobbered_blocks.overlaps(ranges)
        remaining = remaining.subtract(ranges)

    remaining = remaining.subtract(clobbered_blocks)

    # For all the remaining blocks in the care_map (i.e. those that aren't
    # part of the data for any file nor part of the clobbered_blocks), divide
    # them into blocks that are all zero and blocks that aren't. (Zero blocks
    # are handled specially because (1) there are usually a lot of them and
    # (2) bsdiff handles files with long sequences of repeated bytes
    # especially poorly.)

    zero_blocks = []
    nonzero_blocks = []
    reference = b'\0' * self.blocksize

    f = self.simg_f
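    # Classify each remaining block one at a time: raw blocks are read from
    # the image and compared against a block of zeros; a fill block is all
    # zero exactly when its 4-byte fill pattern is zero.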
    for s, e in remaining:
      for b in range(s, e):
        idx = bisect.bisect_right(self.offset_index, b) - 1
        chunk_start, _, filepos, fill_data = self.offset_map[idx]
        if filepos is not None:
          filepos += (b - chunk_start) * self.blocksize
          f.seek(filepos, os.SEEK_SET)
          data = f.read(self.blocksize)
        else:
          if fill_data == reference[:4]:   # fill with all zeros
            data = reference
          else:
            data = None

        if data == reference:
          zero_blocks.append(b)
          zero_blocks.append(b+1)
        else:
          nonzero_blocks.append(b)
          nonzero_blocks.append(b+1)

    assert zero_blocks or nonzero_blocks or clobbered_blocks

    if zero_blocks:
      out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
    if nonzero_blocks:
      out["__NONZERO"] = rangelib.RangeSet(data=nonzero_blocks)
    if clobbered_blocks:
      out["__COPY"] = clobbered_blocks

  def ResetFileMap(self):
    """Throw away the file map and treat the entire image as
    undifferentiated data."""
    self.file_map = {"__DATA": self.care_map}
248