# common.py revision cb219828ff70adcbf867968ea83ad42d03efa958
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import errno
17import getopt
18import getpass
19import imp
20import os
21import platform
22import re
23import shlex
24import shutil
25import subprocess
26import sys
27import tempfile
28import threading
29import time
30import zipfile
31
32import blockimgdiff
33import rangelib
34
35from hashlib import sha1 as sha1
36
37
class Options(object):
  """Global configuration shared by all the releasetools scripts.

  A single module-level instance (OPTIONS, below) is created at import
  time; ParseOptions() and the individual scripts overwrite its fields
  from command-line flags.
  """

  def __init__(self):
    # Default host-tools output directory for the running platform, or
    # None when the platform isn't recognized.
    host_out_dirs = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }
    self.search_path = host_out_dirs.get(sys.platform)

    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = "-Xmx2048m"  # JVM args
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []
    self.verbose = False
    # Temp files/dirs registered for deletion by Cleanup().
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
68
69
# The single shared Options instance; scripts mutate its fields via
# ParseOptions() and direct assignment.
OPTIONS = Options()


# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")


# Raised when an external tool (unzip, mkbootimg, signapk, zipalign, ...)
# exits with a non-zero status.
class ExternalError(RuntimeError):
  pass
79
80
def Run(args, **kwargs):
  """Create and return a subprocess.Popen object, printing the command
  line on the terminal if -v was specified.

  'args' is the argv list; 'kwargs' are passed straight through to
  subprocess.Popen (stdin/stdout/stderr, etc.).  The caller is
  responsible for wait()ing on or communicate()ing with the returned
  process."""
  if OPTIONS.verbose:
    print "  running: ", " ".join(args)
  return subprocess.Popen(args, **kwargs)
87
88
def CloseInheritedPipes():
  """Close pipe file descriptors leaked to us by gmake on Mac OS.

  Gmake on Mac OS has a file descriptor (PIPE) leak; scan fds 3..1024
  and close any that are pipes.  No-op on other platforms."""
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      st = os.fstat(fd)
      # Bit 0x1000 of st_mode (S_IFIFO) marks a pipe/FIFO.
      if st is not None and st[0] & 0x1000:
        os.close(fd)
    except OSError:
      # fd not open (or already closed) -- ignore.
      pass
103
104
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  Args:
    input_file: an open zipfile.ZipFile of a target-files zip, or the
        path of an unzipped target-files directory.
    input_dir: if not None, the path of the unzipped target-files
        directory we are repacking from; link-style properties are
        redirected to the actual files under it.

  Returns:
    A dict of the misc_info.txt values plus computed entries
    ("fstab", "build.prop", size ints, ...).

  Raises:
    ValueError: if the recovery API version cannot be found.
  """

  def read_helper(fn):
    # Read 'fn' either from the zip or from the extracted tree.
    # Raises KeyError when the file is absent, mirroring ZipFile.read().
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)
        # Fix: any other I/O error (e.g. EACCES) is a real failure.
        # The original fell through and implicitly returned None, which
        # would surface later as a confusing AttributeError.
        raise

  d = {}
  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    # ok if misc_info.txt doesn't exist
    pass

  # backwards compatibility: These values used to be in their own
  # files.  Look for them, in case we're processing an old
  # target_files zip.

  if "mkyaffs2_extra_flags" not in d:
    try:
      d["mkyaffs2_extra_flags"] = read_helper(
          "META/mkyaffs2-extra-flags.txt").strip()
    except KeyError:
      # ok if flags don't exist
      pass

  if "recovery_api_version" not in d:
    try:
      d["recovery_api_version"] = read_helper(
          "META/recovery-api-version.txt").strip()
    except KeyError:
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    try:
      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
    except KeyError:
      # ok if extensions don't exist
      pass

  if "fstab_version" not in d:
    d["fstab_version"] = "1"

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts under META/. If not available, search
    # BOOT/RAMDISK/. Note that sometimes we may need a different file_contexts
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_config = os.path.join(input_dir, "META", "file_contexts")
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", "file_contexts")
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

  # Legacy location for image/partition sizes ("<name> <value>" lines).
  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    pass

  def makeint(key):
    # Convert d[key] to int in place, if present (base auto-detected).
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                 d.get("system_root_image", False))
  d["build.prop"] = LoadBuildProp(read_helper)
  return d
217
218def LoadBuildProp(read_helper):
219  try:
220    data = read_helper("SYSTEM/build.prop")
221  except KeyError:
222    print "Warning: could not find SYSTEM/build.prop in %s" % zip
223    data = ""
224  return LoadDictionaryFromLines(data.split("\n"))
225
def LoadDictionaryFromLines(lines):
  """Parse an iterable of "key=value" lines into a dict.

  Blank lines, '#' comments, and lines without '=' are skipped.  Only
  the first '=' splits, so values may themselves contain '='."""
  result = {}
  for raw in lines:
    stripped = raw.strip()
    if not stripped or stripped.startswith("#"):
      continue
    if "=" not in stripped:
      continue
    key, _, value = stripped.partition("=")
    result[key] = value
  return result
236
237def LoadRecoveryFSTab(read_helper, fstab_version, system_root_image=False):
238  class Partition(object):
239    def __init__(self, mount_point, fs_type, device, length, device2, context):
240      self.mount_point = mount_point
241      self.fs_type = fs_type
242      self.device = device
243      self.length = length
244      self.device2 = device2
245      self.context = context
246
247  try:
248    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
249  except KeyError:
250    print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab"
251    data = ""
252
253  if fstab_version == 1:
254    d = {}
255    for line in data.split("\n"):
256      line = line.strip()
257      if not line or line.startswith("#"):
258        continue
259      pieces = line.split()
260      if not 3 <= len(pieces) <= 4:
261        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
262      options = None
263      if len(pieces) >= 4:
264        if pieces[3].startswith("/"):
265          device2 = pieces[3]
266          if len(pieces) >= 5:
267            options = pieces[4]
268        else:
269          device2 = None
270          options = pieces[3]
271      else:
272        device2 = None
273
274      mount_point = pieces[0]
275      length = 0
276      if options:
277        options = options.split(",")
278        for i in options:
279          if i.startswith("length="):
280            length = int(i[7:])
281          else:
282            print "%s: unknown option \"%s\"" % (mount_point, i)
283
284      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
285                                 device=pieces[2], length=length,
286                                 device2=device2)
287
288  elif fstab_version == 2:
289    d = {}
290    for line in data.split("\n"):
291      line = line.strip()
292      if not line or line.startswith("#"):
293        continue
294      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
295      pieces = line.split()
296      if len(pieces) != 5:
297        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
298
299      # Ignore entries that are managed by vold
300      options = pieces[4]
301      if "voldmanaged=" in options:
302        continue
303
304      # It's a good line, parse it
305      length = 0
306      options = options.split(",")
307      for i in options:
308        if i.startswith("length="):
309          length = int(i[7:])
310        else:
311          # Ignore all unknown options in the unified fstab
312          continue
313
314      mount_flags = pieces[3]
315      # Honor the SELinux context if present.
316      context = None
317      for i in mount_flags.split(","):
318        if i.startswith("context="):
319          context = i
320
321      mount_point = pieces[1]
322      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
323                                 device=pieces[0], length=length,
324                                 device2=None, context=context)
325
326  else:
327    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
328
329  # / is used for the system mount point when the root directory is included in
330  # system. Other areas assume system is always at "/system" so point /system
331  # at /.
332  if system_root_image:
333    assert not d.has_key("/system") and d.has_key("/")
334    d["/system"] = d["/"]
335  return d
336
337
def DumpInfoDict(d):
  """Print the info dict for debugging: one "key = (type) value" line
  per entry, sorted by key."""
  for k, v in sorted(d.items()):
    print "%-25s = (%s) %s" % (k, type(v).__name__, v)
341
342
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image.  Return the image data, or
  None if sourcedir does not appear to contains files for building the
  requested image.

  Args:
    sourcedir: directory with "kernel" plus optional "second", "cmdline",
        "base", "pagesize" files and a "RAMDISK" subdirectory.
    fs_config_file: fs_config file passed to mkbootfs with -f, if it exists.
    info_dict: build properties; defaults to OPTIONS.info_dict.
    has_ramdisk: whether to build and embed a ramdisk image.
  """

  def make_ramdisk():
    # Pack sourcedir/RAMDISK with mkbootfs, compressing through minigzip
    # into a NamedTemporaryFile that the caller must close.
    ramdisk_img = tempfile.NamedTemporaryFile()

    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
    else:
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

    p2.wait()
    p1.wait()
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

    return ramdisk_img

  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    return None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_img = make_ramdisk()

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  # Each of these optional files, when present, adds a flag + value.
  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  # With vboot, mkbootimg writes an unsigned temp image; the signer
  # produces the final image in 'img'.
  img_unsigned = None
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
  else:
    cmd.extend(["--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
           img_keyblock.name,
           img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # Fix: the original called img.seek(os.SEEK_SET, 0), i.e. with the
  # offset and whence arguments swapped; it only worked by accident
  # because os.SEEK_SET happens to be 0.  Rewind to the start properly.
  img.seek(0, os.SEEK_SET)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data
466
467
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'.

  Returns None when the image cannot be found or built."""

  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  print "building image from target_files %s..." % (tree_subdir,)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot image.
  # (Only boot.img is affected; e.g. recovery.img keeps its ramdisk.)
  has_ramdisk = (info_dict.get("system_root_image", None) != "true" or
                 prebuilt_name != "boot.img")

  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk)
  if data:
    return File(name, data)
  return None
502
503
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """

  # The directory is registered for deletion by Cleanup().
  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(zip_path, dest_dir):
    # Shell out to unzip; -o overwrites, -q keeps it quiet.
    cmd = ["unzip", "-o", "-q", zip_path, "-d", dest_dir]
    if pattern is not None:
      cmd.append(pattern)
    proc = Run(cmd, stdout=subprocess.PIPE)
    proc.communicate()
    if proc.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (zip_path,))

  pair = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if pair:
    unzip_to_dir(pair.group(1), tmp)
    unzip_to_dir(pair.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    # The returned ZipFile is of the first (main) archive.
    filename = pair.group(1)
  else:
    unzip_to_dir(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")
536
537
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password.

  Classification is done by probing each .pk8 with openssl: first
  without a password, then with an empty password, and finally by
  inspecting openssl's stderr."""

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    # Probe 1: does the key parse with no decryption at all?
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Probe 2: does it decrypt with the empty password?
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords
585
586
def SignFile(input_name, output_name, key, password, align=None,
             whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password.

  If align is an integer > 1, zipalign is run to align stored files in
  the output zip on 'align'-byte boundaries.

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.
  """

  # Alignments of 0 or 1 mean "no alignment".
  if align in (0, 1):
    align = None

  # When aligning, sign into a temp file first; zipalign produces the
  # final output.
  if align:
    temp = tempfile.NamedTemporaryFile()
    sign_name = temp.name
  else:
    temp = None
    sign_name = output_name

  cmd = [OPTIONS.java_path, OPTIONS.java_args, "-jar",
         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
  cmd.extend(OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")
  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, sign_name])

  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  # Feed the password (newline-terminated) on stdin, if there is one.
  p.communicate(password if password is None else password + "\n")
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))

  if align:
    p = Run(["zipalign", "-f", "-p", str(align), sign_name, output_name])
    p.communicate()
    if p.returncode != 0:
      raise ExternalError("zipalign failed: return code %s" % (p.returncode,))
    temp.close()
632
633
def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target.  Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size.

  The limit comes from info_dict["<device>_size"], where <device> is
  derived from the fstab entry for the target's mount point.  Silently
  returns when no fstab, fs_type, or limit is available."""

  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    # The userdata image mounts at /data.
    if mount_point == "/userdata":
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    if "/" in device:
      # Use just the final path component as the size-property key.
      device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:
    return

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page)
    limit = limit / 2048 * (2048+64)
  size = len(data)
  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  # >= 99% is an error; >= 95% warns; otherwise only report with -v.
  if pct >= 99.0:
    raise ExternalError(msg)
  elif pct >= 95.0:
    print
    print "  WARNING: ", msg
    print
  elif OPTIONS.verbose:
    print "  ", msg
672
673
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict.

  Certs named by SPECIAL_CERT_STRINGS are kept verbatim; otherwise the
  common key-name stem (without the .x509.pem / .pk8 suffixes) is
  stored."""
  line_re = re.compile(r'^name="(.*)"\s+certificate="(.*)"\s+'
                       r'private_key="(.*)"$')
  certmap = {}
  for raw in tf_zip.read("META/apkcerts.txt").split("\n"):
    raw = raw.strip()
    if not raw:
      continue
    m = line_re.match(raw)
    if not m:
      # Lines that don't match the expected shape are skipped.
      continue
    name, cert, privkey = m.groups()
    pub_len = len(OPTIONS.public_key_suffix)
    priv_len = len(OPTIONS.private_key_suffix)
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      certmap[name] = cert
    elif (cert.endswith(OPTIONS.public_key_suffix) and
          privkey.endswith(OPTIONS.private_key_suffix) and
          cert[:-pub_len] == privkey[:-priv_len]):
      certmap[name] = cert[:-pub_len]
    else:
      raise ValueError("failed to parse line from apkcerts.txt:\n" + raw)
  return certmap
697
698
# Usage text for the flags every releasetools script accepts; Usage()
# appends this to the calling script's own docstring.
COMMON_DOCSTRING = """
  -p  (--path)  <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the python module containing device-specific
      releasetools code.

  -x  (--extra)  <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""

def Usage(docstring):
  """Print the calling script's docstring followed by the common flag
  descriptions above."""
  print docstring.rstrip("\n")
  print COMMON_DOCSTRING
722
723
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler.

  Recognized flags are stored on the module-level OPTIONS object.
  Exits the process on parse errors or -h/--help."""

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print "**", str(err), "**"
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      # Unknown flags go to the caller's handler, if any.
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  # Make tools under the search path visible to subprocesses.
  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
790
791
def MakeTempFile(prefix=None, suffix=None):
  """Create an empty temp file, register it for deletion by Cleanup(),
  and return its path."""
  handle, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # We only want the name; close the fd so it isn't leaked.
  os.close(handle)
  OPTIONS.tempfiles.append(path)
  return path
799
800
def Cleanup():
  """Delete every temp file/dir registered in OPTIONS.tempfiles.

  Fix: also clears the list afterwards, so calling Cleanup() a second
  time is a harmless no-op instead of crashing on already-deleted
  paths."""
  for i in OPTIONS.tempfiles:
    if os.path.isdir(i):
      shutil.rmtree(i)
    else:
      os.remove(i)
  del OPTIONS.tempfiles[:]
807
808
class PasswordManager(object):
  """Collects key passwords, preferring an $EDITOR round-trip on the
  file named by $ANDROID_PW_FILE over interactive prompting."""

  def __init__(self):
    # Both may be None; UpdateAndReadFile falls back to prompting then.
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      # Seed the missing keys with empty values so they show up in the
      # editor template.
      for i in missing:
        current[i] = ""

      if not first:
        print "key file %s still missing some passwords." % (self.pwfile,)
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is falsy.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.iteritems()):
      if v:
        result[k] = v
      else:
        # Loop until a non-empty password is entered.
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    """Write 'current' to the password file, open it in the user's
    editor, then re-read the (possibly edited) result."""
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    # Passwords on disk: owner read/write only.
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    # Sort so that keys still lacking a password come first.
    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  def ReadFile(self):
    """Parse the password file into a {key: password} dict.  Missing
    file is treated as empty; other read errors are reported but not
    raised."""
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          print "failed to parse password file: ", line
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      if e.errno != errno.ENOENT:
        print "error reading password file: ", str(e)
    return result
909
910
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Add 'filename' to 'zip_file' with a fixed mode and timestamp.

  The file's on-disk mode/mtime are temporarily rewritten (zipfile.write()
  offers no way to pass a ZipInfo) and restored afterwards, so the output
  archive is repeatable."""
  import datetime

  # http://b/18015246
  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB. We can work around this by adjusting their
  # limit. Note that `zipfile.writestr()` will not work for strings larger than
  # 2GiB. The Python interpreter sometimes rejects strings that large (though
  # it isn't clear to me exactly what circumstances cause this).
  # `zipfile.write()` must be used directly to work around this.
  #
  # This mess can be avoided if we port to python3.
  limit_backup = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression
  if arcname is None:
    arcname = filename

  original_stat = os.stat(filename)

  try:
    # Temporarily give the file the requested archive permissions.
    os.chmod(filename, perms)

    # Use a fixed timestamp (2009-01-01) so the output is repeatable.
    epoch = datetime.datetime.fromtimestamp(0)
    fixed_mtime = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
    os.utime(filename, (fixed_mtime, fixed_mtime))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    # Restore the real mode/times and the zip64 limit no matter what.
    os.chmod(filename, original_stat.st_mode)
    os.utime(filename, (original_stat.st_atime, original_stat.st_mtime))
    zipfile.ZIP64_LIMIT = limit_backup
949
950
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.

  Args:
    zip_file: the open zipfile.ZipFile to add to.
    zinfo_or_arcname: either a zipfile.ZipInfo or an archive name string.
    data: the contents to store.
    perms: Unix permission bits; defaults to 0o644 when an arcname string
      is given, otherwise the ZipInfo's existing attributes are kept.
    compress_type: overrides the ZipInfo's compression when given.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # try/finally matches ZipWrite()/ZipClose(): the module-global limit must
  # be restored even if writestr() raises.
  try:
    if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
      zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
      zinfo.compress_type = zip_file.compression
      if perms is None:
        perms = 0o644
    else:
      zinfo = zinfo_or_arcname

    # If compress_type is given, it overrides the value in zinfo.
    if compress_type is not None:
      zinfo.compress_type = compress_type

    # If perms is given, it has a priority.
    if perms is not None:
      zinfo.external_attr = perms << 16

    # Use a fixed timestamp so the output is repeatable.
    zinfo.date_time = (2009, 1, 1, 0, 0, 0)

    zip_file.writestr(zinfo, data)
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
988
989
def ZipClose(zip_file):
  """Close 'zip_file' with the zip64 limit raised.

  http://b/18015246: zipfile consults ZIP64_LIMIT while writing out the
  central directory during close().  The original limit is restored in a
  finally block so a raising close() cannot leave the module global
  corrupted.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  try:
    zip_file.close()
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1000
1001
class DeviceSpecificParams(object):
  """Bundle of parameters handed to every device-specific hook.

  Keyword arguments to the constructor become attributes of this object,
  which is passed as the first argument to all functions in the
  device-specific module.
  """
  module = None

  def __init__(self, **kwargs):
    """Store kwargs as attributes and lazily load the device-specific
    module named by OPTIONS.device_specific (a directory containing a
    'releasetools' module, or a path to a .py file)."""
    for key, value in kwargs.items():
      setattr(self, key, value)
    self.extras = OPTIONS.extras

    if self.module is not None:
      return
    path = OPTIONS.device_specific
    if not path:
      return
    try:
      if os.path.isdir(path):
        info = imp.find_module("releasetools", [path])
      else:
        dirname, filename = os.path.split(path)
        root, ext = os.path.splitext(filename)
        if ext == ".py":
          filename = root
        info = imp.find_module(filename, [dirname])
      print("loaded device-specific extensions from " + path)
      self.module = imp.load_module("device_specific", *info)
    except ImportError:
      print("unable to load device-specific module; assuming none")

  def _DoCall(self, function_name, *args, **kwargs):
    """Invoke 'function_name' in the device-specific module, prepending
    this object to args.  When the module is absent or lacks the
    function, return kwargs['default'] (itself defaulting to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    hook = getattr(self.module, function_name)
    return hook(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Hook: runs after the assertion block at the top of a full OTA
    package; implementations may emit additional assertions."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Hook: runs at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Hook: runs at the end of full OTA installation; typically installs
    the baseband processor image."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Hook: runs after the assertion block at the top of an incremental
    OTA package; implementations may emit additional assertions."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Hook: runs at the start of the incremental OTA verification phase;
    extra checks placed here can abort before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Hook: runs at the end of the incremental OTA verification phase;
    extra checks placed here can abort before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Hook: runs at the start of incremental OTA installation (after
    verification completes)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Hook: runs at the end of incremental OTA installation; typically
    installs the baseband processor image."""
    return self._DoCall("IncrementalOTA_InstallEnd")
1083
class File(object):
  """An in-memory file: a name plus its contents, with size and sha1
  computed eagerly on construction."""

  def __init__(self, name, data):
    self.name = name      # archive/logical name
    self.data = data      # raw contents
    self.size = len(data)
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Build a File named 'name' from the contents of 'diskname'.

    Uses 'with' so the handle is closed even if read() fails, and 'cls'
    (not a hard-coded File) so subclasses construct their own type.
    """
    with open(diskname, "rb") as f:
      data = f.read()
    return cls(name, data)

  def WriteToTemp(self):
    """Write the data to a NamedTemporaryFile and return it; the caller
    is responsible for closing (and thereby deleting) it."""
    t = tempfile.NamedTemporaryFile()
    t.write(self.data)
    t.flush()
    return t

  def AddToZip(self, z, compression=None):
    """Add this file to zip 'z', optionally forcing a compression type."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1106
# Map from a target file's extension to the external diff tool invocation
# used by Difference.ComputePatch(); extensions not listed here fall back
# to "bsdiff".
DIFF_PROGRAM_BY_EXT = {
    ".gz" : "imgdiff",
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    ".img" : "imgdiff",
    }
1114
class Difference(object):
  """Computes the binary patch that turns one File (sf) into another (tf),
  running imgdiff or bsdiff as an external subprocess with a timeout."""

  def __init__(self, tf, sf, diff_program=None):
    # tf/sf are File objects (target and source).  diff_program, when
    # given, overrides the tool otherwise chosen from DIFF_PROGRAM_BY_EXT;
    # it may be a string or an argv-style list.
    self.tf = tf
    self.sf = sf
    self.patch = None
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      # Pick the tool by the target's extension; default to bsdiff.
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    # NOTE(review): this 'ext' is recomputed here but never read below.
    ext = os.path.splitext(tf.name)[1]

    try:
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      # Invocation convention: <diff_program> <source> <target> <patch>.
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []
      def run():
        # Drain the pipes on a helper thread so the wait can be bounded.
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        # Escalate: terminate, wait 5s, then kill if still running.
        print "WARNING: diff command timed out"
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      if err or p.returncode != 0:
        print "WARNING: failure running %s:\n%s\n" % (
            diff_program, "".join(err))
        self.patch = None
        return None, None, None
      # The external tool wrote to ptemp's path; read the result back
      # through our still-open handle (position is still 0).
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch


  def GetPatch(self):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
1186
1187
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'.

  Runs OPTIONS.worker_threads workers in parallel, handing out the diffs
  largest-first.  Results are stored on the Difference objects themselves.
  """
  print len(diffs), "diffs to compute"

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    # Lock protocol: the shared iterator (and stdout printing) is only
    # touched while holding 'lock'; the lock is deliberately released
    # around the slow ComputePatch() call and re-acquired afterwards, so
    # the loop body runs mostly in parallel across workers.
    try:
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print "patching failed!                                  %s" % (name,)
        else:
          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception as e:
      print e
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
1232
1233
class BlockDifference(object):
  """Computes a block-based image diff for one partition and emits the
  edify script fragments that verify, apply, and re-verify the update."""

  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None):
    # tgt/src are image objects accepted by blockimgdiff.BlockImageDiff;
    # src of None means a full (non-incremental) update.
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block

    # Due to http://b/20939131, check_first_block is disabled temporarily.
    assert not self.check_first_block

    # Default to the highest transfer-list version listed in the info
    # dict's "blockimgdiff_versions" (comma-separated), else version 1.
    if version is None:
      version = 1
      if OPTIONS.info_dict:
        version = max(
            int(i) for i in
            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    # Compute the diff now; output files land under a temp dir that is
    # registered in OPTIONS.tempfiles for later cleanup.
    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version)
    tmpdir = tempfile.mkdtemp()
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)

    # For incrementals, the device path is looked up in the source build's
    # info dict; for full updates, in the target's.
    if src is None:
      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
    else:
      _, self.device = GetTypeAndDevice("/" + partition,
                                        OPTIONS.source_info_dict)

  def WriteScript(self, script, output_zip, progress=None):
    """Emit the update commands and post-install verification for this
    partition into 'script', adding the data files to 'output_zip'."""
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    self._WritePostInstallVerifyScript(script)

  def WriteVerifyScript(self, script):
    """Emit the pre-install check that the source partition has the
    expected contents.  No-op (a message only) for full updates."""
    partition = self.partition
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))
    else:
      # clobbered_blocks are excluded from the hash: their on-device
      # contents are not required to match the source image.
      ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
      ranges_str = ranges.to_string_raw()
      if self.version >= 3:
        # v3+ also accepts a partially-applied update, detected via
        # block_image_verify against the transfer list.
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, self.src.TotalSha1(),
                            self.device, partition, partition, partition))
      else:
        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                           self.device, ranges_str, self.src.TotalSha1()))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      # When generating incrementals for the system and vendor partitions,
      # explicitly check the first block (which contains the superblock) of
      # the partition to see if it's what we expect. If this check fails,
      # give an explicit log message about the partition having been
      # remounted R/W (the most likely explanation) and the need to flash to
      # get OTAs working again.
      if self.check_first_block:
        self._CheckFirstBlock(script)

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      script.AppendExtra(('abort("%s partition has unexpected contents");\n'
                          'endif;') % (partition,))

  def _WritePostInstallVerifyScript(self, script):
    """Emit the post-install check that the updated partition hashes to
    the target value, and (bug 20881595) that extended blocks are zero."""
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                         self.device, ranges_str,
                         self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      script.AppendExtra(
          'else\n'
          '  abort("%s partition has unexpected non-zero contents after OTA '
          'update");\n'
          'endif;' % (partition,))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    script.AppendExtra(
        'else\n'
        '  abort("%s partition has unexpected contents after OTA update");\n'
        'endif;' % (partition,))

  def _WriteUpdate(self, script, output_zip):
    """Add the transfer list, new-data, and patch-data files for this
    partition to 'output_zip' and emit the block_image_update call."""
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    # .patch.dat is added with ZIP_STORED (no zip-level compression) --
    # presumably because its contents are already compressed; verify.
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
                device=self.device, partition=self.partition))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    """Return the sha1 hex digest of 'ranges' read from 'source'."""
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    # Assumes 4096-byte blocks.
    zero_block = '\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()

  # TODO(tbao): Due to http://b/20939131, block 0 may be changed without
  # remounting R/W. Will change the checking to a finer-grained way to
  # mask off those bits.
  def _CheckFirstBlock(self, script):
    """Emit a check that block 0 of the source partition still matches,
    aborting with a "remounted R/W" message if it does not."""
    r = rangelib.RangeSet((0, 1))
    srchash = self._HashBlocks(self.src, r)

    script.AppendExtra(('(range_sha1("%s", "%s") == "%s") || '
                        'abort("%s has been remounted R/W; '
                        'reflash device to reenable OTA updates");')
                       % (self.device, r.to_string_raw(), srchash,
                          self.device))
1394
# Re-export so callers can refer to common.DataImage.
DataImage = blockimgdiff.DataImage
1396
1397
# Map recovery.fstab's fs_types to mount/format "partition types"
# (consumed by GetTypeAndDevice below).
PARTITION_TYPES = {
    "yaffs2": "MTD",
    "mtd": "MTD",
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}
1407
def GetTypeAndDevice(mount_point, info):
  """Return (partition_type, device) for 'mount_point' from info's fstab.

  Args:
    mount_point: the mount point to look up, e.g. "/system".
    info: an info dict containing a parsed "fstab" entry.

  Raises:
    KeyError: if the fstab is missing/empty, the mount point is not in
      it, or its fs_type has no PARTITION_TYPES entry.  (Callers rely on
      catching KeyError, so only the message is improved here.)
  """
  fstab = info["fstab"]
  if not fstab:
    raise KeyError("fstab missing or empty; cannot look up " + mount_point)
  return (PARTITION_TYPES[fstab[mount_point].fs_type],
          fstab[mount_point].device)
1415
1416
def ParseCertificate(data):
  """Parse a PEM-format certificate and return its decoded (DER) contents.

  Collects the base64 body between the BEGIN CERTIFICATE and END
  CERTIFICATE markers and decodes it.  Text outside the markers is
  ignored; only the first certificate in 'data' is parsed.
  """
  import base64

  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  # base64.b64decode replaces the Python-2-only str.decode('base64')
  # codec, which does not exist in Python 3; the joined payload contains
  # no newlines, so the two are equivalent here.
  return base64.b64decode("".join(cert))
1430
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.  (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.)  Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
  system_root_image = info_dict.get("system_root_image", None) == "true"

  if full_recovery_image:
    # Ship the whole recovery image rather than a patch from boot.
    output_sink("etc/recovery.img", recovery_img.data)

  else:
    # Ship an imgdiff patch from boot -> recovery; recovery-resource.dat,
    # when present, is passed to imgdiff as a bonus file (-b) and the
    # matching flag is forwarded to applypatch on the device.
    diff_program = ["imgdiff"]
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"
    else:
      bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    return

  if full_recovery_image:
    # The on-device script installs recovery.img directly if its checksum
    # doesn't already match the recovery partition.
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    # The on-device script applies the boot->recovery patch if the
    # recovery partition's checksum doesn't already match the target.
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release.  Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  found = False
  if system_root_image:
    init_rc_dir = os.path.join(input_dir, "ROOT")
  else:
    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):
      continue
    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
      for line in f:
        # The flash_recovery service line names the script's location
        # relative to /system/.
        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
        if m:
          sh_location = m.group(1)
          found = True
          break
    if found:
      break
  print "putting script in", sh_location

  output_sink(sh_location, sh)
1530