common.py revision b3b8ce624180a6857d776d418952767357b35ef9
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import errno
17import getopt
18import getpass
19import imp
20import os
21import platform
22import re
23import shlex
24import shutil
25import subprocess
26import sys
27import tempfile
28import threading
29import time
30import zipfile
31
32import blockimgdiff
33import rangelib
34
35from hashlib import sha1 as sha1
36
37
38class Options(object):
39  def __init__(self):
40    platform_search_path = {
41        "linux2": "out/host/linux-x86",
42        "darwin": "out/host/darwin-x86",
43    }
44
45    self.search_path = platform_search_path.get(sys.platform, None)
46    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
47    self.extra_signapk_args = []
48    self.java_path = "java"  # Use the one on the path by default.
49    self.java_args = "-Xmx2048m" # JVM Args
50    self.public_key_suffix = ".x509.pem"
51    self.private_key_suffix = ".pk8"
52    # Use the boot_signer built by otatools by default.
53    self.boot_signer_path = "boot_signer"
54    self.boot_signer_args = []
55    self.verity_signer_path = None
56    self.verity_signer_args = []
57    self.verbose = False
58    self.tempfiles = []
59    self.device_specific = None
60    self.extras = {}
61    self.info_dict = None
62    self.source_info_dict = None
63    self.target_info_dict = None
64    self.worker_threads = None
65    # Stash size cannot exceed cache_size * threshold.
66    self.cache_size = None
67    self.stash_threshold = 0.8
68
69
70OPTIONS = Options()
71
72
73# Values for "certificate" in apkcerts that mean special things.
74SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
75
76
77class ExternalError(RuntimeError):
78  pass
79
80
81def Run(args, **kwargs):
82  """Create and return a subprocess.Popen object, printing the command
83  line on the terminal if -v was specified."""
84  if OPTIONS.verbose:
85    print "  running: ", " ".join(args)
86  return subprocess.Popen(args, **kwargs)
87
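# A minimal usage sketch (not from the original module): Run() is just
# subprocess.Popen plus optional logging, so callers drive it exactly like
# Popen.  The command and archive name below are hypothetical.
def _example_run_usage():
  p = Run(["unzip", "-l", "target-files.zip"],  # hypothetical archive name
          stdout=subprocess.PIPE)
  stdout, _ = p.communicate()
  assert p.returncode == 0, "unzip failed"
  return stdout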
88
89def CloseInheritedPipes():
90  """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
91  before doing other work."""
92  if platform.system() != "Darwin":
93    return
94  for d in range(3, 1025):
95    try:
96      stat = os.fstat(d)
97      if stat is not None:
98        pipebit = stat[0] & 0x1000
99        if pipebit != 0:
100          os.close(d)
101    except OSError:
102      pass
103
104
105def LoadInfoDict(input_file):
106  """Read and parse the META/misc_info.txt key/value pairs from the
107  input target files and return a dict."""
108
109  def read_helper(fn):
110    if isinstance(input_file, zipfile.ZipFile):
111      return input_file.read(fn)
112    else:
113      path = os.path.join(input_file, *fn.split("/"))
114      try:
115        with open(path) as f:
116          return f.read()
117      except IOError as e:
118        if e.errno == errno.ENOENT:
119          raise KeyError(fn)
120  d = {}
121  try:
122    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
123  except KeyError:
124    # ok if misc_info.txt doesn't exist
125    pass
126
127  # backwards compatibility: These values used to be in their own
128  # files.  Look for them, in case we're processing an old
129  # target_files zip.
130
131  if "mkyaffs2_extra_flags" not in d:
132    try:
133      d["mkyaffs2_extra_flags"] = read_helper(
134          "META/mkyaffs2-extra-flags.txt").strip()
135    except KeyError:
136      # ok if flags don't exist
137      pass
138
139  if "recovery_api_version" not in d:
140    try:
141      d["recovery_api_version"] = read_helper(
142          "META/recovery-api-version.txt").strip()
143    except KeyError:
144      raise ValueError("can't find recovery API version in input target-files")
145
146  if "tool_extensions" not in d:
147    try:
148      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
149    except KeyError:
150      # ok if extensions don't exist
151      pass
152
153  if "fstab_version" not in d:
154    d["fstab_version"] = "1"
155
156  try:
157    data = read_helper("META/imagesizes.txt")
158    for line in data.split("\n"):
159      if not line:
160        continue
161      name, value = line.split(" ", 1)
162      if not value:
163        continue
164      if name == "blocksize":
165        d[name] = value
166      else:
167        d[name + "_size"] = value
168  except KeyError:
169    pass
170
171  def makeint(key):
172    if key in d:
173      d[key] = int(d[key], 0)
174
175  makeint("recovery_api_version")
176  makeint("blocksize")
177  makeint("system_size")
178  makeint("vendor_size")
179  makeint("userdata_size")
180  makeint("cache_size")
181  makeint("recovery_size")
182  makeint("boot_size")
183  makeint("fstab_version")
184
185  d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"], d.get("system_root_image", False))
186  d["build.prop"] = LoadBuildProp(read_helper)
187  return d
188
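# A minimal sketch (not from the original module) of loading misc_info.txt
# via LoadInfoDict(); the target-files argument is hypothetical and may be
# either an extracted directory or a zip archive on disk.
def _example_load_info_dict(target_files):
  if zipfile.is_zipfile(target_files):
    d = LoadInfoDict(zipfile.ZipFile(target_files, "r"))
  else:
    d = LoadInfoDict(target_files)
  # Numeric values such as "blocksize" and "system_size" come back as ints.
  return d["recovery_api_version"], d["fstab"]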
189def LoadBuildProp(read_helper):
190  try:
191    data = read_helper("SYSTEM/build.prop")
192  except KeyError:
193    print "Warning: could not find SYSTEM/build.prop in %s" % zip
194    data = ""
195  return LoadDictionaryFromLines(data.split("\n"))
196
197def LoadDictionaryFromLines(lines):
198  d = {}
199  for line in lines:
200    line = line.strip()
201    if not line or line.startswith("#"):
202      continue
203    if "=" in line:
204      name, value = line.split("=", 1)
205      d[name] = value
206  return d
207
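# A minimal sketch (not from the original module) of LoadDictionaryFromLines()
# on build.prop-style input; the property values are made up.
def _example_load_dictionary():
  lines = [
      "# comments and blank lines are skipped",
      "",
      "ro.build.id=EXAMPLE",
      "ro.product.device=hypothetical",
  ]
  d = LoadDictionaryFromLines(lines)
  assert d["ro.build.id"] == "EXAMPLE"
  return d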
208def LoadRecoveryFSTab(read_helper, fstab_version, system_root_image=False):
209  class Partition(object):
210    def __init__(self, mount_point, fs_type, device, length, device2, context):
211      self.mount_point = mount_point
212      self.fs_type = fs_type
213      self.device = device
214      self.length = length
215      self.device2 = device2
216      self.context = context
217
218  try:
219    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
220  except KeyError:
221    print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab"
222    data = ""
223
224  if fstab_version == 1:
225    d = {}
226    for line in data.split("\n"):
227      line = line.strip()
228      if not line or line.startswith("#"):
229        continue
230      pieces = line.split()
231      if not 3 <= len(pieces) <= 4:
232        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
233      options = None
234      if len(pieces) >= 4:
235        if pieces[3].startswith("/"):
236          device2 = pieces[3]
237          if len(pieces) >= 5:
238            options = pieces[4]
239        else:
240          device2 = None
241          options = pieces[3]
242      else:
243        device2 = None
244
245      mount_point = pieces[0]
246      length = 0
247      if options:
248        options = options.split(",")
249        for i in options:
250          if i.startswith("length="):
251            length = int(i[7:])
252          else:
253            print "%s: unknown option \"%s\"" % (mount_point, i)
254
255      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
256                                 device=pieces[2], length=length,
257                                 device2=device2, context=None)
258
259  elif fstab_version == 2:
260    d = {}
261    for line in data.split("\n"):
262      line = line.strip()
263      if not line or line.startswith("#"):
264        continue
265      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
266      pieces = line.split()
267      if len(pieces) != 5:
268        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
269
270      # Ignore entries that are managed by vold
271      options = pieces[4]
272      if "voldmanaged=" in options:
273        continue
274
275      # It's a good line, parse it
276      length = 0
277      options = options.split(",")
278      for i in options:
279        if i.startswith("length="):
280          length = int(i[7:])
281        else:
282          # Ignore all unknown options in the unified fstab
283          continue
284
285      mount_flags = pieces[3]
286      # Honor the SELinux context if present.
287      context = None
288      for i in mount_flags.split(","):
289        if i.startswith("context="):
290          context = i
291
292      mount_point = pieces[1]
293      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
294                                 device=pieces[0], length=length,
295                                 device2=None, context=context)
296
297  else:
298    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
299
300  # "/" is used as the system mount point when the root directory is
301  # included in system.  Other code assumes "/system", so point it at "/".
302  if system_root_image:
303    assert "/system" not in d and "/" in d
304    d["/system"] = d["/"]
305  return d
306
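# A minimal sketch (not from the original module) of what a version-2
# recovery.fstab line becomes; the block device path is hypothetical.
def _example_parse_fstab_v2():
  def read_helper(fn):
    return "/dev/block/by-name/system  /system  ext4  ro  wait,verify\n"
  d = LoadRecoveryFSTab(read_helper, fstab_version=2)
  p = d["/system"]
  # <src> <mnt_point> <type> <mnt_flags> <fs_mgr_flags> maps onto
  # Partition.device, .mount_point, .fs_type, with length defaulting to 0.
  assert (p.device, p.fs_type, p.length) == (
      "/dev/block/by-name/system", "ext4", 0)
  return d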
307
308def DumpInfoDict(d):
309  for k, v in sorted(d.items()):
310    print "%-25s = (%s) %s" % (k, type(v).__name__, v)
311
312
313def BuildBootableImage(sourcedir, fs_config_file, info_dict=None):
314  """Take a kernel, cmdline, and ramdisk directory from the input (in
315  'sourcedir'), and turn them into a boot image.  Return the image
316  data, or None if sourcedir does not appear to contain files for
317  building the requested image."""
318
319  if (not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK) or
320      not os.access(os.path.join(sourcedir, "kernel"), os.F_OK)):
321    return None
322
323  if info_dict is None:
324    info_dict = OPTIONS.info_dict
325
326  ramdisk_img = tempfile.NamedTemporaryFile()
327  img = tempfile.NamedTemporaryFile()
328
329  if os.access(fs_config_file, os.F_OK):
330    cmd = ["mkbootfs", "-f", fs_config_file, os.path.join(sourcedir, "RAMDISK")]
331  else:
332    cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
333  p1 = Run(cmd, stdout=subprocess.PIPE)
334  p2 = Run(["minigzip"],
335           stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
336
337  p2.wait()
338  p1.wait()
339  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
340  assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)
341
342  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
343  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
344
345  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]
346
347  fn = os.path.join(sourcedir, "second")
348  if os.access(fn, os.F_OK):
349    cmd.append("--second")
350    cmd.append(fn)
351
352  fn = os.path.join(sourcedir, "cmdline")
353  if os.access(fn, os.F_OK):
354    cmd.append("--cmdline")
355    cmd.append(open(fn).read().rstrip("\n"))
356
357  fn = os.path.join(sourcedir, "base")
358  if os.access(fn, os.F_OK):
359    cmd.append("--base")
360    cmd.append(open(fn).read().rstrip("\n"))
361
362  fn = os.path.join(sourcedir, "pagesize")
363  if os.access(fn, os.F_OK):
364    cmd.append("--pagesize")
365    cmd.append(open(fn).read().rstrip("\n"))
366
367  args = info_dict.get("mkbootimg_args", None)
368  if args and args.strip():
369    cmd.extend(shlex.split(args))
370
371  img_unsigned = None
372  if info_dict.get("vboot", None):
373    img_unsigned = tempfile.NamedTemporaryFile()
374    cmd.extend(["--ramdisk", ramdisk_img.name,
375                "--output", img_unsigned.name])
376  else:
377    cmd.extend(["--ramdisk", ramdisk_img.name,
378                "--output", img.name])
379
380  p = Run(cmd, stdout=subprocess.PIPE)
381  p.communicate()
382  assert p.returncode == 0, "mkbootimg of %s image failed" % (
383      os.path.basename(sourcedir),)
384
385  if (info_dict.get("boot_signer", None) == "true" and
386      info_dict.get("verity_key", None)):
387    path = "/" + os.path.basename(sourcedir).lower()
388    cmd = [OPTIONS.boot_signer_path]
389    cmd.extend(OPTIONS.boot_signer_args)
390    cmd.extend([path, img.name,
391                info_dict["verity_key"] + ".pk8",
392                info_dict["verity_key"] + ".x509.pem", img.name])
393    p = Run(cmd, stdout=subprocess.PIPE)
394    p.communicate()
395    assert p.returncode == 0, "boot_signer of %s image failed" % path
396
397  # Sign the image if vboot is non-empty.
398  elif info_dict.get("vboot", None):
399    path = "/" + os.path.basename(sourcedir).lower()
400    img_keyblock = tempfile.NamedTemporaryFile()
401    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
402           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
403           info_dict["vboot_key"] + ".vbprivk",
404           info_dict["vboot_subkey"] + ".vbprivk",
405           img_keyblock.name,
406           img.name]
407    p = Run(cmd, stdout=subprocess.PIPE)
408    p.communicate()
409    assert p.returncode == 0, "vboot_signer of %s image failed" % path
410
411    # Clean up the temp files.
412    img_unsigned.close()
413    img_keyblock.close()
414
415  img.seek(0, os.SEEK_SET)
416  data = img.read()
417
418  ramdisk_img.close()
419  img.close()
420
421  return data
422
423
424def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
425                     info_dict=None):
426  """Return a File object (with name 'name') with the desired bootable
427  image.  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name
428  'prebuilt_name', otherwise look for it under 'unpack_dir'/IMAGES,
429  otherwise construct it from the source files in
430  'unpack_dir'/'tree_subdir'."""
431
432  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
433  if os.path.exists(prebuilt_path):
434    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
435    return File.FromLocalFile(name, prebuilt_path)
436
437  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
438  if os.path.exists(prebuilt_path):
439    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
440    return File.FromLocalFile(name, prebuilt_path)
441
442  print "building image from target_files %s..." % (tree_subdir,)
443  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
444  data = BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
445                            os.path.join(unpack_dir, fs_config),
446                            info_dict)
447  if data:
448    return File(name, data)
449  return None
450
451
452def UnzipTemp(filename, pattern=None):
453  """Unzip the given archive into a temporary directory and return the name.
454
455  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
456  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
457
458  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
459  main file), open for reading.
460  """
461
462  tmp = tempfile.mkdtemp(prefix="targetfiles-")
463  OPTIONS.tempfiles.append(tmp)
464
465  def unzip_to_dir(filename, dirname):
466    cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
467    if pattern is not None:
468      cmd.append(pattern)
469    p = Run(cmd, stdout=subprocess.PIPE)
470    p.communicate()
471    if p.returncode != 0:
472      raise ExternalError("failed to unzip input target-files \"%s\"" %
473                          (filename,))
474
475  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
476  if m:
477    unzip_to_dir(m.group(1), tmp)
478    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
479    filename = m.group(1)
480  else:
481    unzip_to_dir(filename, tmp)
482
483  return tmp, zipfile.ZipFile(filename, "r")
484
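# A minimal sketch (not from the original module) of UnzipTemp(); the archive
# name is hypothetical.  The temp dir is recorded in OPTIONS.tempfiles, so a
# later Cleanup() removes it.
def _example_unzip_temp():
  tmp, input_zip = UnzipTemp("target-files.zip", pattern="META/*")
  misc_info = input_zip.read("META/misc_info.txt")
  return tmp, misc_info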
485
486def GetKeyPasswords(keylist):
487  """Given a list of keys, prompt the user to enter passwords for
488  those which require them.  Return a {key: password} dict.  password
489  will be None if the key has no password."""
490
491  no_passwords = []
492  need_passwords = []
493  key_passwords = {}
494  devnull = open("/dev/null", "w+b")
495  for k in sorted(keylist):
496    # We don't need a password for things that aren't really keys.
497    if k in SPECIAL_CERT_STRINGS:
498      no_passwords.append(k)
499      continue
500
501    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
502             "-inform", "DER", "-nocrypt"],
503            stdin=devnull.fileno(),
504            stdout=devnull.fileno(),
505            stderr=subprocess.STDOUT)
506    p.communicate()
507    if p.returncode == 0:
508      # Definitely an unencrypted key.
509      no_passwords.append(k)
510    else:
511      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
512               "-inform", "DER", "-passin", "pass:"],
513              stdin=devnull.fileno(),
514              stdout=devnull.fileno(),
515              stderr=subprocess.PIPE)
516      _, stderr = p.communicate()
517      if p.returncode == 0:
518        # Encrypted key with empty string as password.
519        key_passwords[k] = ''
520      elif stderr.startswith('Error decrypting key'):
521        # Definitely encrypted key.
522        # It would have said "Error reading key" if it didn't parse correctly.
523        need_passwords.append(k)
524      else:
525        # Potentially, a type of key that openssl doesn't understand.
526        # We'll let the routines in signapk.jar handle it.
527        no_passwords.append(k)
528  devnull.close()
529
530  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
531  key_passwords.update(dict.fromkeys(no_passwords, None))
532  return key_passwords
533
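# A minimal sketch (not from the original module) of GetKeyPasswords(); the
# key basename is hypothetical (the ".pk8"/".x509.pem" suffixes are appended
# internally), and the call shells out to openssl, prompting only as needed.
def _example_get_key_passwords():
  passwords = GetKeyPasswords(
      ["build/target/product/security/testkey", "PRESIGNED"])
  assert passwords["PRESIGNED"] is None  # special strings need no password
  return passwords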
534
535def SignFile(input_name, output_name, key, password, align=None,
536             whole_file=False):
537  """Sign the input_name zip/jar/apk, producing output_name.  Use the
538  given key and password (the latter may be None if the key does not
539  have a password.
540
541  If align is an integer > 1, zipalign is run to align stored files in
542  the output zip on 'align'-byte boundaries.
543
544  If whole_file is true, use the "-w" option to SignApk to embed a
545  signature that covers the whole file in the archive comment of the
546  zip file.
547  """
548
549  if align == 0 or align == 1:
550    align = None
551
552  if align:
553    temp = tempfile.NamedTemporaryFile()
554    sign_name = temp.name
555  else:
556    sign_name = output_name
557
558  cmd = [OPTIONS.java_path, OPTIONS.java_args, "-jar",
559         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
560  cmd.extend(OPTIONS.extra_signapk_args)
561  if whole_file:
562    cmd.append("-w")
563  cmd.extend([key + OPTIONS.public_key_suffix,
564              key + OPTIONS.private_key_suffix,
565              input_name, sign_name])
566
567  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
568  if password is not None:
569    password += "\n"
570  p.communicate(password)
571  if p.returncode != 0:
572    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
573
574  if align:
575    p = Run(["zipalign", "-f", "-p", str(align), sign_name, output_name])
576    p.communicate()
577    if p.returncode != 0:
578      raise ExternalError("zipalign failed: return code %s" % (p.returncode,))
579    temp.close()
580
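# A minimal sketch (not from the original module) of signing a package with
# SignFile(); the file names and key path are hypothetical, and password may
# be None for an unencrypted key.
def _example_sign_file(password=None):
  SignFile("unsigned.zip", "signed.zip",
           "build/target/product/security/testkey", password,
           whole_file=True)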
581
582def CheckSize(data, target, info_dict):
583  """Check the data string passed against the max size limit, if
584  any, for the given target.  Raise exception if the data is too big.
585  Print a warning if the data is nearing the maximum size."""
586
587  if target.endswith(".img"):
588    target = target[:-4]
589  mount_point = "/" + target
590
591  fs_type = None
592  limit = None
593  if info_dict["fstab"]:
594    if mount_point == "/userdata":
595      mount_point = "/data"
596    p = info_dict["fstab"][mount_point]
597    fs_type = p.fs_type
598    device = p.device
599    if "/" in device:
600      device = device[device.rfind("/")+1:]
601    limit = info_dict.get(device + "_size", None)
602  if not fs_type or not limit:
603    return
604
605  if fs_type == "yaffs2":
606    # The size limit is bumped by 64/2048 to account for the spare area
607    # (64 spare bytes per 2048-byte page).
608    limit = limit / 2048 * (2048+64)
609  size = len(data)
610  pct = float(size) * 100.0 / limit
611  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
612  if pct >= 99.0:
613    raise ExternalError(msg)
614  elif pct >= 95.0:
615    print
616    print "  WARNING: ", msg
617    print
618  elif OPTIONS.verbose:
619    print "  ", msg
620
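# A worked example (not from the original module) of the yaffs2 limit
# adjustment above, using a hypothetical 64 MiB partition size.
def _example_check_size_math():
  limit = 64 * 1024 * 1024                 # raw partition size from info dict
  adjusted = limit / 2048 * (2048 + 64)    # 69206016: 64 spare bytes per page
  pct = float(62 * 1024 * 1024) * 100.0 / adjusted
  return adjusted, pct                     # (69206016, ~93.9%): no warning yet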
621
622def ReadApkCerts(tf_zip):
623  """Given a target_files ZipFile, parse the META/apkcerts.txt file
624  and return a {package: cert} dict."""
625  certmap = {}
626  for line in tf_zip.read("META/apkcerts.txt").split("\n"):
627    line = line.strip()
628    if not line:
629      continue
630    m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
631                 r'private_key="(.*)"$', line)
632    if m:
633      name, cert, privkey = m.groups()
634      public_key_suffix_len = len(OPTIONS.public_key_suffix)
635      private_key_suffix_len = len(OPTIONS.private_key_suffix)
636      if cert in SPECIAL_CERT_STRINGS and not privkey:
637        certmap[name] = cert
638      elif (cert.endswith(OPTIONS.public_key_suffix) and
639            privkey.endswith(OPTIONS.private_key_suffix) and
640            cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
641        certmap[name] = cert[:-public_key_suffix_len]
642      else:
643        raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
644  return certmap
645
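# A minimal sketch (not from the original module) of the apkcerts.txt line
# format ReadApkCerts() accepts; the package and key names are hypothetical
# and assume the default key suffixes.
def _example_read_apk_certs():
  class FakeZip(object):
    def read(self, name):
      return ('name="Example.apk" certificate="build/testkey.x509.pem" '
              'private_key="build/testkey.pk8"\n')
  certmap = ReadApkCerts(FakeZip())
  assert certmap == {"Example.apk": "build/testkey"}
  return certmap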
646
647COMMON_DOCSTRING = """
648  -p  (--path)  <dir>
649      Prepend <dir>/bin to the list of places to search for binaries
650      run by this script, and expect to find jars in <dir>/framework.
651
652  -s  (--device_specific) <file>
653      Path to the python module containing device-specific
654      releasetools code.
655
656  -x  (--extra)  <key=value>
657      Add a key/value pair to the 'extras' dict, which device-specific
658      extension code may look at.
659
660  -v  (--verbose)
661      Show command lines being executed.
662
663  -h  (--help)
664      Display this usage message and exit.
665"""
666
667def Usage(docstring):
668  print docstring.rstrip("\n")
669  print COMMON_DOCSTRING
670
671
672def ParseOptions(argv,
673                 docstring,
674                 extra_opts="", extra_long_opts=(),
675                 extra_option_handler=None):
676  """Parse the options in argv and return any arguments that aren't
677  flags.  docstring is the calling module's docstring, to be displayed
678  for errors and -h.  extra_opts and extra_long_opts are for flags
679  defined by the caller, which are processed by passing them to
680  extra_option_handler."""
681
682  try:
683    opts, args = getopt.getopt(
684        argv, "hvp:s:x:" + extra_opts,
685        ["help", "verbose", "path=", "signapk_path=", "extra_signapk_args=",
686         "java_path=", "java_args=", "public_key_suffix=",
687         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
688         "verity_signer_path=", "verity_signer_args=", "device_specific=",
689         "extra="] +
690        list(extra_long_opts))
691  except getopt.GetoptError as err:
692    Usage(docstring)
693    print "**", str(err), "**"
694    sys.exit(2)
695
696  for o, a in opts:
697    if o in ("-h", "--help"):
698      Usage(docstring)
699      sys.exit()
700    elif o in ("-v", "--verbose"):
701      OPTIONS.verbose = True
702    elif o in ("-p", "--path"):
703      OPTIONS.search_path = a
704    elif o in ("--signapk_path",):
705      OPTIONS.signapk_path = a
706    elif o in ("--extra_signapk_args",):
707      OPTIONS.extra_signapk_args = shlex.split(a)
708    elif o in ("--java_path",):
709      OPTIONS.java_path = a
710    elif o in ("--java_args",):
711      OPTIONS.java_args = a
712    elif o in ("--public_key_suffix",):
713      OPTIONS.public_key_suffix = a
714    elif o in ("--private_key_suffix",):
715      OPTIONS.private_key_suffix = a
716    elif o in ("--boot_signer_path",):
717      OPTIONS.boot_signer_path = a
718    elif o in ("--boot_signer_args",):
719      OPTIONS.boot_signer_args = shlex.split(a)
720    elif o in ("--verity_signer_path",):
721      OPTIONS.verity_signer_path = a
722    elif o in ("--verity_signer_args",):
723      OPTIONS.verity_signer_args = shlex.split(a)
724    elif o in ("-s", "--device_specific"):
725      OPTIONS.device_specific = a
726    elif o in ("-x", "--extra"):
727      key, value = a.split("=", 1)
728      OPTIONS.extras[key] = value
729    else:
730      if extra_option_handler is None or not extra_option_handler(o, a):
731        assert False, "unknown option \"%s\"" % (o,)
732
733  if OPTIONS.search_path:
734    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
735                          os.pathsep + os.environ["PATH"])
736
737  return args
738
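# A minimal sketch (not from the original module) of ParseOptions() with a
# caller-defined flag; "--my_flag" and its handler are hypothetical.
def _example_parse_options(argv, docstring):
  def handler(o, a):
    if o == "--my_flag":
      OPTIONS.extras["my_flag"] = a
      return True
    return False
  return ParseOptions(argv, docstring,
                      extra_long_opts=["my_flag="],
                      extra_option_handler=handler)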
739
740def MakeTempFile(prefix=None, suffix=None):
741  """Make a temp file and add it to the list of things to be deleted
742  when Cleanup() is called.  Return the filename."""
743  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
744  os.close(fd)
745  OPTIONS.tempfiles.append(fn)
746  return fn
747
748
749def Cleanup():
750  for i in OPTIONS.tempfiles:
751    if os.path.isdir(i):
752      shutil.rmtree(i)
753    else:
754      os.remove(i)
755
756
757class PasswordManager(object):
758  def __init__(self):
759    self.editor = os.getenv("EDITOR", None)
760    self.pwfile = os.getenv("ANDROID_PW_FILE", None)
761
762  def GetPasswords(self, items):
763    """Get passwords corresponding to each string in 'items',
764    returning a dict.  (The dict may have keys in addition to the
765    values in 'items'.)
766
767    Uses the passwords in $ANDROID_PW_FILE if available, letting the
768    user edit that file to add more needed passwords.  If no editor is
769    available, or $ANDROID_PW_FILE isn't defined, prompts the user
770    interactively in the ordinary way.
771    """
772
773    current = self.ReadFile()
774
775    first = True
776    while True:
777      missing = []
778      for i in items:
779        if i not in current or not current[i]:
780          missing.append(i)
781      # Are all the passwords already in the file?
782      if not missing:
783        return current
784
785      for i in missing:
786        current[i] = ""
787
788      if not first:
789        print "key file %s still missing some passwords." % (self.pwfile,)
790        answer = raw_input("try to edit again? [y]> ").strip()
791        if answer and answer[0] not in 'yY':
792          raise RuntimeError("key passwords unavailable")
793      first = False
794
795      current = self.UpdateAndReadFile(current)
796
797  def PromptResult(self, current): # pylint: disable=no-self-use
798    """Prompt the user to enter a value (password) for each key in
799    'current' whose value is false.  Returns a new dict with all the
800    values.
801    """
802    result = {}
803    for k, v in sorted(current.iteritems()):
804      if v:
805        result[k] = v
806      else:
807        while True:
808          result[k] = getpass.getpass(
809              "Enter password for %s key> " % k).strip()
810          if result[k]:
811            break
812    return result
813
814  def UpdateAndReadFile(self, current):
815    if not self.editor or not self.pwfile:
816      return self.PromptResult(current)
817
818    f = open(self.pwfile, "w")
819    os.chmod(self.pwfile, 0o600)
820    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
821    f.write("# (Additional spaces are harmless.)\n\n")
822
823    first_line = None
824    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
825    for i, (_, k, v) in enumerate(sorted_list):
826      f.write("[[[  %s  ]]] %s\n" % (v, k))
827      if not v and first_line is None:
828        # position cursor on first line with no password.
829        first_line = i + 4
830    f.close()
831
832    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
833    _, _ = p.communicate()
834
835    return self.ReadFile()
836
837  def ReadFile(self):
838    result = {}
839    if self.pwfile is None:
840      return result
841    try:
842      f = open(self.pwfile, "r")
843      for line in f:
844        line = line.strip()
845        if not line or line[0] == '#':
846          continue
847        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
848        if not m:
849          print "failed to parse password file: ", line
850        else:
851          result[m.group(2)] = m.group(1)
852      f.close()
853    except IOError as e:
854      if e.errno != errno.ENOENT:
855        print "error reading password file: ", str(e)
856    return result
857
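# A minimal sketch (not from the original module) of the $ANDROID_PW_FILE
# line format that PasswordManager.ReadFile() parses; the key name and
# password are hypothetical.
def _example_password_file_line():
  line = "[[[  secret  ]]] build/target/product/security/releasekey"
  m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
  assert m.group(1) == "secret"
  return {m.group(2): m.group(1)}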
858
859def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
860             compress_type=None):
861  import datetime
862
863  # http://b/18015246
864  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
865  # for files larger than 2GiB. We can work around this by adjusting their
866  # limit. Note that `zipfile.writestr()` will not work for strings larger than
867  # 2GiB. The Python interpreter sometimes rejects strings that large (though
868  # it isn't clear to me exactly what circumstances cause this).
869  # `zipfile.write()` must be used directly to work around this.
870  #
871  # This mess can be avoided if we port to python3.
872  saved_zip64_limit = zipfile.ZIP64_LIMIT
873  zipfile.ZIP64_LIMIT = (1 << 32) - 1
874
875  if compress_type is None:
876    compress_type = zip_file.compression
877  if arcname is None:
878    arcname = filename
879
880  saved_stat = os.stat(filename)
881
882  try:
883    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
884    # file to be zipped and reset it when we're done.
885    os.chmod(filename, perms)
886
887    # Use a fixed timestamp so the output is repeatable.
888    epoch = datetime.datetime.fromtimestamp(0)
889    timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
890    os.utime(filename, (timestamp, timestamp))
891
892    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
893  finally:
894    os.chmod(filename, saved_stat.st_mode)
895    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
896    zipfile.ZIP64_LIMIT = saved_zip64_limit
897
898
899def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
900                compress_type=None):
901  """Wrap zipfile.writestr() function to work around the zip64 limit.
902
903  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
904  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
905  when calling crc32(bytes).
906
907  But it still works fine to write a shorter string into a large zip file.
908  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
909  when we know the string won't be too long.
910  """
911
912  saved_zip64_limit = zipfile.ZIP64_LIMIT
913  zipfile.ZIP64_LIMIT = (1 << 32) - 1
914
915  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
916    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
917    zinfo.compress_type = zip_file.compression
918    if perms is None:
919      perms = 0o644
920  else:
921    zinfo = zinfo_or_arcname
922
923  # If compress_type is given, it overrides the value in zinfo.
924  if compress_type is not None:
925    zinfo.compress_type = compress_type
926
927  # If perms is given, it takes priority.
928  if perms is not None:
929    zinfo.external_attr = perms << 16
930
931  # Use a fixed timestamp so the output is repeatable.
932  zinfo.date_time = (2009, 1, 1, 0, 0, 0)
933
934  zip_file.writestr(zinfo, data)
935  zipfile.ZIP64_LIMIT = saved_zip64_limit
936
937
938def ZipClose(zip_file):
939  # http://b/18015246
940  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
941  # central directory.
942  saved_zip64_limit = zipfile.ZIP64_LIMIT
943  zipfile.ZIP64_LIMIT = (1 << 32) - 1
944
945  zip_file.close()
946
947  zipfile.ZIP64_LIMIT = saved_zip64_limit
948
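# A minimal sketch (not from the original module) of the zip helpers above;
# the output path and input file are hypothetical.  Small generated strings
# go through ZipWriteStr(), on-disk files (possibly >2GiB) through ZipWrite().
def _example_zip_helpers(output_path, big_file):
  z = zipfile.ZipFile(output_path, "w", zipfile.ZIP_DEFLATED)
  ZipWriteStr(z, "META/note.txt", "generated contents\n", perms=0o644)
  ZipWrite(z, big_file, arcname="IMAGES/system.img",
           compress_type=zipfile.ZIP_STORED)
  ZipClose(z)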
949
950class DeviceSpecificParams(object):
951  module = None
952  def __init__(self, **kwargs):
953    """Keyword arguments to the constructor become attributes of this
954    object, which is passed to all functions in the device-specific
955    module."""
956    for k, v in kwargs.iteritems():
957      setattr(self, k, v)
958    self.extras = OPTIONS.extras
959
960    if self.module is None:
961      path = OPTIONS.device_specific
962      if not path:
963        return
964      try:
965        if os.path.isdir(path):
966          info = imp.find_module("releasetools", [path])
967        else:
968          d, f = os.path.split(path)
969          b, x = os.path.splitext(f)
970          if x == ".py":
971            f = b
972          info = imp.find_module(f, [d])
973        print "loaded device-specific extensions from", path
974        self.module = imp.load_module("device_specific", *info)
975      except ImportError:
976        print "unable to load device-specific module; assuming none"
977
978  def _DoCall(self, function_name, *args, **kwargs):
979    """Call the named function in the device-specific module, passing
980    the given args and kwargs.  The first argument to the call will be
981    the DeviceSpecific object itself.  If there is no module, or the
982    module does not define the function, return the value of the
983    'default' kwarg (which itself defaults to None)."""
984    if self.module is None or not hasattr(self.module, function_name):
985      return kwargs.get("default", None)
986    return getattr(self.module, function_name)(*((self,) + args), **kwargs)
987
988  def FullOTA_Assertions(self):
989    """Called after emitting the block of assertions at the top of a
990    full OTA package.  Implementations can add whatever additional
991    assertions they like."""
992    return self._DoCall("FullOTA_Assertions")
993
994  def FullOTA_InstallBegin(self):
995    """Called at the start of full OTA installation."""
996    return self._DoCall("FullOTA_InstallBegin")
997
998  def FullOTA_InstallEnd(self):
999    """Called at the end of full OTA installation; typically this is
1000    used to install the image for the device's baseband processor."""
1001    return self._DoCall("FullOTA_InstallEnd")
1002
1003  def IncrementalOTA_Assertions(self):
1004    """Called after emitting the block of assertions at the top of an
1005    incremental OTA package.  Implementations can add whatever
1006    additional assertions they like."""
1007    return self._DoCall("IncrementalOTA_Assertions")
1008
1009  def IncrementalOTA_VerifyBegin(self):
1010    """Called at the start of the verification phase of incremental
1011    OTA installation; additional checks can be placed here to abort
1012    the script before any changes are made."""
1013    return self._DoCall("IncrementalOTA_VerifyBegin")
1014
1015  def IncrementalOTA_VerifyEnd(self):
1016    """Called at the end of the verification phase of incremental OTA
1017    installation; additional checks can be placed here to abort the
1018    script before any changes are made."""
1019    return self._DoCall("IncrementalOTA_VerifyEnd")
1020
1021  def IncrementalOTA_InstallBegin(self):
1022    """Called at the start of incremental OTA installation (after
1023    verification is complete)."""
1024    return self._DoCall("IncrementalOTA_InstallBegin")
1025
1026  def IncrementalOTA_InstallEnd(self):
1027    """Called at the end of incremental OTA installation; typically
1028    this is used to install the image for the device's baseband
1029    processor."""
1030    return self._DoCall("IncrementalOTA_InstallEnd")
1031
1032class File(object):
1033  def __init__(self, name, data):
1034    self.name = name
1035    self.data = data
1036    self.size = len(data)
1037    self.sha1 = sha1(data).hexdigest()
1038
1039  @classmethod
1040  def FromLocalFile(cls, name, diskname):
1041    f = open(diskname, "rb")
1042    data = f.read()
1043    f.close()
1044    return File(name, data)
1045
1046  def WriteToTemp(self):
1047    t = tempfile.NamedTemporaryFile()
1048    t.write(self.data)
1049    t.flush()
1050    return t
1051
1052  def AddToZip(self, z, compression=None):
1053    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1054
1055DIFF_PROGRAM_BY_EXT = {
1056    ".gz" : "imgdiff",
1057    ".zip" : ["imgdiff", "-z"],
1058    ".jar" : ["imgdiff", "-z"],
1059    ".apk" : ["imgdiff", "-z"],
1060    ".img" : "imgdiff",
1061    }
1062
1063class Difference(object):
1064  def __init__(self, tf, sf, diff_program=None):
1065    self.tf = tf
1066    self.sf = sf
1067    self.patch = None
1068    self.diff_program = diff_program
1069
1070  def ComputePatch(self):
1071    """Compute the patch (as a string of data) needed to turn sf into
1072    tf.  Returns the same tuple as GetPatch()."""
1073
1074    tf = self.tf
1075    sf = self.sf
1076
1077    if self.diff_program:
1078      diff_program = self.diff_program
1079    else:
1080      ext = os.path.splitext(tf.name)[1]
1081      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
1082
1083    ttemp = tf.WriteToTemp()
1084    stemp = sf.WriteToTemp()
1085
1086    ext = os.path.splitext(tf.name)[1]
1087
1088    try:
1089      ptemp = tempfile.NamedTemporaryFile()
1090      if isinstance(diff_program, list):
1091        cmd = copy.copy(diff_program)
1092      else:
1093        cmd = [diff_program]
1094      cmd.append(stemp.name)
1095      cmd.append(ttemp.name)
1096      cmd.append(ptemp.name)
1097      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1098      err = []
1099      def run():
1100        _, e = p.communicate()
1101        if e:
1102          err.append(e)
1103      th = threading.Thread(target=run)
1104      th.start()
1105      th.join(timeout=300)   # 5 mins
1106      if th.is_alive():
1107        print "WARNING: diff command timed out"
1108        p.terminate()
1109        th.join(5)
1110        if th.is_alive():
1111          p.kill()
1112          th.join()
1113
1114      if err or p.returncode != 0:
1115        print "WARNING: failure running %s:\n%s\n" % (
1116            diff_program, "".join(err))
1117        self.patch = None
1118        return None, None, None
1119      diff = ptemp.read()
1120    finally:
1121      ptemp.close()
1122      stemp.close()
1123      ttemp.close()
1124
1125    self.patch = diff
1126    return self.tf, self.sf, self.patch
1127
1128
1129  def GetPatch(self):
1130    """Return a tuple (target_file, source_file, patch_data).
1131    patch_data may be None if ComputePatch hasn't been called, or if
1132    computing the patch failed."""
1133    return self.tf, self.sf, self.patch
1134
1135
1136def ComputeDifferences(diffs):
1137  """Call ComputePatch on all the Difference objects in 'diffs'."""
1138  print len(diffs), "diffs to compute"
1139
1140  # Do the largest files first, to try and reduce the long-pole effect.
1141  by_size = [(i.tf.size, i) for i in diffs]
1142  by_size.sort(reverse=True)
1143  by_size = [i[1] for i in by_size]
1144
1145  lock = threading.Lock()
1146  diff_iter = iter(by_size)   # accessed under lock
1147
1148  def worker():
1149    try:
1150      lock.acquire()
1151      for d in diff_iter:
1152        lock.release()
1153        start = time.time()
1154        d.ComputePatch()
1155        dur = time.time() - start
1156        lock.acquire()
1157
1158        tf, sf, patch = d.GetPatch()
1159        if sf.name == tf.name:
1160          name = tf.name
1161        else:
1162          name = "%s (%s)" % (tf.name, sf.name)
1163        if patch is None:
1164          print "patching failed!                                  %s" % (name,)
1165        else:
1166          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
1167              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
1168      lock.release()
1169    except Exception as e:
1170      print e
1171      raise
1172
1173  # start worker threads; wait for them all to finish.
1174  threads = [threading.Thread(target=worker)
1175             for i in range(OPTIONS.worker_threads)]
1176  for th in threads:
1177    th.start()
1178  while threads:
1179    threads.pop().join()
1180
1181
1182class BlockDifference(object):
1183  def __init__(self, partition, tgt, src=None, check_first_block=False,
1184               version=None):
1185    self.tgt = tgt
1186    self.src = src
1187    self.partition = partition
1188    self.check_first_block = check_first_block
1189
1190    # Due to http://b/20939131, check_first_block is disabled temporarily.
1191    assert not self.check_first_block
1192
1193    if version is None:
1194      version = 1
1195      if OPTIONS.info_dict:
1196        version = max(
1197            int(i) for i in
1198            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
1199    self.version = version
1200
1201    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
1202                                    version=self.version)
1203    tmpdir = tempfile.mkdtemp()
1204    OPTIONS.tempfiles.append(tmpdir)
1205    self.path = os.path.join(tmpdir, partition)
1206    b.Compute(self.path)
1207
1208    if src is None:
1209      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
1210    else:
1211      _, self.device = GetTypeAndDevice("/" + partition,
1212                                        OPTIONS.source_info_dict)
1213
1214  def WriteScript(self, script, output_zip, progress=None):
1215    if not self.src:
1216      # write the output unconditionally
1217      script.Print("Patching %s image unconditionally..." % (self.partition,))
1218    else:
1219      script.Print("Patching %s image after verification." % (self.partition,))
1220
1221    if progress:
1222      script.ShowProgress(progress, 0)
1223    self._WriteUpdate(script, output_zip)
1224    self._WritePostInstallVerifyScript(script)
1225
1226  def WriteVerifyScript(self, script):
1227    partition = self.partition
1228    if not self.src:
1229      script.Print("Image %s will be patched unconditionally." % (partition,))
1230    else:
1231      ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
1232      ranges_str = ranges.to_string_raw()
1233      if self.version >= 3:
1234        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
1235                            'block_image_verify("%s", '
1236                            'package_extract_file("%s.transfer.list"), '
1237                            '"%s.new.dat", "%s.patch.dat")) then') % (
1238                            self.device, ranges_str, self.src.TotalSha1(),
1239                            self.device, partition, partition, partition))
1240      else:
1241        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
1242                           self.device, ranges_str, self.src.TotalSha1()))
1243      script.Print('Verified %s image...' % (partition,))
1244      script.AppendExtra('else')
1245
1246      # When generating incrementals for the system and vendor partitions,
1247      # explicitly check the first block (which contains the superblock) of
1248      # the partition to see if it's what we expect. If this check fails,
1249      # give an explicit log message about the partition having been
1250      # remounted R/W (the most likely explanation) and the need to flash to
1251      # get OTAs working again.
1252      if self.check_first_block:
1253        self._CheckFirstBlock(script)
1254
1255      # Abort the OTA update. Note that the incremental OTA cannot be applied
1256      # even if it may match the checksum of the target partition.
1257      # a) If version < 3, operations like move and erase will make changes
1258      #    unconditionally and damage the partition.
1259      # b) If version >= 3, it won't even reach here.
1260      script.AppendExtra(('abort("%s partition has unexpected contents");\n'
1261                          'endif;') % (partition,))
1262
1263  def _WritePostInstallVerifyScript(self, script):
1264    partition = self.partition
1265    script.Print('Verifying the updated %s image...' % (partition,))
1266    # Unlike pre-install verification, clobbered_blocks should not be ignored.
1267    ranges = self.tgt.care_map
1268    ranges_str = ranges.to_string_raw()
1269    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
1270                       self.device, ranges_str,
1271                       self.tgt.TotalSha1(include_clobbered_blocks=True)))
1272
1273    # Bug: 20881595
1274    # Verify that extended blocks are really zeroed out.
1275    if self.tgt.extended:
1276      ranges_str = self.tgt.extended.to_string_raw()
1277      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
1278                         self.device, ranges_str,
1279                         self._HashZeroBlocks(self.tgt.extended.size())))
1280      script.Print('Verified the updated %s image.' % (partition,))
1281      script.AppendExtra(
1282          'else\n'
1283          '  abort("%s partition has unexpected non-zero contents after OTA '
1284          'update");\n'
1285          'endif;' % (partition,))
1286    else:
1287      script.Print('Verified the updated %s image.' % (partition,))
1288
1289    script.AppendExtra(
1290        'else\n'
1291        '  abort("%s partition has unexpected contents after OTA update");\n'
1292        'endif;' % (partition,))
1293
1294  def _WriteUpdate(self, script, output_zip):
1295    ZipWrite(output_zip,
1296             '{}.transfer.list'.format(self.path),
1297             '{}.transfer.list'.format(self.partition))
1298    ZipWrite(output_zip,
1299             '{}.new.dat'.format(self.path),
1300             '{}.new.dat'.format(self.partition))
1301    ZipWrite(output_zip,
1302             '{}.patch.dat'.format(self.path),
1303             '{}.patch.dat'.format(self.partition),
1304             compress_type=zipfile.ZIP_STORED)
1305
1306    call = ('block_image_update("{device}", '
1307            'package_extract_file("{partition}.transfer.list"), '
1308            '"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
1309                device=self.device, partition=self.partition))
1310    script.AppendExtra(script.WordWrap(call))
1311
1312  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
1313    data = source.ReadRangeSet(ranges)
1314    ctx = sha1()
1315
1316    for p in data:
1317      ctx.update(p)
1318
1319    return ctx.hexdigest()
1320
1321  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
1322    """Return the hash value for all zero blocks."""
1323    zero_block = '\x00' * 4096
1324    ctx = sha1()
1325    for _ in range(num_blocks):
1326      ctx.update(zero_block)
1327
1328    return ctx.hexdigest()
1329
1330  # TODO(tbao): Due to http://b/20939131, block 0 may be changed without
1331  # remounting R/W. Will change the checking to a finer-grained way to
1332  # mask off those bits.
1333  def _CheckFirstBlock(self, script):
1334    r = rangelib.RangeSet((0, 1))
1335    srchash = self._HashBlocks(self.src, r)
1336
1337    script.AppendExtra(('(range_sha1("%s", "%s") == "%s") || '
1338                        'abort("%s has been remounted R/W; '
1339                        'reflash device to reenable OTA updates");')
1340                       % (self.device, r.to_string_raw(), srchash,
1341                          self.device))
1342
1343DataImage = blockimgdiff.DataImage
1344
1345
1346# map recovery.fstab's fs_types to mount/format "partition types"
1347PARTITION_TYPES = {
1348    "yaffs2": "MTD",
1349    "mtd": "MTD",
1350    "ext4": "EMMC",
1351    "emmc": "EMMC",
1352    "f2fs": "EMMC",
1353    "squashfs": "EMMC"
1354}
1355
1356def GetTypeAndDevice(mount_point, info):
1357  fstab = info["fstab"]
1358  if fstab:
1359    return (PARTITION_TYPES[fstab[mount_point].fs_type],
1360            fstab[mount_point].device)
1361  else:
1362    raise KeyError
1363
1364
1365def ParseCertificate(data):
1366  """Parse a PEM-format certificate."""
1367  cert = []
1368  save = False
1369  for line in data.split("\n"):
1370    if "--END CERTIFICATE--" in line:
1371      break
1372    if save:
1373      cert.append(line)
1374    if "--BEGIN CERTIFICATE--" in line:
1375      save = True
1376  cert = "".join(cert).decode('base64')
1377  return cert
1378
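# A minimal sketch (not from the original module) of ParseCertificate(); the
# PEM payload is a made-up stand-in, not a real certificate.
def _example_parse_certificate():
  pem = ("-----BEGIN CERTIFICATE-----\n"
         "aGVsbG8=\n"
         "-----END CERTIFICATE-----\n")
  return ParseCertificate(pem)  # base64 payload decoded to raw bytes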
1379def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
1380                      info_dict=None):
1381  """Generate a binary patch that creates the recovery image starting
1382  with the boot image.  (Most of the space in these images is just the
1383  kernel, which is identical for the two, so the resulting patch
1384  should be efficient.)  Add it to the output zip, along with a shell
1385  script that is run from init.rc on first boot to actually do the
1386  patching and install the new recovery image.
1387
1388  recovery_img and boot_img should be File objects for the
1389  corresponding images.  info_dict should be the dictionary returned by
1390  common.LoadInfoDict() on the input target_files.
1391  """
1392
1393  if info_dict is None:
1394    info_dict = OPTIONS.info_dict
1395
1396  diff_program = ["imgdiff"]
1397  path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
1398  if os.path.exists(path):
1399    diff_program.append("-b")
1400    diff_program.append(path)
1401    bonus_args = "-b /system/etc/recovery-resource.dat"
1402  else:
1403    bonus_args = ""
1404
1405  d = Difference(recovery_img, boot_img, diff_program=diff_program)
1406  _, _, patch = d.ComputePatch()
1407  output_sink("recovery-from-boot.p", patch)
1408
1409  try:
1410    # The following GetTypeAndDevice()s need to use the path in the target
1411    # info_dict instead of source_info_dict.
1412    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
1413    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
1414  except KeyError:
1415    return
1416
1417  sh = """#!/system/bin/sh
1418if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
1419  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
1420else
1421  log -t recovery "Recovery image already installed"
1422fi
1423""" % {'boot_size': boot_img.size,
1424       'boot_sha1': boot_img.sha1,
1425       'recovery_size': recovery_img.size,
1426       'recovery_sha1': recovery_img.sha1,
1427       'boot_type': boot_type,
1428       'boot_device': boot_device,
1429       'recovery_type': recovery_type,
1430       'recovery_device': recovery_device,
1431       'bonus_args': bonus_args}
1432
1433  # The install script location moved from /system/etc to /system/bin
1434  # in the L release.  Parse init.*.rc files to find out where the
1435  # target-files expects it to be, and put it there.
1436  sh_location = "etc/install-recovery.sh"
1437  found = False
1438  init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
1439  init_rc_files = os.listdir(init_rc_dir)
1440  for init_rc_file in init_rc_files:
1441    if (not init_rc_file.startswith('init.') or
1442        not init_rc_file.endswith('.rc')):
1443      continue
1444
1445    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
1446      for line in f:
1447        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
1448        if m:
1449          sh_location = m.group(1)
1450          found = True
1451          break
1452
1453    if found:
1454      break
1455
1456  print "putting script in", sh_location
1457
1458  output_sink(sh_location, sh)
1459