# common.py @ revision f2cffbddb9dccc6dd46ea2be0bbde387315f09c3
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import errno
17import getopt
18import getpass
19import imp
20import os
21import platform
22import re
23import shlex
24import shutil
25import subprocess
26import sys
27import tempfile
28import threading
29import time
30import zipfile
31
32import blockimgdiff
33import rangelib
34
35from hashlib import sha1 as sha1
36
37
class Options(object):
  """Mutable bag of global settings shared by the releasetools scripts.

  A single module-level instance (OPTIONS) is created at import time and
  later updated by ParseOptions() from command-line flags.
  """

  def __init__(self):
    # Default host tools directory, keyed by sys.platform; None when the
    # platform is not one we build host tools for.
    search_paths = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }
    self.search_path = search_paths.get(sys.platform, None)

    # Signing configuration.
    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = "-Xmx2048m"  # JVM Args
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"

    # Miscellaneous runtime state.
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.worker_threads = None
60
61
# The single shared options instance; mutated in place by ParseOptions()
# so that every module sees the same settings.
OPTIONS = Options()


# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
67
68
class ExternalError(RuntimeError):
  """Raised when an external tool invoked by these scripts (unzip,
  signapk.jar, zipalign, ...) exits with a non-zero status."""
  pass
71
72
def Run(args, **kwargs):
  """Create and return a subprocess.Popen object, printing the command
  line on the terminal if -v was specified.

  Args:
    args: argv-style list of program and arguments.
    **kwargs: forwarded verbatim to subprocess.Popen (stdin, stdout, ...).
  """
  if OPTIONS.verbose:
    print "  running: ", " ".join(args)
  return subprocess.Popen(args, **kwargs)
79
80
def CloseInheritedPipes():
  """Close pipe file descriptors leaked to us by gmake on Mac OS.

  gmake on Mac OS leaks pipe fds into child processes; scan fds 3..1024
  and close any that fstat() reports as a pipe.  No-op on other hosts.
  """
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      st = os.fstat(fd)
      # S_IFIFO bit (0x1000) in st_mode marks a pipe/FIFO.
      if st is not None and (st[0] & 0x1000) != 0:
        os.close(fd)
    except OSError:
      # fd not open (or close raced) -- nothing to do.
      pass
95
96
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  Args:
    input_file: an open zipfile.ZipFile of a target-files zip, OR the
        path of an unzipped target-files directory.
    input_dir: optional path of the unzipped directory when repacking;
        used to redirect link-style properties to real files there.

  Returns:
    dict of info values; sizes and versions converted to int, plus
    "fstab" (parsed recovery fstab) and "build.prop" (parsed dict).

  Raises:
    ValueError: if the recovery API version cannot be determined.
  """

  def read_helper(fn):
    # Read 'fn' from either form of input; raises KeyError when the
    # entry is absent (ZipFile raises it natively; the directory case
    # translates ENOENT into KeyError for uniformity).
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)
  d = {}
  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    # ok if misc_info.txt doesn't exist
    pass

  # backwards compatibility: These values used to be in their own
  # files.  Look for them, in case we're processing an old
  # target_files zip.

  if "mkyaffs2_extra_flags" not in d:
    try:
      d["mkyaffs2_extra_flags"] = read_helper(
          "META/mkyaffs2-extra-flags.txt").strip()
    except KeyError:
      # ok if flags don't exist
      pass

  if "recovery_api_version" not in d:
    try:
      d["recovery_api_version"] = read_helper(
          "META/recovery-api-version.txt").strip()
    except KeyError:
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    try:
      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
    except KeyError:
      # ok if extensions don't exist
      pass

  if "fstab_version" not in d:
    d["fstab_version"] = "1"

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts under META/. If not available, search
    # BOOT/RAMDISK/. Note that sometimes we may need a different file_contexts
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_config = os.path.join(input_dir, "META", "file_contexts")
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", "file_contexts")
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

  # Legacy image-size file: "name value" pairs, one per line.  "blocksize"
  # is stored as-is; everything else becomes "<name>_size".
  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    pass

  def makeint(key):
    # Convert d[key] from string to int in place, if present.  Base 0
    # accepts decimal, hex (0x...) and octal notations.
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"])
  d["build.prop"] = LoadBuildProp(read_helper)
  return d
208
209def LoadBuildProp(read_helper):
210  try:
211    data = read_helper("SYSTEM/build.prop")
212  except KeyError:
213    print "Warning: could not find SYSTEM/build.prop in %s" % zip
214    data = ""
215  return LoadDictionaryFromLines(data.split("\n"))
216
def LoadDictionaryFromLines(lines):
  """Parse "key=value" lines into a dict.

  Blank lines and '#' comments are skipped; lines without '=' are
  ignored.  Only the first '=' splits, so values may contain '='.
  """
  result = {}
  for raw in lines:
    stripped = raw.strip()
    if not stripped or stripped.startswith("#"):
      continue
    if "=" not in stripped:
      continue
    key, _, value = stripped.partition("=")
    result[key] = value
  return result
227
def LoadRecoveryFSTab(read_helper, fstab_version):
  """Parse RECOVERY/RAMDISK/etc/recovery.fstab into a dict of partitions.

  Args:
    read_helper: callable(fn) returning file contents or raising KeyError.
    fstab_version: 1 or 2 (the "fstab_version" value from misc_info.txt).

  Returns:
    dict mapping mount point (e.g. "/system") to a Partition record with
    mount_point, fs_type, device, length, device2 and context attributes.

  Raises:
    ValueError: on a malformed fstab line or an unknown fstab_version.
  """
  class Partition(object):
    def __init__(self, mount_point, fs_type, device, length, device2, context):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.device2 = device2
      self.context = context

  try:
    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
  except KeyError:
    print("Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab")
    data = ""

  if fstab_version == 1:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # v1 format: <mount_point> <fs_type> <device> [<device2>|<options>]
      pieces = line.split()
      if not 3 <= len(pieces) <= 4:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
      options = None
      if len(pieces) >= 4:
        if pieces[3].startswith("/"):
          device2 = pieces[3]
          # NOTE: unreachable given the 3..4 length check above; kept for
          # fidelity with the original code.
          if len(pieces) >= 5:
            options = pieces[4]
        else:
          device2 = None
          options = pieces[3]
      else:
        device2 = None

      mount_point = pieces[0]
      length = 0
      if options:
        options = options.split(",")
        for i in options:
          if i.startswith("length="):
            length = int(i[7:])
          else:
            print("%s: unknown option \"%s\"" % (mount_point, i))

      # Bug fix: the original call omitted the required 'context' argument,
      # so parsing any v1 fstab raised TypeError.  v1 entries carry no
      # SELinux context, so pass None explicitly.
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
                                 device=pieces[2], length=length,
                                 device2=device2, context=None)

  elif fstab_version == 2:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
      pieces = line.split()
      if len(pieces) != 5:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

      # Ignore entries that are managed by vold
      options = pieces[4]
      if "voldmanaged=" in options:
        continue

      # It's a good line, parse it
      length = 0
      options = options.split(",")
      for i in options:
        if i.startswith("length="):
          length = int(i[7:])
        else:
          # Ignore all unknown options in the unified fstab
          continue

      mount_flags = pieces[3]
      # Honor the SELinux context if present.
      context = None
      for i in mount_flags.split(","):
        if i.startswith("context="):
          context = i

      mount_point = pieces[1]
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                                 device=pieces[0], length=length,
                                 device2=None, context=context)

  else:
    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))

  return d
321
322
def DumpInfoDict(d):
  """Print the info dict, one sorted "key = (type) value" line per entry."""
  for key in sorted(d):
    value = d[key]
    print("%-25s = (%s) %s" % (key, type(value).__name__, value))
326
327
def BuildBootableImage(sourcedir, fs_config_file, info_dict=None):
  """Take a kernel, cmdline, and ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image.  Return the image
  data, or None if sourcedir does not appear to contains files for
  building the requested image.

  Args:
    sourcedir: directory containing "kernel", "RAMDISK/", and optional
        "second", "cmdline", "base", "pagesize" files.
    fs_config_file: optional fs_config passed to mkbootfs with -f.
    info_dict: build properties dict; defaults to OPTIONS.info_dict.
        "mkbootimg_args" adds extra mkbootimg flags; "verity_key" or
        "vboot" trigger image signing.

  Raises:
    AssertionError: if any external tool exits non-zero.
  """

  if (not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK) or
      not os.access(os.path.join(sourcedir, "kernel"), os.F_OK)):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  ramdisk_img = tempfile.NamedTemporaryFile()
  img = tempfile.NamedTemporaryFile()

  # Pack RAMDISK/ into a cpio archive (mkbootfs) and gzip it (minigzip).
  if os.access(fs_config_file, os.F_OK):
    cmd = ["mkbootfs", "-f", fs_config_file, os.path.join(sourcedir, "RAMDISK")]
  else:
    cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
  p1 = Run(cmd, stdout=subprocess.PIPE)
  p2 = Run(["minigzip"],
           stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

  p2.wait()
  p1.wait()
  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
  assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  # Optional inputs: each flag is added only when the file exists.
  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  # With vboot, mkbootimg writes to an intermediate unsigned image; the
  # signer then produces the final 'img'.
  img_unsigned = None
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--ramdisk", ramdisk_img.name,
                "--output", img_unsigned.name])
  else:
    cmd.extend(["--ramdisk", ramdisk_img.name,
                "--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  if info_dict.get("verity_key", None):
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path, path, img.name,
           info_dict["verity_key"] + ".pk8",
           info_dict["verity_key"] + ".x509.pem", img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk", img_keyblock.name,
           img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # Bug fix: the arguments were swapped -- seek(os.SEEK_SET, 0); it only
  # worked by accident because os.SEEK_SET == 0.  Rewind before reading.
  img.seek(0, os.SEEK_SET)
  data = img.read()

  ramdisk_img.close()
  img.close()

  return data
432
433
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None):
  """Return a File object (named 'name') with the desired bootable image.

  Looks for a prebuilt under 'unpack_dir'/BOOTABLE_IMAGES and then
  'unpack_dir'/IMAGES (both under the name 'prebuilt_name'); otherwise
  builds the image from the sources in 'unpack_dir'/'tree_subdir'.
  Returns None if nothing could be built.
  """
  # Prebuilt locations, in lookup priority order.
  for subdir in ("BOOTABLE_IMAGES", "IMAGES"):
    prebuilt_path = os.path.join(unpack_dir, subdir, prebuilt_name)
    if os.path.exists(prebuilt_path):
      print("using prebuilt %s from %s..." % (prebuilt_name, subdir))
      return File.FromLocalFile(name, prebuilt_path)

  print("building image from target_files %s..." % (tree_subdir,))
  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                            os.path.join(unpack_dir, fs_config),
                            info_dict)
  return File(name, data) if data else None
460
461
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """
  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  # Register for deletion by Cleanup().
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(zip_path, dest_dir):
    # -o: overwrite without prompting; -q: quiet.
    cmd = ["unzip", "-o", "-q", zip_path, "-d", dest_dir]
    if pattern is not None:
      cmd.append(pattern)
    proc = Run(cmd, stdout=subprocess.PIPE)
    proc.communicate()
    if proc.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (zip_path,))

  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m:
    unzip_to_dir(m.group(1), tmp)
    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    # The returned ZipFile is of the main (first) archive.
    filename = m.group(1)
  else:
    unzip_to_dir(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")
494
495
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password.

  Each key is probed with openssl: first as an unencrypted PKCS#8 key,
  then as an encrypted key with an empty password.  Only keys that fail
  both probes with a decryption error are actually prompted for.
  """

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    # Probe 1: does the key parse with no decryption at all?
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Probe 2: does it decrypt with the empty password?
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords
543
544
def SignFile(input_name, output_name, key, password, align=None,
             whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password.

  If align is an integer > 1, zipalign is run to align stored files in
  the output zip on 'align'-byte boundaries.

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  Raises:
    ExternalError: if signapk.jar or zipalign exits non-zero.
  """

  # Alignment of 0 or 1 is a no-op; normalize so we skip zipalign.
  if align == 0 or align == 1:
    align = None

  if align:
    # Sign into a temp file first; zipalign produces the real output.
    temp = tempfile.NamedTemporaryFile()
    sign_name = temp.name
  else:
    sign_name = output_name

  cmd = [OPTIONS.java_path, OPTIONS.java_args, "-jar",
         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
  cmd.extend(OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")
  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, sign_name])

  # signapk reads the key password (if any) from stdin, newline-terminated.
  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  if password is not None:
    password += "\n"
  p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))

  if align:
    p = Run(["zipalign", "-f", "-p", str(align), sign_name, output_name])
    p.communicate()
    if p.returncode != 0:
      raise ExternalError("zipalign failed: return code %s" % (p.returncode,))
    temp.close()
590
591
def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target.  Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size.

  Args:
    data: image contents as a string.
    target: image name, e.g. "system.img" (the ".img" is stripped).
    info_dict: dict with "fstab" plus "<device>_size" limits.

  Raises:
    ExternalError: if the image is at >= 99% of its size limit.
  """

  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    if mount_point == "/userdata":
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    # The size limit key is derived from the basename of the device node.
    if "/" in device:
      device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:
    return

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page).  Note: integer (floor)
    # division under Python 2.
    limit = limit / 2048 * (2048+64)
  size = len(data)
  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  if pct >= 99.0:
    raise ExternalError(msg)
  elif pct >= 95.0:
    print
    print "  WARNING: ", msg
    print
  elif OPTIONS.verbose:
    print "  ", msg
630
631
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict.  Lines that do not match the
  expected name/certificate/private_key format are skipped silently.

  Raises:
    ValueError: when a matched line has inconsistent cert/key names.
  """
  line_re = re.compile(r'^name="(.*)"\s+certificate="(.*)"\s+'
                       r'private_key="(.*)"$')
  certmap = {}
  for raw_line in tf_zip.read("META/apkcerts.txt").split("\n"):
    stripped = raw_line.strip()
    if not stripped:
      continue
    m = line_re.match(stripped)
    if not m:
      continue
    name, cert, privkey = m.groups()
    pub_len = len(OPTIONS.public_key_suffix)
    priv_len = len(OPTIONS.private_key_suffix)
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      # PRESIGNED / EXTERNAL entries carry no key material.
      certmap[name] = cert
    elif (cert.endswith(OPTIONS.public_key_suffix) and
          privkey.endswith(OPTIONS.private_key_suffix) and
          cert[:-pub_len] == privkey[:-priv_len]):
      # Store the common basename shared by cert and private key.
      certmap[name] = cert[:-pub_len]
    else:
      raise ValueError("failed to parse line from apkcerts.txt:\n" + stripped)
  return certmap
655
656
# Usage text for the flags handled here by ParseOptions(); Usage()
# appends this to the calling script's own docstring.
COMMON_DOCSTRING = """
  -p  (--path)  <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the python module containing device-specific
      releasetools code.

  -x  (--extra)  <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""
676
def Usage(docstring):
  """Print the calling script's docstring followed by the common options."""
  print(docstring.rstrip("\n"))
  print(COMMON_DOCSTRING)
680
681
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler.

  Side effects: mutates the global OPTIONS in place, and prepends
  OPTIONS.search_path/bin to $PATH when a search path is set.  Exits
  the process on -h/--help or on a getopt parse error.
  """

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print "**", str(err), "**"
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      # Unrecognized flags go to the caller's handler, if any.
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
741
742
def MakeTempFile(prefix=None, suffix=None):
  """Create an empty temp file, register it for Cleanup(), and return
  its path."""
  fd, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # We only need the name; close the descriptor mkstemp opened.
  os.close(fd)
  OPTIONS.tempfiles.append(path)
  return path
750
751
def Cleanup():
  """Delete every file or directory registered in OPTIONS.tempfiles."""
  for path in OPTIONS.tempfiles:
    if os.path.isdir(path):
      shutil.rmtree(path)
    else:
      os.remove(path)
758
759
class PasswordManager(object):
  """Collects key passwords, preferring a user-editable password file
  ($ANDROID_PW_FILE opened in $EDITOR) over interactive prompts."""

  def __init__(self):
    # Both come from the environment; either may be None.
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      # Seed blank entries so they show up in the editor template.
      for i in missing:
        current[i] = ""

      if not first:
        print "key file %s still missing some passwords." % (self.pwfile,)
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.iteritems()):
      if v:
        result[k] = v
      else:
        # Re-prompt until a non-empty password is entered.
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    """Write 'current' to the password file, open it in the editor for
    the user to fill in, and return the re-parsed contents.  Falls back
    to interactive prompting when no editor/pwfile is configured."""
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    # Restrict the password file to the owner.
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    # Sort entries missing a password first so they're grouped together.
    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    # "+N" positions the cursor on line N (vi-style editors).
    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  def ReadFile(self):
    """Parse the password file into a {key: password} dict.  A missing
    file yields an empty dict; other read errors are reported."""
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        # Lines look like: [[[ password ]]] keyname
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          print "failed to parse password file: ", line
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      if e.errno != errno.ENOENT:
        print "error reading password file: ", str(e)
    return result
860
861
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Add 'filename' to 'zip_file' with a fixed timestamp and permissions.

  The source file is temporarily chmod'ed/utime'd so the archive entry
  gets the requested perms and a constant (2009-01-01) date, making the
  output repeatable; the file's original mode and times are restored
  afterwards.
  """
  import datetime

  # http://b/18015246
  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB. We can work around this by adjusting their
  # limit. Note that `zipfile.writestr()` will not work for strings larger than
  # 2GiB. The Python interpreter sometimes rejects strings that large (though
  # it isn't clear to me exactly what circumstances cause this).
  # `zipfile.write()` must be used directly to work around this.
  #
  # This mess can be avoided if we port to python3.
  saved_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  compress_type = (zip_file.compression if compress_type is None
                   else compress_type)
  arcname = filename if arcname is None else arcname

  original_stat = os.stat(filename)

  try:
    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
    # file to be zipped and reset it when we're done.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable.
    epoch = datetime.datetime.fromtimestamp(0)
    mtime = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
    os.utime(filename, (mtime, mtime))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    os.chmod(filename, original_stat.st_mode)
    os.utime(filename, (original_stat.st_atime, original_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_limit
900
901
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.
  """

  saved_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if isinstance(zinfo_or_arcname, zipfile.ZipInfo):
    zinfo = zinfo_or_arcname
  else:
    # Bare archive name: build a ZipInfo, inheriting the zip's
    # compression and defaulting to a regular file with mode 0644.
    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
    zinfo.compress_type = zip_file.compression
    if perms is None:
      perms = 0o100644

  # If compress_type is given, it overrides the value in zinfo.
  if compress_type is not None:
    zinfo.compress_type = compress_type

  # If perms is given, it has a priority.
  if perms is not None:
    # If perms doesn't set the file type, mark it as a regular file.
    if (perms & 0o770000) == 0:
      perms |= 0o100000
    zinfo.external_attr = perms << 16

  # Use a fixed timestamp so the output is repeatable.
  zinfo.date_time = (2009, 1, 1, 0, 0, 0)

  zip_file.writestr(zinfo, data)
  zipfile.ZIP64_LIMIT = saved_limit
942
943
944def ZipClose(zip_file):
945  # http://b/18015246
946  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
947  # central directory.
948  saved_zip64_limit = zipfile.ZIP64_LIMIT
949  zipfile.ZIP64_LIMIT = (1 << 32) - 1
950
951  zip_file.close()
952
953  zipfile.ZIP64_LIMIT = saved_zip64_limit
954
955
class DeviceSpecificParams(object):
  """Dispatcher for optional device-specific OTA extension hooks.

  Loads a Python module from OPTIONS.device_specific (either a directory
  containing a "releasetools" module, or a direct path to a .py file) and
  forwards the hook methods below to same-named functions in that module,
  if present.
  """
  # Class-level default; may be overridden per-instance via kwargs.
  module = None
  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.iteritems():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        # No device-specific module configured; all hooks become no-ops.
        return
      try:
        if os.path.isdir(path):
          # A directory: look for a "releasetools" module inside it.
          info = imp.find_module("releasetools", [path])
        else:
          # A file path: strip a ".py" suffix and search its directory.
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        print "loaded device-specific extensions from", path
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        # Best effort: a missing module just disables the hooks.
        print "unable to load device-specific module; assuming none"

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")
1037
class File(object):
  """An in-memory file: a name plus its full contents.

  Attributes:
    name: the (archive) name of the file.
    data: the raw contents.
    size: len(data).
    sha1: hex SHA-1 digest of data.
  """

  def __init__(self, name, data):
    self.name = name
    self.data = data
    self.size = len(data)
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Read 'diskname' from disk and return it as a File named 'name'."""
    # Use a context manager so the handle is closed even if read() raises.
    with open(diskname, "rb") as f:
      data = f.read()
    # Construct via cls (not File) so subclasses get instances of their
    # own type from this alternate constructor.
    return cls(name, data)

  def WriteToTemp(self):
    """Write the contents to a NamedTemporaryFile and return it (the
    caller is responsible for closing it, which deletes the file)."""
    t = tempfile.NamedTemporaryFile()
    t.write(self.data)
    t.flush()
    return t

  def AddToZip(self, z, compression=None):
    """Add this file to the open ZipFile 'z' via ZipWriteStr()."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1060
# Maps a target filename extension to the external diff program (command
# name, or [command, args...] list) used to generate its patch.  Extensions
# not listed here fall back to "bsdiff" (see Difference.ComputePatch).
DIFF_PROGRAM_BY_EXT = {
    ".gz" : "imgdiff",
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    ".img" : "imgdiff",
    }
1068
class Difference(object):
  """Computes a binary patch that transforms a source File into a target
  File, using an external diff program (imgdiff/bsdiff) chosen from
  DIFF_PROGRAM_BY_EXT unless one is given explicitly."""
  def __init__(self, tf, sf, diff_program=None):
    self.tf = tf            # target File
    self.sf = sf            # source File
    self.patch = None       # patch data; set by ComputePatch()
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      # Pick the diff tool based on the target's extension.
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    ext = os.path.splitext(tf.name)[1]

    try:
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []
      def run():
        # Drain the pipes on a helper thread so the main thread can
        # enforce a timeout via join().
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        print "WARNING: diff command timed out"
        p.terminate()
        th.join(5)
        if th.is_alive():
          # terminate() wasn't enough; force-kill the diff process.
          p.kill()
          th.join()

      if err or p.returncode != 0:
        # Any stderr output or nonzero exit is treated as failure;
        # GetPatch() will then report patch_data as None.
        print "WARNING: failure running %s:\n%s\n" % (
            diff_program, "".join(err))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch


  def GetPatch(self):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
1140
1141
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'.

  Runs OPTIONS.worker_threads threads in parallel; each thread pulls the
  next Difference from a shared iterator under a lock."""
  print len(diffs), "diffs to compute"

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    try:
      # The lock guards diff_iter and the progress printing; it is
      # released around the expensive ComputePatch() call so workers can
      # diff in parallel.
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print "patching failed!                                  %s" % (name,)
        else:
          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception as e:
      print e
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
1186
1187
class BlockDifference(object):
  """Computes a block-based update for one partition image and emits the
  corresponding edify-script fragments.

  The BlockImageDiff output files ({partition}.transfer.list, .new.dat,
  .patch.dat) are generated in a temp dir by the constructor; WriteScript()
  adds them to the output zip and appends the install commands."""
  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None):
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block

    # Due to http://b/20939131, check_first_block is disabled temporarily.
    assert not self.check_first_block

    if version is None:
      # Use the highest transfer-list version listed in the info dict
      # (defaults to 1 when unspecified).
      version = 1
      if OPTIONS.info_dict:
        version = max(
            int(i) for i in
            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version)
    # Registered in OPTIONS.tempfiles so the caller can clean it up later.
    tmpdir = tempfile.mkdtemp()
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)

    _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)

  def WriteScript(self, script, output_zip, progress=None):
    """Add the patch files to output_zip and emit install + post-install
    verification commands into 'script'."""
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    self._WritePostInstallVerifyScript(script)

  def WriteVerifyScript(self, script):
    """Emit the pre-install source-partition verification commands."""
    partition = self.partition
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))
    else:
      # clobbered_blocks are excluded: their contents aren't covered by
      # the source image's checksum.
      ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
      ranges_str = ranges.to_string_raw()
      if self.version >= 3:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, self.src.TotalSha1(),
                            self.device, partition, partition, partition))
      else:
        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                           self.device, ranges_str, self.src.TotalSha1()))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      # When generating incrementals for the system and vendor partitions,
      # explicitly check the first block (which contains the superblock) of
      # the partition to see if it's what we expect. If this check fails,
      # give an explicit log message about the partition having been
      # remounted R/W (the most likely explanation) and the need to flash to
      # get OTAs working again.
      if self.check_first_block:
        self._CheckFirstBlock(script)

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      script.AppendExtra(('abort("%s partition has unexpected contents");\n'
                          'endif;') % (partition,))

  def _WritePostInstallVerifyScript(self, script):
    """Emit commands that verify the freshly patched target partition."""
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                         self.device, ranges_str,
                         self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      script.AppendExtra(
          'else\n'
          '  abort("%s partition has unexpected non-zero contents after OTA '
          'update");\n'
          'endif;' % (partition,))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    script.AppendExtra(
        'else\n'
        '  abort("%s partition has unexpected contents after OTA update");\n'
        'endif;' % (partition,))

  def _WriteUpdate(self, script, output_zip):
    """Add the transfer list / data files to the zip and emit the
    block_image_update call."""
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    # The patch data is already compressed; store it uncompressed.
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
                device=self.device, partition=self.partition))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    """Return the hex SHA-1 over the given block ranges of 'source'."""
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    zero_block = '\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()

  # TODO(tbao): Due to http://b/20939131, block 0 may be changed without
  # remounting R/W. Will change the checking to a finer-grained way to
  # mask off those bits.
  def _CheckFirstBlock(self, script):
    # Block 0 holds the superblock; compare its hash against the source.
    r = rangelib.RangeSet((0, 1))
    srchash = self._HashBlocks(self.src, r)

    script.AppendExtra(('(range_sha1("%s", "%s") == "%s") || '
                        'abort("%s has been remounted R/W; '
                        'reflash device to reenable OTA updates");')
                       % (self.device, r.to_string_raw(), srchash,
                          self.device))
1344
# Re-export blockimgdiff's DataImage so callers can refer to it as
# common.DataImage.
DataImage = blockimgdiff.DataImage
1346
1347
# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "yaffs2": "MTD",
    "mtd": "MTD",
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}

def GetTypeAndDevice(mount_point, info):
  """Return the (partition_type, device) pair for a mount point.

  Args:
    mount_point: the mount point to look up, e.g. "/system".
    info: an info dict whose "fstab" entry maps mount points to objects
        with fs_type and device attributes.

  Raises:
    KeyError: if the fstab is missing/empty, the mount point is not in
        it, or its fs_type has no entry in PARTITION_TYPES.
  """
  fstab = info["fstab"]
  if not fstab:
    # Preserve the historical contract of raising KeyError (callers catch
    # it), but include a message to aid debugging.
    raise KeyError("fstab missing from info dict; cannot look up %r"
                   % (mount_point,))
  return (PARTITION_TYPES[fstab[mount_point].fs_type],
          fstab[mount_point].device)
1365
1366
def ParseCertificate(data):
  """Parse a PEM-format certificate and return its DER contents.

  Collects the base64 lines between the BEGIN/END CERTIFICATE markers of
  the first certificate in 'data' and decodes them.

  Args:
    data: a string containing at least one PEM-format certificate.

  Returns:
    The decoded (DER) certificate body.
  """
  # Local import to avoid touching the module's top-level import block.
  import base64
  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  # base64.b64decode replaces the Python-2-only str.decode('base64')
  # codec; it behaves identically on both Python 2 and 3.
  return base64.b64decode("".join(cert))
1380
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.  (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.)  Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"

  if full_recovery_image:
    # Ship the complete recovery image instead of a boot->recovery patch.
    output_sink("etc/recovery.img", recovery_img.data)

  else:
    diff_program = ["imgdiff"]
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      # Use the shared resource file as a bonus input to shrink the patch;
      # applypatch on the device gets the matching -b argument.
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"
    else:
      bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    # Without fstab entries for /boot and /recovery we can't emit the
    # install script; give up silently (the patch, if any, was written).
    return

  if full_recovery_image:
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release.  Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  found = False
  init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):
      continue

    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
      for line in f:
        # The flash_recovery service line names the script's on-device
        # path relative to /system.
        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
        if m:
          sh_location = m.group(1)
          found = True
          break

    if found:
      break

  print "putting script in", sh_location

  output_sink(sh_location, sh)
1477