common.py revision a6a3aa9398f6a27693521abd2601462a78a81c56
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import errno
17import getopt
18import getpass
19import imp
20import os
21import platform
22import re
23import shlex
24import shutil
25import subprocess
26import sys
27import tempfile
28import threading
29import time
30import zipfile
31
32import blockimgdiff
33import rangelib
34
35from hashlib import sha1 as sha1
36
37
38class Options(object):
39  def __init__(self):
40    platform_search_path = {
41        "linux2": "out/host/linux-x86",
42        "darwin": "out/host/darwin-x86",
43    }
44
45    self.search_path = platform_search_path.get(sys.platform, None)
46    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
47    self.extra_signapk_args = []
48    self.java_path = "java"  # Use the one on the path by default.
49    self.java_args = "-Xmx2048m" # JVM Args
50    self.public_key_suffix = ".x509.pem"
51    self.private_key_suffix = ".pk8"
52    # Use the boot_signer built by otatools by default.
53    self.boot_signer_path = "boot_signer"
54    self.boot_signer_args = []
55    self.verity_signer_path = None
56    self.verity_signer_args = []
57    self.verbose = False
58    self.tempfiles = []
59    self.device_specific = None
60    self.extras = {}
61    self.info_dict = None
62    self.source_info_dict = None
63    self.target_info_dict = None
64    self.worker_threads = None
65    # Stash size cannot exceed cache_size * threshold.
66    self.cache_size = None
67    self.stash_threshold = 0.8
68
69
70OPTIONS = Options()
71
72
73# Values for "certificate" in apkcerts that mean special things.
74SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
75
76
77class ExternalError(RuntimeError):
78  pass
79
80
81def Run(args, **kwargs):
82  """Create and return a subprocess.Popen object, printing the command
83  line on the terminal if -v was specified."""
84  if OPTIONS.verbose:
85    print "  running: ", " ".join(args)
86  return subprocess.Popen(args, **kwargs)
87
88
89def CloseInheritedPipes():
90  """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
91  before doing other work."""
92  if platform.system() != "Darwin":
93    return
94  for d in range(3, 1025):
95    try:
96      stat = os.fstat(d)
97      if stat is not None:
98        pipebit = stat[0] & 0x1000
99        if pipebit != 0:
100          os.close(d)
101    except OSError:
102      pass
103
104
105def LoadInfoDict(input_file, input_dir=None):
106  """Read and parse the META/misc_info.txt key/value pairs from the
107  input target files and return a dict."""
108
109  def read_helper(fn):
110    if isinstance(input_file, zipfile.ZipFile):
111      return input_file.read(fn)
112    else:
113      path = os.path.join(input_file, *fn.split("/"))
114      try:
115        with open(path) as f:
116          return f.read()
117      except IOError as e:
118        if e.errno == errno.ENOENT:
119          raise KeyError(fn)
120  d = {}
121  try:
122    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
123  except KeyError:
124    # ok if misc_info.txt doesn't exist
125    pass
126
127  # backwards compatibility: These values used to be in their own
128  # files.  Look for them, in case we're processing an old
129  # target_files zip.
130
131  if "mkyaffs2_extra_flags" not in d:
132    try:
133      d["mkyaffs2_extra_flags"] = read_helper(
134          "META/mkyaffs2-extra-flags.txt").strip()
135    except KeyError:
136      # ok if flags don't exist
137      pass
138
139  if "recovery_api_version" not in d:
140    try:
141      d["recovery_api_version"] = read_helper(
142          "META/recovery-api-version.txt").strip()
143    except KeyError:
144      raise ValueError("can't find recovery API version in input target-files")
145
146  if "tool_extensions" not in d:
147    try:
148      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
149    except KeyError:
150      # ok if extensions don't exist
151      pass
152
153  if "fstab_version" not in d:
154    d["fstab_version"] = "1"
155
156  # During building, we use the "file_contexts" in the out/ directory tree.
157  # It is no longer available when (re)generating from target_files zip. So
158  # when generating from target_files zip, we look for a copy under META/
159  # first; if that's not available, search under BOOT/RAMDISK/. Note that we may need
160  # a different file_contexts to build images than the one running on device,
161  # such as when enabling system_root_image. In that case, we must have the
162  # one for building copied to META/.
163  if input_dir is not None:
164    fc_config = os.path.join(input_dir, "META", "file_contexts")
165    if not os.path.exists(fc_config):
166      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", "file_contexts")
167      if not os.path.exists(fc_config):
168        fc_config = None
169
170    if fc_config:
171      d["selinux_fc"] = fc_config
172
173  try:
174    data = read_helper("META/imagesizes.txt")
175    for line in data.split("\n"):
176      if not line:
177        continue
178      name, value = line.split(" ", 1)
179      if not value:
180        continue
181      if name == "blocksize":
182        d[name] = value
183      else:
184        d[name + "_size"] = value
185  except KeyError:
186    pass
187
188  def makeint(key):
189    if key in d:
190      d[key] = int(d[key], 0)
191
192  makeint("recovery_api_version")
193  makeint("blocksize")
194  makeint("system_size")
195  makeint("vendor_size")
196  makeint("userdata_size")
197  makeint("cache_size")
198  makeint("recovery_size")
199  makeint("boot_size")
200  makeint("fstab_version")
201
202  d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
203                                 d.get("system_root_image") == "true")
204  d["build.prop"] = LoadBuildProp(read_helper)
205  return d
206
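# Illustrative sketch (not part of the original module): loading the
# misc_info dictionary from an unpacked target-files directory and dumping
# it. The directory path is hypothetical.
def _example_load_info_dict():
  input_dir = "/tmp/targetfiles-unpacked"  # hypothetical unpacked target-files
  info = LoadInfoDict(input_dir, input_dir)
  DumpInfoDict(info)
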
207def LoadBuildProp(read_helper):
208  try:
209    data = read_helper("SYSTEM/build.prop")
210  except KeyError:
211    print "Warning: could not find SYSTEM/build.prop in %s" % zip
212    data = ""
213  return LoadDictionaryFromLines(data.split("\n"))
214
215def LoadDictionaryFromLines(lines):
216  d = {}
217  for line in lines:
218    line = line.strip()
219    if not line or line.startswith("#"):
220      continue
221    if "=" in line:
222      name, value = line.split("=", 1)
223      d[name] = value
224  return d
225
226def LoadRecoveryFSTab(read_helper, fstab_version, system_root_image=False):
227  class Partition(object):
228    def __init__(self, mount_point, fs_type, device, length, device2, context):
229      self.mount_point = mount_point
230      self.fs_type = fs_type
231      self.device = device
232      self.length = length
233      self.device2 = device2
234      self.context = context
235
236  try:
237    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
238  except KeyError:
239    print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab"
240    data = ""
241
242  if fstab_version == 1:
243    d = {}
244    for line in data.split("\n"):
245      line = line.strip()
246      if not line or line.startswith("#"):
247        continue
248      pieces = line.split()
249      if not 3 <= len(pieces) <= 4:
250        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
251      options = None
252      if len(pieces) >= 4:
253        if pieces[3].startswith("/"):
254          device2 = pieces[3]
255          if len(pieces) >= 5:
256            options = pieces[4]
257        else:
258          device2 = None
259          options = pieces[3]
260      else:
261        device2 = None
262
263      mount_point = pieces[0]
264      length = 0
265      if options:
266        options = options.split(",")
267        for i in options:
268          if i.startswith("length="):
269            length = int(i[7:])
270          else:
271            print "%s: unknown option \"%s\"" % (mount_point, i)
272
273      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
274                                 device=pieces[2], length=length,
275                                 device2=device2, context=None)
276
277  elif fstab_version == 2:
278    d = {}
279    for line in data.split("\n"):
280      line = line.strip()
281      if not line or line.startswith("#"):
282        continue
283      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
284      pieces = line.split()
285      if len(pieces) != 5:
286        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
287
288      # Ignore entries that are managed by vold
289      options = pieces[4]
290      if "voldmanaged=" in options:
291        continue
292
293      # It's a good line, parse it
294      length = 0
295      options = options.split(",")
296      for i in options:
297        if i.startswith("length="):
298          length = int(i[7:])
299        else:
300          # Ignore all unknown options in the unified fstab
301          continue
302
303      mount_flags = pieces[3]
304      # Honor the SELinux context if present.
305      context = None
306      for i in mount_flags.split(","):
307        if i.startswith("context="):
308          context = i
309
310      mount_point = pieces[1]
311      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
312                                 device=pieces[0], length=length,
313                                 device2=None, context=context)
314
315  else:
316    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
317
318  # / is used for the system mount point when the root directory is included in
319  # system. Other areas assume system is always at "/system" so point /system
320  # at /.
321  if system_root_image:
322    assert "/system" not in d and "/" in d
323    d["/system"] = d["/"]
324  return d
325
326
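# Illustrative sketch (not part of the original module): parsing a single
# fstab v2 line through LoadRecoveryFSTab by way of a stub read_helper. The
# device path is hypothetical.
def _example_parse_fstab_v2():
  def read_helper(fn):
    assert fn == "RECOVERY/RAMDISK/etc/recovery.fstab"
    return "/dev/block/by-name/system /system ext4 ro,barrier=1 wait\n"
  fstab = LoadRecoveryFSTab(read_helper, 2)
  p = fstab["/system"]
  print p.fs_type, p.device   # -> ext4 /dev/block/by-name/system
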
327def DumpInfoDict(d):
328  for k, v in sorted(d.items()):
329    print "%-25s = (%s) %s" % (k, type(v).__name__, v)
330
331
332def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
333                        has_ramdisk=False):
334  """Build a bootable image from the specified sourcedir.
335
336  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
337  'sourcedir'), and turn them into a boot image.  Return the image data, or
338  None if sourcedir does not appear to contain files for building the
339  requested image."""
340
341  def make_ramdisk():
342    ramdisk_img = tempfile.NamedTemporaryFile()
343
344    if os.access(fs_config_file, os.F_OK):
345      cmd = ["mkbootfs", "-f", fs_config_file,
346             os.path.join(sourcedir, "RAMDISK")]
347    else:
348      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
349    p1 = Run(cmd, stdout=subprocess.PIPE)
350    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
351
352    p2.wait()
353    p1.wait()
354    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
355    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)
356
357    return ramdisk_img
358
359  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
360    return None
361
362  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
363    return None
364
365  if info_dict is None:
366    info_dict = OPTIONS.info_dict
367
368  img = tempfile.NamedTemporaryFile()
369
370  if has_ramdisk:
371    ramdisk_img = make_ramdisk()
372
373  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
374  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
375
376  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]
377
378  fn = os.path.join(sourcedir, "second")
379  if os.access(fn, os.F_OK):
380    cmd.append("--second")
381    cmd.append(fn)
382
383  fn = os.path.join(sourcedir, "cmdline")
384  if os.access(fn, os.F_OK):
385    cmd.append("--cmdline")
386    cmd.append(open(fn).read().rstrip("\n"))
387
388  fn = os.path.join(sourcedir, "base")
389  if os.access(fn, os.F_OK):
390    cmd.append("--base")
391    cmd.append(open(fn).read().rstrip("\n"))
392
393  fn = os.path.join(sourcedir, "pagesize")
394  if os.access(fn, os.F_OK):
395    cmd.append("--pagesize")
396    cmd.append(open(fn).read().rstrip("\n"))
397
398  args = info_dict.get("mkbootimg_args", None)
399  if args and args.strip():
400    cmd.extend(shlex.split(args))
401
402  if has_ramdisk:
403    cmd.extend(["--ramdisk", ramdisk_img.name])
404
405  img_unsigned = None
406  if info_dict.get("vboot", None):
407    img_unsigned = tempfile.NamedTemporaryFile()
408    cmd.extend(["--output", img_unsigned.name])
409  else:
410    cmd.extend(["--output", img.name])
411
412  p = Run(cmd, stdout=subprocess.PIPE)
413  p.communicate()
414  assert p.returncode == 0, "mkbootimg of %s image failed" % (
415      os.path.basename(sourcedir),)
416
417  if (info_dict.get("boot_signer", None) == "true" and
418      info_dict.get("verity_key", None)):
419    path = "/" + os.path.basename(sourcedir).lower()
420    cmd = [OPTIONS.boot_signer_path]
421    cmd.extend(OPTIONS.boot_signer_args)
422    cmd.extend([path, img.name,
423                info_dict["verity_key"] + ".pk8",
424                info_dict["verity_key"] + ".x509.pem", img.name])
425    p = Run(cmd, stdout=subprocess.PIPE)
426    p.communicate()
427    assert p.returncode == 0, "boot_signer of %s image failed" % path
428
429  # Sign the image if vboot is non-empty.
430  elif info_dict.get("vboot", None):
431    path = "/" + os.path.basename(sourcedir).lower()
432    img_keyblock = tempfile.NamedTemporaryFile()
433    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
434           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
435           info_dict["vboot_key"] + ".vbprivk",
436           info_dict["vboot_subkey"] + ".vbprivk",
437           img_keyblock.name,
438           img.name]
439    p = Run(cmd, stdout=subprocess.PIPE)
440    p.communicate()
441    assert p.returncode == 0, "vboot_signer of %s image failed" % path
442
443    # Clean up the temp files.
444    img_unsigned.close()
445    img_keyblock.close()
446
447  img.seek(0, os.SEEK_SET)
448  data = img.read()
449
450  if has_ramdisk:
451    ramdisk_img.close()
452  img.close()
453
454  return data
455
456
457def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
458                     info_dict=None):
459  """Return a File object with the desired bootable image.
460
461  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
462  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
463  the source files in 'unpack_dir'/'tree_subdir'."""
464
465  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
466  if os.path.exists(prebuilt_path):
467    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
468    return File.FromLocalFile(name, prebuilt_path)
469
470  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
471  if os.path.exists(prebuilt_path):
472    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
473    return File.FromLocalFile(name, prebuilt_path)
474
475  print "building image from target_files %s..." % (tree_subdir,)
476
477  if info_dict is None:
478    info_dict = OPTIONS.info_dict
479
480  # With system_root_image == "true", we don't pack ramdisk into the boot image.
481  has_ramdisk = (info_dict.get("system_root_image", None) != "true" or
482                 prebuilt_name != "boot.img")
483
484  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
485  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
486                             os.path.join(unpack_dir, fs_config),
487                             info_dict, has_ramdisk)
488  if data:
489    return File(name, data)
490  return None
491
492
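# Illustrative sketch (not part of the original module): building (or reusing
# a prebuilt) boot image from an unpacked target-files tree. The directory is
# hypothetical, and OPTIONS.info_dict is assumed to be populated already
# (e.g. via LoadInfoDict).
def _example_get_boot_image():
  unpack_dir = "/tmp/targetfiles-unpacked"  # hypothetical
  boot_img = GetBootableImage("boot.img", "boot.img", unpack_dir, "BOOT")
  if boot_img:
    print "boot image: %d bytes, sha1 %s" % (boot_img.size, boot_img.sha1)
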
493def UnzipTemp(filename, pattern=None):
494  """Unzip the given archive into a temporary directory and return the name.
495
496  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
497  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
498
499  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
500  main file), open for reading.
501  """
502
503  tmp = tempfile.mkdtemp(prefix="targetfiles-")
504  OPTIONS.tempfiles.append(tmp)
505
506  def unzip_to_dir(filename, dirname):
507    cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
508    if pattern is not None:
509      cmd.append(pattern)
510    p = Run(cmd, stdout=subprocess.PIPE)
511    p.communicate()
512    if p.returncode != 0:
513      raise ExternalError("failed to unzip input target-files \"%s\"" %
514                          (filename,))
515
516  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
517  if m:
518    unzip_to_dir(m.group(1), tmp)
519    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
520    filename = m.group(1)
521  else:
522    unzip_to_dir(filename, tmp)
523
524  return tmp, zipfile.ZipFile(filename, "r")
525
526
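# Illustrative sketch (not part of the original module): unpacking a
# target-files zip and reading an entry from it. The zip path is hypothetical.
def _example_unzip_temp():
  tmp, input_zip = UnzipTemp("/tmp/target_files.zip")  # hypothetical path
  print "unpacked to", tmp
  print input_zip.read("META/misc_info.txt")
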
527def GetKeyPasswords(keylist):
528  """Given a list of keys, prompt the user to enter passwords for
529  those which require them.  Return a {key: password} dict.  password
530  will be None if the key has no password."""
531
532  no_passwords = []
533  need_passwords = []
534  key_passwords = {}
535  devnull = open("/dev/null", "w+b")
536  for k in sorted(keylist):
537    # We don't need a password for things that aren't really keys.
538    if k in SPECIAL_CERT_STRINGS:
539      no_passwords.append(k)
540      continue
541
542    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
543             "-inform", "DER", "-nocrypt"],
544            stdin=devnull.fileno(),
545            stdout=devnull.fileno(),
546            stderr=subprocess.STDOUT)
547    p.communicate()
548    if p.returncode == 0:
549      # Definitely an unencrypted key.
550      no_passwords.append(k)
551    else:
552      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
553               "-inform", "DER", "-passin", "pass:"],
554              stdin=devnull.fileno(),
555              stdout=devnull.fileno(),
556              stderr=subprocess.PIPE)
557      _, stderr = p.communicate()
558      if p.returncode == 0:
559        # Encrypted key with empty string as password.
560        key_passwords[k] = ''
561      elif stderr.startswith('Error decrypting key'):
562        # Definitely encrypted key.
563        # It would have said "Error reading key" if it didn't parse correctly.
564        need_passwords.append(k)
565      else:
566        # Potentially, a type of key that openssl doesn't understand.
567        # We'll let the routines in signapk.jar handle it.
568        no_passwords.append(k)
569  devnull.close()
570
571  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
572  key_passwords.update(dict.fromkeys(no_passwords, None))
573  return key_passwords
574
575
576def SignFile(input_name, output_name, key, password, align=None,
577             whole_file=False):
578  """Sign the input_name zip/jar/apk, producing output_name.  Use the
579  given key and password (the latter may be None if the key does not
580  have a password).
581
582  If align is an integer > 1, zipalign is run to align stored files in
583  the output zip on 'align'-byte boundaries.
584
585  If whole_file is true, use the "-w" option to SignApk to embed a
586  signature that covers the whole file in the archive comment of the
587  zip file.
588  """
589
590  if align == 0 or align == 1:
591    align = None
592
593  if align:
594    temp = tempfile.NamedTemporaryFile()
595    sign_name = temp.name
596  else:
597    sign_name = output_name
598
599  cmd = [OPTIONS.java_path, OPTIONS.java_args, "-jar",
600         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
601  cmd.extend(OPTIONS.extra_signapk_args)
602  if whole_file:
603    cmd.append("-w")
604  cmd.extend([key + OPTIONS.public_key_suffix,
605              key + OPTIONS.private_key_suffix,
606              input_name, sign_name])
607
608  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
609  if password is not None:
610    password += "\n"
611  p.communicate(password)
612  if p.returncode != 0:
613    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
614
615  if align:
616    p = Run(["zipalign", "-f", "-p", str(align), sign_name, output_name])
617    p.communicate()
618    if p.returncode != 0:
619      raise ExternalError("zipalign failed: return code %s" % (p.returncode,))
620    temp.close()
621
622
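# Illustrative sketch (not part of the original module): whole-file signing
# of a package. The key base name and file names are hypothetical; SignFile
# appends OPTIONS.public_key_suffix / OPTIONS.private_key_suffix itself.
def _example_sign_file():
  key = "build/target/product/security/testkey"  # hypothetical key base name
  passwords = GetKeyPasswords([key])
  SignFile("unsigned.zip", "signed.zip", key, passwords[key], whole_file=True)
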
623def CheckSize(data, target, info_dict):
624  """Check the data string passed against the max size limit, if
625  any, for the given target.  Raise exception if the data is too big.
626  Print a warning if the data is nearing the maximum size."""
627
628  if target.endswith(".img"):
629    target = target[:-4]
630  mount_point = "/" + target
631
632  fs_type = None
633  limit = None
634  if info_dict["fstab"]:
635    if mount_point == "/userdata":
636      mount_point = "/data"
637    p = info_dict["fstab"][mount_point]
638    fs_type = p.fs_type
639    device = p.device
640    if "/" in device:
641      device = device[device.rfind("/")+1:]
642    limit = info_dict.get(device + "_size", None)
643  if not fs_type or not limit:
644    return
645
646  if fs_type == "yaffs2":
647    # The size limit is scaled by (2048+64)/2048 to account for the yaffs2
648    # spare area (64 bytes per 2k page).
649    limit = limit / 2048 * (2048+64)
650  size = len(data)
651  pct = float(size) * 100.0 / limit
652  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
653  if pct >= 99.0:
654    raise ExternalError(msg)
655  elif pct >= 95.0:
656    print
657    print "  WARNING: ", msg
658    print
659  elif OPTIONS.verbose:
660    print "  ", msg
661
662
663def ReadApkCerts(tf_zip):
664  """Given a target_files ZipFile, parse the META/apkcerts.txt file
665  and return a {package: cert} dict."""
666  certmap = {}
667  for line in tf_zip.read("META/apkcerts.txt").split("\n"):
668    line = line.strip()
669    if not line:
670      continue
671    m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
672                 r'private_key="(.*)"$', line)
673    if m:
674      name, cert, privkey = m.groups()
675      public_key_suffix_len = len(OPTIONS.public_key_suffix)
676      private_key_suffix_len = len(OPTIONS.private_key_suffix)
677      if cert in SPECIAL_CERT_STRINGS and not privkey:
678        certmap[name] = cert
679      elif (cert.endswith(OPTIONS.public_key_suffix) and
680            privkey.endswith(OPTIONS.private_key_suffix) and
681            cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
682        certmap[name] = cert[:-public_key_suffix_len]
683      else:
684        raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
685  return certmap
686
687
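# Illustrative sketch (not part of the original module): each line of
# META/apkcerts.txt looks like
#   name="Foo.apk" certificate="security/platform.x509.pem" private_key="security/platform.pk8"
# and ReadApkCerts maps the package name to the certificate path with its
# .x509.pem suffix stripped. The zip path is hypothetical.
def _example_read_apk_certs():
  tf_zip = zipfile.ZipFile("/tmp/target_files.zip", "r")  # hypothetical path
  certmap = ReadApkCerts(tf_zip)
  for pkg, cert in sorted(certmap.items()):
    print pkg, "->", cert
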
688COMMON_DOCSTRING = """
689  -p  (--path)  <dir>
690      Prepend <dir>/bin to the list of places to search for binaries
691      run by this script, and expect to find jars in <dir>/framework.
692
693  -s  (--device_specific) <file>
694      Path to the python module containing device-specific
695      releasetools code.
696
697  -x  (--extra)  <key=value>
698      Add a key/value pair to the 'extras' dict, which device-specific
699      extension code may look at.
700
701  -v  (--verbose)
702      Show command lines being executed.
703
704  -h  (--help)
705      Display this usage message and exit.
706"""
707
708def Usage(docstring):
709  print docstring.rstrip("\n")
710  print COMMON_DOCSTRING
711
712
713def ParseOptions(argv,
714                 docstring,
715                 extra_opts="", extra_long_opts=(),
716                 extra_option_handler=None):
717  """Parse the options in argv and return any arguments that aren't
718  flags.  docstring is the calling module's docstring, to be displayed
719  for errors and -h.  extra_opts and extra_long_opts are for flags
720  defined by the caller, which are processed by passing them to
721  extra_option_handler."""
722
723  try:
724    opts, args = getopt.getopt(
725        argv, "hvp:s:x:" + extra_opts,
726        ["help", "verbose", "path=", "signapk_path=", "extra_signapk_args=",
727         "java_path=", "java_args=", "public_key_suffix=",
728         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
729         "verity_signer_path=", "verity_signer_args=", "device_specific=",
730         "extra="] +
731        list(extra_long_opts))
732  except getopt.GetoptError as err:
733    Usage(docstring)
734    print "**", str(err), "**"
735    sys.exit(2)
736
737  for o, a in opts:
738    if o in ("-h", "--help"):
739      Usage(docstring)
740      sys.exit()
741    elif o in ("-v", "--verbose"):
742      OPTIONS.verbose = True
743    elif o in ("-p", "--path"):
744      OPTIONS.search_path = a
745    elif o in ("--signapk_path",):
746      OPTIONS.signapk_path = a
747    elif o in ("--extra_signapk_args",):
748      OPTIONS.extra_signapk_args = shlex.split(a)
749    elif o in ("--java_path",):
750      OPTIONS.java_path = a
751    elif o in ("--java_args",):
752      OPTIONS.java_args = a
753    elif o in ("--public_key_suffix",):
754      OPTIONS.public_key_suffix = a
755    elif o in ("--private_key_suffix",):
756      OPTIONS.private_key_suffix = a
757    elif o in ("--boot_signer_path",):
758      OPTIONS.boot_signer_path = a
759    elif o in ("--boot_signer_args",):
760      OPTIONS.boot_signer_args = shlex.split(a)
761    elif o in ("--verity_signer_path",):
762      OPTIONS.verity_signer_path = a
763    elif o in ("--verity_signer_args",):
764      OPTIONS.verity_signer_args = shlex.split(a)
765    elif o in ("-s", "--device_specific"):
766      OPTIONS.device_specific = a
767    elif o in ("-x", "--extra"):
768      key, value = a.split("=", 1)
769      OPTIONS.extras[key] = value
770    else:
771      if extra_option_handler is None or not extra_option_handler(o, a):
772        assert False, "unknown option \"%s\"" % (o,)
773
774  if OPTIONS.search_path:
775    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
776                          os.pathsep + os.environ["PATH"])
777
778  return args
779
780
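# Illustrative sketch (not part of the original module): how a calling script
# typically wires its own flags into ParseOptions. The flag names are
# hypothetical.
def _example_parse_options(argv, docstring):
  def option_handler(o, a):
    if o in ("-b", "--board_config"):
      pass  # handle the extra flag's value 'a' here
    else:
      return False
    return True
  return ParseOptions(argv, docstring, extra_opts="b:",
                      extra_long_opts=["board_config="],
                      extra_option_handler=option_handler)
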
781def MakeTempFile(prefix=None, suffix=None):
782  """Make a temp file and add it to the list of things to be deleted
783  when Cleanup() is called.  Return the filename."""
784  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
785  os.close(fd)
786  OPTIONS.tempfiles.append(fn)
787  return fn
788
789
790def Cleanup():
791  for i in OPTIONS.tempfiles:
792    if os.path.isdir(i):
793      shutil.rmtree(i)
794    else:
795      os.remove(i)
796
797
798class PasswordManager(object):
799  def __init__(self):
800    self.editor = os.getenv("EDITOR", None)
801    self.pwfile = os.getenv("ANDROID_PW_FILE", None)
802
803  def GetPasswords(self, items):
804    """Get passwords corresponding to each string in 'items',
805    returning a dict.  (The dict may have keys in addition to the
806    values in 'items'.)
807
808    Uses the passwords in $ANDROID_PW_FILE if available, letting the
809    user edit that file to add more needed passwords.  If no editor is
810    available, or $ANDROID_PW_FILE isn't defined, prompts the user
811    interactively in the ordinary way.
812    """
813
814    current = self.ReadFile()
815
816    first = True
817    while True:
818      missing = []
819      for i in items:
820        if i not in current or not current[i]:
821          missing.append(i)
822      # Are all the passwords already in the file?
823      if not missing:
824        return current
825
826      for i in missing:
827        current[i] = ""
828
829      if not first:
830        print "key file %s still missing some passwords." % (self.pwfile,)
831        answer = raw_input("try to edit again? [y]> ").strip()
832        if answer and answer[0] not in 'yY':
833          raise RuntimeError("key passwords unavailable")
834      first = False
835
836      current = self.UpdateAndReadFile(current)
837
838  def PromptResult(self, current): # pylint: disable=no-self-use
839    """Prompt the user to enter a value (password) for each key in
840    'current' whose value is false.  Returns a new dict with all the
841    values.
842    """
843    result = {}
844    for k, v in sorted(current.iteritems()):
845      if v:
846        result[k] = v
847      else:
848        while True:
849          result[k] = getpass.getpass(
850              "Enter password for %s key> " % k).strip()
851          if result[k]:
852            break
853    return result
854
855  def UpdateAndReadFile(self, current):
856    if not self.editor or not self.pwfile:
857      return self.PromptResult(current)
858
859    f = open(self.pwfile, "w")
860    os.chmod(self.pwfile, 0o600)
861    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
862    f.write("# (Additional spaces are harmless.)\n\n")
863
864    first_line = None
865    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
866    for i, (_, k, v) in enumerate(sorted_list):
867      f.write("[[[  %s  ]]] %s\n" % (v, k))
868      if not v and first_line is None:
869        # position cursor on first line with no password.
870        first_line = i + 4
871    f.close()
872
873    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
874    _, _ = p.communicate()
875
876    return self.ReadFile()
877
878  def ReadFile(self):
879    result = {}
880    if self.pwfile is None:
881      return result
882    try:
883      f = open(self.pwfile, "r")
884      for line in f:
885        line = line.strip()
886        if not line or line[0] == '#':
887          continue
888        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
889        if not m:
890          print "failed to parse password file: ", line
891        else:
892          result[m.group(2)] = m.group(1)
893      f.close()
894    except IOError as e:
895      if e.errno != errno.ENOENT:
896        print "error reading password file: ", str(e)
897    return result
898
899
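# Illustrative note (not part of the original module): when $EDITOR and
# $ANDROID_PW_FILE are both set, PasswordManager stores each password on a
# line of the form
#
#   [[[  secret-password  ]]] build/target/product/security/releasekey
#
# i.e. the password between the brackets followed by the key name, matching
# the regex in ReadFile(). The key name above is hypothetical.
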
900def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
901             compress_type=None):
902  import datetime
903
904  # http://b/18015246
905  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
906  # for files larger than 2GiB. We can work around this by adjusting their
907  # limit. Note that `zipfile.writestr()` will not work for strings larger than
908  # 2GiB. The Python interpreter sometimes rejects strings that large (though
909  # it isn't clear to me exactly what circumstances cause this).
910  # `zipfile.write()` must be used directly to work around this.
911  #
912  # This mess can be avoided if we port to python3.
913  saved_zip64_limit = zipfile.ZIP64_LIMIT
914  zipfile.ZIP64_LIMIT = (1 << 32) - 1
915
916  if compress_type is None:
917    compress_type = zip_file.compression
918  if arcname is None:
919    arcname = filename
920
921  saved_stat = os.stat(filename)
922
923  try:
924    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
925    # file to be zipped and reset it when we're done.
926    os.chmod(filename, perms)
927
928    # Use a fixed timestamp so the output is repeatable.
929    epoch = datetime.datetime.fromtimestamp(0)
930    timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
931    os.utime(filename, (timestamp, timestamp))
932
933    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
934  finally:
935    os.chmod(filename, saved_stat.st_mode)
936    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
937    zipfile.ZIP64_LIMIT = saved_zip64_limit
938
939
940def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
941                compress_type=None):
942  """Wrap zipfile.writestr() function to work around the zip64 limit.
943
944  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
945  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
946  when calling crc32(bytes).
947
948  But it still works fine to write a shorter string into a large zip file.
949  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
950  when we know the string won't be too long.
951  """
952
953  saved_zip64_limit = zipfile.ZIP64_LIMIT
954  zipfile.ZIP64_LIMIT = (1 << 32) - 1
955
956  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
957    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
958    zinfo.compress_type = zip_file.compression
959    if perms is None:
960      perms = 0o644
961  else:
962    zinfo = zinfo_or_arcname
963
964  # If compress_type is given, it overrides the value in zinfo.
965  if compress_type is not None:
966    zinfo.compress_type = compress_type
967
968  # If perms is given, it has a priority.
969  if perms is not None:
970    zinfo.external_attr = perms << 16
971
972  # Use a fixed timestamp so the output is repeatable.
973  zinfo.date_time = (2009, 1, 1, 0, 0, 0)
974
975  zip_file.writestr(zinfo, data)
976  zipfile.ZIP64_LIMIT = saved_zip64_limit
977
978
979def ZipClose(zip_file):
980  # http://b/18015246
981  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
982  # central directory.
983  saved_zip64_limit = zipfile.ZIP64_LIMIT
984  zipfile.ZIP64_LIMIT = (1 << 32) - 1
985
986  zip_file.close()
987
988  zipfile.ZIP64_LIMIT = saved_zip64_limit
989
990
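# Illustrative sketch (not part of the original module): writing deterministic
# entries with the ZIP64-workaround helpers above. The file names are
# hypothetical.
def _example_zip_helpers():
  output_zip = zipfile.ZipFile("/tmp/out.zip", "w",
                               compression=zipfile.ZIP_DEFLATED)
  ZipWrite(output_zip, "/tmp/system.new.dat", arcname="system.new.dat")
  ZipWriteStr(output_zip, "META-INF/note.txt", "hello\n")
  ZipClose(output_zip)  # use ZipClose() so the ZIP64 workaround covers close()
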
991class DeviceSpecificParams(object):
992  module = None
993  def __init__(self, **kwargs):
994    """Keyword arguments to the constructor become attributes of this
995    object, which is passed to all functions in the device-specific
996    module."""
997    for k, v in kwargs.iteritems():
998      setattr(self, k, v)
999    self.extras = OPTIONS.extras
1000
1001    if self.module is None:
1002      path = OPTIONS.device_specific
1003      if not path:
1004        return
1005      try:
1006        if os.path.isdir(path):
1007          info = imp.find_module("releasetools", [path])
1008        else:
1009          d, f = os.path.split(path)
1010          b, x = os.path.splitext(f)
1011          if x == ".py":
1012            f = b
1013          info = imp.find_module(f, [d])
1014        print "loaded device-specific extensions from", path
1015        self.module = imp.load_module("device_specific", *info)
1016      except ImportError:
1017        print "unable to load device-specific module; assuming none"
1018
1019  def _DoCall(self, function_name, *args, **kwargs):
1020    """Call the named function in the device-specific module, passing
1021    the given args and kwargs.  The first argument to the call will be
1022    the DeviceSpecific object itself.  If there is no module, or the
1023    module does not define the function, return the value of the
1024    'default' kwarg (which itself defaults to None)."""
1025    if self.module is None or not hasattr(self.module, function_name):
1026      return kwargs.get("default", None)
1027    return getattr(self.module, function_name)(*((self,) + args), **kwargs)
1028
1029  def FullOTA_Assertions(self):
1030    """Called after emitting the block of assertions at the top of a
1031    full OTA package.  Implementations can add whatever additional
1032    assertions they like."""
1033    return self._DoCall("FullOTA_Assertions")
1034
1035  def FullOTA_InstallBegin(self):
1036    """Called at the start of full OTA installation."""
1037    return self._DoCall("FullOTA_InstallBegin")
1038
1039  def FullOTA_InstallEnd(self):
1040    """Called at the end of full OTA installation; typically this is
1041    used to install the image for the device's baseband processor."""
1042    return self._DoCall("FullOTA_InstallEnd")
1043
1044  def IncrementalOTA_Assertions(self):
1045    """Called after emitting the block of assertions at the top of an
1046    incremental OTA package.  Implementations can add whatever
1047    additional assertions they like."""
1048    return self._DoCall("IncrementalOTA_Assertions")
1049
1050  def IncrementalOTA_VerifyBegin(self):
1051    """Called at the start of the verification phase of incremental
1052    OTA installation; additional checks can be placed here to abort
1053    the script before any changes are made."""
1054    return self._DoCall("IncrementalOTA_VerifyBegin")
1055
1056  def IncrementalOTA_VerifyEnd(self):
1057    """Called at the end of the verification phase of incremental OTA
1058    installation; additional checks can be placed here to abort the
1059    script before any changes are made."""
1060    return self._DoCall("IncrementalOTA_VerifyEnd")
1061
1062  def IncrementalOTA_InstallBegin(self):
1063    """Called at the start of incremental OTA installation (after
1064    verification is complete)."""
1065    return self._DoCall("IncrementalOTA_InstallBegin")
1066
1067  def IncrementalOTA_InstallEnd(self):
1068    """Called at the end of incremental OTA installation; typically
1069    this is used to install the image for the device's baseband
1070    processor."""
1071    return self._DoCall("IncrementalOTA_InstallEnd")
1072
1073class File(object):
1074  def __init__(self, name, data):
1075    self.name = name
1076    self.data = data
1077    self.size = len(data)
1078    self.sha1 = sha1(data).hexdigest()
1079
1080  @classmethod
1081  def FromLocalFile(cls, name, diskname):
1082    f = open(diskname, "rb")
1083    data = f.read()
1084    f.close()
1085    return File(name, data)
1086
1087  def WriteToTemp(self):
1088    t = tempfile.NamedTemporaryFile()
1089    t.write(self.data)
1090    t.flush()
1091    return t
1092
1093  def AddToZip(self, z, compression=None):
1094    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1095
1096DIFF_PROGRAM_BY_EXT = {
1097    ".gz" : "imgdiff",
1098    ".zip" : ["imgdiff", "-z"],
1099    ".jar" : ["imgdiff", "-z"],
1100    ".apk" : ["imgdiff", "-z"],
1101    ".img" : "imgdiff",
1102    }
1103
1104class Difference(object):
1105  def __init__(self, tf, sf, diff_program=None):
1106    self.tf = tf
1107    self.sf = sf
1108    self.patch = None
1109    self.diff_program = diff_program
1110
1111  def ComputePatch(self):
1112    """Compute the patch (as a string of data) needed to turn sf into
1113    tf.  Returns the same tuple as GetPatch()."""
1114
1115    tf = self.tf
1116    sf = self.sf
1117
1118    if self.diff_program:
1119      diff_program = self.diff_program
1120    else:
1121      ext = os.path.splitext(tf.name)[1]
1122      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
1123
1124    ttemp = tf.WriteToTemp()
1125    stemp = sf.WriteToTemp()
1126
1127    ext = os.path.splitext(tf.name)[1]
1128
1129    try:
1130      ptemp = tempfile.NamedTemporaryFile()
1131      if isinstance(diff_program, list):
1132        cmd = copy.copy(diff_program)
1133      else:
1134        cmd = [diff_program]
1135      cmd.append(stemp.name)
1136      cmd.append(ttemp.name)
1137      cmd.append(ptemp.name)
1138      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1139      err = []
1140      def run():
1141        _, e = p.communicate()
1142        if e:
1143          err.append(e)
1144      th = threading.Thread(target=run)
1145      th.start()
1146      th.join(timeout=300)   # 5 mins
1147      if th.is_alive():
1148        print "WARNING: diff command timed out"
1149        p.terminate()
1150        th.join(5)
1151        if th.is_alive():
1152          p.kill()
1153          th.join()
1154
1155      if err or p.returncode != 0:
1156        print "WARNING: failure running %s:\n%s\n" % (
1157            diff_program, "".join(err))
1158        self.patch = None
1159        return None, None, None
1160      diff = ptemp.read()
1161    finally:
1162      ptemp.close()
1163      stemp.close()
1164      ttemp.close()
1165
1166    self.patch = diff
1167    return self.tf, self.sf, self.patch
1168
1169
1170  def GetPatch(self):
1171    """Return a tuple (target_file, source_file, patch_data).
1172    patch_data may be None if ComputePatch hasn't been called, or if
1173    computing the patch failed."""
1174    return self.tf, self.sf, self.patch
1175
1176
1177def ComputeDifferences(diffs):
1178  """Call ComputePatch on all the Difference objects in 'diffs'."""
1179  print len(diffs), "diffs to compute"
1180
1181  # Do the largest files first, to try and reduce the long-pole effect.
1182  by_size = [(i.tf.size, i) for i in diffs]
1183  by_size.sort(reverse=True)
1184  by_size = [i[1] for i in by_size]
1185
1186  lock = threading.Lock()
1187  diff_iter = iter(by_size)   # accessed under lock
1188
1189  def worker():
1190    try:
1191      lock.acquire()
1192      for d in diff_iter:
1193        lock.release()
1194        start = time.time()
1195        d.ComputePatch()
1196        dur = time.time() - start
1197        lock.acquire()
1198
1199        tf, sf, patch = d.GetPatch()
1200        if sf.name == tf.name:
1201          name = tf.name
1202        else:
1203          name = "%s (%s)" % (tf.name, sf.name)
1204        if patch is None:
1205          print "patching failed!                                  %s" % (name,)
1206        else:
1207          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
1208              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
1209      lock.release()
1210    except Exception as e:
1211      print e
1212      raise
1213
1214  # start worker threads; wait for them all to finish.
1215  threads = [threading.Thread(target=worker)
1216             for i in range(OPTIONS.worker_threads)]
1217  for th in threads:
1218    th.start()
1219  while threads:
1220    threads.pop().join()
1221
1222
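# Illustrative sketch (not part of the original module): diffing two File
# objects and computing the patches on worker threads. The file contents are
# hypothetical, and OPTIONS.worker_threads must be set by the caller.
def _example_compute_differences():
  OPTIONS.worker_threads = 4
  tf = File("app.apk", open("/tmp/new/app.apk", "rb").read())  # hypothetical
  sf = File("app.apk", open("/tmp/old/app.apk", "rb").read())  # hypothetical
  d = Difference(tf, sf)
  ComputeDifferences([d])
  _, _, patch = d.GetPatch()
  if patch is not None:
    print "patch is %d bytes" % (len(patch),)
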
1223class BlockDifference(object):
1224  def __init__(self, partition, tgt, src=None, check_first_block=False,
1225               version=None):
1226    self.tgt = tgt
1227    self.src = src
1228    self.partition = partition
1229    self.check_first_block = check_first_block
1230
1231    # Due to http://b/20939131, check_first_block is disabled temporarily.
1232    assert not self.check_first_block
1233
1234    if version is None:
1235      version = 1
1236      if OPTIONS.info_dict:
1237        version = max(
1238            int(i) for i in
1239            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
1240    self.version = version
1241
1242    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
1243                                    version=self.version)
1244    tmpdir = tempfile.mkdtemp()
1245    OPTIONS.tempfiles.append(tmpdir)
1246    self.path = os.path.join(tmpdir, partition)
1247    b.Compute(self.path)
1248
1249    if src is None:
1250      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
1251    else:
1252      _, self.device = GetTypeAndDevice("/" + partition,
1253                                        OPTIONS.source_info_dict)
1254
1255  def WriteScript(self, script, output_zip, progress=None):
1256    if not self.src:
1257      # write the output unconditionally
1258      script.Print("Patching %s image unconditionally..." % (self.partition,))
1259    else:
1260      script.Print("Patching %s image after verification." % (self.partition,))
1261
1262    if progress:
1263      script.ShowProgress(progress, 0)
1264    self._WriteUpdate(script, output_zip)
1265    self._WritePostInstallVerifyScript(script)
1266
1267  def WriteVerifyScript(self, script):
1268    partition = self.partition
1269    if not self.src:
1270      script.Print("Image %s will be patched unconditionally." % (partition,))
1271    else:
1272      ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
1273      ranges_str = ranges.to_string_raw()
1274      if self.version >= 3:
1275        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
1276                            'block_image_verify("%s", '
1277                            'package_extract_file("%s.transfer.list"), '
1278                            '"%s.new.dat", "%s.patch.dat")) then') % (
1279                            self.device, ranges_str, self.src.TotalSha1(),
1280                            self.device, partition, partition, partition))
1281      else:
1282        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
1283                           self.device, ranges_str, self.src.TotalSha1()))
1284      script.Print('Verified %s image...' % (partition,))
1285      script.AppendExtra('else')
1286
1287      # When generating incrementals for the system and vendor partitions,
1288      # explicitly check the first block (which contains the superblock) of
1289      # the partition to see if it's what we expect. If this check fails,
1290      # give an explicit log message about the partition having been
1291      # remounted R/W (the most likely explanation) and the need to flash to
1292      # get OTAs working again.
1293      if self.check_first_block:
1294        self._CheckFirstBlock(script)
1295
1296      # Abort the OTA update. Note that the incremental OTA cannot be applied
1297      # even if it may match the checksum of the target partition.
1298      # a) If version < 3, operations like move and erase will make changes
1299      #    unconditionally and damage the partition.
1300      # b) If version >= 3, it won't even reach here.
1301      script.AppendExtra(('abort("%s partition has unexpected contents");\n'
1302                          'endif;') % (partition,))
1303
1304  def _WritePostInstallVerifyScript(self, script):
1305    partition = self.partition
1306    script.Print('Verifying the updated %s image...' % (partition,))
1307    # Unlike pre-install verification, clobbered_blocks should not be ignored.
1308    ranges = self.tgt.care_map
1309    ranges_str = ranges.to_string_raw()
1310    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
1311                       self.device, ranges_str,
1312                       self.tgt.TotalSha1(include_clobbered_blocks=True)))
1313
1314    # Bug: 20881595
1315    # Verify that extended blocks are really zeroed out.
1316    if self.tgt.extended:
1317      ranges_str = self.tgt.extended.to_string_raw()
1318      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
1319                         self.device, ranges_str,
1320                         self._HashZeroBlocks(self.tgt.extended.size())))
1321      script.Print('Verified the updated %s image.' % (partition,))
1322      script.AppendExtra(
1323          'else\n'
1324          '  abort("%s partition has unexpected non-zero contents after OTA '
1325          'update");\n'
1326          'endif;' % (partition,))
1327    else:
1328      script.Print('Verified the updated %s image.' % (partition,))
1329
1330    script.AppendExtra(
1331        'else\n'
1332        '  abort("%s partition has unexpected contents after OTA update");\n'
1333        'endif;' % (partition,))
1334
1335  def _WriteUpdate(self, script, output_zip):
1336    ZipWrite(output_zip,
1337             '{}.transfer.list'.format(self.path),
1338             '{}.transfer.list'.format(self.partition))
1339    ZipWrite(output_zip,
1340             '{}.new.dat'.format(self.path),
1341             '{}.new.dat'.format(self.partition))
1342    ZipWrite(output_zip,
1343             '{}.patch.dat'.format(self.path),
1344             '{}.patch.dat'.format(self.partition),
1345             compress_type=zipfile.ZIP_STORED)
1346
1347    call = ('block_image_update("{device}", '
1348            'package_extract_file("{partition}.transfer.list"), '
1349            '"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
1350                device=self.device, partition=self.partition))
1351    script.AppendExtra(script.WordWrap(call))
1352
1353  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
1354    data = source.ReadRangeSet(ranges)
1355    ctx = sha1()
1356
1357    for p in data:
1358      ctx.update(p)
1359
1360    return ctx.hexdigest()
1361
1362  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
1363    """Return the hash value for all zero blocks."""
1364    zero_block = '\x00' * 4096
1365    ctx = sha1()
1366    for _ in range(num_blocks):
1367      ctx.update(zero_block)
1368
1369    return ctx.hexdigest()
1370
1371  # TODO(tbao): Due to http://b/20939131, block 0 may be changed without
1372  # remounting R/W. Will change the checking to a finer-grained way to
1373  # mask off those bits.
1374  def _CheckFirstBlock(self, script):
1375    r = rangelib.RangeSet((0, 1))
1376    srchash = self._HashBlocks(self.src, r)
1377
1378    script.AppendExtra(('(range_sha1("%s", "%s") == "%s") || '
1379                        'abort("%s has been remounted R/W; '
1380                        'reflash device to reenable OTA updates");')
1381                       % (self.device, r.to_string_raw(), srchash,
1382                          self.device))
1383
1384DataImage = blockimgdiff.DataImage
1385
1386
1387# map recovery.fstab's fs_types to mount/format "partition types"
1388PARTITION_TYPES = {
1389    "yaffs2": "MTD",
1390    "mtd": "MTD",
1391    "ext4": "EMMC",
1392    "emmc": "EMMC",
1393    "f2fs": "EMMC",
1394    "squashfs": "EMMC"
1395}
1396
1397def GetTypeAndDevice(mount_point, info):
1398  fstab = info["fstab"]
1399  if fstab:
1400    return (PARTITION_TYPES[fstab[mount_point].fs_type],
1401            fstab[mount_point].device)
1402  else:
1403    raise KeyError
1404
1405
1406def ParseCertificate(data):
1407  """Parse a PEM-format certificate."""
1408  cert = []
1409  save = False
1410  for line in data.split("\n"):
1411    if "--END CERTIFICATE--" in line:
1412      break
1413    if save:
1414      cert.append(line)
1415    if "--BEGIN CERTIFICATE--" in line:
1416      save = True
1417  cert = "".join(cert).decode('base64')
1418  return cert
1419
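# Illustrative sketch (not part of the original module): extracting the DER
# bytes from a PEM certificate on disk. The path is hypothetical.
def _example_parse_certificate():
  pem = open("security/platform.x509.pem").read()  # hypothetical path
  der = ParseCertificate(pem)
  print "certificate is %d bytes of DER" % (len(der),)
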
1420def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
1421                      info_dict=None):
1422  """Generate a binary patch that creates the recovery image starting
1423  with the boot image.  (Most of the space in these images is just the
1424  kernel, which is identical for the two, so the resulting patch
1425  should be efficient.)  Add it to the output zip, along with a shell
1426  script that is run from init.rc on first boot to actually do the
1427  patching and install the new recovery image.
1428
1429  recovery_img and boot_img should be File objects for the
1430  corresponding images.  info should be the dictionary returned by
1431  common.LoadInfoDict() on the input target_files.
1432  """
1433
1434  if info_dict is None:
1435    info_dict = OPTIONS.info_dict
1436
1437  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
1438  system_root_image = info_dict.get("system_root_image", None) == "true"
1439
1440  if full_recovery_image:
1441    output_sink("etc/recovery.img", recovery_img.data)
1442
1443  else:
1444    diff_program = ["imgdiff"]
1445    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
1446    if os.path.exists(path):
1447      diff_program.append("-b")
1448      diff_program.append(path)
1449      bonus_args = "-b /system/etc/recovery-resource.dat"
1450    else:
1451      bonus_args = ""
1452
1453    d = Difference(recovery_img, boot_img, diff_program=diff_program)
1454    _, _, patch = d.ComputePatch()
1455    output_sink("recovery-from-boot.p", patch)
1456
1457  try:
1458    # The following GetTypeAndDevice()s need to use the path in the target
1459    # info_dict instead of source_info_dict.
1460    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
1461    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
1462  except KeyError:
1463    return
1464
1465  if full_recovery_image:
1466    sh = """#!/system/bin/sh
1467if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
1468  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
1469else
1470  log -t recovery "Recovery image already installed"
1471fi
1472""" % {'type': recovery_type,
1473       'device': recovery_device,
1474       'sha1': recovery_img.sha1,
1475       'size': recovery_img.size}
1476  else:
1477    sh = """#!/system/bin/sh
1478if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
1479  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
1480else
1481  log -t recovery "Recovery image already installed"
1482fi
1483""" % {'boot_size': boot_img.size,
1484       'boot_sha1': boot_img.sha1,
1485       'recovery_size': recovery_img.size,
1486       'recovery_sha1': recovery_img.sha1,
1487       'boot_type': boot_type,
1488       'boot_device': boot_device,
1489       'recovery_type': recovery_type,
1490       'recovery_device': recovery_device,
1491       'bonus_args': bonus_args}
1492
1493  # The install script location moved from /system/etc to /system/bin
1494  # in the L release.  Parse init.*.rc files to find out where the
1495  # target-files expects it to be, and put it there.
1496  sh_location = "etc/install-recovery.sh"
1497  found = False
1498  if system_root_image:
1499    init_rc_dir = os.path.join(input_dir, "ROOT")
1500  else:
1501    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
1502  init_rc_files = os.listdir(init_rc_dir)
1503  for init_rc_file in init_rc_files:
1504    if (not init_rc_file.startswith('init.') or
1505        not init_rc_file.endswith('.rc')):
1506      continue
1507    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
1508      for line in f:
1509        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
1510        if m:
1511          sh_location = m.group(1)
1512          found = True
1513          break
1514    if found:
1515      break
1516  print "putting script in", sh_location
1517
1518  output_sink(sh_location, sh)
1519