common.py revision a80a8085e037c949bf8f8741b1aeb8679baa8c8b
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import errno
17import getopt
18import getpass
19import imp
20import os
21import platform
22import re
23import shlex
24import shutil
25import subprocess
26import sys
27import tempfile
28import threading
29import time
30import zipfile
31
32import blockimgdiff
33import rangelib
34
35from hashlib import sha1 as sha1
36
37
class Options(object):
  """Global option container shared by the releasetools scripts.

  A single module-level instance (OPTIONS) is mutated by ParseOptions()
  and read everywhere else.
  """

  def __init__(self):
    # Host tool output directory for the running platform, if known.
    host_out_dirs = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }
    self.search_path = host_out_dirs.get(sys.platform, None)

    # Signing tool locations and arguments.
    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = "-Xmx2048m" # JVM Args
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []

    # Miscellaneous runtime state filled in by the individual scripts.
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
69
70
# The shared, mutable option singleton read and written throughout this
# module and by the scripts that import it.
OPTIONS = Options()


# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
76
77
class ExternalError(RuntimeError):
  """Raised when an external command or tool invoked by these scripts fails."""
  pass
80
81
def Run(args, **kwargs):
  """Create and return a subprocess.Popen object, printing the command
  line on the terminal if -v was specified.

  Args:
    args: argv list for the subprocess.
    **kwargs: passed straight through to subprocess.Popen.
  """
  if OPTIONS.verbose:
    print "  running: ", " ".join(args)
  return subprocess.Popen(args, **kwargs)
88
89
def CloseInheritedPipes():
  """Close leaked pipe file descriptors inherited from gmake on Mac OS.

  Gmake on Mac OS leaks pipe fds into child processes; close them before
  doing other work. A no-op on every other platform.
  """
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      fd_stat = os.fstat(fd)
      # Bit 0x1000 of st_mode (S_IFIFO) marks a pipe.
      if fd_stat is not None and fd_stat[0] & 0x1000 != 0:
        os.close(fd)
    except OSError:
      # fd not open (or already closed) -- ignore.
      pass
104
105
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  Args:
    input_file: either an open zipfile.ZipFile of the target-files zip, or
        a path to an unzipped target-files directory.
    input_dir: optional path to the unzipped target-files directory; when
        given, properties that are stored as links into out/ are redirected
        to the actual files under this directory.

  Returns:
    The parsed dict, augmented with "fstab" (parsed recovery fstab or None)
    and "build.prop" (parsed SYSTEM/build.prop) entries.

  Raises:
    ValueError: if the recovery API version cannot be determined.
  """

  def read_helper(fn):
    # Read `fn` from the zip or from the unzipped tree; raise KeyError when
    # missing so both cases look like ZipFile.read().
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)
        # NOTE(review): IOErrors other than ENOENT are swallowed here and
        # None is implicitly returned -- probably unintended; confirm.
  d = {}
  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    # ok if misc_info.txt doesn't exist
    pass

  # backwards compatibility: These values used to be in their own
  # files.  Look for them, in case we're processing an old
  # target_files zip.

  if "mkyaffs2_extra_flags" not in d:
    try:
      d["mkyaffs2_extra_flags"] = read_helper(
          "META/mkyaffs2-extra-flags.txt").strip()
    except KeyError:
      # ok if flags don't exist
      pass

  if "recovery_api_version" not in d:
    try:
      d["recovery_api_version"] = read_helper(
          "META/recovery-api-version.txt").strip()
    except KeyError:
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    try:
      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
    except KeyError:
      # ok if extensions don't exist
      pass

  if "fstab_version" not in d:
    d["fstab_version"] = "1"

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

  # Legacy image-size file: "blocksize N" or "<partition> N" per line.
  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    pass

  # Convert the well-known numeric values in place (base 0: allows 0x.. hex).
  def makeint(key):
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  if d.get("no_recovery", False) == "true":
    d["fstab"] = None
  else:
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                   d.get("system_root_image", False))
  d["build.prop"] = LoadBuildProp(read_helper)
  return d
222
223def LoadBuildProp(read_helper):
224  try:
225    data = read_helper("SYSTEM/build.prop")
226  except KeyError:
227    print "Warning: could not find SYSTEM/build.prop in %s" % zip
228    data = ""
229  return LoadDictionaryFromLines(data.split("\n"))
230
def LoadDictionaryFromLines(lines):
  """Parse "key=value" lines into a dict.

  Blank lines, comment lines starting with '#', and lines without an '='
  are skipped. Only the first '=' splits; later ones stay in the value.
  """
  result = {}
  for raw in lines:
    entry = raw.strip()
    if not entry or entry.startswith("#"):
      continue
    if "=" not in entry:
      continue
    key, _, value = entry.partition("=")
    result[key] = value
  return result
241
242def LoadRecoveryFSTab(read_helper, fstab_version, system_root_image=False):
243  class Partition(object):
244    def __init__(self, mount_point, fs_type, device, length, device2, context):
245      self.mount_point = mount_point
246      self.fs_type = fs_type
247      self.device = device
248      self.length = length
249      self.device2 = device2
250      self.context = context
251
252  try:
253    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
254  except KeyError:
255    print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab"
256    data = ""
257
258  if fstab_version == 1:
259    d = {}
260    for line in data.split("\n"):
261      line = line.strip()
262      if not line or line.startswith("#"):
263        continue
264      pieces = line.split()
265      if not 3 <= len(pieces) <= 4:
266        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
267      options = None
268      if len(pieces) >= 4:
269        if pieces[3].startswith("/"):
270          device2 = pieces[3]
271          if len(pieces) >= 5:
272            options = pieces[4]
273        else:
274          device2 = None
275          options = pieces[3]
276      else:
277        device2 = None
278
279      mount_point = pieces[0]
280      length = 0
281      if options:
282        options = options.split(",")
283        for i in options:
284          if i.startswith("length="):
285            length = int(i[7:])
286          else:
287            print "%s: unknown option \"%s\"" % (mount_point, i)
288
289      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
290                                 device=pieces[2], length=length,
291                                 device2=device2)
292
293  elif fstab_version == 2:
294    d = {}
295    for line in data.split("\n"):
296      line = line.strip()
297      if not line or line.startswith("#"):
298        continue
299      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
300      pieces = line.split()
301      if len(pieces) != 5:
302        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
303
304      # Ignore entries that are managed by vold
305      options = pieces[4]
306      if "voldmanaged=" in options:
307        continue
308
309      # It's a good line, parse it
310      length = 0
311      options = options.split(",")
312      for i in options:
313        if i.startswith("length="):
314          length = int(i[7:])
315        else:
316          # Ignore all unknown options in the unified fstab
317          continue
318
319      mount_flags = pieces[3]
320      # Honor the SELinux context if present.
321      context = None
322      for i in mount_flags.split(","):
323        if i.startswith("context="):
324          context = i
325
326      mount_point = pieces[1]
327      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
328                                 device=pieces[0], length=length,
329                                 device2=None, context=context)
330
331  else:
332    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
333
334  # / is used for the system mount point when the root directory is included in
335  # system. Other areas assume system is always at "/system" so point /system
336  # at /.
337  if system_root_image:
338    assert not d.has_key("/system") and d.has_key("/")
339    d["/system"] = d["/"]
340  return d
341
342
def DumpInfoDict(d):
  """Print every info-dict entry as "key = (type) value", sorted by key."""
  for k, v in sorted(d.items()):
    print "%-25s = (%s) %s" % (k, type(v).__name__, v)
346
347
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image.  Return the image data, or
  None if sourcedir does not appear to contains files for building the
  requested image.

  Args:
    sourcedir: directory containing "kernel", optional "second"/"cmdline"/
        "base"/"pagesize" files, and an optional RAMDISK/ subtree.
    fs_config_file: optional fs_config file passed to mkbootfs.
    info_dict: target info dict; defaults to OPTIONS.info_dict.
    has_ramdisk: whether a ramdisk must be built and included.
  """

  def make_ramdisk():
    # Pack sourcedir/RAMDISK with mkbootfs and compress it with minigzip,
    # returning the open NamedTemporaryFile holding the result.
    ramdisk_img = tempfile.NamedTemporaryFile()

    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
    else:
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

    p2.wait()
    p1.wait()
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

    return ramdisk_img

  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    return None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_img = make_ramdisk()

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  # Optional per-image inputs: each is included only if present.
  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  # With vboot, mkbootimg writes to an intermediate file which the vboot
  # signer then turns into the final image.
  img_unsigned = None
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
  else:
    cmd.extend(["--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    # Sign in place with boot_signer, using the partition name as the target.
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
           img_keyblock.name,
           img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # Rewind before reading the image back. Bug fix: the old code called
  # img.seek(os.SEEK_SET, 0), passing the arguments in reverse order; it
  # only worked because os.SEEK_SET happens to equal 0.
  img.seek(0, os.SEEK_SET)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data
471
472
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'.

  Args:
    name: name to give the returned File object.
    prebuilt_name: image filename to look for (e.g. "boot.img").
    unpack_dir: root of the unzipped target-files tree.
    tree_subdir: subdirectory (e.g. "BOOT") to build from if no prebuilt.
    info_dict: target info dict; defaults to OPTIONS.info_dict.

  Returns:
    A File object, or None if the image could not be built.
  """

  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  print "building image from target_files %s..." % (tree_subdir,)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot image.
  # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
  # for recovery.
  has_ramdisk = (info_dict.get("system_root_image") != "true" or
                 prebuilt_name != "boot.img" or
                 info_dict.get("recovery_as_boot") == "true")

  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk)
  if data:
    return File(name, data)
  return None
510
511
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  The temporary directory is registered in OPTIONS.tempfiles for removal
  by Cleanup().  If 'pattern' is given it is passed to unzip and limits
  extraction to matching members (of both zips, in the "+" form).

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """

  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(filename, dirname):
    # Shell out to unzip (-o overwrite, -q quiet); raise on any failure.
    cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
    if pattern is not None:
      cmd.append(pattern)
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    if p.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (filename,))

  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m:
    unzip_to_dir(m.group(1), tmp)
    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    filename = m.group(1)
  else:
    unzip_to_dir(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")
544
545
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password.

  Each key is probed with openssl to classify it as unencrypted,
  encrypted with an empty password, or encrypted (needs prompting).
  """

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    # First probe: does the key parse as unencrypted PKCS#8 DER?
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Second probe: retry with an empty passphrase to distinguish
      # "encrypted" from "unparseable".
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  # Prompt (or consult $ANDROID_PW_FILE) for the encrypted keys; every
  # passwordless key maps to None.
  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords
593
594
def SignFile(input_name, output_name, key, password, whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password).

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  Raises:
    ExternalError: if signapk.jar exits with a non-zero return code.
  """

  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  cmd = [OPTIONS.java_path, OPTIONS.java_args,
         "-Djava.library.path=" + java_library_path,
         "-jar",
         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
  cmd.extend(OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")
  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  # The password (if any) is fed to signapk on stdin.
  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  if password is not None:
    password += "\n"
  p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
625
626
def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target.  Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size.

  Args:
    data: the image contents as a string.
    target: image filename or mount point (e.g. "system.img").
    info_dict: dict holding the fstab and per-device "<name>_size" limits.

  Raises:
    ExternalError: if the data is at or above 99% of the limit.
  """

  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    if mount_point == "/userdata":
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    # Use only the basename of the device to look up its size limit.
    if "/" in device:
      device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:
    return

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page); integer division is intended.
    limit = limit / 2048 * (2048+64)
  size = len(data)
  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  if pct >= 99.0:
    raise ExternalError(msg)
  elif pct >= 95.0:
    print
    print "  WARNING: ", msg
    print
  elif OPTIONS.verbose:
    print "  ", msg
665
666
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict.

  Raises ValueError when a matched line has an inconsistent cert/key pair.
  Lines that don't match the expected format are silently skipped.
  """
  entry_re = re.compile(r'^name="(.*)"\s+certificate="(.*)"\s+'
                        r'private_key="(.*)"$')
  pub_suffix = OPTIONS.public_key_suffix
  priv_suffix = OPTIONS.private_key_suffix
  certmap = {}
  for raw_line in tf_zip.read("META/apkcerts.txt").split("\n"):
    entry = raw_line.strip()
    if not entry:
      continue
    m = entry_re.match(entry)
    if not m:
      continue
    name, cert, privkey = m.groups()
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      # "PRESIGNED"/"EXTERNAL" markers are kept verbatim.
      certmap[name] = cert
    elif (cert.endswith(pub_suffix) and privkey.endswith(priv_suffix) and
          cert[:-len(pub_suffix)] == privkey[:-len(priv_suffix)]):
      # Normal case: matching .x509.pem / .pk8 pair; store the common stem.
      certmap[name] = cert[:-len(pub_suffix)]
    else:
      raise ValueError("failed to parse line from apkcerts.txt:\n" + entry)
  return certmap
690
691
# Usage text for the options handled by ParseOptions(); appended to each
# script's own docstring by Usage().
COMMON_DOCSTRING = """
  -p  (--path)  <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the python module containing device-specific
      releasetools code.

  -x  (--extra)  <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""
711
def Usage(docstring):
  """Print the calling script's docstring followed by the common options."""
  print docstring.rstrip("\n")
  print COMMON_DOCSTRING
715
716
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler.

  Side effects: mutates the module-level OPTIONS and, when a search path
  is set, prepends <search_path>/bin to $PATH.
  """

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=",
         "signapk_shared_library_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print "**", str(err), "**"
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--signapk_shared_library_path",):
      OPTIONS.signapk_shared_library_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      # Unrecognized flags go to the caller's handler, if any.
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
786
787
def MakeTempFile(prefix=None, suffix=None):
  """Create a temp file, register it for deletion by Cleanup(), and
  return its filename."""
  handle, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # mkstemp leaves the fd open; we only need the name.
  os.close(handle)
  OPTIONS.tempfiles.append(path)
  return path
795
796
def Cleanup():
  """Remove every temp file and directory registered in OPTIONS.tempfiles."""
  for path in OPTIONS.tempfiles:
    remover = shutil.rmtree if os.path.isdir(path) else os.remove
    remover(path)
803
804
class PasswordManager(object):
  """Collects key passwords, either via $ANDROID_PW_FILE + $EDITOR or by
  prompting interactively on the terminal."""

  def __init__(self):
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      # Collect the keys that still have no password.
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      # Seed empty entries so they appear in the editable file.
      for i in missing:
        current[i] = ""

      if not first:
        print "key file %s still missing some passwords." % (self.pwfile,)
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.iteritems()):
      if v:
        result[k] = v
      else:
        # Loop until a non-empty password is entered.
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    # Write 'current' out to the password file, open it in $EDITOR so the
    # user can fill in the blanks, then re-read it.  Falls back to
    # interactive prompting if either $EDITOR or the file path is unset.
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    # Passwords on disk: restrict to owner read/write.
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    # Sort so that passwordless entries come first.
    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  def ReadFile(self):
    # Parse the password file into {key: password}; returns {} when the
    # file is unset or absent.  Malformed lines are reported and skipped.
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          print "failed to parse password file: ", line
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      if e.errno != errno.ENOENT:
        print "error reading password file: ", str(e)
    return result
905
906
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Add 'filename' to 'zip_file' as 'arcname' with fixed permissions and
  a fixed timestamp, so the resulting archive is reproducible.

  The file's mode and mtime are temporarily changed on disk before the
  write and restored afterwards.  Also works around Python 2.7's bogus
  zip64 requirement for files over 2GiB (see below).
  """
  import datetime

  # http://b/18015246
  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB. We can work around this by adjusting their
  # limit. Note that `zipfile.writestr()` will not work for strings larger than
  # 2GiB. The Python interpreter sometimes rejects strings that large (though
  # it isn't clear to me exactly what circumstances cause this).
  # `zipfile.write()` must be used directly to work around this.
  #
  # This mess can be avoided if we port to python3.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression
  if arcname is None:
    arcname = filename

  saved_stat = os.stat(filename)

  try:
    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
    # file to be zipped and reset it when we're done.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable.
    epoch = datetime.datetime.fromtimestamp(0)
    timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
    os.utime(filename, (timestamp, timestamp))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    os.chmod(filename, saved_stat.st_mode)
    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_zip64_limit
945
946
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.

  Args:
    zip_file: The open zipfile.ZipFile to add the entry to.
    zinfo_or_arcname: Either a zipfile.ZipInfo (used as-is) or an archive
        name string for the new entry.
    data: The entry's contents.
    perms: Optional Unix permission bits; when only an archive name was
        given and perms is None, a regular-file 0o100644 default is used.
    compress_type: Optional compression type, overriding the one in zinfo.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  try:
    if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
      zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
      zinfo.compress_type = zip_file.compression
      if perms is None:
        perms = 0o100644
    else:
      zinfo = zinfo_or_arcname

    # If compress_type is given, it overrides the value in zinfo.
    if compress_type is not None:
      zinfo.compress_type = compress_type

    # If perms is given, it has a priority.
    if perms is not None:
      # If perms doesn't set the file type, mark it as a regular file.
      if perms & 0o770000 == 0:
        perms |= 0o100000
      zinfo.external_attr = perms << 16

    # Use a fixed timestamp so the output is repeatable.
    zinfo.date_time = (2009, 1, 1, 0, 0, 0)

    zip_file.writestr(zinfo, data)
  finally:
    # Restore the limit even when writestr() raises (e.g. OverflowError for
    # oversized strings); previously a failure left it modified.
    zipfile.ZIP64_LIMIT = saved_zip64_limit
987
988
def ZipClose(zip_file):
  """Close 'zip_file' with the zip64 limit raised.

  http://b/18015246
  zipfile also refers to ZIP64_LIMIT during close() when it writes out the
  central directory.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  try:
    zip_file.close()
  finally:
    # Restore the limit even when close() raises, so a failed close can't
    # poison subsequent zipfile operations in this process.
    zipfile.ZIP64_LIMIT = saved_zip64_limit
999
1000
class DeviceSpecificParams(object):
  """Dispatcher for hooks defined by an optional device-specific
  releasetools module (located via OPTIONS.device_specific).

  Every hook method below is a no-op returning the 'default' kwarg when no
  module is loaded or when the module does not define the hook.
  """
  # Class-level default; an instance that successfully loads a module
  # shadows this with an instance attribute, so loading is per-instance.
  module = None

  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.iteritems():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        if os.path.isdir(path):
          # A directory: load the "releasetools" module found inside it.
          info = imp.find_module("releasetools", [path])
        else:
          # A file: strip a trailing ".py" and import it by bare name.
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        print "loaded device-specific extensions from", path
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        # Missing module is not fatal; all hooks become no-ops.
        print "unable to load device-specific module; assuming none"

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    """Called when emitting assertions for a verification-only package."""
    return self._DoCall("VerifyOTA_Assertions")
1085
class File(object):
  """An in-memory file: a logical name plus its full contents, with the
  size and SHA-1 of the contents precomputed."""

  def __init__(self, name, data):
    self.name = name
    self.data = data
    # Cache length and content hash; both are used repeatedly by callers.
    self.size = len(data)
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Build an instance named 'name' from the on-disk file 'diskname'.

    Uses a context manager so the handle is closed even if read() raises
    (the original leaked it in that case), and constructs via 'cls' so
    subclasses get instances of themselves rather than of File.
    """
    with open(diskname, "rb") as f:
      data = f.read()
    return cls(name, data)

  def WriteToTemp(self):
    """Write the contents to a NamedTemporaryFile and return it.

    The caller owns the returned object; closing it deletes the file.
    """
    t = tempfile.NamedTemporaryFile()
    t.write(self.data)
    t.flush()
    return t

  def AddToZip(self, z, compression=None):
    """Add this file to the open zip 'z' (fixed timestamp, see ZipWriteStr)."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1108
# Diff tool chosen by target-file extension in Difference.ComputePatch().
# Compressed/container formats go through imgdiff (with -z for zip-based
# ones); anything not listed here falls back to bsdiff.
DIFF_PROGRAM_BY_EXT = {
    ".gz" : "imgdiff",
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    ".img" : "imgdiff",
    }
1116
class Difference(object):
  """Computes a binary patch that transforms source file 'sf' into target
  file 'tf', using an external diff program (imgdiff/bsdiff) with a
  watchdog timeout."""

  def __init__(self, tf, sf, diff_program=None):
    self.tf = tf                        # target File object
    self.sf = sf                        # source File object
    self.patch = None                   # patch bytes, set by ComputePatch()
    self.diff_program = diff_program    # optional explicit tool override

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      # Pick the tool from the target's extension; default to bsdiff.
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    # NOTE(review): this 'ext' is never used again — looks like leftover
    # from the lookup above.
    ext = os.path.splitext(tf.name)[1]

    try:
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        # Copy so appending the file names doesn't mutate the caller's or
        # the module-level command list.
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []
      # Drain the child's pipes on a helper thread so the main thread can
      # enforce a wall-clock timeout via Thread.join().
      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        print "WARNING: diff command timed out"
        p.terminate()
        th.join(5)
        if th.is_alive():
          # terminate() wasn't enough; force-kill and wait for the reader.
          p.kill()
          th.join()

      if err or p.returncode != 0:
        print "WARNING: failure running %s:\n%s\n" % (
            diff_program, "".join(err))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      # Closing the NamedTemporaryFiles deletes them.
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch


  def GetPatch(self):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
1188
1189
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'.

  Work is distributed over OPTIONS.worker_threads threads; results are
  stored on the Difference objects themselves (retrieve via GetPatch()).
  """
  print len(diffs), "diffs to compute"

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    # Protocol: hold the lock while pulling from the shared iterator and
    # while printing stats; release it around the slow ComputePatch() call
    # so other workers can proceed.  The acquire/release pairs therefore
    # straddle loop iterations deliberately.
    try:
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print "patching failed!                                  %s" % (name,)
        else:
          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception as e:
      print e
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
1234
1235
class BlockDifference(object):
  """Computes a block-based image diff between target image 'tgt' and
  optional source image 'src', and emits the edify script fragments that
  verify, patch, and re-verify the partition on device.

  The transfer list / new-data / patch-data files are computed eagerly in
  the constructor and written under a temp dir tracked in
  OPTIONS.tempfiles.
  """
  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None):
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block

    if version is None:
      # Default to the highest transfer-list version the build advertises
      # via "blockimgdiff_versions" (comma-separated), falling back to 1.
      version = 1
      if OPTIONS.info_dict:
        version = max(
            int(i) for i in
            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version)
    tmpdir = tempfile.mkdtemp()
    # Registering the dir in OPTIONS.tempfiles defers cleanup to the tool's
    # exit path.
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)

    # For incrementals, resolve the block device from the *source* build's
    # fstab, since that is the build running when the patch is applied.
    if src is None:
      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
    else:
      _, self.device = GetTypeAndDevice("/" + partition,
                                        OPTIONS.source_info_dict)

  def WriteScript(self, script, output_zip, progress=None):
    """Emit the update commands plus post-install verification to 'script',
    adding the patch artifacts to 'output_zip'."""
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    self._WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
                       'ui_print("    Verified.") || '
                       'ui_print("\\"%s\\" has unexpected contents.");' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True),
                       self.device))
    script.AppendExtra("")

  def WriteVerifyScript(self, script):
    """Emit pre-install verification: checks that the source partition is
    in the expected state (or already half-updated, for versions >= 3)."""
    partition = self.partition
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))
    else:
      # Clobbered blocks are excluded: their contents are not expected to
      # match the source build.
      ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
      ranges_str = ranges.to_string_raw()
      if self.version >= 4:
        # Accept either a pristine source or a partition that already
        # passes block_image_verify (i.e. an interrupted, resumable update).
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, self.src.TotalSha1(),
                            self.device, partition, partition, partition))
      elif self.version == 3:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, self.src.TotalSha1(),
                            self.device, partition, partition, partition))
      else:
        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                           self.device, ranges_str, self.src.TotalSha1()))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block("%s");' % (self.device,))

        # If version >= 4, try block recovery before abort update
        script.AppendExtra((
            'ifelse (block_image_recover("{device}", "{ranges}") && '
            'block_image_verify("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("{partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        script.AppendExtra(('abort("%s partition has unexpected contents");\n'
                            'endif;') % (partition,))

  def _WritePostInstallVerifyScript(self, script):
    """Emit verification that runs after patching: the full target care_map
    must match, and extended blocks must be zero."""
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                         self.device, ranges_str,
                         self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      script.AppendExtra(
          'else\n'
          '  abort("%s partition has unexpected non-zero contents after OTA '
          'update");\n'
          'endif;' % (partition,))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    script.AppendExtra(
        'else\n'
        '  abort("%s partition has unexpected contents after OTA update");\n'
        'endif;' % (partition,))

  def _WriteUpdate(self, script, output_zip):
    """Add the transfer list / new / patch files to output_zip and emit the
    block_image_update call."""
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    # The patch data is already compressed; store it rather than deflate.
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
                device=self.device, partition=self.partition))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    """Return the hex SHA-1 of the given block ranges read from 'source'."""
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    # 4096 matches the block size used throughout blockimgdiff.
    zero_block = '\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()
1420
1421
# Re-export so callers can use common.DataImage without importing
# blockimgdiff directly.
DataImage = blockimgdiff.DataImage
1423
# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "yaffs2": "MTD",
    "mtd": "MTD",
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}

def GetTypeAndDevice(mount_point, info):
  """Return (partition_type, device) for 'mount_point' from info["fstab"].

  Raises KeyError when the info dict carries no fstab, when the mount
  point is not present in it, or when its fs_type has no known partition
  type.
  """
  fstab = info["fstab"]
  if fstab:
    return (PARTITION_TYPES[fstab[mount_point].fs_type],
            fstab[mount_point].device)
  else:
    # Include the mount point so the failure is diagnosable; callers that
    # catch KeyError (e.g. MakeRecoveryPatch) are unaffected.
    raise KeyError(mount_point)
1441
1442
def ParseCertificate(data):
  """Parse a PEM-format certificate.

  Returns the raw DER bytes found between the BEGIN/END CERTIFICATE
  markers; the marker lines themselves are excluded.
  """
  # Local import, matching the file's style for one-off modules (see the
  # datetime import in ZipWrite).
  import base64
  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      # Start collecting on the NEXT line (the marker itself is skipped).
      save = True
  # base64.b64decode replaces the 'base64' string codec, which exists only
  # on Python 2; the decoded result is identical.
  cert = base64.b64decode("".join(cert))
  return cert
1456
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.  (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.)  Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
  system_root_image = info_dict.get("system_root_image", None) == "true"

  if full_recovery_image:
    # No patching: ship the whole recovery image as-is.
    output_sink("etc/recovery.img", recovery_img.data)

  else:
    diff_program = ["imgdiff"]
    # recovery-resource.dat, when present, is a shared "bonus" blob that
    # shrinks the patch; the flash script must then pass it via -b too.
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"
    else:
      bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    # No fstab entries for boot/recovery: nothing to install on device.
    return

  # Build the first-boot flash script.  applypatch -c first checks whether
  # the expected recovery image is already installed; if not, it is
  # (re)created — either copied whole or patched from the boot image.
  if full_recovery_image:
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release.  Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  found = False
  if system_root_image:
    init_rc_dir = os.path.join(input_dir, "ROOT")
  else:
    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):
      continue

    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
      for line in f:
        # The flash_recovery service line names the script's install path.
        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
        if m:
          sh_location = m.group(1)
          found = True
          break

    if found:
      break

  print "putting script in", sh_location

  output_sink(sh_location, sh)
1559