common.py revision 1bca9fb6d4a71f4ce059f6a435f67b930d827646
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import errno
17import getopt
18import getpass
19import imp
20import os
21import platform
22import re
23import shlex
24import shutil
25import subprocess
26import sys
27import tempfile
28import threading
29import time
30import zipfile
31
32import blockimgdiff
33
34from hashlib import sha1 as sha1
35
36
class Options(object):
  """Mutable bag of global settings shared by the releasetools scripts.

  A single module-level instance (OPTIONS, created below) holds these;
  ParseOptions() overwrites the defaults from command-line flags.
  """

  def __init__(self):
    # Default host tool directory, keyed by the running platform.
    host_out_dirs = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }
    self.search_path = host_out_dirs.get(sys.platform)

    # Signing tool locations and arguments.
    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = "-Xmx2048m"  # JVM Args
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []

    # Misc runtime state.
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
68
69
# The single shared Options instance, mutated in place by ParseOptions().
OPTIONS = Options()
71
72
# Values for "certificate" in apkcerts that mean special things.
# (See ReadApkCerts(): entries using these strings carry no key pair.)
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
75
76
class ExternalError(RuntimeError):
  """Raised when an external tool (unzip, aapt, signapk.jar, ...) fails."""
  pass
79
80
def Run(args, **kwargs):
  """Create and return a subprocess.Popen object, printing the command
  line on the terminal if -v was specified.

  Args:
    args: list of argv strings for the child process.
    **kwargs: passed through to subprocess.Popen unchanged.
  """
  if OPTIONS.verbose:
    print "  running: ", " ".join(args)
  return subprocess.Popen(args, **kwargs)
87
88
def CloseInheritedPipes():
  """Close leaked pipe file descriptors inherited on Mac OS.

  Gmake on Mac OS has a file descriptor (PIPE) leak; close any fd in the
  range 3-1024 whose stat says it is a pipe before doing other work.
  No-op on other platforms.
  """
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      st = os.fstat(fd)
      # Bit 0x1000 of st_mode (S_IFIFO) marks a pipe/FIFO.
      if st is not None and (st[0] & 0x1000) != 0:
        os.close(fd)
    except OSError:
      # fd not open, or already closed -- nothing to do.
      pass
103
104
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  Args:
    input_file: either an opened zipfile.ZipFile of a target-files zip,
        or the path of an unzipped target-files directory.
    input_dir: if not None, the path of the unzipped target-files
        directory; link-style properties (selinux_fc, ramdisk_dir, ...)
        are redirected to the actual files under it.

  Returns:
    A dict of build properties, including a parsed "fstab" entry and the
    contents of SYSTEM/build.prop under "build.prop".

  Raises:
    ValueError: if no recovery API version can be found.
  """

  def read_helper(fn):
    # Read 'fn' from the zip, or from the unzipped directory.  A missing
    # path is reported as KeyError in both cases, matching
    # zipfile.ZipFile.read()'s behavior for absent entries.
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)
  d = {}
  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    # ok if misc_info.txt doesn't exist
    pass

  # backwards compatibility: These values used to be in their own
  # files.  Look for them, in case we're processing an old
  # target_files zip.

  if "mkyaffs2_extra_flags" not in d:
    try:
      d["mkyaffs2_extra_flags"] = read_helper(
          "META/mkyaffs2-extra-flags.txt").strip()
    except KeyError:
      # ok if flags don't exist
      pass

  if "recovery_api_version" not in d:
    try:
      d["recovery_api_version"] = read_helper(
          "META/recovery-api-version.txt").strip()
    except KeyError:
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    try:
      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
    except KeyError:
      # ok if extensions don't exist
      pass

  if "fstab_version" not in d:
    d["fstab_version"] = "1"

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

  # Legacy image-size file: "name value" pairs, one per line; "blocksize"
  # is stored as-is, everything else gets a "_size" suffix.
  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    pass

  def makeint(key):
    # Convert a known-numeric property in place; base 0 accepts 0x... hex.
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  if d.get("no_recovery", False) == "true":
    d["fstab"] = None
  else:
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                   d.get("system_root_image", False))
  d["build.prop"] = LoadBuildProp(read_helper)
  return d
221
def LoadBuildProp(read_helper):
  """Parse SYSTEM/build.prop via 'read_helper' into a {name: value} dict.

  A missing build.prop is tolerated (with a warning) and yields an empty
  dict.
  """
  try:
    data = read_helper("SYSTEM/build.prop")
  except KeyError:
    # Bug fix: the old message did '"... in %s" % zip', interpolating the
    # *builtin* zip function instead of anything meaningful.
    print("Warning: could not find SYSTEM/build.prop in input target-files")
    data = ""
  return LoadDictionaryFromLines(data.split("\n"))
229
def LoadDictionaryFromLines(lines):
  """Parse "name=value" lines into a dict.

  Blank lines and lines starting with '#' are skipped, as are lines with
  no '='.  Only the first '=' splits, so values may themselves contain
  '='.  A later duplicate of a name overwrites the earlier value.
  """
  props = {}
  for raw in lines:
    entry = raw.strip()
    if not entry or entry.startswith("#") or "=" not in entry:
      continue
    name, _, value = entry.partition("=")
    props[name] = value
  return props
240
def LoadRecoveryFSTab(read_helper, fstab_version, system_root_image=False):
  """Parse RECOVERY/RAMDISK/etc/recovery.fstab into {mount_point: Partition}.

  Args:
    read_helper: callable mapping an archive-relative path to its contents;
        must raise KeyError when the path is absent.
    fstab_version: 1 or 2, selecting the fstab line format.
    system_root_image: if True, the root directory is part of the system
        image; an alias entry for "/system" pointing at "/" is added.

  Returns:
    A dict mapping each mount point to a Partition record.

  Raises:
    ValueError: on a malformed fstab line or an unknown fstab_version.
  """
  class Partition(object):
    # Plain record for one fstab entry.  'device2' (alternate device) is
    # only set by the v1 format; 'context' (SELinux context) only by v2.
    # Bug fix: both now default to None -- 'context' previously had no
    # default, so the v1 call below (which omits it) raised TypeError.
    def __init__(self, mount_point, fs_type, device, length, device2=None,
                 context=None):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.device2 = device2
      self.context = context

  try:
    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
  except KeyError:
    print("Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab")
    data = ""

  if fstab_version == 1:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      pieces = line.split()
      if not 3 <= len(pieces) <= 4:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
      options = None
      if len(pieces) >= 4:
        if pieces[3].startswith("/"):
          device2 = pieces[3]
          if len(pieces) >= 5:
            # NOTE(review): unreachable given the 3..4 length check above;
            # kept for fidelity with the original format handling.
            options = pieces[4]
        else:
          device2 = None
          options = pieces[3]
      else:
        device2 = None

      mount_point = pieces[0]
      length = 0
      if options:
        options = options.split(",")
        for i in options:
          if i.startswith("length="):
            length = int(i[7:])
          else:
            print("%s: unknown option \"%s\"" % (mount_point, i))

      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
                                 device=pieces[2], length=length,
                                 device2=device2)

  elif fstab_version == 2:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
      pieces = line.split()
      if len(pieces) != 5:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

      # Ignore entries that are managed by vold
      options = pieces[4]
      if "voldmanaged=" in options:
        continue

      # It's a good line, parse it
      length = 0
      options = options.split(",")
      for i in options:
        if i.startswith("length="):
          length = int(i[7:])
        else:
          # Ignore all unknown options in the unified fstab
          continue

      mount_flags = pieces[3]
      # Honor the SELinux context if present.
      context = None
      for i in mount_flags.split(","):
        if i.startswith("context="):
          context = i

      mount_point = pieces[1]
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                                 device=pieces[0], length=length,
                                 device2=None, context=context)

  else:
    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))

  # / is used for the system mount point when the root directory is included in
  # system. Other areas assume system is always at "/system" so point /system
  # at /.
  if system_root_image:
    assert "/system" not in d and "/" in d
    d["/system"] = d["/"]
  return d
340
341
def DumpInfoDict(d):
  """Print each info-dict entry as "key = (type) value", sorted by key."""
  for k, v in sorted(d.items()):
    print "%-25s = (%s) %s" % (k, type(v).__name__, v)
345
346
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input
  (in 'sourcedir'), and turn them into a boot image.

  Args:
    sourcedir: directory containing "kernel" and, optionally, "RAMDISK",
        "second", "cmdline", "base" and "pagesize" inputs.
    fs_config_file: fs_config file passed to mkbootfs with -f, if it exists.
    info_dict: build properties; defaults to OPTIONS.info_dict.
    has_ramdisk: if True, pack sourcedir/RAMDISK into the image.

  Returns:
    The image contents as a string, or None if sourcedir does not appear
    to contain the files needed for the requested image.
  """

  def make_ramdisk():
    # Pack RAMDISK/ with mkbootfs, compressed through minigzip into a
    # NamedTemporaryFile that the caller keeps open until read.
    ramdisk_img = tempfile.NamedTemporaryFile()

    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
    else:
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

    p2.wait()
    p1.wait()
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

    return ramdisk_img

  def read_config(fn):
    # Read a one-line config file, closing the handle promptly (the
    # original left the file object for the garbage collector).
    with open(fn) as f:
      return f.read().rstrip("\n")

  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    return None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_img = make_ramdisk()

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(read_config(fn))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(read_config(fn))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(read_config(fn))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  # With vboot, mkbootimg writes to an intermediate file which the vboot
  # signer then consumes; otherwise write the final image directly.
  img_unsigned = None
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
  else:
    cmd.extend(["--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    # Verified-boot signing with boot_signer (in place: input == output).
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
           img_keyblock.name,
           img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # Bug fix: the arguments were transposed as seek(os.SEEK_SET, 0), which
  # only worked because os.SEEK_SET happens to equal 0.  Rewind to read
  # back the finished image.
  img.seek(0, os.SEEK_SET)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data
470
471
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'.

  Returns None when the image cannot be built from the source tree
  (see _BuildBootableImage).
  """

  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  print "building image from target_files %s..." % (tree_subdir,)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot image.
  # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
  # for recovery.
  has_ramdisk = (info_dict.get("system_root_image") != "true" or
                 prebuilt_name != "boot.img" or
                 info_dict.get("recovery_as_boot") == "true")

  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk)
  if data:
    return File(name, data)
  return None
509
510
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """

  tempdir = tempfile.mkdtemp(prefix="targetfiles-")
  OPTIONS.tempfiles.append(tempdir)

  def extract(archive, dest):
    # Shell out to unzip; -o overwrites, -q keeps it quiet.  'pattern',
    # when given, limits extraction to matching members.
    cmd = ["unzip", "-o", "-q", archive, "-d", dest]
    if pattern is not None:
      cmd.append(pattern)
    proc = Run(cmd, stdout=subprocess.PIPE)
    proc.communicate()
    if proc.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (archive,))

  match = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if match:
    filename = match.group(1)
    extract(filename, tempdir)
    extract(match.group(2), os.path.join(tempdir, "BOOTABLE_IMAGES"))
  else:
    extract(filename, tempdir)

  return tempdir, zipfile.ZipFile(filename, "r")
543
544
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password.

  Each key is probed with openssl to classify it as unencrypted,
  encrypted with an empty password, or encrypted with a real password;
  only the last group is prompted for (via PasswordManager).
  """

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    # First probe: does the DER key parse without any decryption?
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Second probe: try decrypting with the empty password.
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords
592
593
def GetMinSdkVersion(apk_name):
  """Get the minSdkVersion declared in the APK. This can be both a decimal
  number (API Level) or a codename.

  Raises:
    ExternalError: if aapt fails, or no sdkVersion line is found.
  """

  p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
  output, _ = p.communicate()
  # Bug fix: stderr is not piped, so communicate() always returns None for
  # it and the old "if err:" check could never fire.  Test the exit status
  # instead.
  if p.returncode != 0:
    raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"
        % (p.returncode,))

  for line in output.split("\n"):
    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
    if m:
      return m.group(1)
  raise ExternalError("No minSdkVersion returned by aapt")
611
612
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Get the minSdkVersion declared in the APK as a number (API Level).

  If minSdkVersion is set to a codename, it is translated to a number
  using the provided map.

  Raises:
    ExternalError: if the version is a codename not present in the map.
  """

  version = GetMinSdkVersion(apk_name)
  try:
    return int(version)
  except ValueError:
    pass
  # Not a decimal number -- treat it as a codename and look it up.
  if version in codename_to_api_level_map:
    return codename_to_api_level_map[version]
  raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
                      % (version, codename_to_api_level_map))
629
630
def SignFile(input_name, output_name, key, password, min_api_level=None,
             codename_to_api_level_map=None,
             whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password.

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  min_api_level is the API Level (int) of the oldest platform this file may end
  up on. If not specified for an APK, the API Level is obtained by interpreting
  the minSdkVersion attribute of the APK's AndroidManifest.xml.

  codename_to_api_level_map is needed to translate the codename which may be
  encountered as the APK's minSdkVersion.

  Raises:
    ExternalError: if signapk.jar exits with a non-zero status.
  """

  # Bug fix: the default used to be a shared mutable `dict()`; use None
  # and create a fresh dict per call (backward compatible -- the map is
  # only read).
  if codename_to_api_level_map is None:
    codename_to_api_level_map = {}

  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  cmd = [OPTIONS.java_path, OPTIONS.java_args,
         "-Djava.library.path=" + java_library_path,
         "-jar",
         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
  cmd.extend(OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")

  # Explicit min_api_level wins; otherwise (APKs only, i.e. not
  # whole_file) read minSdkVersion out of the APK itself.
  min_sdk_version = min_api_level
  if min_sdk_version is None:
    if not whole_file:
      min_sdk_version = GetMinSdkVersionInt(
          input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd.extend(["--min-sdk-version", str(min_sdk_version)])

  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  # Feed the password (if any) to signapk.jar on stdin.
  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  if password is not None:
    password += "\n"
  p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
679
680
def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target.  Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size.

  Args:
    data: the image contents as a string.
    target: the image name, e.g. "system.img" (a ".img" suffix is
        stripped to derive the mount point and size property).
    info_dict: build properties; must contain "fstab", and the limit is
        read from the "<device>_size" property.

  Raises:
    ExternalError: if the data occupies >= 99%% of the size limit.
  """

  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    if mount_point == "/userdata":
      # The userdata image is mounted at /data in the fstab.
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    if "/" in device:
      # Reduce a device path to its basename to build the size key.
      device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:
    return

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page)
    limit = limit / 2048 * (2048+64)
  size = len(data)
  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  if pct >= 99.0:
    raise ExternalError(msg)
  elif pct >= 95.0:
    print
    print "  WARNING: ", msg
    print
  elif OPTIONS.verbose:
    print "  ", msg
719
720
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict.

  Entries whose certificate is one of SPECIAL_CERT_STRINGS (and which
  carry no private key) map to that string verbatim; otherwise the
  common key basename (certificate minus its suffix) is stored.
  """
  pub_suffix = OPTIONS.public_key_suffix
  priv_suffix = OPTIONS.private_key_suffix
  line_re = re.compile(r'^name="(.*)"\s+certificate="(.*)"\s+'
                       r'private_key="(.*)"$')
  certmap = {}
  for raw in tf_zip.read("META/apkcerts.txt").split("\n"):
    line = raw.strip()
    if not line:
      continue
    m = line_re.match(line)
    if not m:
      continue
    name, cert, privkey = m.groups()
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      certmap[name] = cert
    elif (cert.endswith(pub_suffix) and
          privkey.endswith(priv_suffix) and
          cert[:-len(pub_suffix)] == privkey[:-len(priv_suffix)]):
      certmap[name] = cert[:-len(pub_suffix)]
    else:
      raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
  return certmap
744
745
# Usage text for the options shared by every releasetools script;
# Usage() appends this to the calling script's own docstring.
COMMON_DOCSTRING = """
  -p  (--path)  <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the python module containing device-specific
      releasetools code.

  -x  (--extra)  <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""
765
def Usage(docstring):
  """Print the calling script's docstring followed by the common options."""
  print docstring.rstrip("\n")
  print COMMON_DOCSTRING
769
770
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler.

  Side effects: fills in the global OPTIONS from recognized flags, and
  prepends <search_path>/bin to $PATH when a search path is set.
  """

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=",
         "signapk_shared_library_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print "**", str(err), "**"
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--signapk_shared_library_path",):
      OPTIONS.signapk_shared_library_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      # Unrecognized flags fall through to the caller's handler.
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
840
841
def MakeTempFile(prefix=None, suffix=None):
  """Make a temp file and add it to the list of things to be deleted
  when Cleanup() is called.  Return the filename."""
  handle, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # mkstemp leaves the descriptor open; close it so callers just use the
  # returned name.
  os.close(handle)
  OPTIONS.tempfiles.append(path)
  return path
849
850
def Cleanup():
  """Remove every temp file/dir registered in OPTIONS.tempfiles."""
  for path in OPTIONS.tempfiles:
    if os.path.isdir(path):
      shutil.rmtree(path)
    else:
      os.remove(path)
  # Bug fix: clear the list so a second Cleanup() call doesn't try to
  # remove paths that are already gone and raise OSError.
  del OPTIONS.tempfiles[:]
857
858
class PasswordManager(object):
  """Collects key passwords, preferring an $EDITOR session over
  $ANDROID_PW_FILE to interactive getpass prompts."""

  def __init__(self):
    # Both settings come from the environment; either may be absent.
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      # Seed empty entries so they show up in the edited file.
      for i in missing:
        current[i] = ""

      if not first:
        print "key file %s still missing some passwords." % (self.pwfile,)
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.iteritems()):
      if v:
        result[k] = v
      else:
        # Re-prompt until a non-empty password is entered.
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    """Write 'current' to the password file, open it in $EDITOR, then
    re-read it.  Falls back to interactive prompting when no editor or
    password file is configured."""
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    # Passwords are sensitive; restrict the file to the owner.
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    # Sort passwordless entries first so the cursor lands on one.
    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  def ReadFile(self):
    """Parse the password file into a {key: password} dict; a missing
    file yields an empty dict."""
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        # Lines look like:  [[[  password  ]]] keyname
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          print "failed to parse password file: ", line
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      if e.errno != errno.ENOENT:
        print "error reading password file: ", str(e)
    return result
959
960
961def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
962             compress_type=None):
963  import datetime
964
965  # http://b/18015246
966  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
967  # for files larger than 2GiB. We can work around this by adjusting their
968  # limit. Note that `zipfile.writestr()` will not work for strings larger than
969  # 2GiB. The Python interpreter sometimes rejects strings that large (though
970  # it isn't clear to me exactly what circumstances cause this).
971  # `zipfile.write()` must be used directly to work around this.
972  #
973  # This mess can be avoided if we port to python3.
974  saved_zip64_limit = zipfile.ZIP64_LIMIT
975  zipfile.ZIP64_LIMIT = (1 << 32) - 1
976
977  if compress_type is None:
978    compress_type = zip_file.compression
979  if arcname is None:
980    arcname = filename
981
982  saved_stat = os.stat(filename)
983
984  try:
985    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
986    # file to be zipped and reset it when we're done.
987    os.chmod(filename, perms)
988
989    # Use a fixed timestamp so the output is repeatable.
990    epoch = datetime.datetime.fromtimestamp(0)
991    timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
992    os.utime(filename, (timestamp, timestamp))
993
994    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
995  finally:
996    os.chmod(filename, saved_stat.st_mode)
997    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
998    zipfile.ZIP64_LIMIT = saved_zip64_limit
999
1000
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.

  Args:
    zip_file: An open zipfile.ZipFile to add the entry to.
    zinfo_or_arcname: A zipfile.ZipInfo, or the archive name as a string.
    data: The contents to store for the entry.
    perms: Optional permission bits; when only an archive name is given
        and perms is None, defaults to 0o100644 (regular file, rw-r--r--).
    compress_type: Optional compression type, overriding zinfo/zip_file.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Restore ZIP64_LIMIT even if writestr() raises, matching ZipWrite().
  try:
    if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
      zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
      zinfo.compress_type = zip_file.compression
      if perms is None:
        perms = 0o100644
    else:
      zinfo = zinfo_or_arcname

    # If compress_type is given, it overrides the value in zinfo.
    if compress_type is not None:
      zinfo.compress_type = compress_type

    # If perms is given, it has a priority.
    if perms is not None:
      # If perms doesn't set the file type, mark it as a regular file.
      if perms & 0o770000 == 0:
        perms |= 0o100000
      zinfo.external_attr = perms << 16

    # Use a fixed timestamp so the output is repeatable.
    zinfo.date_time = (2009, 1, 1, 0, 0, 0)

    zip_file.writestr(zinfo, data)
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1041
1042
def ZipClose(zip_file):
  """Close 'zip_file', working around the Python 2.7 zip64 limit.

  http://b/18015246
  zipfile also refers to ZIP64_LIMIT during close() when it writes out the
  central directory.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Restore the limit even if close() raises (e.g. on a write failure),
  # so a failed close doesn't leave the module-level limit modified.
  try:
    zip_file.close()
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1053
1054
class DeviceSpecificParams(object):
  """Loads an optional device-specific "releasetools" extension module
  (pointed at by OPTIONS.device_specific) and dispatches the OTA hook
  calls below to it.  Keyword arguments given to the constructor become
  attributes of this object, which is passed to every hook."""

  # Class-level cache: the extension module is loaded at most once.
  module = None
  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.iteritems():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        # 'path' may be either a directory containing releasetools.py or
        # a path to the module file itself (with or without ".py").
        if os.path.isdir(path):
          info = imp.find_module("releasetools", [path])
        else:
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        print "loaded device-specific extensions from", path
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        # Missing extensions are not an error; all hooks become no-ops.
        print "unable to load device-specific module; assuming none"

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    """Called when emitting assertions for a verification-only package."""
    return self._DoCall("VerifyOTA_Assertions")
1139
class File(object):
  """An in-memory file: a name plus its full contents.

  Attributes:
    name: The (archive) name of the file.
    data: The raw contents.
    size: len(data).
    sha1: Hex SHA-1 digest of data.
  """

  def __init__(self, name, data):
    self.name = name
    self.data = data
    self.size = len(data)
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Construct an instance by reading 'diskname' from disk.

    Uses 'cls' (not File) so subclasses get instances of themselves,
    and a context manager so the handle is closed even on read errors.
    """
    with open(diskname, "rb") as f:
      data = f.read()
    return cls(name, data)

  def WriteToTemp(self):
    """Write the data to a NamedTemporaryFile and return it.

    The caller owns the returned object; closing it deletes the file.
    """
    t = tempfile.NamedTemporaryFile()
    t.write(self.data)
    t.flush()
    return t

  def AddToZip(self, z, compression=None):
    """Add this file to the open zip 'z' via ZipWriteStr()."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1162
# Map filename extension to the external diff command (string or argv
# list) used for files of that type; extensions not listed here fall
# back to "bsdiff" (see Difference.ComputePatch).  "-z" is presumably
# imgdiff's flag for zip-archive-based formats — verify against imgdiff.
DIFF_PROGRAM_BY_EXT = {
    ".gz" : "imgdiff",
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    ".img" : "imgdiff",
    }
1170
class Difference(object):
  """Computes the binary patch that turns a source File into a target
  File, by running an external diff program (chosen per extension from
  DIFF_PROGRAM_BY_EXT unless overridden)."""

  def __init__(self, tf, sf, diff_program=None):
    # tf: target File object.
    # sf: source File object.
    # diff_program: optional command override (string or argv list).
    self.tf = tf
    self.sf = sf
    self.patch = None
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    ext = os.path.splitext(tf.name)[1]

    try:
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []
      # communicate() can block indefinitely if the diff program hangs,
      # so run it on a helper thread and enforce a timeout from here.
      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        print "WARNING: diff command timed out"
        p.terminate()
        th.join(5)
        if th.is_alive():
          # terminate() didn't stop it within 5s; force-kill.
          p.kill()
          th.join()

      if err or p.returncode != 0:
        print "WARNING: failure running %s:\n%s\n" % (
            diff_program, "".join(err))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch


  def GetPatch(self):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
1242
1243
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs',
  running OPTIONS.worker_threads of them in parallel."""
  print len(diffs), "diffs to compute"

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    try:
      # The lock protects both diff_iter and the interleaved printing;
      # it is deliberately released around the slow ComputePatch() call
      # so the other workers can make progress.
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print "patching failed!                                  %s" % (name,)
        else:
          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception as e:
      print e
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
1288
1289
class BlockDifference(object):
  """Computes a block-based diff between a target image and an optional
  source image (via blockimgdiff.BlockImageDiff) and emits the
  updater-script fragments that verify, patch, and re-verify the
  partition on device."""

  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None):
    # partition: the partition name, e.g. "system".
    # tgt/src: image objects; src of None means a full (non-incremental)
    #     update.
    # check_first_block: when True and version >= 4, the script also
    #     checks the partition's first block before attempting recovery.
    # version: transfer-list version; defaults to the highest value in
    #     info_dict's "blockimgdiff_versions" (or 1).
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block

    if version is None:
      version = 1
      if OPTIONS.info_dict:
        version = max(
            int(i) for i in
            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version)
    # The temp dir holding the transfer list / dat files is cleaned up
    # later via OPTIONS.tempfiles.
    tmpdir = tempfile.mkdtemp()
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size

    # The block device path comes from the target fstab for full updates
    # and from the source fstab for incrementals.
    if src is None:
      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
    else:
      _, self.device = GetTypeAndDevice("/" + partition,
                                        OPTIONS.source_info_dict)

  @property
  def required_cache(self):
    # Max stashed size reported by blockimgdiff for applying this diff.
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None):
    """Emit the install steps: add the patch payload to 'output_zip' and
    append the block_image_update and post-install verification to
    'script'."""
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    self._WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
                       'ui_print("    Verified.") || '
                       'ui_print("\\"%s\\" has unexpected contents.");' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True),
                       self.device))
    script.AppendExtra("")

  def WriteVerifyScript(self, script):
    """Emit the pre-install verification: accept either pristine source
    blocks or (for version >= 3) an image already verifiable against the
    package, attempting recovery before aborting when version >= 4."""
    partition = self.partition
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))
    else:
      # Clobbered blocks are deliberately excluded from pre-install
      # verification.
      ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
      ranges_str = ranges.to_string_raw()
      if self.version >= 4:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, self.src.TotalSha1(),
                            self.device, partition, partition, partition))
      elif self.version == 3:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, self.src.TotalSha1(),
                            self.device, partition, partition, partition))
      else:
        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                           self.device, ranges_str, self.src.TotalSha1()))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block("%s");' % (self.device,))

        # If version >= 4, try block recovery before abort update
        script.AppendExtra((
            'ifelse (block_image_recover("{device}", "{ranges}") && '
            'block_image_verify("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("{partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        script.AppendExtra(('abort("%s partition has unexpected contents");\n'
                            'endif;') % (partition,))

  def _WritePostInstallVerifyScript(self, script):
    """Emit the post-install check that the patched partition matches the
    target image, and (if present) that extended blocks are zeroed."""
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                         self.device, ranges_str,
                         self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      script.AppendExtra(
          'else\n'
          '  abort("%s partition has unexpected non-zero contents after OTA '
          'update");\n'
          'endif;' % (partition,))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    script.AppendExtra(
        'else\n'
        '  abort("%s partition has unexpected contents after OTA update");\n'
        'endif;' % (partition,))

  def _WriteUpdate(self, script, output_zip):
    """Add the transfer list and data files to 'output_zip' and append
    the block_image_update call to 'script'."""
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    # The patch data is already compressed; store it uncompressed in zip.
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
                device=self.device, partition=self.partition))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    """Return the hex SHA-1 of the data in 'ranges' read from 'source'."""
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    zero_block = '\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()
1479
1480
# Re-exported so callers can refer to common.DataImage.
DataImage = blockimgdiff.DataImage

# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "yaffs2": "MTD",
    "mtd": "MTD",
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}
1492
def GetTypeAndDevice(mount_point, info):
  """Look up 'mount_point' in the fstab carried in 'info'.

  Args:
    mount_point: The mount point, e.g. "/system".
    info: An info dict with a "fstab" entry.

  Returns:
    A (partition_type, device) tuple, with partition_type translated via
    PARTITION_TYPES (e.g. "ext4" -> "EMMC").

  Raises:
    KeyError: If the fstab is missing or has no entry for 'mount_point'.
        The message (absent in the old bare `raise KeyError`) aids
        debugging; callers that catch KeyError are unaffected.
  """
  fstab = info["fstab"]
  if not fstab:
    raise KeyError("no fstab when looking up %s" % (mount_point,))
  return (PARTITION_TYPES[fstab[mount_point].fs_type],
          fstab[mount_point].device)
1500
1501
def ParseCertificate(data):
  """Parse a PEM-format certificate, returning the raw DER contents.

  Collects the base64 lines strictly between the BEGIN/END CERTIFICATE
  markers of the first certificate in 'data' and decodes them.
  """
  # Local import: base64.b64decode replaces the Python-2-only
  # str.decode('base64') codec, which does not exist on Python 3.
  import base64

  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  cert = base64.b64decode("".join(cert))
  return cert
1515
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.  (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.)  Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
  system_root_image = info_dict.get("system_root_image", None) == "true"

  if full_recovery_image:
    # Ship the complete recovery image rather than a patch.
    output_sink("etc/recovery.img", recovery_img.data)

  else:
    diff_program = ["imgdiff"]
    # recovery-resource.dat, when present, is passed to imgdiff as a
    # bonus file (-b) both at diff time and at apply time.
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"
    else:
      bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    # No fstab entries for boot/recovery: skip emitting the install script.
    return

  # The script first checks (applypatch -c) whether recovery is already
  # installed, and only patches/installs when it is not.
  if full_recovery_image:
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release.  Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  found = False
  if system_root_image:
    init_rc_dir = os.path.join(input_dir, "ROOT")
  else:
    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):
      continue

    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
      for line in f:
        # The flash_recovery service line names the script's location.
        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
        if m:
          sh_location = m.group(1)
          found = True
          break

    if found:
      break

  print "putting script in", sh_location

  output_sink(sh_location, sh)
1618