common.py revision d522bdc9edbf64d15a59c6924853b2e2c8c39e90
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import errno
17import getopt
18import getpass
19import imp
20import os
21import platform
22import re
23import shlex
24import shutil
25import subprocess
26import sys
27import tempfile
28import threading
29import time
30import zipfile
31
32import blockimgdiff
33
34from hashlib import sha1 as sha1
35
36
class Options(object):
  """Mutable bag of global settings shared by the releasetools scripts.

  A single module-level instance (OPTIONS) holds the state; ParseOptions()
  and the individual tools update its attributes in place.
  """

  def __init__(self):
    # Host-tools output directory for the platform we are running on;
    # None on unrecognized platforms.
    host_out_dirs = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }
    self.search_path = host_out_dirs.get(sys.platform, None)

    # Signing configuration.  The signapk paths are relative to search_path.
    self.signapk_path = "framework/signapk.jar"
    self.signapk_shared_library_path = "lib64"
    self.extra_signapk_args = []
    self.java_path = "java"       # use the java found on $PATH by default
    self.java_args = "-Xmx2048m"  # JVM args
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []

    # Misc shared state.
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
68
69
# Shared, mutable global configuration.  ParseOptions() and the various
# tools update this single instance in place.
OPTIONS = Options()
71
72
# Values for "certificate" in apkcerts that mean special things.
# Keys listed here are skipped when prompting for passwords (see
# GetKeyPasswords) and are passed through unchanged by ReadApkCerts.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
75
76
class ExternalError(RuntimeError):
  """Raised when an invoked external tool fails or a size check trips."""
79
80
81def Run(args, **kwargs):
82  """Create and return a subprocess.Popen object, printing the command
83  line on the terminal if -v was specified."""
84  if OPTIONS.verbose:
85    print "  running: ", " ".join(args)
86  return subprocess.Popen(args, **kwargs)
87
88
def CloseInheritedPipes():
  """Close leaked pipe file descriptors inherited from gmake on Mac OS.

  Gmake on Mac OS leaks pipe fds into child processes; close any fd in
  [3, 1024] that refers to a FIFO before doing other work.  No-op on
  other platforms.
  """
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      # os.fstat() raises OSError for fds that aren't open, so the old
      # "stat is not None" guard was dead code.
      fd_stat = os.fstat(fd)
      if fd_stat.st_mode & 0x1000:  # S_IFIFO: it's a pipe; close it.
        os.close(fd)
    except OSError:
      # fd not open (or closed underneath us); ignore either way.
      pass
103
104
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  input_file may be a zipfile.ZipFile of the target-files zip, or the
  path to an unzipped target-files directory.  If input_dir is not None,
  we are repacking from an unzipped directory, and some properties that
  normally point into out/ are redirected to files under input_dir.
  """

  def read_helper(fn):
    # Read 'fn' (a zip-style path like "META/misc_info.txt") from either
    # the zip or the directory.  Raises KeyError if the entry is missing.
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)
        # NOTE(review): other IOErrors are swallowed here and the helper
        # returns None -- presumably unintended; confirm before changing.
  d = {}
  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    # ok if misc_info.txt doesn't exist
    pass

  # backwards compatibility: These values used to be in their own
  # files.  Look for them, in case we're processing an old
  # target_files zip.

  if "mkyaffs2_extra_flags" not in d:
    try:
      d["mkyaffs2_extra_flags"] = read_helper(
          "META/mkyaffs2-extra-flags.txt").strip()
    except KeyError:
      # ok if flags don't exist
      pass

  if "recovery_api_version" not in d:
    try:
      d["recovery_api_version"] = read_helper(
          "META/recovery-api-version.txt").strip()
    except KeyError:
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    try:
      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
    except KeyError:
      # ok if extensions don't exist
      pass

  if "fstab_version" not in d:
    d["fstab_version"] = "1"

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

  # Legacy image-size file: "name value" pairs, one per line.
  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    pass

  def makeint(key):
    # Convert d[key] (if present) to int; base 0 honors 0x/octal prefixes.
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  # "no_recovery" is stored as the string "true" when set, so compare
  # against the string (the False default never equals it).
  if d.get("no_recovery", False) == "true":
    d["fstab"] = None
  else:
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                   d.get("system_root_image", False))
  d["build.prop"] = LoadBuildProp(read_helper)
  return d
221
222def LoadBuildProp(read_helper):
223  try:
224    data = read_helper("SYSTEM/build.prop")
225  except KeyError:
226    print "Warning: could not find SYSTEM/build.prop in %s" % zip
227    data = ""
228  return LoadDictionaryFromLines(data.split("\n"))
229
def LoadDictionaryFromLines(lines):
  """Parse an iterable of "name=value" lines into a dict.

  Blank lines, comment lines starting with "#", and lines without "="
  are skipped.  Only the first "=" splits; later ones stay in the value.
  """
  result = {}
  for raw in lines:
    stripped = raw.strip()
    if not stripped or stripped.startswith("#"):
      continue
    if "=" not in stripped:
      continue
    key, _, value = stripped.partition("=")
    result[key] = value
  return result
240
def LoadRecoveryFSTab(read_helper, fstab_version, system_root_image=False):
  """Parse RECOVERY/RAMDISK/etc/recovery.fstab into a dict.

  read_helper is a callable taking a zip-style path and returning the
  file contents (raising KeyError if missing).  fstab_version selects the
  version-1 or version-2 line format.  Returns {mount_point: Partition};
  raises ValueError on malformed lines or an unknown fstab_version.

  When system_root_image is true, the system files live under "/", and a
  "/system" alias pointing at the "/" entry is added for the other tools.
  """
  class Partition(object):
    # device2 and context default to None: version-1 entries carry no
    # SELinux context (and may have no second device).  Previously the
    # v1 code below constructed Partition without a 'context' argument,
    # making this constructor raise TypeError on any v1 fstab.
    def __init__(self, mount_point, fs_type, device, length, device2=None,
                 context=None):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.device2 = device2
      self.context = context

  try:
    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
  except KeyError:
    print("Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab")
    data = ""

  if fstab_version == 1:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # <mount_point> <fs_type> <device> [<device2>|<options>]
      pieces = line.split()
      if not 3 <= len(pieces) <= 4:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
      options = None
      if len(pieces) >= 4:
        if pieces[3].startswith("/"):
          device2 = pieces[3]
          if len(pieces) >= 5:
            options = pieces[4]
        else:
          device2 = None
          options = pieces[3]
      else:
        device2 = None

      mount_point = pieces[0]
      length = 0
      if options:
        options = options.split(",")
        for i in options:
          if i.startswith("length="):
            length = int(i[7:])
          else:
            print("%s: unknown option \"%s\"" % (mount_point, i))

      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
                                 device=pieces[2], length=length,
                                 device2=device2)

  elif fstab_version == 2:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
      pieces = line.split()
      if len(pieces) != 5:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

      # Ignore entries that are managed by vold
      options = pieces[4]
      if "voldmanaged=" in options:
        continue

      # It's a good line, parse it
      length = 0
      options = options.split(",")
      for i in options:
        if i.startswith("length="):
          length = int(i[7:])
        else:
          # Ignore all unknown options in the unified fstab
          continue

      mount_flags = pieces[3]
      # Honor the SELinux context if present.
      context = None
      for i in mount_flags.split(","):
        if i.startswith("context="):
          context = i

      mount_point = pieces[1]
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                                 device=pieces[0], length=length,
                                 device2=None, context=context)

  else:
    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))

  # / is used for the system mount point when the root directory is included in
  # system. Other areas assume system is always at "/system" so point /system
  # at /.
  if system_root_image:
    assert "/system" not in d and "/" in d
    d["/system"] = d["/"]
  return d
340
341
def DumpInfoDict(d):
  """Print every key of d in sorted order, with its value's type name."""
  for key in sorted(d):
    value = d[key]
    print("%-25s = (%s) %s" % (key, type(value).__name__, value))
345
346
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image.  Return the image data, or
  None if sourcedir does not appear to contains files for building the
  requested image."""

  def make_ramdisk():
    # mkbootfs sourcedir/RAMDISK (honoring fs_config_file if present) and
    # pipe through minigzip into a NamedTemporaryFile, which is returned
    # still open so it survives until the caller closes it.
    ramdisk_img = tempfile.NamedTemporaryFile()

    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
    else:
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

    p2.wait()
    p1.wait()
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

    return ramdisk_img

  # Without a kernel (or, when requested, a RAMDISK dir) there is nothing
  # to build from.
  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    return None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_img = make_ramdisk()

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  # Each optional single-line input file becomes an extra mkbootimg flag.
  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  args = info_dict.get("mkbootimg_version_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  # With vboot, mkbootimg writes an intermediate unsigned file that the
  # signer then turns into the final img.
  img_unsigned = None
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
  else:
    cmd.extend(["--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  # Sign in place with boot_signer when enabled and a verity key is set.
  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
           img_keyblock.name,
           img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # NOTE(review): the arguments look swapped -- seek(offset, whence) --
  # but os.SEEK_SET == 0, so this is seek(0, 0), i.e. rewind; confirm
  # before "fixing".
  img.seek(os.SEEK_SET, 0)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data
473  return data
474
475
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None):
  """Return a File object with the desired bootable image.

  Search order: 'unpack_dir'/BOOTABLE_IMAGES/'prebuilt_name', then
  'unpack_dir'/IMAGES/'prebuilt_name'; otherwise build the image from the
  source files under 'unpack_dir'/'tree_subdir'.  Returns None when
  nothing could be built.
  """
  for subdir in ("BOOTABLE_IMAGES", "IMAGES"):
    prebuilt_path = os.path.join(unpack_dir, subdir, prebuilt_name)
    if os.path.exists(prebuilt_path):
      print("using prebuilt %s from %s..." % (prebuilt_name, subdir))
      return File.FromLocalFile(name, prebuilt_path)

  print("building image from target_files %s..." % (tree_subdir,))

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot
  # image, unless "recovery_as_boot" asks us to carry the recovery ramdisk
  # there anyway.
  has_ramdisk = (info_dict.get("system_root_image") != "true" or
                 prebuilt_name != "boot.img" or
                 info_dict.get("recovery_as_boot") == "true")

  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk)
  return File(name, data) if data else None
513
514
def UnzipTemp(filename, pattern=None):
  """Unzip 'filename' into a fresh temporary directory.

  A name of the form "foo.zip+bar.zip" unzips foo.zip into the temp dir
  and bar.zip into its BOOTABLE_IMAGES/ subdirectory.  The directory is
  registered in OPTIONS.tempfiles for later Cleanup().

  Returns (tempdir, zipobj) where zipobj is a read-only zipfile.ZipFile
  of the main archive.
  """
  tmp_dir = tempfile.mkdtemp(prefix="targetfiles-")
  OPTIONS.tempfiles.append(tmp_dir)

  def unzip_to_dir(zip_name, dest_dir):
    # -o: overwrite without prompting; -q: quiet.
    cmd = ["unzip", "-o", "-q", zip_name, "-d", dest_dir]
    if pattern is not None:
      cmd.append(pattern)
    proc = Run(cmd, stdout=subprocess.PIPE)
    proc.communicate()
    if proc.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (zip_name,))

  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m:
    unzip_to_dir(m.group(1), tmp_dir)
    unzip_to_dir(m.group(2), os.path.join(tmp_dir, "BOOTABLE_IMAGES"))
    filename = m.group(1)
  else:
    unzip_to_dir(filename, tmp_dir)

  return tmp_dir, zipfile.ZipFile(filename, "r")
547
548
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password."""

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    # First probe: try reading the key with no password ("-nocrypt").
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Second probe: try an empty password.  The stderr text tells an
      # encrypted key apart from one openssl can't parse at all.
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  # Collect the remaining passwords interactively; password-less keys
  # map to None.
  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords
596
597
def GetMinSdkVersion(apk_name):
  """Get the minSdkVersion declared in the APK.

  The returned value can be a decimal number (API level) or a codename.

  Raises ExternalError if aapt fails or no sdkVersion line is found.
  """
  p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
  output, _ = p.communicate()
  # stderr is not piped, so communicate()'s second value is always None;
  # the old "if err:" test could never fire.  Check the exit status.
  if p.returncode != 0:
    raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"
        % (p.returncode,))

  for line in output.split("\n"):
    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
    if m:
      return m.group(1)
  raise ExternalError("No minSdkVersion returned by aapt")
615
616
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Get the minSdkVersion declared in the APK as a number (API level).

  A codename is translated to a number via codename_to_api_level_map;
  an unknown codename raises ExternalError.
  """
  version = GetMinSdkVersion(apk_name)
  try:
    return int(version)
  except ValueError:
    pass
  # Not a decimal number; treat it as a codename.
  if version not in codename_to_api_level_map:
    raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
                        % (version, codename_to_api_level_map))
  return codename_to_api_level_map[version]
633
634
def SignFile(input_name, output_name, key, password, min_api_level=None,
    codename_to_api_level_map=dict(),
    whole_file=False):
  """Sign input_name (a zip/jar/apk) with signapk.jar, writing output_name.

  'password' may be None when the key has no password; otherwise it is
  fed to signapk on stdin.

  whole_file=True passes "-w" so the signature covers the whole file and
  is embedded in the zip's archive comment.

  min_api_level is the API level (int) of the oldest platform this file
  may end up on; if None (and not whole_file), it is derived from the
  APK's own minSdkVersion, with codename_to_api_level_map used to
  translate codenames.

  Raises ExternalError when signapk exits non-zero.
  """
  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  cmd = [OPTIONS.java_path, OPTIONS.java_args,
         "-Djava.library.path=" + java_library_path,
         "-jar", os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
  cmd.extend(OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")

  min_sdk_version = min_api_level
  if min_sdk_version is None and not whole_file:
    # Read it out of the APK's manifest.
    min_sdk_version = GetMinSdkVersionInt(input_name,
                                          codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd.extend(["--min-sdk-version", str(min_sdk_version)])

  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  p.communicate(password + "\n" if password is not None else None)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
683
684
def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target.  Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size.

  'target' is an image name such as "system.img"; the limit is looked up
  as "<device>_size" in info_dict via the fstab entry for the matching
  mount point.  Silently returns when no fs type or limit is known.
  """

  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    # The userdata partition is mounted at /data.
    if mount_point == "/userdata":
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    # Strip any directory components; the size key uses the bare name.
    if "/" in device:
      device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:
    return

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page)
    # (relies on Python 2 integer division to round down to whole pages)
    limit = limit / 2048 * (2048+64)
  size = len(data)
  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  if pct >= 99.0:
    raise ExternalError(msg)
  elif pct >= 95.0:
    print
    print "  WARNING: ", msg
    print
  elif OPTIONS.verbose:
    print "  ", msg
723
724
def ReadApkCerts(tf_zip):
  """Parse META/apkcerts.txt from a target_files ZipFile.

  Returns a dict mapping package name to either a special certificate
  string (see SPECIAL_CERT_STRINGS) or the shared key base name with the
  public-key suffix stripped.  Lines not matching the expected format
  are skipped; a matching line whose cert/key pair is inconsistent
  raises ValueError.
  """
  certmap = {}
  entry_re = re.compile(r'^name="(.*)"\s+certificate="(.*)"\s+'
                        r'private_key="(.*)"$')
  for line in tf_zip.read("META/apkcerts.txt").split("\n"):
    line = line.strip()
    if not line:
      continue
    m = entry_re.match(line)
    if not m:
      continue
    name, cert, privkey = m.groups()
    pub_suffix_len = len(OPTIONS.public_key_suffix)
    priv_suffix_len = len(OPTIONS.private_key_suffix)
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      certmap[name] = cert
    elif (cert.endswith(OPTIONS.public_key_suffix) and
          privkey.endswith(OPTIONS.private_key_suffix) and
          cert[:-pub_suffix_len] == privkey[:-priv_suffix_len]):
      certmap[name] = cert[:-pub_suffix_len]
    else:
      raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
  return certmap
748
749
# Help text for the flags handled by ParseOptions(); Usage() prints it
# after the calling module's own docstring.
COMMON_DOCSTRING = """
  -p  (--path)  <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the python module containing device-specific
      releasetools code.

  -x  (--extra)  <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""
769
def Usage(docstring):
  """Print the calling module's docstring followed by the common flags."""
  print(docstring.rstrip("\n"))
  print(COMMON_DOCSTRING)
773
774
775def ParseOptions(argv,
776                 docstring,
777                 extra_opts="", extra_long_opts=(),
778                 extra_option_handler=None):
779  """Parse the options in argv and return any arguments that aren't
780  flags.  docstring is the calling module's docstring, to be displayed
781  for errors and -h.  extra_opts and extra_long_opts are for flags
782  defined by the caller, which are processed by passing them to
783  extra_option_handler."""
784
785  try:
786    opts, args = getopt.getopt(
787        argv, "hvp:s:x:" + extra_opts,
788        ["help", "verbose", "path=", "signapk_path=",
789         "signapk_shared_library_path=", "extra_signapk_args=",
790         "java_path=", "java_args=", "public_key_suffix=",
791         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
792         "verity_signer_path=", "verity_signer_args=", "device_specific=",
793         "extra="] +
794        list(extra_long_opts))
795  except getopt.GetoptError as err:
796    Usage(docstring)
797    print "**", str(err), "**"
798    sys.exit(2)
799
800  for o, a in opts:
801    if o in ("-h", "--help"):
802      Usage(docstring)
803      sys.exit()
804    elif o in ("-v", "--verbose"):
805      OPTIONS.verbose = True
806    elif o in ("-p", "--path"):
807      OPTIONS.search_path = a
808    elif o in ("--signapk_path",):
809      OPTIONS.signapk_path = a
810    elif o in ("--signapk_shared_library_path",):
811      OPTIONS.signapk_shared_library_path = a
812    elif o in ("--extra_signapk_args",):
813      OPTIONS.extra_signapk_args = shlex.split(a)
814    elif o in ("--java_path",):
815      OPTIONS.java_path = a
816    elif o in ("--java_args",):
817      OPTIONS.java_args = a
818    elif o in ("--public_key_suffix",):
819      OPTIONS.public_key_suffix = a
820    elif o in ("--private_key_suffix",):
821      OPTIONS.private_key_suffix = a
822    elif o in ("--boot_signer_path",):
823      OPTIONS.boot_signer_path = a
824    elif o in ("--boot_signer_args",):
825      OPTIONS.boot_signer_args = shlex.split(a)
826    elif o in ("--verity_signer_path",):
827      OPTIONS.verity_signer_path = a
828    elif o in ("--verity_signer_args",):
829      OPTIONS.verity_signer_args = shlex.split(a)
830    elif o in ("-s", "--device_specific"):
831      OPTIONS.device_specific = a
832    elif o in ("-x", "--extra"):
833      key, value = a.split("=", 1)
834      OPTIONS.extras[key] = value
835    else:
836      if extra_option_handler is None or not extra_option_handler(o, a):
837        assert False, "unknown option \"%s\"" % (o,)
838
839  if OPTIONS.search_path:
840    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
841                          os.pathsep + os.environ["PATH"])
842
843  return args
844
845
def MakeTempFile(prefix=None, suffix=None):
  """Create a temp file, register it for deletion by Cleanup(), and
  return its path."""
  fd, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # Only the name is needed; close the fd so callers can reopen freely.
  os.close(fd)
  OPTIONS.tempfiles.append(path)
  return path
853
854
def Cleanup():
  """Delete every temp file/directory registered in OPTIONS.tempfiles.

  The list is cleared afterwards, so calling Cleanup() a second time is
  a no-op instead of failing on the already-deleted paths.
  """
  for path in OPTIONS.tempfiles:
    if os.path.isdir(path):
      shutil.rmtree(path)
    else:
      os.remove(path)
  del OPTIONS.tempfiles[:]
861
862
class PasswordManager(object):
  """Collects key passwords, via $ANDROID_PW_FILE edited in $EDITOR when
  both are configured, otherwise by prompting on the terminal."""

  def __init__(self):
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      # Seed empty entries so they show up in the editable file.
      for i in missing:
        current[i] = ""

      if not first:
        print "key file %s still missing some passwords." % (self.pwfile,)
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is falsy.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.iteritems()):
      if v:
        result[k] = v
      else:
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    """Write known passwords to pwfile, open it in $EDITOR for the user
    to fill in, then re-read it.  Falls back to terminal prompts when no
    editor or pwfile is configured."""
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    os.chmod(self.pwfile, 0o600)  # passwords: owner read/write only
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    # Sorting on (not v, k, v) puts password-less entries first.
    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  def ReadFile(self):
    """Parse pwfile ("[[[ password ]]] key" lines) into a {key: password}
    dict; a missing or unset file yields an empty dict."""
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          print "failed to parse password file: ", line
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      if e.errno != errno.ENOENT:
        print "error reading password file: ", str(e)
    return result
963
964
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Add the on-disk file 'filename' to 'zip_file' as 'arcname'.

  To keep the archive reproducible, the file is temporarily chmod'ed to
  'perms' and its mtime pinned to a fixed date (2009-01-01) before being
  written; the original mode and times are restored afterwards.

  http://b/18015246: Python 2.7's zipfile wrongly thinks zip64 is
  required for files larger than 2GiB, so ZIP64_LIMIT is raised for the
  duration of the call.  (zipfile.writestr() cannot be used for strings
  that large; zipfile.write() must be called directly.  This mess can be
  avoided if we port to python3.)
  """
  import datetime

  old_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression
  if arcname is None:
    arcname = filename

  original_stat = os.stat(filename)

  try:
    # zipfile.write() offers no ZipInfo hook, so mutate the on-disk file
    # and undo the mutation once the entry has been written.
    os.chmod(filename, perms)

    # Fixed timestamp => repeatable output.
    fixed_mtime = (datetime.datetime(2009, 1, 1) -
                   datetime.datetime.fromtimestamp(0)).total_seconds()
    os.utime(filename, (fixed_mtime, fixed_mtime))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    os.chmod(filename, original_stat.st_mode)
    os.utime(filename, (original_stat.st_atime, original_stat.st_mtime))
    zipfile.ZIP64_LIMIT = old_limit
1003
1004
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.

  Args:
    zip_file: the zipfile.ZipFile to add the entry to.
    zinfo_or_arcname: a zipfile.ZipInfo, or the archive name as a string.
    data: the entry contents.
    perms: POSIX mode bits for the entry; defaults to 0o100644 when an
        arcname string is given, left untouched (None) otherwise.
    compress_type: if given, overrides the compression method in zinfo.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Restore ZIP64_LIMIT even if writestr() raises (e.g. OverflowError on
  # huge inputs); previously a failure here left the module-level limit
  # clobbered for all later zipfile users.  Matches ZipWrite()/ZipClose().
  try:
    if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
      zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
      zinfo.compress_type = zip_file.compression
      if perms is None:
        perms = 0o100644
    else:
      zinfo = zinfo_or_arcname

    # If compress_type is given, it overrides the value in zinfo.
    if compress_type is not None:
      zinfo.compress_type = compress_type

    # If perms is given, it has a priority.
    if perms is not None:
      # If perms doesn't set the file type, mark it as a regular file.
      if perms & 0o770000 == 0:
        perms |= 0o100000
      zinfo.external_attr = perms << 16

    # Use a fixed timestamp so the output is repeatable.
    zinfo.date_time = (2009, 1, 1, 0, 0, 0)

    zip_file.writestr(zinfo, data)
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1045
1046
def ZipClose(zip_file):
  """Close 'zip_file' with zipfile's ZIP64_LIMIT raised.

  http://b/18015246: zipfile also refers to ZIP64_LIMIT during close()
  when it writes out the central directory, so the same workaround as in
  ZipWrite() is needed here.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Restore the limit even if close() fails (e.g. on a write error while
  # flushing the central directory); previously a failure left the
  # module-level limit clobbered.
  try:
    zip_file.close()
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1057
1058
1059class DeviceSpecificParams(object):
1060  module = None
1061  def __init__(self, **kwargs):
1062    """Keyword arguments to the constructor become attributes of this
1063    object, which is passed to all functions in the device-specific
1064    module."""
1065    for k, v in kwargs.iteritems():
1066      setattr(self, k, v)
1067    self.extras = OPTIONS.extras
1068
1069    if self.module is None:
1070      path = OPTIONS.device_specific
1071      if not path:
1072        return
1073      try:
1074        if os.path.isdir(path):
1075          info = imp.find_module("releasetools", [path])
1076        else:
1077          d, f = os.path.split(path)
1078          b, x = os.path.splitext(f)
1079          if x == ".py":
1080            f = b
1081          info = imp.find_module(f, [d])
1082        print "loaded device-specific extensions from", path
1083        self.module = imp.load_module("device_specific", *info)
1084      except ImportError:
1085        print "unable to load device-specific module; assuming none"
1086
1087  def _DoCall(self, function_name, *args, **kwargs):
1088    """Call the named function in the device-specific module, passing
1089    the given args and kwargs.  The first argument to the call will be
1090    the DeviceSpecific object itself.  If there is no module, or the
1091    module does not define the function, return the value of the
1092    'default' kwarg (which itself defaults to None)."""
1093    if self.module is None or not hasattr(self.module, function_name):
1094      return kwargs.get("default", None)
1095    return getattr(self.module, function_name)(*((self,) + args), **kwargs)
1096
1097  def FullOTA_Assertions(self):
1098    """Called after emitting the block of assertions at the top of a
1099    full OTA package.  Implementations can add whatever additional
1100    assertions they like."""
1101    return self._DoCall("FullOTA_Assertions")
1102
1103  def FullOTA_InstallBegin(self):
1104    """Called at the start of full OTA installation."""
1105    return self._DoCall("FullOTA_InstallBegin")
1106
1107  def FullOTA_InstallEnd(self):
1108    """Called at the end of full OTA installation; typically this is
1109    used to install the image for the device's baseband processor."""
1110    return self._DoCall("FullOTA_InstallEnd")
1111
1112  def IncrementalOTA_Assertions(self):
1113    """Called after emitting the block of assertions at the top of an
1114    incremental OTA package.  Implementations can add whatever
1115    additional assertions they like."""
1116    return self._DoCall("IncrementalOTA_Assertions")
1117
1118  def IncrementalOTA_VerifyBegin(self):
1119    """Called at the start of the verification phase of incremental
1120    OTA installation; additional checks can be placed here to abort
1121    the script before any changes are made."""
1122    return self._DoCall("IncrementalOTA_VerifyBegin")
1123
1124  def IncrementalOTA_VerifyEnd(self):
1125    """Called at the end of the verification phase of incremental OTA
1126    installation; additional checks can be placed here to abort the
1127    script before any changes are made."""
1128    return self._DoCall("IncrementalOTA_VerifyEnd")
1129
1130  def IncrementalOTA_InstallBegin(self):
1131    """Called at the start of incremental OTA installation (after
1132    verification is complete)."""
1133    return self._DoCall("IncrementalOTA_InstallBegin")
1134
1135  def IncrementalOTA_InstallEnd(self):
1136    """Called at the end of incremental OTA installation; typically
1137    this is used to install the image for the device's baseband
1138    processor."""
1139    return self._DoCall("IncrementalOTA_InstallEnd")
1140
1141  def VerifyOTA_Assertions(self):
1142    return self._DoCall("VerifyOTA_Assertions")
1143
class File(object):
  """An in-memory file: a name plus its full contents.

  The size and SHA-1 digest of the data are computed once at
  construction time and exposed as attributes.
  """

  def __init__(self, name, data):
    self.name = name
    self.data = data
    self.size = len(data)
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Build a File called 'name' from the contents of 'diskname'."""
    with open(diskname, "rb") as f:
      contents = f.read()
    return File(name, contents)

  def WriteToTemp(self):
    """Dump the data to a NamedTemporaryFile and return it; the caller
    owns (and eventually closes) the returned file object."""
    tmp = tempfile.NamedTemporaryFile()
    tmp.write(self.data)
    tmp.flush()
    return tmp

  def AddToZip(self, z, compression=None):
    """Store this file into the open zip 'z' via ZipWriteStr()."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1166
# Maps a target filename extension to the external diff tool used by
# Difference.ComputePatch(); a value may be a program name or an argv
# prefix list.  Extensions not listed here fall back to "bsdiff".
DIFF_PROGRAM_BY_EXT = {
    ".gz" : "imgdiff",
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    ".img" : "imgdiff",
    }
1174
class Difference(object):
  """Computes a binary patch that turns source file 'sf' into target
  file 'tf', using an external diff program chosen from
  DIFF_PROGRAM_BY_EXT (unless 'diff_program' is given explicitly)."""

  def __init__(self, tf, sf, diff_program=None):
    self.tf = tf                      # target File
    self.sf = sf                      # source File
    self.patch = None                 # patch data; set by ComputePatch()
    self.diff_program = diff_program  # optional explicit tool override

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    ext = os.path.splitext(tf.name)[1]  # NOTE(review): unused; kept as-is.

    try:
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      # Tool convention: <prog> [args] <source> <target> <patch-out>.
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []
      # Drain the child's output in a helper thread so a hung diff tool
      # can be detected and killed from here.
      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        print "WARNING: diff command timed out"
        p.terminate()
        th.join(5)
        if th.is_alive():
          # terminate() didn't work; force-kill the child.
          p.kill()
          th.join()

      if err or p.returncode != 0:
        print "WARNING: failure running %s:\n%s\n" % (
            diff_program, "".join(err))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch


  def GetPatch(self):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
1246
1247
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'.

  Work is spread across OPTIONS.worker_threads threads, biggest targets
  first, so a single large file doesn't become the long pole at the end
  of the run."""
  print len(diffs), "diffs to compute"

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    try:
      # The lock guards diff_iter and stdout printing; it is deliberately
      # released around the expensive ComputePatch() call so workers
      # diff in parallel.  Note the acquire/release pairing straddles
      # loop iterations: held at the top of each iteration, released
      # for the diff, re-acquired for reporting.
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print "patching failed!                                  %s" % (name,)
        else:
          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception as e:
      print e
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
1292
1293
class BlockDifference(object):
  """Drives blockimgdiff to produce a block-based (transfer-list) update
  for one partition, and emits the updater-script fragments that verify
  and apply it."""

  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None):
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block

    # Default to the highest transfer-list version listed in the target
    # info dict (falling back to 1).
    if version is None:
      version = 1
      if OPTIONS.info_dict:
        version = max(
            int(i) for i in
            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version)
    tmpdir = tempfile.mkdtemp()
    # Registered for deletion by Cleanup().
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    # For incrementals, the device path must come from the *source*
    # build's fstab, since that is what is on the device when patching.
    if src is None:
      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
    else:
      _, self.device = GetTypeAndDevice("/" + partition,
                                        OPTIONS.source_info_dict)

  @property
  def required_cache(self):
    # Maximum number of stashed bytes needed while applying the patch.
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None):
    """Emit the script commands that install this partition's update
    (and, when OPTIONS.verify is set, post-install verification)."""
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    if OPTIONS.verify:
      self._WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
                       'ui_print("    Verified.") || '
                       'ui_print("\\"%s\\" has unexpected contents.");' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True),
                       self.device))
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    """Emit pre-install verification: check that the blocks on the
    device match what this patch expects, in either their source or
    (already-patched) target state."""
    partition = self.partition
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))
    else:
      if touched_blocks_only and self.version >= 3:
        # Only hash the blocks the transfer list actually reads.
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
      else:
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()
      ranges_str = ranges.to_string_raw()
      if self.version >= 4:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, expected_sha1,
                            self.device, partition, partition, partition))
      elif self.version == 3:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, expected_sha1,
                            self.device, partition, partition, partition))
      else:
        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                           self.device, ranges_str, self.src.TotalSha1()))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block("%s");' % (self.device,))

        # If version >= 4, try block recovery before abort update
        script.AppendExtra((
            'ifelse (block_image_recover("{device}", "{ranges}") && '
            'block_image_verify("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("{partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        script.AppendExtra(('abort("%s partition has unexpected contents");\n'
                            'endif;') % (partition,))

  def _WritePostInstallVerifyScript(self, script):
    """Emit verification that runs after patching, hashing the full
    target care map (and that extended blocks really are zero)."""
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                         self.device, ranges_str,
                         self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      script.AppendExtra(
          'else\n'
          '  abort("%s partition has unexpected non-zero contents after OTA '
          'update");\n'
          'endif;' % (partition,))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    script.AppendExtra(
        'else\n'
        '  abort("%s partition has unexpected contents after OTA update");\n'
        'endif;' % (partition,))

  def _WriteUpdate(self, script, output_zip):
    """Pack the transfer list and data files into the OTA zip and emit
    the block_image_update() call that applies them."""
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    # The patch data is already compressed; store it to avoid double
    # compression.
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat") ||\n'
            '    abort("Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    """Return the hex SHA-1 over the given block ranges of 'source'."""
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    zero_block = '\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()
1492
1493
# Re-exported for convenience; DataImage is defined in blockimgdiff.
DataImage = blockimgdiff.DataImage
1495
# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "yaffs2": "MTD",
    "mtd": "MTD",
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}

def GetTypeAndDevice(mount_point, info):
  """Look up 'mount_point' in info["fstab"] and return the tuple
  (partition_type, device), where partition_type is mapped from the
  entry's fs_type via PARTITION_TYPES.

  Raises KeyError when no fstab is present, or when the mount point or
  its fs_type is unknown.
  """
  fstab = info["fstab"]
  if not fstab:
    raise KeyError
  entry = fstab[mount_point]
  return (PARTITION_TYPES[entry.fs_type], entry.device)
1513
1514
def ParseCertificate(data):
  """Parse a PEM-format certificate and return its decoded (DER) body.

  Collects the base64 text between the BEGIN/END CERTIFICATE markers and
  decodes it; anything outside the markers is ignored.
  """
  import base64

  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  # base64.b64decode() behaves identically to the Python-2-only
  # str.decode('base64') used previously, and also works on Python 3.
  return base64.b64decode("".join(cert))
1528
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.  (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.)  Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  info_dict should be the dictionary returned by
  common.LoadInfoDict() on the input target_files (defaults to
  OPTIONS.info_dict).
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
  system_root_image = info_dict.get("system_root_image", None) == "true"

  if full_recovery_image:
    # Ship the whole recovery image instead of a boot->recovery patch.
    output_sink("etc/recovery.img", recovery_img.data)

  else:
    diff_program = ["imgdiff"]
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      # Use the shared resource blob as a bonus input; the on-device
      # applypatch invocation must be given the same file (bonus_args).
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"
    else:
      bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    # No fstab entries for boot/recovery; nothing to install.
    return

  # Build the first-boot installer script: check whether recovery is
  # already up to date, and if not, install it (whole image or patch).
  if full_recovery_image:
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release.  Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  found = False
  if system_root_image:
    init_rc_dir = os.path.join(input_dir, "ROOT")
  else:
    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):
      continue

    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
      for line in f:
        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
        if m:
          sh_location = m.group(1)
          found = True
          break

    if found:
      break

  print "putting script in", sh_location

  output_sink(sh_location, sh)
1631