common.py revision f54216f29238a67aad1199a0e85d09e443740bf0
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import errno
17import getopt
18import getpass
19import imp
20import os
21import platform
22import re
23import shlex
24import shutil
25import subprocess
26import sys
27import tempfile
28import threading
29import time
30import zipfile
31
32import blockimgdiff
33
34from hashlib import sha1 as sha1
35
36
class Options(object):
  """Holds the global configuration shared by all releasetools scripts."""

  def __init__(self):
    # Default host-tools directory, keyed by sys.platform.
    search_path_by_platform = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }
    self.search_path = search_path_by_platform.get(sys.platform, None)

    # Signing configuration.  Jar/library paths are relative to search_path.
    self.signapk_path = "framework/signapk.jar"
    self.signapk_shared_library_path = "lib64"
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the java found on $PATH by default.
    self.java_args = "-Xmx2048m"  # JVM args.
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # Use the otatools-built boot_signer by default.
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []

    # Miscellaneous runtime state shared across modules.
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
68
69
# The single shared Options instance; the releasetools scripts read and
# mutate this object rather than passing configuration around.
OPTIONS = Options()


# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
75
76
class ExternalError(RuntimeError):
  """Raised when an external tool invoked by these scripts fails."""
79
80
def Run(args, **kwargs):
  """Create and return a subprocess.Popen object, printing the command
  line on the terminal if -v was specified.

  Args:
    args: list of program arguments (no shell interpretation).
    **kwargs: forwarded verbatim to subprocess.Popen (stdout, stdin, ...).

  Returns:
    The subprocess.Popen object; the caller is responsible for waiting on
    it and checking its return code.
  """
  if OPTIONS.verbose:
    print "  running: ", " ".join(args)
  return subprocess.Popen(args, **kwargs)
87
88
def CloseInheritedPipes():
  """Close leaked pipe file descriptors inherited from gmake on Mac OS.

  Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
  before doing other work.  No-op on every platform other than Darwin.
  """
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      st = os.fstat(fd)
      # The S_IFIFO bit (0x1000) in st_mode marks a pipe.
      if st is not None and (st[0] & 0x1000) != 0:
        os.close(fd)
    except OSError:
      # fd not open, or close failed -- either way, nothing to do.
      pass
103
104
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  Args:
    input_file: an opened zipfile.ZipFile of the target-files, or a path
        to an unzipped target-files directory.
    input_dir: optional path to the unzipped target-files directory.  When
        given, we are repacking, and properties stored as links into out/
        are redirected to the actual files under this directory.

  Returns:
    The info dict, including "fstab" (parsed recovery fstab or None) and
    "build.prop" (parsed SYSTEM/build.prop dict).

  Raises:
    ValueError: if the recovery API version cannot be determined.
  """

  def read_helper(fn):
    # Return the contents of 'fn' from either the zip or the unpacked
    # tree.  Raises KeyError for a missing entry, mirroring ZipFile.read.
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)
  d = {}
  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    # ok if misc_info.txt doesn't exist
    pass

  # backwards compatibility: These values used to be in their own
  # files.  Look for them, in case we're processing an old
  # target_files zip.

  if "mkyaffs2_extra_flags" not in d:
    try:
      d["mkyaffs2_extra_flags"] = read_helper(
          "META/mkyaffs2-extra-flags.txt").strip()
    except KeyError:
      # ok if flags don't exist
      pass

  if "recovery_api_version" not in d:
    try:
      d["recovery_api_version"] = read_helper(
          "META/recovery-api-version.txt").strip()
    except KeyError:
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    try:
      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
    except KeyError:
      # ok if extensions don't exist
      pass

  if "fstab_version" not in d:
    # Old target-files predate the versioned fstab; assume version 1.
    d["fstab_version"] = "1"

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

    # Redirect {system,vendor}_base_fs_file.
    if "system_base_fs_file" in d:
      basename = os.path.basename(d["system_base_fs_file"])
      system_base_fs_file = os.path.join(input_dir, "META", basename)
      assert os.path.exists(system_base_fs_file), \
          "failed to find system base fs file: %s" % (system_base_fs_file,)
      d["system_base_fs_file"] = system_base_fs_file

    if "vendor_base_fs_file" in d:
      basename = os.path.basename(d["vendor_base_fs_file"])
      vendor_base_fs_file = os.path.join(input_dir, "META", basename)
      assert os.path.exists(vendor_base_fs_file), \
          "failed to find vendor base fs file: %s" % (vendor_base_fs_file,)
      d["vendor_base_fs_file"] = vendor_base_fs_file

  # Even older target-files carried per-image sizes in imagesizes.txt;
  # translate those entries into the modern "<name>_size" keys.
  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    pass

  def makeint(key):
    # Convert d[key] (if present) to int; base 0 honors 0x/0 prefixes.
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  if d.get("no_recovery", False) == "true":
    d["fstab"] = None
  else:
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                   d.get("system_root_image", False))
  d["build.prop"] = LoadBuildProp(read_helper)
  return d
236
237def LoadBuildProp(read_helper):
238  try:
239    data = read_helper("SYSTEM/build.prop")
240  except KeyError:
241    print "Warning: could not find SYSTEM/build.prop in %s" % zip
242    data = ""
243  return LoadDictionaryFromLines(data.split("\n"))
244
def LoadDictionaryFromLines(lines):
  """Parse "name=value" lines into a dict.

  Blank lines, "#" comments, and lines without "=" are skipped.  The value
  is everything after the first "=", unstripped.
  """
  result = {}
  for raw in lines:
    entry = raw.strip()
    if not entry or entry.startswith("#") or "=" not in entry:
      continue
    key, _, value = entry.partition("=")
    result[key] = value
  return result
255
def LoadRecoveryFSTab(read_helper, fstab_version, system_root_image=False):
  """Parse RECOVERY/RAMDISK/etc/recovery.fstab into a mount-point dict.

  Args:
    read_helper: callable mapping an archive-relative path to file contents,
        raising KeyError if the entry is missing.
    fstab_version: 1 or 2, selecting the fstab line format.
    system_root_image: if True, the root directory is part of the system
        image, so an alias entry "/system" is added pointing at "/".

  Returns:
    A {mount_point: Partition} dict.

  Raises:
    ValueError: on a malformed fstab line or unknown fstab_version.
  """
  class Partition(object):
    # BUG FIX: 'context' now defaults to None.  The v1 parser below never
    # passes it, which previously raised TypeError on any v1 fstab.
    def __init__(self, mount_point, fs_type, device, length, device2,
                 context=None):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.device2 = device2
      self.context = context

  try:
    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
  except KeyError:
    print("Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab")
    data = ""

  if fstab_version == 1:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # <mount_point> <fs_type> <device> [<device2>|<options>]
      pieces = line.split()
      if not 3 <= len(pieces) <= 4:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
      options = None
      if len(pieces) >= 4:
        if pieces[3].startswith("/"):
          # A path in the 4th column is an alternate device, not options.
          device2 = pieces[3]
          if len(pieces) >= 5:
            options = pieces[4]
        else:
          device2 = None
          options = pieces[3]
      else:
        device2 = None

      mount_point = pieces[0]
      length = 0
      if options:
        options = options.split(",")
        for i in options:
          if i.startswith("length="):
            length = int(i[7:])
          else:
            print("%s: unknown option \"%s\"" % (mount_point, i))

      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
                                 device=pieces[2], length=length,
                                 device2=device2)

  elif fstab_version == 2:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
      pieces = line.split()
      if len(pieces) != 5:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

      # Ignore entries that are managed by vold
      options = pieces[4]
      if "voldmanaged=" in options:
        continue

      # It's a good line, parse it
      length = 0
      options = options.split(",")
      for i in options:
        if i.startswith("length="):
          length = int(i[7:])
        else:
          # Ignore all unknown options in the unified fstab
          continue

      mount_flags = pieces[3]
      # Honor the SELinux context if present.
      context = None
      for i in mount_flags.split(","):
        if i.startswith("context="):
          context = i

      mount_point = pieces[1]
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                                 device=pieces[0], length=length,
                                 device2=None, context=context)

  else:
    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))

  # / is used for the system mount point when the root directory is included in
  # system. Other areas assume system is always at "/system" so point /system
  # at /.  (Replaced the Python-2-only dict.has_key with "in".)
  if system_root_image:
    assert "/system" not in d and "/" in d
    d["/system"] = d["/"]
  return d
355
356
def DumpInfoDict(d):
  """Print each key/value of the info dict along with the value's type."""
  for k, v in sorted(d.items()):
    print "%-25s = (%s) %s" % (k, type(v).__name__, v)
360
361
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image.  Return the image data, or
  None if sourcedir does not appear to contains files for building the
  requested image.

  Args:
    sourcedir: directory with a "kernel" file, optional "second", "cmdline",
        "base" and "pagesize" files, and an optional "RAMDISK" subdirectory.
    fs_config_file: fs_config file passed to mkbootfs if it exists.
    info_dict: build properties dict; defaults to OPTIONS.info_dict.
    has_ramdisk: whether to build and include a ramdisk.

  Returns:
    The (possibly signed) image contents as a string, or None.
  """

  def make_ramdisk():
    # Pack RAMDISK/ with mkbootfs and stream the result through minigzip.
    ramdisk_img = tempfile.NamedTemporaryFile()

    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
    else:
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

    p2.wait()
    p1.wait()
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

    return ramdisk_img

  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    return None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_img = make_ramdisk()

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  # Optional single-value inputs are carried as little files in sourcedir.
  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  args = info_dict.get("mkbootimg_version_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  # With vboot, mkbootimg writes to an intermediate file and the signer
  # produces the final image in 'img'.
  img_unsigned = None
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
  else:
    cmd.extend(["--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
           img_keyblock.name,
           img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # BUG FIX: this used to be img.seek(os.SEEK_SET, 0) -- offset and whence
  # swapped -- which only worked because os.SEEK_SET happens to equal 0.
  # Rewind to the start before reading the image back.
  img.seek(0, os.SEEK_SET)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data
489
490
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'.

  Args:
    name: name to give the returned File object.
    prebuilt_name: image file name to look for (e.g. "boot.img").
    unpack_dir: root of the unzipped target-files tree.
    tree_subdir: subdirectory (e.g. "BOOT") holding the image sources.
    info_dict: build properties; defaults to OPTIONS.info_dict.

  Returns:
    A File object, or None if the image could not be built.
  """

  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  print "building image from target_files %s..." % (tree_subdir,)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot image.
  # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
  # for recovery.
  has_ramdisk = (info_dict.get("system_root_image") != "true" or
                 prebuilt_name != "boot.img" or
                 info_dict.get("recovery_as_boot") == "true")

  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk)
  if data:
    return File(name, data)
  return None
528
529
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """

  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  OPTIONS.tempfiles.append(tmp)

  def extract(archive, dest):
    # Use the external unzip binary; it preserves permissions properly.
    cmd = ["unzip", "-o", "-q", archive, "-d", dest]
    if pattern is not None:
      cmd.append(pattern)
    proc = Run(cmd, stdout=subprocess.PIPE)
    proc.communicate()
    if proc.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (archive,))

  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m:
    # "foo.zip+bar.zip": the second archive lands in BOOTABLE_IMAGES.
    extract(m.group(1), tmp)
    extract(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    filename = m.group(1)
  else:
    extract(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")
562
563
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password."""

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    # First probe: if the key parses with -nocrypt it is unencrypted.
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Second probe: try decrypting with the empty password.
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  # Collect real passwords for the keys that need them, then mark the
  # password-less keys with None.
  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords
611
612
def GetMinSdkVersion(apk_name):
  """Get the minSdkVersion declared in the APK.

  This can be both a decimal number (API Level) or a codename.

  Returns:
    The sdkVersion string reported by "aapt dump badging".

  Raises:
    ExternalError: if aapt fails or reports no sdkVersion.
  """

  p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
  output, _ = p.communicate()
  # BUG FIX: stderr is not piped, so the second value from communicate()
  # was always None and the old "if err:" check could never detect a
  # failure.  Check the process exit code instead.
  if p.returncode != 0:
    raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"
        % (p.returncode,))

  for line in output.split("\n"):
    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
    if m:
      return m.group(1)
  raise ExternalError("No minSdkVersion returned by aapt")
630
631
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Get the minSdkVersion declared in the APK as a number (API Level).

  If minSdkVersion is set to a codename, it is translated to a number using
  the provided map.
  """

  version = GetMinSdkVersion(apk_name)
  try:
    return int(version)
  except ValueError:
    # Not a decimal number -- try resolving it as a codename.
    if version not in codename_to_api_level_map:
      raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
                          % (version, codename_to_api_level_map))
    return codename_to_api_level_map[version]
648
649
def SignFile(input_name, output_name, key, password, min_api_level=None,
    codename_to_api_level_map=None,
    whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password.

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  min_api_level is the API Level (int) of the oldest platform this file may end
  up on. If not specified for an APK, the API Level is obtained by interpreting
  the minSdkVersion attribute of the APK's AndroidManifest.xml.

  codename_to_api_level_map is needed to translate the codename which may be
  encountered as the APK's minSdkVersion.

  Raises:
    ExternalError: if signapk.jar exits with a non-zero return code.
  """
  # BUG FIX: the map previously defaulted to a dict() created once at
  # function-definition time (a shared mutable default); default to None
  # and build a fresh dict per call instead.
  if codename_to_api_level_map is None:
    codename_to_api_level_map = {}

  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  cmd = [OPTIONS.java_path, OPTIONS.java_args,
         "-Djava.library.path=" + java_library_path,
         "-jar",
         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
  cmd.extend(OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")

  # An explicit min_api_level wins; otherwise consult the APK's manifest
  # (not applicable when signing a whole file such as an OTA package).
  min_sdk_version = min_api_level
  if min_sdk_version is None:
    if not whole_file:
      min_sdk_version = GetMinSdkVersionInt(
          input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd.extend(["--min-sdk-version", str(min_sdk_version)])

  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  if password is not None:
    # signapk reads the key password from stdin, newline-terminated.
    password += "\n"
  p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
698
699
def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target.  Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size.

  Args:
    data: the image contents as a string.
    target: the image file name; a trailing ".img" is stripped.
    info_dict: build properties; "fstab" and "<device>_size" entries
        determine the applicable limit.  No-op if either is missing.

  Raises:
    ExternalError: if the data is at or above 99% of the limit.
  """

  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    if mount_point == "/userdata":
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    if "/" in device:
      device = device[device.rfind("/")+1:]
    # The size limit is keyed by the basename of the block device.
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:
    return

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page)
    limit = limit / 2048 * (2048+64)
  size = len(data)
  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  if pct >= 99.0:
    raise ExternalError(msg)
  elif pct >= 95.0:
    print
    print "  WARNING: ", msg
    print
  elif OPTIONS.verbose:
    print "  ", msg
738
739
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict."""
  certmap = {}
  line_re = re.compile(r'^name="(.*)"\s+certificate="(.*)"\s+'
                       r'private_key="(.*)"$')
  for raw in tf_zip.read("META/apkcerts.txt").split("\n"):
    entry = raw.strip()
    if not entry:
      continue
    m = line_re.match(entry)
    if not m:
      # Lines that don't look like apkcert entries are silently skipped.
      continue
    name, cert, privkey = m.groups()
    pub_suffix_len = len(OPTIONS.public_key_suffix)
    priv_suffix_len = len(OPTIONS.private_key_suffix)
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      certmap[name] = cert
    elif (cert.endswith(OPTIONS.public_key_suffix) and
          privkey.endswith(OPTIONS.private_key_suffix) and
          cert[:-pub_suffix_len] == privkey[:-priv_suffix_len]):
      # Store the shared key basename (suffixes stripped).
      certmap[name] = cert[:-pub_suffix_len]
    else:
      raise ValueError("failed to parse line from apkcerts.txt:\n" + entry)
  return certmap
763
764
# Help text for the flags shared by every releasetools script; Usage()
# prints it after the calling module's own docstring.
COMMON_DOCSTRING = """
  -p  (--path)  <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the python module containing device-specific
      releasetools code.

  -x  (--extra)  <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""
784
def Usage(docstring):
  """Print the calling module's docstring followed by the common options."""
  print docstring.rstrip("\n")
  print COMMON_DOCSTRING
788
789
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler.

  Side effects: updates the global OPTIONS object and, when a search
  path is set, prepends <search_path>/bin to $PATH.  Exits the process
  on -h/--help or on a getopt parse error.
  """

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=",
         "signapk_shared_library_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print "**", str(err), "**"
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--signapk_shared_library_path",):
      OPTIONS.signapk_shared_library_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      # Unknown flags are offered to the caller's handler; anything it
      # doesn't claim indicates an inconsistency in the option tables.
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  # Prefer the otatools binaries over any system-installed versions.
  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
859
860
def MakeTempFile(prefix=None, suffix=None):
  """Make a temp file and add it to the list of things to be deleted
  when Cleanup() is called.  Return the filename."""
  fd, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # Only the name is needed; close the descriptor mkstemp opened for us.
  os.close(fd)
  OPTIONS.tempfiles.append(path)
  return path
868
869
def Cleanup():
  """Delete every temp file or directory registered in OPTIONS.tempfiles.

  Safe to call more than once: the list is emptied after deletion, so a
  later call won't try to remove already-deleted paths.
  """
  for i in OPTIONS.tempfiles:
    if os.path.isdir(i):
      shutil.rmtree(i)
    else:
      os.remove(i)
  # BUG FIX: stale entries used to remain in the list, making a second
  # Cleanup() call raise OSError on the already-removed paths.
  del OPTIONS.tempfiles[:]
876
877
878class PasswordManager(object):
  def __init__(self):
    # $EDITOR is used to let the user edit the password file; $ANDROID_PW_FILE
    # is where passwords are cached between runs.  Either may be unset, in
    # which case we fall back to interactive prompting.
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)
882
  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      # Seed empty entries so they appear in the editable file.
      for i in missing:
        current[i] = ""

      if not first:
        print "key file %s still missing some passwords." % (self.pwfile,)
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)
917
918  def PromptResult(self, current): # pylint: disable=no-self-use
919    """Prompt the user to enter a value (password) for each key in
920    'current' whose value is fales.  Returns a new dict with all the
921    values.
922    """
923    result = {}
924    for k, v in sorted(current.iteritems()):
925      if v:
926        result[k] = v
927      else:
928        while True:
929          result[k] = getpass.getpass(
930              "Enter password for %s key> " % k).strip()
931          if result[k]:
932            break
933    return result
934
935  def UpdateAndReadFile(self, current):
936    if not self.editor or not self.pwfile:
937      return self.PromptResult(current)
938
939    f = open(self.pwfile, "w")
940    os.chmod(self.pwfile, 0o600)
941    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
942    f.write("# (Additional spaces are harmless.)\n\n")
943
944    first_line = None
945    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
946    for i, (_, k, v) in enumerate(sorted_list):
947      f.write("[[[  %s  ]]] %s\n" % (v, k))
948      if not v and first_line is None:
949        # position cursor on first line with no password.
950        first_line = i + 4
951    f.close()
952
953    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
954    _, _ = p.communicate()
955
956    return self.ReadFile()
957
958  def ReadFile(self):
959    result = {}
960    if self.pwfile is None:
961      return result
962    try:
963      f = open(self.pwfile, "r")
964      for line in f:
965        line = line.strip()
966        if not line or line[0] == '#':
967          continue
968        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
969        if not m:
970          print "failed to parse password file: ", line
971        else:
972          result[m.group(2)] = m.group(1)
973      f.close()
974    except IOError as e:
975      if e.errno != errno.ENOENT:
976        print "error reading password file: ", str(e)
977    return result
978
979
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Add the file at 'filename' to 'zip_file' with deterministic metadata.

  The on-disk file is temporarily re-chmodded to 'perms' and re-stamped
  with a fixed (2009-01-01) mtime before being written, so the archive
  output is repeatable; the original mode and times are restored
  afterwards.
  """
  import datetime

  # http://b/18015246
  # Python 2.7's zipfile wrongly decides that zip64 is required for files
  # larger than 2GiB; raise the module limit for the duration of the call
  # to work around it.  (zipfile.writestr() cannot be used for such large
  # payloads at all -- the interpreter sometimes rejects strings that big
  # -- which is why this helper goes through zipfile.write().  The whole
  # workaround goes away under python3.)
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression
  if arcname is None:
    arcname = filename

  original_stat = os.stat(filename)

  try:
    # zipfile.write() gives us no way to pass a ZipInfo, so mutate the
    # file's metadata in place and undo it afterwards.
    os.chmod(filename, perms)

    # Fixed timestamp keeps the output byte-for-byte repeatable.
    epoch_start = datetime.datetime.fromtimestamp(0)
    fixed_mtime = (datetime.datetime(2009, 1, 1) - epoch_start).total_seconds()
    os.utime(filename, (fixed_mtime, fixed_mtime))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    os.chmod(filename, original_stat.st_mode)
    os.utime(filename, (original_stat.st_atime, original_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1018
1019
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.

  Args:
    zip_file: The zipfile.ZipFile to write into.
    zinfo_or_arcname: Either a zipfile.ZipInfo (used as given) or an
        archive-name string, in which case a ZipInfo is synthesized using
        the zip's default compression and (unless 'perms' is given)
        0o100644 permissions.
    data: The entry's contents.
    perms: Optional permission bits; takes priority over any mode in the
        given ZipInfo.
    compress_type: Optional compression method, overriding the ZipInfo's.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Fix: restore ZIP64_LIMIT even if writestr() raises, matching the
  # try/finally discipline already used by ZipWrite(); otherwise a failed
  # write leaks the modified module-global limit.
  try:
    if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
      zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
      zinfo.compress_type = zip_file.compression
      if perms is None:
        perms = 0o100644
    else:
      zinfo = zinfo_or_arcname

    # If compress_type is given, it overrides the value in zinfo.
    if compress_type is not None:
      zinfo.compress_type = compress_type

    # If perms is given, it has a priority.
    if perms is not None:
      # If perms doesn't set the file type, mark it as a regular file.
      if perms & 0o770000 == 0:
        perms |= 0o100000
      zinfo.external_attr = perms << 16

    # Use a fixed timestamp so the output is repeatable.
    zinfo.date_time = (2009, 1, 1, 0, 0, 0)

    zip_file.writestr(zinfo, data)
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1060
1061
def ZipClose(zip_file):
  """Close 'zip_file' with the zip64 limit raised.

  http://b/18015246: zipfile also refers to ZIP64_LIMIT during close()
  when it writes out the central directory, so the same workaround used
  by ZipWrite()/ZipWriteStr() is needed here.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Fix: restore the limit even if close() raises (e.g. on a full disk);
  # the original unconditionally leaked the modified module global.
  try:
    zip_file.close()
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1072
1073
1074class DeviceSpecificParams(object):
1075  module = None
1076  def __init__(self, **kwargs):
1077    """Keyword arguments to the constructor become attributes of this
1078    object, which is passed to all functions in the device-specific
1079    module."""
1080    for k, v in kwargs.iteritems():
1081      setattr(self, k, v)
1082    self.extras = OPTIONS.extras
1083
1084    if self.module is None:
1085      path = OPTIONS.device_specific
1086      if not path:
1087        return
1088      try:
1089        if os.path.isdir(path):
1090          info = imp.find_module("releasetools", [path])
1091        else:
1092          d, f = os.path.split(path)
1093          b, x = os.path.splitext(f)
1094          if x == ".py":
1095            f = b
1096          info = imp.find_module(f, [d])
1097        print "loaded device-specific extensions from", path
1098        self.module = imp.load_module("device_specific", *info)
1099      except ImportError:
1100        print "unable to load device-specific module; assuming none"
1101
1102  def _DoCall(self, function_name, *args, **kwargs):
1103    """Call the named function in the device-specific module, passing
1104    the given args and kwargs.  The first argument to the call will be
1105    the DeviceSpecific object itself.  If there is no module, or the
1106    module does not define the function, return the value of the
1107    'default' kwarg (which itself defaults to None)."""
1108    if self.module is None or not hasattr(self.module, function_name):
1109      return kwargs.get("default", None)
1110    return getattr(self.module, function_name)(*((self,) + args), **kwargs)
1111
1112  def FullOTA_Assertions(self):
1113    """Called after emitting the block of assertions at the top of a
1114    full OTA package.  Implementations can add whatever additional
1115    assertions they like."""
1116    return self._DoCall("FullOTA_Assertions")
1117
1118  def FullOTA_InstallBegin(self):
1119    """Called at the start of full OTA installation."""
1120    return self._DoCall("FullOTA_InstallBegin")
1121
1122  def FullOTA_InstallEnd(self):
1123    """Called at the end of full OTA installation; typically this is
1124    used to install the image for the device's baseband processor."""
1125    return self._DoCall("FullOTA_InstallEnd")
1126
1127  def IncrementalOTA_Assertions(self):
1128    """Called after emitting the block of assertions at the top of an
1129    incremental OTA package.  Implementations can add whatever
1130    additional assertions they like."""
1131    return self._DoCall("IncrementalOTA_Assertions")
1132
1133  def IncrementalOTA_VerifyBegin(self):
1134    """Called at the start of the verification phase of incremental
1135    OTA installation; additional checks can be placed here to abort
1136    the script before any changes are made."""
1137    return self._DoCall("IncrementalOTA_VerifyBegin")
1138
1139  def IncrementalOTA_VerifyEnd(self):
1140    """Called at the end of the verification phase of incremental OTA
1141    installation; additional checks can be placed here to abort the
1142    script before any changes are made."""
1143    return self._DoCall("IncrementalOTA_VerifyEnd")
1144
1145  def IncrementalOTA_InstallBegin(self):
1146    """Called at the start of incremental OTA installation (after
1147    verification is complete)."""
1148    return self._DoCall("IncrementalOTA_InstallBegin")
1149
1150  def IncrementalOTA_InstallEnd(self):
1151    """Called at the end of incremental OTA installation; typically
1152    this is used to install the image for the device's baseband
1153    processor."""
1154    return self._DoCall("IncrementalOTA_InstallEnd")
1155
1156  def VerifyOTA_Assertions(self):
1157    return self._DoCall("VerifyOTA_Assertions")
1158
class File(object):
  """An in-memory file: a name plus its contents, size, and SHA-1."""

  def __init__(self, name, data):
    self.name = name
    self.data = data
    self.size = len(data)
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Build a File named 'name' from the contents of 'diskname'."""
    with open(diskname, "rb") as f:
      contents = f.read()
    return File(name, contents)

  def WriteToTemp(self):
    """Write the contents to a NamedTemporaryFile and return it (still
    open); the caller is responsible for closing it."""
    tmp = tempfile.NamedTemporaryFile()
    tmp.write(self.data)
    tmp.flush()
    return tmp

  def AddToZip(self, z, compression=None):
    """Store this file in zip 'z' under its own name."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1181
# Map from target-file extension to the diff tool invocation used by
# Difference.ComputePatch().  A value is either a program name or an
# argv prefix list; extensions not listed fall back to "bsdiff".
DIFF_PROGRAM_BY_EXT = {
    ".gz" : "imgdiff",
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    ".img" : "imgdiff",
    }
1189
class Difference(object):
  """Computes a binary patch turning source File 'sf' into target File
  'tf' by shelling out to an external diff tool (bsdiff/imgdiff)."""

  def __init__(self, tf, sf, diff_program=None):
    # tf/sf are File objects.  diff_program, if given, overrides the
    # extension-based tool choice in ComputePatch(); it may be a string
    # or an argv prefix list.
    self.tf = tf
    self.sf = sf
    self.patch = None
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      # Pick imgdiff variants for known image-like extensions,
      # bsdiff for everything else.
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    # The external tool needs both inputs on disk.
    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    # NOTE(review): 'ext' is recomputed here but never used afterwards.
    ext = os.path.splitext(tf.name)[1]

    try:
      # NOTE(review): if NamedTemporaryFile() itself raised, 'ptemp'
      # would be unbound in the finally block below.
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []
      # communicate() runs in a helper thread so we can bound the diff
      # with a timeout; any stderr output is collected into 'err'.
      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        # Timed out: ask nicely first, then kill if it won't die.
        print "WARNING: diff command timed out"
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      # Any stderr output or a nonzero exit status counts as failure.
      if err or p.returncode != 0:
        print "WARNING: failure running %s:\n%s\n" % (
            diff_program, "".join(err))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch


  def GetPatch(self):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
1261
1262
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs',
  running OPTIONS.worker_threads workers in parallel."""
  print len(diffs), "diffs to compute"

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    # The lock is held while pulling the next item off the shared
    # iterator and while printing a result, and is released around the
    # (slow) ComputePatch() call so workers actually diff in parallel.
    # Note the deliberate acquire/release interleaving across loop
    # iterations: the for-loop's next() always runs with the lock held.
    try:
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print "patching failed!                                  %s" % (name,)
        else:
          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception as e:
      # Surface the error on this worker's behalf, then re-raise so the
      # thread dies visibly rather than hanging the pool.
      print e
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
1307
1308
class BlockDifference(object):
  """Computes a block-based (whole-partition) update for one partition
  and emits the corresponding edify script fragments."""

  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None):
    # tgt/src are image objects accepted by blockimgdiff.BlockImageDiff;
    # src is None when generating a full (non-incremental) OTA.
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block

    # Default to the highest transfer-list version the build declares
    # via "blockimgdiff_versions", or 1 when nothing is declared.
    if version is None:
      version = 1
      if OPTIONS.info_dict:
        version = max(
            int(i) for i in
            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    # Compute the transfer list plus new/patch data files into a temp
    # dir; the dir is registered in OPTIONS.tempfiles for Cleanup().
    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version)
    tmpdir = tempfile.mkdtemp()
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)
    # max_stashed_size comes from blockimgdiff; exposed via
    # required_cache below.
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    # For incrementals the device path must come from the *source*
    # build's fstab, since verification runs against the old image.
    if src is None:
      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
    else:
      _, self.device = GetTypeAndDevice("/" + partition,
                                        OPTIONS.source_info_dict)

  @property
  def required_cache(self):
    # Maximum number of bytes simultaneously stashed while applying the
    # update, as reported by blockimgdiff.
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None):
    """Emit the update commands for this partition into 'script',
    adding the data files to 'output_zip'; optionally follow with a
    post-install verification (OPTIONS.verify)."""
    if not self.src:
      # Full OTA: no source to verify against, so patch unconditionally.
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    if OPTIONS.verify:
      self._WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    # ui_print only on mismatch -- this check reports, it does not abort.
    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
                       'ui_print("    Verified.") || '
                       'ui_print("\\"%s\\" has unexpected contents.");' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True),
                       self.device))
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    """Emit pre-install verification.  For incrementals, checks that
    the source blocks hash as expected (or that a resumed update can be
    verified/recovered); full OTAs need no check."""
    partition = self.partition

    # full OTA
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))

    # incremental OTA
    else:
      if touched_blocks_only and self.version >= 3:
        # Only hash the source blocks the transfer list actually reads.
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
      else:
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.
      if not ranges:
        return

      ranges_str = ranges.to_string_raw()
      # NOTE(review): the version >= 4 and version == 3 branches below
      # emit identical commands and could be collapsed into one.
      if self.version >= 4:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, expected_sha1,
                            self.device, partition, partition, partition))
      elif self.version == 3:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, expected_sha1,
                            self.device, partition, partition, partition))
      else:
        # NOTE(review): pre-v3 always hashes the full source range and
        # recomputes TotalSha1(), ignoring expected_sha1 from above.
        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                           self.device, ranges_str, self.src.TotalSha1()))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block("%s");' % (self.device,))

        # If version >= 4, try block recovery before abort update
        script.AppendExtra((
            'ifelse (block_image_recover("{device}", "{ranges}") && '
            'block_image_verify("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("{partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        script.AppendExtra(('abort("%s partition has unexpected contents");\n'
                            'endif;') % (partition,))

  def _WritePostInstallVerifyScript(self, script):
    """Emit commands verifying the partition matches the target image
    (and that extended blocks are zeroed) after the update applied."""
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                         self.device, ranges_str,
                         self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      script.AppendExtra(
          'else\n'
          '  abort("%s partition has unexpected non-zero contents after OTA '
          'update");\n'
          'endif;' % (partition,))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    script.AppendExtra(
        'else\n'
        '  abort("%s partition has unexpected contents after OTA update");\n'
        'endif;' % (partition,))

  def _WriteUpdate(self, script, output_zip):
    """Add the transfer list and data files to 'output_zip' and emit
    the block_image_update call into 'script'."""
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    # The patch data is stored uncompressed -- presumably it is already
    # high-entropy and zip compression would not help; confirm if changed.
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat") ||\n'
            '    abort("Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    """Return the hex SHA-1 over the given block ranges of 'source'."""
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    # 4096-byte blocks, matching the image block size used here.
    zero_block = '\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()
1516
1517
1518DataImage = blockimgdiff.DataImage
1519
# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "yaffs2": "MTD",
    "mtd": "MTD",
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}

def GetTypeAndDevice(mount_point, info):
  """Look up 'mount_point' in the given info dict's fstab.

  Returns a (partition_type, device) tuple, with partition_type mapped
  from the fstab entry's filesystem type via PARTITION_TYPES.

  Raises:
    KeyError: if the fstab is missing/empty, has no entry for
        'mount_point', or the entry's fs_type has no mapping.
  """
  fstab = info["fstab"]
  if not fstab:
    # Fix: include the mount point so callers get some context; the
    # original bare 'raise KeyError' carried no message at all.
    # Callers catching KeyError are unaffected.
    raise KeyError(mount_point)
  return (PARTITION_TYPES[fstab[mount_point].fs_type],
          fstab[mount_point].device)
1537
1538
def ParseCertificate(data):
  """Parse a PEM-format certificate.

  Collects the base64 payload between the BEGIN CERTIFICATE and END
  CERTIFICATE markers in 'data' and returns the decoded DER bytes.
  Lines outside the markers are ignored; only the first certificate in
  'data' is parsed.
  """
  # Local import to keep the file-level import block unchanged.
  import base64

  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  # Fix: str.decode('base64') is a Python-2-only codec; base64.b64decode
  # behaves identically on Python 2 and also works on Python 3.
  cert = base64.b64decode("".join(cert))
  return cert
1552
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.  (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.)  Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
  system_root_image = info_dict.get("system_root_image", None) == "true"

  if full_recovery_image:
    # Ship the entire recovery image instead of a boot->recovery patch.
    output_sink("etc/recovery.img", recovery_img.data)

  else:
    # Compute an imgdiff patch from boot -> recovery, optionally using
    # recovery-resource.dat as shared "bonus" data to shrink the patch.
    diff_program = ["imgdiff"]
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"
    else:
      bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    # No fstab entries for boot/recovery; nothing to install on-device.
    return

  # The generated shell script first uses 'applypatch -c' to see whether
  # recovery already has the expected contents, and only (re)installs it
  # when the check fails.
  if full_recovery_image:
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release.  Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  found = False
  # The init rc files live under ROOT/ when the system image doubles as
  # the root filesystem, otherwise in the boot ramdisk.
  if system_root_image:
    init_rc_dir = os.path.join(input_dir, "ROOT")
  else:
    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):
      continue

    # The flash_recovery service line names the script path (relative
    # to /system/); use the first match found.
    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
      for line in f:
        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
        if m:
          sh_location = m.group(1)
          found = True
          break

    if found:
      break

  print "putting script in", sh_location

  output_sink(sh_location, sh)
1655