common.py revision de5bc04717505ad0e5b55605bccf43974f4c5c7a
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import errno
17import getopt
18import getpass
19import imp
20import os
21import platform
22import re
23import shlex
24import shutil
25import subprocess
26import sys
27import tempfile
28import threading
29import time
30import zipfile
31
32import blockimgdiff
33import rangelib
34
35from hashlib import sha1 as sha1
36
37
class Options(object):
  """Holds the global options shared by the releasetools scripts.

  A single module-level instance (OPTIONS) is mutated by ParseOptions()
  and consulted throughout this module.
  """

  def __init__(self):
    # Host tools live under out/host/<platform>; unknown platforms get None.
    host_out_dirs = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }
    self.search_path = host_out_dirs.get(sys.platform)

    # Signing-related settings.
    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.signapk_shared_library_path = "lib64"   # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = "-Xmx2048m"  # JVM Args
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []

    # General behavior / bookkeeping.
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
69
70
# Module-wide singleton consulted by the functions in this file; mutated
# in place by ParseOptions().
OPTIONS = Options()


# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
76
77
class ExternalError(RuntimeError):
  """Raised when an invoked external tool or signing step fails."""
  pass
80
81
def Run(args, **kwargs):
  """Create and return a subprocess.Popen object, printing the command
  line on the terminal if -v was specified.

  Args:
    args: the command line as a list of strings.
    **kwargs: forwarded verbatim to subprocess.Popen.

  Returns:
    The (already started) subprocess.Popen object.
  """
  if OPTIONS.verbose:
    print "  running: ", " ".join(args)
  return subprocess.Popen(args, **kwargs)
88
89
def CloseInheritedPipes():
  """Close leaked pipe file descriptors on Mac OS.

  Gmake on Mac OS leaks pipe fds into child processes; close any fd in
  the range [3, 1024] whose st_mode has the FIFO bit set.  No-op on
  other platforms.
  """
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      st = os.fstat(fd)
      # 0x1000 is the S_IFIFO bit of st_mode: only close pipes.
      if st is not None and (st.st_mode & 0x1000) != 0:
        os.close(fd)
    except OSError:
      # fd not open (or close raced) -- nothing to do.
      pass
104
105
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  Args:
    input_file: an opened zipfile.ZipFile of the target-files zip, or
        the path of an unzipped target-files directory.
    input_dir: if not None, the path of the unzipped target-files
        directory; used to redirect properties that the build system
        stores as links into out/ back to real files (repacking case).

  Returns:
    A dict of build properties.  Also populates the derived entries
    "fstab" (parsed recovery fstab, or None) and "build.prop" (dict).

  Raises:
    ValueError: if the recovery API version cannot be determined.
  """

  def read_helper(fn):
    # Read an entry either from the zip or from the unpacked tree;
    # raises KeyError for a missing entry in either case.
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)
        # NOTE(review): any other IOError falls through and returns None
        # here, which callers treat as file contents -- confirm whether
        # it should re-raise instead.
  d = {}
  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    # ok if misc_info.txt doesn't exist
    pass

  # backwards compatibility: These values used to be in their own
  # files.  Look for them, in case we're processing an old
  # target_files zip.

  if "mkyaffs2_extra_flags" not in d:
    try:
      d["mkyaffs2_extra_flags"] = read_helper(
          "META/mkyaffs2-extra-flags.txt").strip()
    except KeyError:
      # ok if flags don't exist
      pass

  if "recovery_api_version" not in d:
    try:
      d["recovery_api_version"] = read_helper(
          "META/recovery-api-version.txt").strip()
    except KeyError:
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    try:
      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
    except KeyError:
      # ok if extensions don't exist
      pass

  if "fstab_version" not in d:
    d["fstab_version"] = "1"

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

  # Old target-files carried per-image sizes in META/imagesizes.txt; fold
  # them into the dict as "<name>_size" (except the bare "blocksize").
  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    pass

  def makeint(key):
    # Convert the named entry (if present) to int; base 0 honors 0x/0 prefixes.
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  # NOTE(review): "no_recovery" arrives as a string, so the False default
  # never equals "true"; the comparison works but the default is misleading.
  if d.get("no_recovery", False) == "true":
    d["fstab"] = None
  else:
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                   d.get("system_root_image", False))
  d["build.prop"] = LoadBuildProp(read_helper)
  return d
222
223def LoadBuildProp(read_helper):
224  try:
225    data = read_helper("SYSTEM/build.prop")
226  except KeyError:
227    print "Warning: could not find SYSTEM/build.prop in %s" % zip
228    data = ""
229  return LoadDictionaryFromLines(data.split("\n"))
230
def LoadDictionaryFromLines(lines):
  """Parse an iterable of "key=value" lines into a dict.

  Blank lines, comment lines (leading "#") and lines without "=" are
  skipped.  Only the first "=" splits; later ones stay in the value.
  """
  result = {}
  for raw in lines:
    entry = raw.strip()
    if not entry or entry[0] == "#":
      continue
    if "=" not in entry:
      continue
    key, _, value = entry.partition("=")
    result[key] = value
  return result
241
242def LoadRecoveryFSTab(read_helper, fstab_version, system_root_image=False):
243  class Partition(object):
244    def __init__(self, mount_point, fs_type, device, length, device2, context):
245      self.mount_point = mount_point
246      self.fs_type = fs_type
247      self.device = device
248      self.length = length
249      self.device2 = device2
250      self.context = context
251
252  try:
253    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
254  except KeyError:
255    print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab"
256    data = ""
257
258  if fstab_version == 1:
259    d = {}
260    for line in data.split("\n"):
261      line = line.strip()
262      if not line or line.startswith("#"):
263        continue
264      pieces = line.split()
265      if not 3 <= len(pieces) <= 4:
266        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
267      options = None
268      if len(pieces) >= 4:
269        if pieces[3].startswith("/"):
270          device2 = pieces[3]
271          if len(pieces) >= 5:
272            options = pieces[4]
273        else:
274          device2 = None
275          options = pieces[3]
276      else:
277        device2 = None
278
279      mount_point = pieces[0]
280      length = 0
281      if options:
282        options = options.split(",")
283        for i in options:
284          if i.startswith("length="):
285            length = int(i[7:])
286          else:
287            print "%s: unknown option \"%s\"" % (mount_point, i)
288
289      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
290                                 device=pieces[2], length=length,
291                                 device2=device2)
292
293  elif fstab_version == 2:
294    d = {}
295    for line in data.split("\n"):
296      line = line.strip()
297      if not line or line.startswith("#"):
298        continue
299      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
300      pieces = line.split()
301      if len(pieces) != 5:
302        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
303
304      # Ignore entries that are managed by vold
305      options = pieces[4]
306      if "voldmanaged=" in options:
307        continue
308
309      # It's a good line, parse it
310      length = 0
311      options = options.split(",")
312      for i in options:
313        if i.startswith("length="):
314          length = int(i[7:])
315        else:
316          # Ignore all unknown options in the unified fstab
317          continue
318
319      mount_flags = pieces[3]
320      # Honor the SELinux context if present.
321      context = None
322      for i in mount_flags.split(","):
323        if i.startswith("context="):
324          context = i
325
326      mount_point = pieces[1]
327      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
328                                 device=pieces[0], length=length,
329                                 device2=None, context=context)
330
331  else:
332    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
333
334  # / is used for the system mount point when the root directory is included in
335  # system. Other areas assume system is always at "/system" so point /system
336  # at /.
337  if system_root_image:
338    assert not d.has_key("/system") and d.has_key("/")
339    d["/system"] = d["/"]
340  return d
341
342
def DumpInfoDict(d):
  """Print the info dict, one sorted "key = (type) value" line per entry."""
  for k, v in sorted(d.items()):
    print "%-25s = (%s) %s" % (k, type(v).__name__, v)
346
347
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image.

  Args:
    sourcedir: directory containing "kernel" and, optionally, "second",
        "cmdline", "base", "pagesize" files and a "RAMDISK" subdirectory.
    fs_config_file: optional fs_config file passed to mkbootfs with -f.
    info_dict: build properties; defaults to OPTIONS.info_dict.  Keys
        consulted: "mkbootimg_args", "boot_signer", "verity_key",
        "vboot" and the vboot_* signing entries.
    has_ramdisk: whether to build and embed a compressed ramdisk.

  Returns:
    The image contents as a string, or None if sourcedir does not appear
    to contain files for building the requested image.
  """

  def make_ramdisk():
    # Pack sourcedir/RAMDISK with mkbootfs and compress it with minigzip.
    ramdisk_img = tempfile.NamedTemporaryFile()

    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
    else:
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

    p2.wait()
    p1.wait()
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

    return ramdisk_img

  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    return None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_img = make_ramdisk()

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  # Optional single-value inputs are carried as files in sourcedir.
  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  img_unsigned = None
  if info_dict.get("vboot", None):
    # With vboot, mkbootimg writes an unsigned image which is then signed
    # into 'img' below.
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
  else:
    cmd.extend(["--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
           img_keyblock.name,
           img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # Bug fix: seek() takes (offset, whence); the old call had the arguments
  # transposed as seek(os.SEEK_SET, 0).  It only worked because SEEK_SET
  # happens to equal 0.  The intent is to rewind before reading.
  img.seek(0, os.SEEK_SET)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data
471
472
473def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
474                     info_dict=None):
475  """Return a File object with the desired bootable image.
476
477  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
478  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
479  the source files in 'unpack_dir'/'tree_subdir'."""
480
481  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
482  if os.path.exists(prebuilt_path):
483    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
484    return File.FromLocalFile(name, prebuilt_path)
485
486  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
487  if os.path.exists(prebuilt_path):
488    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
489    return File.FromLocalFile(name, prebuilt_path)
490
491  print "building image from target_files %s..." % (tree_subdir,)
492
493  if info_dict is None:
494    info_dict = OPTIONS.info_dict
495
496  # With system_root_image == "true", we don't pack ramdisk into the boot image.
497  # Unless "recovery_as_boot" is specified, in which case we carry the ramdisk
498  # for recovery.
499  has_ramdisk = (info_dict.get("system_root_image") != "true" or
500                 prebuilt_name != "boot.img" or
501                 info_dict.get("recovery_as_boot") == "true")
502
503  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
504  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
505                             os.path.join(unpack_dir, fs_config),
506                             info_dict, has_ramdisk)
507  if data:
508    return File(name, data)
509  return None
510
511
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """

  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(zip_path, dest_dir):
    # Shell out to unzip: quiet, overwrite existing files, optional pattern.
    cmd = ["unzip", "-o", "-q", zip_path, "-d", dest_dir]
    if pattern is not None:
      cmd.append(pattern)
    proc = Run(cmd, stdout=subprocess.PIPE)
    proc.communicate()
    if proc.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (zip_path,))

  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m:
    # "foo.zip+bar.zip" form: second archive lands in BOOTABLE_IMAGES.
    unzip_to_dir(m.group(1), tmp)
    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    filename = m.group(1)
  else:
    unzip_to_dir(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")
544
545
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password.

  Each key is probed with openssl to classify it as unencrypted,
  encrypted with the empty password, or genuinely password-protected;
  only the last group triggers a prompt (via PasswordManager).
  """

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    # First try reading the key with no password at all.
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Retry with an empty password to tell "encrypted with ''" apart
      # from "needs a real password".
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  # Prompt (or consult $ANDROID_PW_FILE) for the protected keys; record
  # None for the password-less ones.
  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords
593
594
def GetMinSdkVersion(apk_name):
  """Get the minSdkVersion declared in the APK. This can be both a decimal
  number (API Level) or a codename.

  Args:
    apk_name: path to the APK, inspected via "aapt dump badging".

  Returns:
    The sdkVersion string reported by aapt (e.g. "23" or "N").

  Raises:
    ExternalError: if aapt fails or reports no sdkVersion.
  """

  p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
  output, _ = p.communicate()
  # Bug fix: the old code tested communicate()'s stderr value, which is
  # always None here because stderr was not piped -- the failure branch
  # could never trigger.  Check the exit status instead.
  if p.returncode != 0:
    raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"
        % (p.returncode,))

  for line in output.split("\n"):
    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
    if m:
      return m.group(1)
  raise ExternalError("No minSdkVersion returned by aapt")
612
613
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Get the minSdkVersion declared in the APK as a number (API Level). If
  minSdkVersion is set to a codename, it is translated to a number using the
  provided map.
  """

  version = GetMinSdkVersion(apk_name)
  try:
    return int(version)
  except ValueError:
    pass
  # Not a decimal number -- look the codename up in the supplied map.
  if version not in codename_to_api_level_map:
    raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
                        % (version, codename_to_api_level_map))
  return codename_to_api_level_map[version]
630
631
def SignFile(input_name, output_name, key, password, min_api_level=None,
    codename_to_api_level_map=None,
    whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password.

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  min_api_level is the API Level (int) of the oldest platform this file may end
  up on. If not specified for an APK, the API Level is obtained by interpreting
  the minSdkVersion attribute of the APK's AndroidManifest.xml.

  codename_to_api_level_map is needed to translate the codename which may be
  encountered as the APK's minSdkVersion.  Defaults to an empty map.

  Raises:
    ExternalError: if signapk.jar exits with a non-zero status.
  """
  # Bug fix: the default used to be a shared dict() instance -- the classic
  # mutable-default-argument pitfall.  Use None as the sentinel instead.
  if codename_to_api_level_map is None:
    codename_to_api_level_map = {}

  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  cmd = [OPTIONS.java_path, OPTIONS.java_args,
         "-Djava.library.path=" + java_library_path,
         "-jar",
         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
  cmd.extend(OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")

  # Determine the effective min SDK: explicit argument wins; otherwise
  # (for APKs) ask aapt via GetMinSdkVersionInt.
  min_sdk_version = min_api_level
  if min_sdk_version is None:
    if not whole_file:
      min_sdk_version = GetMinSdkVersionInt(
          input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd.extend(["--min-sdk-version", str(min_sdk_version)])

  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  # The password (if any) is fed to signapk on stdin.
  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  if password is not None:
    password += "\n"
  p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
680
681
def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target.  Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size.

  Args:
    data: the image contents as a string.
    target: the image name, e.g. "system.img"; a trailing ".img" is
        stripped to derive the mount point.
    info_dict: consulted for "fstab" and the "<device>_size" limit.

  Raises:
    ExternalError: if data occupies >= 99%% of the size limit.
  """

  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    if mount_point == "/userdata":
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    # The size key is named after the device node's basename.
    if "/" in device:
      device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:
    # No limit known for this target: nothing to check.
    return

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page)
    # NOTE(review): relies on Python 2 integer (floor) division.
    limit = limit / 2048 * (2048+64)
  size = len(data)
  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  if pct >= 99.0:
    raise ExternalError(msg)
  elif pct >= 95.0:
    print
    print "  WARNING: ", msg
    print
  elif OPTIONS.verbose:
    print "  ", msg
720
721
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict.

  Lines that don't match the expected name/certificate/private_key
  pattern are skipped; matching lines whose cert/key pair is
  inconsistent raise ValueError.
  """
  certmap = {}
  # Hoist loop invariants: suffix lengths and the compiled line pattern.
  pub_len = len(OPTIONS.public_key_suffix)
  priv_len = len(OPTIONS.private_key_suffix)
  line_re = re.compile(r'^name="(.*)"\s+certificate="(.*)"\s+'
                       r'private_key="(.*)"$')
  for raw in tf_zip.read("META/apkcerts.txt").split("\n"):
    line = raw.strip()
    if not line:
      continue
    m = line_re.match(line)
    if not m:
      continue
    name, cert, privkey = m.groups()
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      certmap[name] = cert
    elif (cert.endswith(OPTIONS.public_key_suffix) and
          privkey.endswith(OPTIONS.private_key_suffix) and
          cert[:-pub_len] == privkey[:-priv_len]):
      certmap[name] = cert[:-pub_len]
    else:
      raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
  return certmap
745
746
# Help text for the options handled here in ParseOptions(); Usage()
# appends this to the calling module's own docstring.
COMMON_DOCSTRING = """
  -p  (--path)  <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the python module containing device-specific
      releasetools code.

  -x  (--extra)  <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""
766
def Usage(docstring):
  """Print the caller's docstring followed by the common options help."""
  print docstring.rstrip("\n")
  print COMMON_DOCSTRING
770
771
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler.

  Side effects: mutates the module-level OPTIONS in place and, when
  --path is given, prepends <path>/bin to $PATH.  Exits the process on
  -h/--help or on a getopt error.
  """

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=",
         "signapk_shared_library_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print "**", str(err), "**"
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--signapk_shared_library_path",):
      OPTIONS.signapk_shared_library_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      # Unrecognized flags are offered to the caller's handler; anything
      # it rejects is a programming error (getopt already validated them).
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
841
842
def MakeTempFile(prefix=None, suffix=None):
  """Make a temp file and add it to the list of things to be deleted
  when Cleanup() is called.  Return the filename."""
  handle, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # mkstemp leaves the fd open; close it so callers can reopen by name.
  os.close(handle)
  OPTIONS.tempfiles.append(path)
  return path
850
851
def Cleanup():
  """Delete every temp file and directory recorded in OPTIONS.tempfiles."""
  for path in OPTIONS.tempfiles:
    remover = shutil.rmtree if os.path.isdir(path) else os.remove
    remover(path)
858
859
class PasswordManager(object):
  """Collects key passwords, preferring a user-edited $ANDROID_PW_FILE
  over interactive prompting."""

  def __init__(self):
    # Both come from the environment; either may be None.
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      # Seed empty entries so they appear in the editable file.
      for i in missing:
        current[i] = ""

      if not first:
        print "key file %s still missing some passwords." % (self.pwfile,)
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.iteritems()):
      if v:
        result[k] = v
      else:
        # Keep asking until a non-empty password is entered.
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    """Write 'current' to the password file, let the user edit it, and
    return the re-parsed contents.  Falls back to interactive prompting
    when no editor or password file is configured."""
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    # The file holds secrets; restrict it to the owner.
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    # Sorting on (not v, ...) lists password-less entries first.
    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  def ReadFile(self):
    """Parse "[[[ password ]]] key" lines from the password file, if it
    exists; returns a possibly-empty {key: password} dict."""
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          print "failed to parse password file: ", line
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      # A missing file is fine (no passwords cached yet).
      if e.errno != errno.ENOENT:
        print "error reading password file: ", str(e)
    return result
960
961
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Add the file 'filename' to the open ZipFile 'zip_file'.

  The entry is stored under 'arcname' (defaults to 'filename') with the
  given 'perms' and a fixed timestamp (2009-01-01) so that output archives
  are repeatable.  The on-disk mode and times of 'filename' are temporarily
  modified to achieve that, and restored before returning.
  """
  import datetime

  # http://b/18015246
  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB. We can work around this by adjusting their
  # limit. Note that `zipfile.writestr()` will not work for strings larger than
  # 2GiB. The Python interpreter sometimes rejects strings that large (though
  # it isn't clear to me exactly what circumstances cause this).
  # `zipfile.write()` must be used directly to work around this.
  #
  # This mess can be avoided if we port to python3.

  if compress_type is None:
    compress_type = zip_file.compression
  if arcname is None:
    arcname = filename

  # Stat the file *before* overriding ZIP64_LIMIT: if the file is missing,
  # os.stat() raises and we must not leave the module-level limit clobbered.
  saved_stat = os.stat(filename)

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  try:
    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
    # file to be zipped and reset it when we're done.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable.
    epoch = datetime.datetime.fromtimestamp(0)
    timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
    os.utime(filename, (timestamp, timestamp))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    os.chmod(filename, saved_stat.st_mode)
    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1000
1001
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  try:
    if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
      zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
      zinfo.compress_type = zip_file.compression
      if perms is None:
        # Default to a world-readable regular file.
        perms = 0o100644
    else:
      zinfo = zinfo_or_arcname

    # If compress_type is given, it overrides the value in zinfo.
    if compress_type is not None:
      zinfo.compress_type = compress_type

    # If perms is given, it has a priority.
    if perms is not None:
      # If perms doesn't set the file type, mark it as a regular file.
      if perms & 0o770000 == 0:
        perms |= 0o100000
      zinfo.external_attr = perms << 16

    # Use a fixed timestamp so the output is repeatable.
    zinfo.date_time = (2009, 1, 1, 0, 0, 0)

    zip_file.writestr(zinfo, data)
  finally:
    # Restore the limit even if writestr() raises (e.g. on a >2GiB string);
    # ZipWrite() already does this, and leaking the override would silently
    # change later zip operations.
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1042
1043
def ZipClose(zip_file):
  """Close 'zip_file' with the zip64 limit workaround in place.

  http://b/18015246
  zipfile also refers to ZIP64_LIMIT during close() when it writes out the
  central directory.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  try:
    zip_file.close()
  finally:
    # Restore even if close() raises, so later zip operations in this
    # process see the original limit.
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1054
1055
class DeviceSpecificParams(object):
  """Loads the optional device-specific releasetools extension module
  (from OPTIONS.device_specific) and dispatches OTA hook callbacks to it."""

  # The loaded extension module; class-level so it is shared once loaded.
  module = None

  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.iteritems():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        if os.path.isdir(path):
          # A directory: load the "releasetools" module inside it.
          info = imp.find_module("releasetools", [path])
        else:
          # A file path: strip a trailing .py and import by module name.
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        print "loaded device-specific extensions from", path
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        # Missing extensions are not an error; hooks become no-ops.
        print "unable to load device-specific module; assuming none"

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    """Delegates to the device module's VerifyOTA_Assertions hook, if
    any."""
    return self._DoCall("VerifyOTA_Assertions")
1140
class File(object):
  """An in-memory file: a name plus its contents, with the size and
  SHA-1 digest computed up front."""

  def __init__(self, name, data):
    self.name = name
    self.data = data
    self.size = len(data)
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Build a File called 'name' from the contents of 'diskname'."""
    with open(diskname, "rb") as f:
      contents = f.read()
    return File(name, contents)

  def WriteToTemp(self):
    """Dump the contents into a NamedTemporaryFile and return it (left
    open; the caller is responsible for closing it)."""
    tmp = tempfile.NamedTemporaryFile()
    tmp.write(self.data)
    tmp.flush()
    return tmp

  def AddToZip(self, z, compression=None):
    """Store this file in the open output zip 'z' via ZipWriteStr()."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1163
1164DIFF_PROGRAM_BY_EXT = {
1165    ".gz" : "imgdiff",
1166    ".zip" : ["imgdiff", "-z"],
1167    ".jar" : ["imgdiff", "-z"],
1168    ".apk" : ["imgdiff", "-z"],
1169    ".img" : "imgdiff",
1170    }
1171
class Difference(object):
  """Computes the binary patch that turns source file 'sf' into target
  file 'tf', using the diff program selected by the target's extension
  (or an explicit 'diff_program' override)."""

  def __init__(self, tf, sf, diff_program=None):
    self.tf = tf
    self.sf = sf
    self.patch = None
    # Optional override; otherwise chosen from DIFF_PROGRAM_BY_EXT in
    # ComputePatch().
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    # Create the patch output file before entering the try block: the
    # finally clause below closes it, so it must exist by then.
    ptemp = tempfile.NamedTemporaryFile()
    try:
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []
      def run():
        # Drain the pipes on a helper thread so the join() below can
        # enforce a timeout.
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        print("WARNING: diff command timed out")
        p.terminate()
        th.join(5)
        if th.is_alive():
          # terminate() didn't work; force it.
          p.kill()
          th.join()

      if err or p.returncode != 0:
        print("WARNING: failure running %s:\n%s\n" % (
            diff_program, "".join(err)))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch


  def GetPatch(self):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
1243
1244
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  print len(diffs), "diffs to compute"

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    # Lock protocol: the lock is held while pulling the next item from
    # diff_iter and while printing results, but released around the slow
    # ComputePatch() call so other workers can run concurrently.
    try:
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print "patching failed!                                  %s" % (name,)
        else:
          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception as e:
      # Print before re-raising so failures in worker threads are visible.
      print e
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
1289
1290
class BlockDifference(object):
  """Computes a block-based image diff for one partition and emits the
  updater-script commands that verify, patch, and re-verify it."""

  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None):
    # tgt/src are image objects (e.g. sparse images); src is None for a
    # full OTA.
    self.tgt = tgt
    self.src = src
    self.partition = partition
    # When True, WriteVerifyScript() adds an explicit superblock check on
    # the failure path (version >= 4 only).
    self.check_first_block = check_first_block

    if version is None:
      # Default to the highest transfer-list version the info dict
      # advertises, falling back to 1.
      version = 1
      if OPTIONS.info_dict:
        version = max(
            int(i) for i in
            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    # Generate <partition>.transfer.list/.new.dat/.patch.dat into a temp
    # dir now; _WriteUpdate() adds them to the output zip later.
    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version)
    tmpdir = tempfile.mkdtemp()
    # Registered for later cleanup via OPTIONS.tempfiles.
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)

    # For incrementals (src given), look the device path up in the source
    # build's fstab; otherwise use the target's.
    if src is None:
      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
    else:
      _, self.device = GetTypeAndDevice("/" + partition,
                                        OPTIONS.source_info_dict)

  def WriteScript(self, script, output_zip, progress=None):
    """Emit the patching commands and post-install verification for this
    partition, adding the data files to 'output_zip'."""
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    self._WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
                       'ui_print("    Verified.") || '
                       'ui_print("\\"%s\\" has unexpected contents.");' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True),
                       self.device))
    script.AppendExtra("")

  def WriteVerifyScript(self, script):
    """Emit pre-install verification: check that the source partition
    matches the expected source hash, or is already fully patched."""
    partition = self.partition
    if not self.src:
      # Full OTA: nothing to verify; the image is written unconditionally.
      script.Print("Image %s will be patched unconditionally." % (partition,))
    else:
      # Clobbered blocks are expected to differ; exclude them here.
      ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
      ranges_str = ranges.to_string_raw()
      if self.version >= 4:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, self.src.TotalSha1(),
                            self.device, partition, partition, partition))
      elif self.version == 3:
        # NOTE(review): this branch emits exactly the same command as the
        # version >= 4 branch above; the two could be merged.
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, self.src.TotalSha1(),
                            self.device, partition, partition, partition))
      else:
        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                           self.device, ranges_str, self.src.TotalSha1()))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block("%s");' % (self.device,))

        # If version >= 4, try block recovery before abort update
        script.AppendExtra((
            'ifelse (block_image_recover("{device}", "{ranges}") && '
            'block_image_verify("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("{partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        script.AppendExtra(('abort("%s partition has unexpected contents");\n'
                            'endif;') % (partition,))

  def _WritePostInstallVerifyScript(self, script):
    """Emit verification that the patched partition matches the target
    image (clobbered blocks included), and that extended blocks are
    zeroed."""
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                         self.device, ranges_str,
                         self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      script.AppendExtra(
          'else\n'
          '  abort("%s partition has unexpected non-zero contents after OTA '
          'update");\n'
          'endif;' % (partition,))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    script.AppendExtra(
        'else\n'
        '  abort("%s partition has unexpected contents after OTA update");\n'
        'endif;' % (partition,))

  def _WriteUpdate(self, script, output_zip):
    """Add the transfer list, new data, and patch data files to
    'output_zip' and emit the block_image_update call."""
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    # Patch data is stored (not deflated) in the zip — presumably because
    # it is already compressed; confirm before changing.
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
                device=self.device, partition=self.partition))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    """Return the hex SHA-1 over the given 'ranges' read from 'source'."""
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    # One 4096-byte zero block, hashed num_blocks times.
    zero_block = '\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()
1475
1476
# Re-exported so callers can use common.DataImage without importing
# blockimgdiff directly.
DataImage = blockimgdiff.DataImage

# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "yaffs2": "MTD",
    "mtd": "MTD",
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}
1488
def GetTypeAndDevice(mount_point, info):
  """Look up 'mount_point' in the fstab of the given info dict.

  Returns a (partition_type, device) tuple, where partition_type is the
  mount/format type from PARTITION_TYPES (e.g. "EMMC") and device is the
  block device path.

  Raises KeyError if info has no fstab, or the mount point / fs_type is
  unknown.
  """
  fstab = info["fstab"]
  if fstab:
    return (PARTITION_TYPES[fstab[mount_point].fs_type],
            fstab[mount_point].device)
  else:
    # Include the mount point so callers' error paths are debuggable
    # (a bare KeyError carries no context).
    raise KeyError(mount_point)
1496
1497
def ParseCertificate(data):
  """Parse a PEM-format certificate.

  Extracts the base64 payload between the BEGIN/END CERTIFICATE markers
  in 'data' and returns the decoded DER bytes.
  """
  import base64

  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  # base64.b64decode behaves identically to the former
  # str.decode('base64') on Python 2, and also works on Python 3 (where
  # the 'base64' codec no longer exists on str).
  return base64.b64decode("".join(cert))
1511
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.  (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.)  Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
  system_root_image = info_dict.get("system_root_image", None) == "true"

  if full_recovery_image:
    # Ship the complete recovery image; no on-device patching needed.
    output_sink("etc/recovery.img", recovery_img.data)

  else:
    diff_program = ["imgdiff"]
    # If a recovery-resource.dat is present, pass it to imgdiff as a bonus
    # file, and give the on-device applypatch the matching -b flag.
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"
    else:
      bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    # No fstab entries for boot/recovery: nothing to install on device.
    return

  # Build the first-boot shell script: check whether recovery is already
  # the expected image; if not, (re)install it.
  if full_recovery_image:
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release.  Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  found = False
  if system_root_image:
    init_rc_dir = os.path.join(input_dir, "ROOT")
  else:
    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):
      continue

    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
      for line in f:
        # e.g. "service flash_recovery /system/bin/install-recovery.sh"
        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
        if m:
          sh_location = m.group(1)
          found = True
          break

    if found:
      break

  print "putting script in", sh_location

  output_sink(sh_location, sh)
1614