common.py revision d29ca1ca4b5bb5a62c1d9b143168f0cd4c964bac
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import errno
17import getopt
18import getpass
19import imp
20import os
21import platform
22import re
23import shlex
24import shutil
25import subprocess
26import sys
27import tempfile
28import threading
29import time
30import zipfile
31
32import blockimgdiff
33
34from hashlib import sha1 as sha1
35
36
class Options(object):
  """Container for the global flags shared by all releasetools scripts.

  A single module-level instance (OPTIONS) is created below; ParseOptions()
  populates it from the command line.
  """

  def __init__(self):
    search_paths_by_platform = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }
    # Host-tools directory; None when running on an unrecognized platform.
    self.search_path = search_paths_by_platform.get(sys.platform)

    # Signing configuration (paths are relative to search_path).
    self.signapk_path = "framework/signapk.jar"
    self.signapk_shared_library_path = "lib64"
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = "-Xmx2048m"  # JVM args.
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []

    # Miscellaneous global state.
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
69
# The single global options object shared by every releasetools script;
# ParseOptions() below stores parsed flags here.
OPTIONS = Options()


# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
75
76
class ExternalError(RuntimeError):
  """Raised when an external tool invoked by this module fails."""
79
80
def Run(args, **kwargs):
  """Create and return a subprocess.Popen object, printing the command
  line on the terminal if -v was specified.

  Args:
    args: The command line, as an argv-style list of strings.
    **kwargs: Forwarded unchanged to subprocess.Popen.

  Returns:
    The subprocess.Popen object; the caller is responsible for waiting on it.
  """
  if OPTIONS.verbose:
    print "  running: ", " ".join(args)
  return subprocess.Popen(args, **kwargs)
87
88
def CloseInheritedPipes():
  """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
  before doing other work."""
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      # st_mode bit 0x1000 (S_IFIFO) marks a pipe; close only those.
      mode = os.fstat(fd).st_mode
      if mode & 0x1000:
        os.close(fd)
    except OSError:
      # fd not open (or already closed) -- ignore.
      pass
103
104
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  Args:
    input_file: An open zipfile.ZipFile of the target-files zip, or the
        path of an unzipped target-files directory.
    input_dir: If not None, the path of the unzipped target-files directory.
        Properties that the build system stores as links into out/ are
        redirected to the corresponding files under this directory (the
        repacking case).

  Returns:
    The misc_info dict, augmented with backward-compatibility keys, the
    parsed recovery "fstab", and the "build.prop" dict.

  Raises:
    ValueError: If the recovery API version cannot be determined.
  """

  def read_helper(fn):
    # Read an entry from the zip, or the corresponding file from the
    # unpacked tree; raises KeyError in both cases if it is missing.
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)
        # NOTE(review): IOErrors other than ENOENT fall through and return
        # None, which callers do not expect -- consider re-raising.
  d = {}
  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    # ok if misc_info.txt doesn't exist
    pass

  # backwards compatibility: These values used to be in their own
  # files.  Look for them, in case we're processing an old
  # target_files zip.

  if "mkyaffs2_extra_flags" not in d:
    try:
      d["mkyaffs2_extra_flags"] = read_helper(
          "META/mkyaffs2-extra-flags.txt").strip()
    except KeyError:
      # ok if flags don't exist
      pass

  if "recovery_api_version" not in d:
    try:
      d["recovery_api_version"] = read_helper(
          "META/recovery-api-version.txt").strip()
    except KeyError:
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    try:
      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
    except KeyError:
      # ok if extensions don't exist
      pass

  if "fstab_version" not in d:
    d["fstab_version"] = "1"

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

    # Redirect {system,vendor}_base_fs_file.
    if "system_base_fs_file" in d:
      basename = os.path.basename(d["system_base_fs_file"])
      system_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(system_base_fs_file):
        d["system_base_fs_file"] = system_base_fs_file
      else:
        print "Warning: failed to find system base fs file: %s" % (
            system_base_fs_file,)
        del d["system_base_fs_file"]

    if "vendor_base_fs_file" in d:
      basename = os.path.basename(d["vendor_base_fs_file"])
      vendor_base_fs_file = os.path.join(input_dir, "META", basename)
      if os.path.exists(vendor_base_fs_file):
        d["vendor_base_fs_file"] = vendor_base_fs_file
      else:
        print "Warning: failed to find vendor base fs file: %s" % (
            vendor_base_fs_file,)
        del d["vendor_base_fs_file"]

  # Legacy image-size file: "<name> <value>" pairs. "blocksize" keeps its
  # name; every other entry becomes "<name>_size".
  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    pass

  def makeint(key):
    # Convert d[key] to int in place; base 0 auto-detects 0x/0 prefixes.
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  # NOTE(review): the False default can never equal "true", so this
  # effectively tests whether no_recovery is the literal string "true".
  if d.get("no_recovery", False) == "true":
    d["fstab"] = None
  else:
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                   d.get("system_root_image", False))
  d["build.prop"] = LoadBuildProp(read_helper)
  return d
242
def LoadBuildProp(read_helper):
  """Parse SYSTEM/build.prop into a dict.

  Args:
    read_helper: A function taking an archive-relative path and returning the
        file contents, raising KeyError when the entry is missing.

  Returns:
    A dict of build properties; empty (with a warning printed) when
    SYSTEM/build.prop is absent.
  """
  try:
    data = read_helper("SYSTEM/build.prop")
  except KeyError:
    # Bug fix: the old message did '%s" % zip', formatting the *builtin*
    # zip function into the warning. No input name is available here.
    print("Warning: could not find SYSTEM/build.prop")
    data = ""
  return LoadDictionaryFromLines(data.split("\n"))
250
def LoadDictionaryFromLines(lines):
  """Parse "key=value" lines into a dict.

  Blank lines and '#' comments are skipped; lines without '=' are ignored.
  Only the first '=' separates key from value, so values may themselves
  contain '='.
  """
  result = {}
  for raw in lines:
    entry = raw.strip()
    if not entry or entry.startswith("#"):
      continue
    key, sep, value = entry.partition("=")
    if sep:
      result[key] = value
  return result
261
def LoadRecoveryFSTab(read_helper, fstab_version, system_root_image=False):
  """Parse RECOVERY/RAMDISK/etc/recovery.fstab into a mount-point dict.

  Args:
    read_helper: A function taking an archive-relative path and returning the
        file contents, raising KeyError when the entry is missing.
    fstab_version: 1 or 2, selecting the recovery.fstab line format.
    system_root_image: If True, the root directory lives inside the system
        image; an alias "/system" entry pointing at "/" is added.

  Returns:
    A {mount_point: Partition} dict.

  Raises:
    ValueError: On a malformed line or an unknown fstab_version.
  """
  class Partition(object):
    def __init__(self, mount_point, fs_type, device, length, device2, context):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.device2 = device2
      self.context = context

  try:
    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
  except KeyError:
    print("Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab")
    data = ""

  if fstab_version == 1:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # <mount_point> <fs_type> <device> [<device2>|<options>]
      pieces = line.split()
      if not 3 <= len(pieces) <= 4:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

      # The optional 4th field is a second device if it starts with "/",
      # otherwise an option string.  (The old "5th field holds options"
      # branch was unreachable given the length check above and has been
      # dropped.)
      device2 = None
      options = None
      if len(pieces) == 4:
        if pieces[3].startswith("/"):
          device2 = pieces[3]
        else:
          options = pieces[3]

      mount_point = pieces[0]
      length = 0
      if options:
        options = options.split(",")
        for i in options:
          if i.startswith("length="):
            length = int(i[7:])
          else:
            print("%s: unknown option \"%s\"" % (mount_point, i))

      # Bug fix: the original call omitted the required 'context' argument,
      # so every v1 fstab entry raised TypeError.  V1 entries carry no
      # SELinux context, so pass None.
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
                                 device=pieces[2], length=length,
                                 device2=device2, context=None)

  elif fstab_version == 2:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
      pieces = line.split()
      if len(pieces) != 5:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

      # Ignore entries that are managed by vold
      options = pieces[4]
      if "voldmanaged=" in options:
        continue

      # It's a good line, parse it.  Only "length=" is honored among the
      # fs_mgr flags; everything else is ignored in the unified fstab.
      length = 0
      for i in options.split(","):
        if i.startswith("length="):
          length = int(i[7:])

      # Honor the SELinux context if present among the mount flags.
      context = None
      for i in pieces[3].split(","):
        if i.startswith("context="):
          context = i

      mount_point = pieces[1]
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                                 device=pieces[0], length=length,
                                 device2=None, context=context)

  else:
    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))

  # / is used for the system mount point when the root directory is included in
  # system. Other areas assume system is always at "/system" so point /system
  # at /.
  if system_root_image:
    assert "/system" not in d and "/" in d
    d["/system"] = d["/"]
  return d
361
362
def DumpInfoDict(d):
  """Print every info-dict entry as "key = (type) value", sorted by key."""
  for key in sorted(d):
    value = d[key]
    print("%-25s = (%s) %s" % (key, type(value).__name__, value))
366
367
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
  'sourcedir'), and turn them into a boot image.  Return the image data, or
  None if sourcedir does not appear to contains files for building the
  requested image.

  Args:
    sourcedir: Directory holding "kernel" plus optional "second", "cmdline",
        "base" and "pagesize" files and a "RAMDISK" subdirectory.
    fs_config_file: Path to the filesystem config passed to "mkbootfs -f";
        ignored when the file does not exist.
    info_dict: Build-properties dict; defaults to OPTIONS.info_dict.
    has_ramdisk: Whether to build a ramdisk from RAMDISK/ and embed it.
  """

  def make_ramdisk():
    # Pack RAMDISK/ with mkbootfs and gzip it via minigzip, streaming the
    # output of the first process into the second.
    ramdisk_img = tempfile.NamedTemporaryFile()

    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
    else:
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

    p2.wait()
    p1.wait()
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

    return ramdisk_img

  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    return None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_img = make_ramdisk()

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  # Each optional single-line parameter file becomes a mkbootimg flag
  # when it is present in sourcedir.
  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  args = info_dict.get("mkbootimg_version_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  # With vboot, mkbootimg writes an intermediate unsigned image that the
  # vboot signer later turns into the final image.
  img_unsigned = None
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
  else:
    cmd.extend(["--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    # Verified-boot signing in place: the image is both input and output.
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
           img_keyblock.name,
           img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # NOTE(review): arguments are transposed (signature is seek(offset,
  # whence)); this only works because os.SEEK_SET == 0, i.e. seek(0, 0).
  img.seek(os.SEEK_SET, 0)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data
495
496
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'."""

  # Prefer a prebuilt image when one was shipped in the target-files.
  for subdir in ("BOOTABLE_IMAGES", "IMAGES"):
    prebuilt = os.path.join(unpack_dir, subdir, prebuilt_name)
    if os.path.exists(prebuilt):
      print("using prebuilt %s from %s..." % (prebuilt_name, subdir))
      return File.FromLocalFile(name, prebuilt)

  print("building image from target_files %s..." % (tree_subdir,))

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot
  # image. Unless "recovery_as_boot" is specified, in which case we carry
  # the ramdisk for recovery.
  has_ramdisk = (info_dict.get("system_root_image") != "true" or
                 prebuilt_name != "boot.img" or
                 info_dict.get("recovery_as_boot") == "true")

  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk)
  return File(name, data) if data else None
534
535
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """

  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(zip_path, dest_dir):
    # -o: overwrite without prompting; -q: quiet.
    cmd = ["unzip", "-o", "-q", zip_path, "-d", dest_dir]
    if pattern is not None:
      cmd.append(pattern)
    proc = Run(cmd, stdout=subprocess.PIPE)
    proc.communicate()
    if proc.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (zip_path,))

  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m is None:
    unzip_to_dir(filename, tmp)
  else:
    unzip_to_dir(m.group(1), tmp)
    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    filename = m.group(1)

  return tmp, zipfile.ZipFile(filename, "r")
568
569
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password.

  Each key is probed with openssl to classify it as unencrypted,
  encrypted with an empty password, or encrypted with a real password;
  only the last group is prompted for.
  """

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    # First probe: try to parse the .pk8 as an *unencrypted* PKCS#8 key.
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Second probe: try decrypting with the empty password.
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        # NOTE(review): this match depends on openssl's exact error wording.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords
617
618
def GetMinSdkVersion(apk_name):
  """Get the minSdkVersion declared in the APK. This can be both a decimal
  number (API Level) or a codename.

  Raises ExternalError if aapt fails or no sdkVersion line is found.
  """

  p = Run(["aapt", "dump", "badging", apk_name], stdout=subprocess.PIPE)
  output, _ = p.communicate()
  # Bug fix: stderr is not piped, so communicate() always returns None for
  # it and the old "if err:" check could never fire.  The exit status is
  # the reliable failure signal.
  if p.returncode != 0:
    raise ExternalError("Failed to obtain minSdkVersion: aapt return code %s"
        % (p.returncode,))

  for line in output.split("\n"):
    # Looking for lines such as sdkVersion:'23' or sdkVersion:'M'
    m = re.match(r'sdkVersion:\'([^\']*)\'', line)
    if m:
      return m.group(1)
  raise ExternalError("No minSdkVersion returned by aapt")
636
637
def GetMinSdkVersionInt(apk_name, codename_to_api_level_map):
  """Get the minSdkVersion declared in the APK as a number (API Level). If
  minSdkVersion is set to a codename, it is translated to a number using the
  provided map.
  """

  version = GetMinSdkVersion(apk_name)
  try:
    return int(version)
  except ValueError:
    pass
  # Not a decimal number, so it must be a codename known to the map.
  if version not in codename_to_api_level_map:
    raise ExternalError("Unknown minSdkVersion: '%s'. Known codenames: %s"
                        % (version, codename_to_api_level_map))
  return codename_to_api_level_map[version]
654
655
def SignFile(input_name, output_name, key, password, min_api_level=None,
    codename_to_api_level_map=dict(),
    whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password.

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  min_api_level is the API Level (int) of the oldest platform this file may end
  up on. If not specified for an APK, the API Level is obtained by interpreting
  the minSdkVersion attribute of the APK's AndroidManifest.xml.

  codename_to_api_level_map is needed to translate the codename which may be
  encountered as the APK's minSdkVersion.

  Raises ExternalError if signapk.jar exits with a non-zero status.
  """

  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  # NOTE(review): OPTIONS.java_args is a single string passed as one argv
  # element, so it can carry exactly one JVM flag.
  cmd = [OPTIONS.java_path, OPTIONS.java_args,
         "-Djava.library.path=" + java_library_path,
         "-jar",
         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
  cmd.extend(OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")

  # Determine --min-sdk-version: an explicit argument wins; otherwise it is
  # read from the APK (skipped for whole-file signing, which isn't an APK).
  min_sdk_version = min_api_level
  if min_sdk_version is None:
    if not whole_file:
      min_sdk_version = GetMinSdkVersionInt(
          input_name, codename_to_api_level_map)
  if min_sdk_version is not None:
    cmd.extend(["--min-sdk-version", str(min_sdk_version)])

  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, output_name])

  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  if password is not None:
    # signapk reads the password from stdin, newline-terminated.
    password += "\n"
  p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
704
705
def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target.  Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size.

  Args:
    data: The image contents as a string.
    target: The image file name (e.g. "system.img"); a trailing ".img" is
        stripped to derive the mount point.
    info_dict: The misc_info dict, supplying "fstab" and "<device>_size".
  """

  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    if mount_point == "/userdata":
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    if "/" in device:
      # Use the partition basename to look up "<device>_size".
      device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:
    return

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page).  Floor division is intentional
    # here (Python 2 integer "/" semantics).
    limit = limit / 2048 * (2048+64)
  size = len(data)
  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  # >= 99% is an error; >= 95% prints a prominent warning.
  if pct >= 99.0:
    raise ExternalError(msg)
  elif pct >= 95.0:
    print
    print "  WARNING: ", msg
    print
  elif OPTIONS.verbose:
    print "  ", msg
744
745
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict.  Certs are stored without their
  public-key suffix; special strings (PRESIGNED/EXTERNAL) are kept as-is."""
  pattern = re.compile(
      r'^name="(.*)"\s+certificate="(.*)"\s+private_key="(.*)"$')
  certmap = {}
  for raw_line in tf_zip.read("META/apkcerts.txt").split("\n"):
    line = raw_line.strip()
    if not line:
      continue
    m = pattern.match(line)
    if not m:
      continue
    name, cert, privkey = m.groups()
    pub_len = len(OPTIONS.public_key_suffix)
    priv_len = len(OPTIONS.private_key_suffix)
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      certmap[name] = cert
    elif (cert.endswith(OPTIONS.public_key_suffix) and
          privkey.endswith(OPTIONS.private_key_suffix) and
          cert[:-pub_len] == privkey[:-priv_len]):
      # Cert and key must share the same basename.
      certmap[name] = cert[:-pub_len]
    else:
      raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
  return certmap
769
770
771COMMON_DOCSTRING = """
772  -p  (--path)  <dir>
773      Prepend <dir>/bin to the list of places to search for binaries
774      run by this script, and expect to find jars in <dir>/framework.
775
776  -s  (--device_specific) <file>
777      Path to the python module containing device-specific
778      releasetools code.
779
780  -x  (--extra)  <key=value>
781      Add a key/value pair to the 'extras' dict, which device-specific
782      extension code may look at.
783
784  -v  (--verbose)
785      Show command lines being executed.
786
787  -h  (--help)
788      Display this usage message and exit.
789"""
790
def Usage(docstring):
  """Print the calling script's docstring followed by the common flags."""
  print docstring.rstrip("\n")
  print COMMON_DOCSTRING
794
795
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler.

  Recognized flags are stored on the module-level OPTIONS object.
  Exits with status 2 on a malformed command line.
  """

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=",
         "signapk_shared_library_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print "**", str(err), "**"
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--signapk_shared_library_path",):
      OPTIONS.signapk_shared_library_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      # Unrecognized flags are offered to the caller's handler; anything
      # it rejects is a programming error (getopt already filtered typos).
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  # Make host tools under <search_path>/bin take precedence on PATH.
  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
865
866
def MakeTempFile(prefix=None, suffix=None):
  """Create an empty temp file, register it for deletion by Cleanup(),
  and return its path."""
  handle, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # mkstemp leaves the descriptor open; close it so only the path matters.
  os.close(handle)
  OPTIONS.tempfiles.append(path)
  return path
874
875
def Cleanup():
  """Delete every temp file or directory registered in OPTIONS.tempfiles."""
  for path in OPTIONS.tempfiles:
    remover = shutil.rmtree if os.path.isdir(path) else os.remove
    remover(path)
882
883
884class PasswordManager(object):
885  def __init__(self):
886    self.editor = os.getenv("EDITOR", None)
887    self.pwfile = os.getenv("ANDROID_PW_FILE", None)
888
  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      # Seed missing keys with empty values so they appear in the file
      # presented to the user for editing.
      for i in missing:
        current[i] = ""

      if not first:
        print "key file %s still missing some passwords." % (self.pwfile,)
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)
923
924  def PromptResult(self, current): # pylint: disable=no-self-use
925    """Prompt the user to enter a value (password) for each key in
926    'current' whose value is fales.  Returns a new dict with all the
927    values.
928    """
929    result = {}
930    for k, v in sorted(current.iteritems()):
931      if v:
932        result[k] = v
933      else:
934        while True:
935          result[k] = getpass.getpass(
936              "Enter password for %s key> " % k).strip()
937          if result[k]:
938            break
939    return result
940
941  def UpdateAndReadFile(self, current):
942    if not self.editor or not self.pwfile:
943      return self.PromptResult(current)
944
945    f = open(self.pwfile, "w")
946    os.chmod(self.pwfile, 0o600)
947    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
948    f.write("# (Additional spaces are harmless.)\n\n")
949
950    first_line = None
951    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
952    for i, (_, k, v) in enumerate(sorted_list):
953      f.write("[[[  %s  ]]] %s\n" % (v, k))
954      if not v and first_line is None:
955        # position cursor on first line with no password.
956        first_line = i + 4
957    f.close()
958
959    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
960    _, _ = p.communicate()
961
962    return self.ReadFile()
963
964  def ReadFile(self):
965    result = {}
966    if self.pwfile is None:
967      return result
968    try:
969      f = open(self.pwfile, "r")
970      for line in f:
971        line = line.strip()
972        if not line or line[0] == '#':
973          continue
974        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
975        if not m:
976          print "failed to parse password file: ", line
977        else:
978          result[m.group(2)] = m.group(1)
979      f.close()
980    except IOError as e:
981      if e.errno != errno.ENOENT:
982        print "error reading password file: ", str(e)
983    return result
984
985
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Add 'filename' to 'zip_file' with repeatable metadata.

  Temporarily forces the file's permissions to 'perms' and its
  timestamps to a fixed date (2009-01-01), writes it into the archive,
  then restores the original stat info, so repeated builds produce
  identical zip entries.

  http://b/18015246: Python 2.7's zipfile implementation wrongly thinks
  that zip64 is required for files larger than 2GiB, so ZIP64_LIMIT is
  raised for the duration of the write.  Note that `zipfile.writestr()`
  cannot handle strings that large (the interpreter sometimes rejects
  them), which is why `zipfile.write()` is used directly here.  This
  mess can be avoided if we port to python3.
  """
  import datetime

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  effective_compression = (zip_file.compression if compress_type is None
                           else compress_type)
  entry_name = filename if arcname is None else arcname

  original_stat = os.stat(filename)

  try:
    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
    # file to be zipped and reset it when we're done.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable.
    epoch = datetime.datetime.fromtimestamp(0)
    fixed_time = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
    os.utime(filename, (fixed_time, fixed_time))

    zip_file.write(filename, arcname=entry_name,
                   compress_type=effective_compression)
  finally:
    os.chmod(filename, original_stat.st_mode)
    os.utime(filename, (original_stat.st_atime, original_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1024
1025
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.

  Args:
    zip_file: the ZipFile to add to.
    zinfo_or_arcname: either an archive name string or a ZipInfo object.
    data: the data to store.
    perms: optional POSIX mode; defaults to 0o100644 when an archive
        name (rather than a ZipInfo) is given.
    compress_type: optional compression override; takes priority over
        the value already in the ZipInfo.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  try:
    if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
      zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
      zinfo.compress_type = zip_file.compression
      if perms is None:
        # Default to a regular file with 0644 permissions.
        perms = 0o100644
    else:
      zinfo = zinfo_or_arcname

    # If compress_type is given, it overrides the value in zinfo.
    if compress_type is not None:
      zinfo.compress_type = compress_type

    # If perms is given, it has a priority.
    if perms is not None:
      # If perms doesn't set the file type, mark it as a regular file.
      if perms & 0o770000 == 0:
        perms |= 0o100000
      zinfo.external_attr = perms << 16

    # Use a fixed timestamp so the output is repeatable.
    zinfo.date_time = (2009, 1, 1, 0, 0, 0)

    zip_file.writestr(zinfo, data)
  finally:
    # Restore the limit even if writestr() raises (e.g. oversized
    # input), matching ZipWrite()'s try/finally cleanup.
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1066
1067
def ZipClose(zip_file):
  """Close 'zip_file' with the zip64 limit raised.

  http://b/18015246: zipfile also refers to ZIP64_LIMIT during close()
  when it writes out the central directory, so the same workaround as
  in ZipWrite()/ZipWriteStr() is needed here.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  try:
    zip_file.close()
  finally:
    # Restore the limit even if close() raises, so a failure here
    # doesn't leave the module-level limit permanently altered.
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1078
1079
class DeviceSpecificParams(object):
  """Proxy for optional device-specific OTA extension hooks.

  Loads the module named by OPTIONS.device_specific (either a directory
  containing releasetools.py, or a .py file) and forwards the hook
  methods below to it.  When the module is missing, or does not define
  a given hook, the call is a no-op returning a default value.
  """
  # Class-level so the extension module is loaded at most once and is
  # shared across instances.
  module = None

  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.iteritems():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        if os.path.isdir(path):
          # A directory: expect a releasetools.py inside it.
          info = imp.find_module("releasetools", [path])
        else:
          # A file: strip a trailing .py so imp can find it by name.
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        print "loaded device-specific extensions from", path
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        print "unable to load device-specific module; assuming none"

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    """Called when generating a verification-only package; lets the
    device-specific module add its own assertions."""
    return self._DoCall("VerifyOTA_Assertions")
1164
class File(object):
  """An in-memory file: a name plus its contents, with the size and
  SHA-1 digest computed up front."""

  def __init__(self, name, data):
    self.name = name
    self.data = data
    self.size = len(data)
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Build a File called 'name' from the contents of 'diskname'."""
    fh = open(diskname, "rb")
    contents = fh.read()
    fh.close()
    return File(name, contents)

  def WriteToTemp(self):
    """Dump the contents into a NamedTemporaryFile and return it,
    still open and already flushed."""
    temp = tempfile.NamedTemporaryFile()
    temp.write(self.data)
    temp.flush()
    return temp

  def AddToZip(self, z, compression=None):
    """Store this file in zip 'z' under its own name."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1187
# Maps a target file's extension to the external diff program (a string
# or an argv list) used to compute its patch; "-z" handles zip-based
# formats.  Extensions not listed here fall back to "bsdiff" (see
# Difference.ComputePatch).
DIFF_PROGRAM_BY_EXT = {
    ".gz" : "imgdiff",
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    ".img" : "imgdiff",
    }
1195
class Difference(object):
  """Computes the binary patch turning source file 'sf' into target
  file 'tf', using an external diff program (imgdiff or bsdiff)."""

  def __init__(self, tf, sf, diff_program=None):
    # tf/sf are File objects.  diff_program, when given, overrides the
    # extension-based program choice (a string or an argv list).
    self.tf = tf
    self.sf = sf
    self.patch = None
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      # Pick the program by the target's extension; default to bsdiff.
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    # NOTE(review): this recomputed 'ext' is never used below.
    ext = os.path.splitext(tf.name)[1]

    try:
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        # Copy so appending the filenames doesn't mutate the caller's
        # (or the module-level table's) list.
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []
      # communicate() runs on a helper thread so the join() below can
      # impose a timeout on the diff program.
      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        print "WARNING: diff command timed out"
        # Escalate: terminate first, then kill if it still won't die.
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      if err or p.returncode != 0:
        print "WARNING: failure running %s:\n%s\n" % (
            diff_program, "".join(err))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch


  def GetPatch(self):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
1267
1268
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs',
  spreading the work over OPTIONS.worker_threads threads."""
  print len(diffs), "diffs to compute"

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    try:
      # The lock guards diff_iter and the printed progress output; it
      # is deliberately released around the slow ComputePatch() call.
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print "patching failed!                                  %s" % (name,)
        else:
          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception as e:
      print e
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
1313
1314
class BlockDifference(object):
  """Computes a block-based image diff for one partition and emits the
  updater-script fragments that verify and apply it."""

  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None):
    # tgt/src are image objects accepted by blockimgdiff.BlockImageDiff;
    # src of None means a full OTA (the target is written unconditionally).
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block

    if version is None:
      # Use the highest transfer-list version the target declares.
      version = 1
      if OPTIONS.info_dict:
        version = max(
            int(i) for i in
            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version)
    # The generated files land in a temp dir that Cleanup() removes.
    tmpdir = tempfile.mkdtemp()
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)
    self._required_cache = b.max_stashed_size
    self.touched_src_ranges = b.touched_src_ranges
    self.touched_src_sha1 = b.touched_src_sha1

    if src is None:
      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
    else:
      _, self.device = GetTypeAndDevice("/" + partition,
                                        OPTIONS.source_info_dict)

  @property
  def required_cache(self):
    # Maximum number of bytes stashed at any point while applying the diff.
    return self._required_cache

  def WriteScript(self, script, output_zip, progress=None):
    """Emit the update commands (and, with OPTIONS.verify, post-install
    verification) for this partition into 'script'."""
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    if OPTIONS.verify:
      self._WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
                       'ui_print("    Verified.") || '
                       'ui_print("\\"%s\\" has unexpected contents.");' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True),
                       self.device))
    script.AppendExtra("")

  def WriteVerifyScript(self, script, touched_blocks_only=False):
    """Emit pre-install checks that the source blocks (or, for v3+, an
    already fully-updated target) have the expected contents."""
    partition = self.partition

    # full OTA
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))

    # incremental OTA
    else:
      if touched_blocks_only and self.version >= 3:
        # Only hash the blocks the transfer list actually reads.
        ranges = self.touched_src_ranges
        expected_sha1 = self.touched_src_sha1
      else:
        ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
        expected_sha1 = self.src.TotalSha1()

      # No blocks to be checked, skipping.
      if not ranges:
        return

      ranges_str = ranges.to_string_raw()
      if self.version >= 4:
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, expected_sha1,
                            self.device, partition, partition, partition))
      elif self.version == 3:
        # NOTE(review): this branch emits exactly the same script as the
        # version >= 4 branch above; they could likely be merged.
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, expected_sha1,
                            self.device, partition, partition, partition))
      else:
        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                           self.device, ranges_str, self.src.TotalSha1()))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      if self.version >= 4:

        # Bug: 21124327
        # When generating incrementals for the system and vendor partitions in
        # version 4 or newer, explicitly check the first block (which contains
        # the superblock) of the partition to see if it's what we expect. If
        # this check fails, give an explicit log message about the partition
        # having been remounted R/W (the most likely explanation).
        if self.check_first_block:
          script.AppendExtra('check_first_block("%s");' % (self.device,))

        # If version >= 4, try block recovery before abort update
        script.AppendExtra((
            'ifelse (block_image_recover("{device}", "{ranges}") && '
            'block_image_verify("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat"), '
            'ui_print("{partition} recovered successfully."), '
            'abort("{partition} partition fails to recover"));\n'
            'endif;').format(device=self.device, ranges=ranges_str,
                             partition=partition))

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      else:
        script.AppendExtra(('abort("%s partition has unexpected contents");\n'
                            'endif;') % (partition,))

  def _WritePostInstallVerifyScript(self, script):
    """Emit checks that the updated target blocks (and any zeroed-out
    extended blocks) hash as expected after the update."""
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                         self.device, ranges_str,
                         self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      script.AppendExtra(
          'else\n'
          '  abort("%s partition has unexpected non-zero contents after OTA '
          'update");\n'
          'endif;' % (partition,))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    script.AppendExtra(
        'else\n'
        '  abort("%s partition has unexpected contents after OTA update");\n'
        'endif;' % (partition,))

  def _WriteUpdate(self, script, output_zip):
    """Pack the transfer list and data files into 'output_zip' and emit
    the block_image_update call that consumes them."""
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    # The patch data is already compressed; store it uncompressed.
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat") ||\n'
            '    abort("Failed to update {partition} image.");'.format(
                device=self.device, partition=self.partition))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    """Return the SHA-1 hex digest of the given block ranges of 'source'."""
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    # 4096 matches the block size used by the block-based OTA tools.
    zero_block = '\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()
1522
1523
# Re-export: DataImage is defined in blockimgdiff, but callers refer to
# it as common.DataImage.
DataImage = blockimgdiff.DataImage
1525
# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "yaffs2": "MTD",
    "mtd": "MTD",
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}

def GetTypeAndDevice(mount_point, info):
  """Look up 'mount_point' in the fstab carried by 'info'.

  Args:
    mount_point: the mount point, e.g. "/system".
    info: an info dict with a parsed "fstab" entry.

  Returns:
    A (partition_type, device) tuple, where partition_type is one of
    the values of PARTITION_TYPES.

  Raises:
    KeyError: if there is no fstab, or the mount point (or its
        fs_type) is not found in it.
  """
  fstab = info["fstab"]
  if fstab:
    # Single lookup instead of two; a missing mount point still
    # raises KeyError exactly as before.
    entry = fstab[mount_point]
    return (PARTITION_TYPES[entry.fs_type], entry.device)
  else:
    # Carry the mount point so callers can tell what was missing
    # (previously this raised a bare, message-less KeyError).
    raise KeyError(mount_point)
1543
1544
def ParseCertificate(data):
  """Parse a PEM-format certificate and return its DER-encoded body.

  Collects the base64 lines strictly between the BEGIN/END CERTIFICATE
  markers and decodes them.  Uses base64.b64decode instead of the
  deprecated Python-2-only 'base64' string codec, so this works on
  both Python 2 and Python 3.
  """
  import base64

  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      # Start collecting from the *next* line (the marker itself is
      # intentionally excluded).
      save = True
  return base64.b64decode("".join(cert))
1558
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.  (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.)  Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  info_dict should be the dictionary returned
  by common.LoadInfoDict() on the input target_files; it defaults to
  OPTIONS.info_dict.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
  system_root_image = info_dict.get("system_root_image", None) == "true"

  if full_recovery_image:
    # No patching: ship the complete recovery image in the package.
    output_sink("etc/recovery.img", recovery_img.data)

  else:
    diff_program = ["imgdiff"]
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      # A shared resource file shrinks the patch; applypatch is given
      # the same file via -b (the "bonus" argument) at install time.
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"
    else:
      bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    return

  if full_recovery_image:
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release.  Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  found = False
  if system_root_image:
    init_rc_dir = os.path.join(input_dir, "ROOT")
  else:
    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):
      continue

    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
      for line in f:
        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
        if m:
          sh_location = m.group(1)
          found = True
          break

    if found:
      break

  print "putting script in", sh_location

  output_sink(sh_location, sh)
1661