# common.py revision 9667b18f2300e4bf0f33d3aef51d2f48bcb6778b
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import errno
17import getopt
18import getpass
19import imp
20import os
21import platform
22import re
23import shlex
24import shutil
25import subprocess
26import sys
27import tempfile
28import threading
29import time
30import zipfile
31
32import blockimgdiff
33import rangelib
34
35from hashlib import sha1 as sha1
36
37
class Options(object):
  """Container for the global options shared by the releasetools scripts.

  A single module-level instance (OPTIONS, created below) is mutated by
  ParseOptions() and read throughout this module.
  """

  def __init__(self):
    # Host output directory, keyed by sys.platform; None if unknown host.
    search_paths = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }
    self.search_path = search_paths.get(sys.platform, None)

    # Paths relative to search_path.
    self.signapk_path = "framework/signapk.jar"
    self.signapk_shared_library_path = "lib64"
    self.extra_signapk_args = []

    # Java invocation; default to whatever "java" is on $PATH.
    self.java_path = "java"
    self.java_args = "-Xmx2048m"  # JVM args.

    # Suffixes appended to a key's base name to locate its files.
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"

    # Use the otatools-built boot_signer by default.
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []

    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None

    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
69
70
# Module-level singleton holding the global options for all scripts.
OPTIONS = Options()


# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")


class ExternalError(RuntimeError):
  """Raised when an invoked external tool or process fails."""
  pass
80
81
def Run(args, **kwargs):
  """Create and return a subprocess.Popen object, printing the command
  line on the terminal if -v was specified.

  Args:
    args: list of program arguments, passed straight to subprocess.Popen.
    **kwargs: forwarded unchanged to subprocess.Popen (stdin/stdout/etc).
  """
  if OPTIONS.verbose:
    print "  running: ", " ".join(args)
  return subprocess.Popen(args, **kwargs)
88
89
def CloseInheritedPipes():
  """Close pipe file descriptors leaked to us by gmake on Mac OS.

  Gmake on Mac OS leaks pipe fds into child processes; close any such
  descriptors before doing other work.  No-op on other platforms.
  """
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      st = os.fstat(fd)
    except OSError:
      # fd not open; nothing to do.
      continue
    if st is None:
      continue
    # Mode bit 0x1000 (S_IFIFO) marks a pipe/FIFO.
    if st[0] & 0x1000:
      try:
        os.close(fd)
      except OSError:
        pass
104
105
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  input_file is either an open zipfile.ZipFile of a target-files zip,
  or the path of an unzipped target-files directory.  If input_dir is
  not None, we are repacking from an unzipped directory, and a few
  properties that are normally links into out/ are redirected to the
  corresponding files under input_dir.

  Raises:
    ValueError: if the recovery API version cannot be determined.
  """

  def read_helper(fn):
    # Read an archive-relative path from either the zip or the directory.
    # Raises KeyError when the entry does not exist.
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)
        # NOTE(review): non-ENOENT IOErrors fall through and return None
        # here -- confirm that is intended.
  d = {}
  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    # ok if misc_info.txt doesn't exist
    pass

  # backwards compatibility: These values used to be in their own
  # files.  Look for them, in case we're processing an old
  # target_files zip.

  if "mkyaffs2_extra_flags" not in d:
    try:
      d["mkyaffs2_extra_flags"] = read_helper(
          "META/mkyaffs2-extra-flags.txt").strip()
    except KeyError:
      # ok if flags don't exist
      pass

  if "recovery_api_version" not in d:
    try:
      d["recovery_api_version"] = read_helper(
          "META/recovery-api-version.txt").strip()
    except KeyError:
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    try:
      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
    except KeyError:
      # ok if extensions don't exist
      pass

  if "fstab_version" not in d:
    # Old target-files predate fstab v2; assume version 1.
    d["fstab_version"] = "1"

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

  # Legacy image-size file: "name value" pairs, one per line.  "blocksize"
  # is stored as-is; every other name becomes "<name>_size".
  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    pass

  def makeint(key):
    # Convert a known-numeric property to int in place; base 0 lets the
    # value carry its own 0x/0o prefix.
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  if d.get("no_recovery", False) == "true":
    d["fstab"] = None
  else:
    d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                   d.get("system_root_image", False))
  d["build.prop"] = LoadBuildProp(read_helper)
  return d
222
223def LoadBuildProp(read_helper):
224  try:
225    data = read_helper("SYSTEM/build.prop")
226  except KeyError:
227    print "Warning: could not find SYSTEM/build.prop in %s" % zip
228    data = ""
229  return LoadDictionaryFromLines(data.split("\n"))
230
def LoadDictionaryFromLines(lines):
  """Build a dict from an iterable of "name=value" lines.

  Blank lines, comment lines (leading '#'), and lines without an '='
  are skipped.  Only the first '=' separates the name from the value.
  """
  result = {}
  for raw in lines:
    entry = raw.strip()
    if not entry or entry.startswith("#"):
      continue
    if "=" not in entry:
      continue
    key, _, value = entry.partition("=")
    result[key] = value
  return result
241
242def LoadRecoveryFSTab(read_helper, fstab_version, system_root_image=False):
243  class Partition(object):
244    def __init__(self, mount_point, fs_type, device, length, device2, context):
245      self.mount_point = mount_point
246      self.fs_type = fs_type
247      self.device = device
248      self.length = length
249      self.device2 = device2
250      self.context = context
251
252  try:
253    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
254  except KeyError:
255    print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab"
256    data = ""
257
258  if fstab_version == 1:
259    d = {}
260    for line in data.split("\n"):
261      line = line.strip()
262      if not line or line.startswith("#"):
263        continue
264      pieces = line.split()
265      if not 3 <= len(pieces) <= 4:
266        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
267      options = None
268      if len(pieces) >= 4:
269        if pieces[3].startswith("/"):
270          device2 = pieces[3]
271          if len(pieces) >= 5:
272            options = pieces[4]
273        else:
274          device2 = None
275          options = pieces[3]
276      else:
277        device2 = None
278
279      mount_point = pieces[0]
280      length = 0
281      if options:
282        options = options.split(",")
283        for i in options:
284          if i.startswith("length="):
285            length = int(i[7:])
286          else:
287            print "%s: unknown option \"%s\"" % (mount_point, i)
288
289      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
290                                 device=pieces[2], length=length,
291                                 device2=device2)
292
293  elif fstab_version == 2:
294    d = {}
295    for line in data.split("\n"):
296      line = line.strip()
297      if not line or line.startswith("#"):
298        continue
299      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
300      pieces = line.split()
301      if len(pieces) != 5:
302        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
303
304      # Ignore entries that are managed by vold
305      options = pieces[4]
306      if "voldmanaged=" in options:
307        continue
308
309      # It's a good line, parse it
310      length = 0
311      options = options.split(",")
312      for i in options:
313        if i.startswith("length="):
314          length = int(i[7:])
315        else:
316          # Ignore all unknown options in the unified fstab
317          continue
318
319      mount_flags = pieces[3]
320      # Honor the SELinux context if present.
321      context = None
322      for i in mount_flags.split(","):
323        if i.startswith("context="):
324          context = i
325
326      mount_point = pieces[1]
327      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
328                                 device=pieces[0], length=length,
329                                 device2=None, context=context)
330
331  else:
332    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
333
334  # / is used for the system mount point when the root directory is included in
335  # system. Other areas assume system is always at "/system" so point /system
336  # at /.
337  if system_root_image:
338    assert not d.has_key("/system") and d.has_key("/")
339    d["/system"] = d["/"]
340  return d
341
342
def DumpInfoDict(d):
  """Print the info dict, one sorted "key = (type) value" line per entry."""
  for k, v in sorted(d.items()):
    print "%-25s = (%s) %s" % (k, type(v).__name__, v)
346
347
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input
  (in 'sourcedir'), and turn them into a boot image.  Return the image data,
  or None if sourcedir does not appear to contain files for building the
  requested image.

  Args:
    sourcedir: directory holding "kernel" plus optional "cmdline", "base",
        "pagesize", "second" files and a "RAMDISK" subdirectory.
    fs_config_file: fs_config file passed to mkbootfs when it exists.
    info_dict: build properties dict; defaults to OPTIONS.info_dict.
    has_ramdisk: whether to build and include a ramdisk image.

  Raises:
    AssertionError: if any invoked external tool exits non-zero.
  """

  def make_ramdisk():
    # Pack sourcedir/RAMDISK with mkbootfs and compress it with minigzip
    # into a temp file, which is returned still open.
    ramdisk_img = tempfile.NamedTemporaryFile()

    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
    else:
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

    p2.wait()
    p1.wait()
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

    return ramdisk_img

  def read_arg_file(fn):
    # Read a single-value helper file (cmdline/base/pagesize), closing the
    # handle promptly instead of leaking it (bare open(fn).read() before).
    with open(fn) as f:
      return f.read().rstrip("\n")

  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    return None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_img = make_ramdisk()

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(read_arg_file(fn))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(read_arg_file(fn))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(read_arg_file(fn))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  # With vboot, mkbootimg writes to an intermediate unsigned image that
  # the vboot signer then turns into the final img.
  img_unsigned = None
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
  else:
    cmd.extend(["--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    # Sign in place with boot_signer; the mount point ("/boot", "/recovery")
    # is derived from the source directory name.
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk",
           info_dict["vboot_subkey"] + ".vbprivk",
           img_keyblock.name,
           img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # Bug fix: seek() takes (offset, whence); the original passed them as
  # seek(os.SEEK_SET, 0), which only worked because os.SEEK_SET == 0.
  img.seek(0, os.SEEK_SET)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data
471
472
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'.

  Returns None when the image cannot be built from 'tree_subdir' either.
  """

  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt_path):
    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
    return File.FromLocalFile(name, prebuilt_path)

  print "building image from target_files %s..." % (tree_subdir,)

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot image.
  has_ramdisk = (info_dict.get("system_root_image", None) != "true" or
                 prebuilt_name != "boot.img")

  # e.g. tree_subdir "BOOT" -> "META/boot_filesystem_config.txt".
  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk)
  if data:
    return File(name, data)
  return None
507
508
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """

  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  # Register for deletion by Cleanup().
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(archive, destination):
    # -o: overwrite without prompting; -q: quiet.
    cmd = ["unzip", "-o", "-q", archive, "-d", destination]
    if pattern is not None:
      cmd.append(pattern)
    proc = Run(cmd, stdout=subprocess.PIPE)
    proc.communicate()
    if proc.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (archive,))

  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m is None:
    unzip_to_dir(filename, tmp)
  else:
    unzip_to_dir(m.group(1), tmp)
    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    # The returned ZipFile is of the main (first) archive.
    filename = m.group(1)

  return tmp, zipfile.ZipFile(filename, "r")
541
542
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password.

  Keys are classified by probing them with openssl: unencrypted keys
  (and keys openssl cannot parse) map to None; keys that decrypt with
  an empty passphrase map to ''; the rest are collected interactively
  via PasswordManager.
  """

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    # First probe: try parsing the DER key with no decryption at all.
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Second probe: try decrypting with an empty passphrase.
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords
590
591
def SignFile(input_name, output_name, key, password, whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.

  Uses the given key and password (the latter may be None if the key has
  no password).

  If whole_file is true, pass the "-w" option to SignApk so it embeds a
  signature covering the whole file in the archive comment of the zip.

  Raises:
    ExternalError: if signapk.jar exits with a non-zero status.
  """

  java_library_path = os.path.join(
      OPTIONS.search_path, OPTIONS.signapk_shared_library_path)

  cmd = [
      OPTIONS.java_path,
      OPTIONS.java_args,
      "-Djava.library.path=" + java_library_path,
      "-jar",
      os.path.join(OPTIONS.search_path, OPTIONS.signapk_path),
  ]
  cmd += OPTIONS.extra_signapk_args
  if whole_file:
    cmd += ["-w"]
  cmd += [key + OPTIONS.public_key_suffix,
          key + OPTIONS.private_key_suffix,
          input_name, output_name]

  proc = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  # The password (if any) is fed on stdin, newline-terminated.
  proc.communicate(password if password is None else password + "\n")
  if proc.returncode != 0:
    raise ExternalError(
        "signapk.jar failed: return code %s" % (proc.returncode,))
622
623
def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target.  Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size.

  The limit is looked up as "<device>_size" in info_dict, where
  <device> is the last path component of the fstab device for the
  target's mount point.  Silently returns when no fstab entry, fs_type
  or limit is available.
  """

  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    if mount_point == "/userdata":
      # The userdata image is mounted at /data.
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    if "/" in device:
      # Keep only the basename of the device path.
      device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:
    return

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page)
    limit = limit / 2048 * (2048+64)
  size = len(data)
  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  if pct >= 99.0:
    raise ExternalError(msg)
  elif pct >= 95.0:
    print
    print "  WARNING: ", msg
    print
  elif OPTIONS.verbose:
    print "  ", msg
662
663
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict.

  Certificates in SPECIAL_CERT_STRINGS pass through unchanged; for
  regular entries the key suffix is stripped off the certificate path.

  Raises:
    ValueError: if a matched line has mismatched cert/key names.
  """
  line_re = re.compile(r'^name="(.*)"\s+certificate="(.*)"\s+'
                       r'private_key="(.*)"$')
  certmap = {}
  for raw_line in tf_zip.read("META/apkcerts.txt").split("\n"):
    entry = raw_line.strip()
    if not entry:
      continue
    m = line_re.match(entry)
    if not m:
      # Non-matching lines are silently skipped.
      continue
    name, cert, privkey = m.groups()
    pub_len = len(OPTIONS.public_key_suffix)
    priv_len = len(OPTIONS.private_key_suffix)
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      certmap[name] = cert
    elif (cert.endswith(OPTIONS.public_key_suffix) and
          privkey.endswith(OPTIONS.private_key_suffix) and
          cert[:-pub_len] == privkey[:-priv_len]):
      certmap[name] = cert[:-pub_len]
    else:
      raise ValueError("failed to parse line from apkcerts.txt:\n" + entry)
  return certmap
687
688
# Usage text for the options shared by all releasetools scripts; appended
# to each script's own docstring by Usage().
COMMON_DOCSTRING = """
  -p  (--path)  <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the python module containing device-specific
      releasetools code.

  -x  (--extra)  <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""
708
def Usage(docstring):
  """Print the calling script's docstring followed by the common options."""
  print docstring.rstrip("\n")
  print COMMON_DOCSTRING
712
713
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler.

  Side effects: mutates the global OPTIONS object, and prepends
  <search_path>/bin to $PATH when a search path is set.  Exits the
  process on -h/--help or on a getopt error.
  """

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=",
         "signapk_shared_library_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print "**", str(err), "**"
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--signapk_shared_library_path",):
      OPTIONS.signapk_shared_library_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      # Unrecognized flags go to the caller's handler, if any.
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
783
784
def MakeTempFile(prefix=None, suffix=None):
  """Create a temp file, register it for deletion by Cleanup(), and
  return its filename.

  The descriptor returned by mkstemp is closed immediately; callers
  reopen the file by name as needed.
  """
  fd, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  os.close(fd)
  OPTIONS.tempfiles.append(path)
  return path
792
793
def Cleanup():
  """Delete all temp files/dirs registered in OPTIONS.tempfiles.

  Bug fix: the list is now cleared afterwards, so calling Cleanup()
  twice no longer raises OSError trying to re-delete removed paths.
  """
  for i in OPTIONS.tempfiles:
    if os.path.isdir(i):
      shutil.rmtree(i)
    else:
      os.remove(i)
  del OPTIONS.tempfiles[:]
800
801
class PasswordManager(object):
  """Collects key passwords, either through $ANDROID_PW_FILE edited via
  $EDITOR, or by prompting on the terminal."""

  def __init__(self):
    # Both settings come from the environment; either may be None.
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.

    Raises:
      RuntimeError: if the user declines to re-edit the password file.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      # Seed empty entries so they show up in the editable file.
      for i in missing:
        current[i] = ""

      if not first:
        print "key file %s still missing some passwords." % (self.pwfile,)
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.iteritems()):
      if v:
        result[k] = v
      else:
        # Re-prompt until a non-empty password is entered.
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    """Write 'current' to the password file, let the user edit it, and
    return the re-parsed contents.  Falls back to terminal prompting
    when no editor or password file is configured."""
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    # Passwords on disk: restrict to owner read/write.
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    # Sort passwordless entries first so the cursor lands on one.
    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  def ReadFile(self):
    """Parse the password file into {key_name: password}.  A missing
    file is treated as empty."""
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        # Lines look like: [[[  password  ]]] key_name
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          print "failed to parse password file: ", line
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      if e.errno != errno.ENOENT:
        print "error reading password file: ", str(e)
    return result
902
903
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Write 'filename' into 'zip_file' with fixed permissions and mtime.

  http://b/18015246: Python 2.7's zipfile implementation wrongly thinks
  zip64 is required for files larger than 2GiB, so ZIP64_LIMIT is raised
  for the duration of the call.  Note that `zipfile.writestr()` will not
  work for strings larger than 2GiB (the interpreter sometimes rejects
  strings that large); `zipfile.write()` must be used directly for those.
  This mess can be avoided if we port to python3.
  """
  import datetime

  old_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  compress_type = (zip_file.compression if compress_type is None
                   else compress_type)
  arcname = filename if arcname is None else arcname

  original_stat = os.stat(filename)

  try:
    # `zipfile.write()` doesn't accept a ZipInfo, so temporarily rewrite
    # the on-disk metadata of the file being zipped, restoring it below.
    os.chmod(filename, perms)

    # A fixed mtime keeps the zip output repeatable.
    epoch = datetime.datetime.fromtimestamp(0)
    fixed_time = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
    os.utime(filename, (fixed_time, fixed_time))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    os.chmod(filename, original_stat.st_mode)
    os.utime(filename, (original_stat.st_atime, original_stat.st_mtime))
    zipfile.ZIP64_LIMIT = old_limit
942
943
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.

  Args:
    zip_file: the open ZipFile to add the entry to.
    zinfo_or_arcname: either a zipfile.ZipInfo, or an archive name string.
    data: the entry contents.
    perms: optional st_mode-style permission bits for the entry; when the
        entry is named by a string and perms is None, 0o100644 is used.
    compress_type: optional compression, overriding the ZipInfo's value.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
    zinfo.compress_type = zip_file.compression
    if perms is None:
      perms = 0o100644
  else:
    zinfo = zinfo_or_arcname

  # If compress_type is given, it overrides the value in zinfo.
  if compress_type is not None:
    zinfo.compress_type = compress_type

  # If perms is given, it has a priority.
  if perms is not None:
    # If perms doesn't set the file type, mark it as a regular file.
    if perms & 0o770000 == 0:
      perms |= 0o100000
    zinfo.external_attr = perms << 16

  # Use a fixed timestamp so the output is repeatable.
  zinfo.date_time = (2009, 1, 1, 0, 0, 0)

  try:
    zip_file.writestr(zinfo, data)
  finally:
    # Restore the limit even when writestr() raises, matching the
    # try/finally discipline used by ZipWrite().
    zipfile.ZIP64_LIMIT = saved_zip64_limit
984
985
def ZipClose(zip_file):
  """Close `zip_file` with the zip64 limit raised.

  http://b/18015246
  zipfile also refers to ZIP64_LIMIT during close() when it writes out the
  central directory, so the same workaround as ZipWrite() is needed here.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  try:
    zip_file.close()
  finally:
    # Restore the limit even when close() raises, matching the
    # try/finally discipline used by ZipWrite().
    zipfile.ZIP64_LIMIT = saved_zip64_limit
996
997
class DeviceSpecificParams(object):
  """Loads the device-specific releasetools extension module (if any) and
  dispatches the OTA hook calls below to it."""
  # Class-level attribute: once a module is loaded by any instance, it is
  # shared by all subsequently constructed instances.
  module = None
  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.iteritems():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        if os.path.isdir(path):
          # A directory: look for a "releasetools" module inside it.
          info = imp.find_module("releasetools", [path])
        else:
          # A file path: strip a ".py" extension (if present) and import
          # the module from its containing directory.
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        print "loaded device-specific extensions from", path
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        # Missing extensions are not an error; all hooks become no-ops.
        print "unable to load device-specific module; assuming none"

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")

  def VerifyOTA_Assertions(self):
    """Delegates to the device-specific module's VerifyOTA_Assertions
    hook, if it defines one."""
    return self._DoCall("VerifyOTA_Assertions")
1082
class File(object):
  """An in-memory file: an archive name plus its full contents.

  Attributes:
    name: the name the file is stored under (e.g. inside a zip).
    data: the raw contents.
    size: len(data).
    sha1: hex SHA-1 digest of data.
  """

  def __init__(self, name, data):
    self.name = name
    self.data = data
    self.size = len(data)
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Read the file at `diskname` and return it as a File named `name`."""
    # Use a context manager so the handle is closed even if read() raises,
    # and construct via cls so subclasses get instances of their own type.
    with open(diskname, "rb") as f:
      data = f.read()
    return cls(name, data)

  def WriteToTemp(self):
    """Write the contents to a NamedTemporaryFile and return it.

    The caller owns the returned handle; closing it also deletes the
    temporary file.
    """
    t = tempfile.NamedTemporaryFile()
    t.write(self.data)
    t.flush()
    return t

  def AddToZip(self, z, compression=None):
    """Add this file to the open zip archive `z` via ZipWriteStr()."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1105
# Diff program to use per target-file extension.  A value is either a plain
# executable name or an argv prefix (extra flags).  Extensions not listed
# here fall back to "bsdiff" (see Difference.ComputePatch).
DIFF_PROGRAM_BY_EXT = {
    ".gz" : "imgdiff",
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    ".img" : "imgdiff",
    }
1113
1114class Difference(object):
1115  def __init__(self, tf, sf, diff_program=None):
1116    self.tf = tf
1117    self.sf = sf
1118    self.patch = None
1119    self.diff_program = diff_program
1120
1121  def ComputePatch(self):
1122    """Compute the patch (as a string of data) needed to turn sf into
1123    tf.  Returns the same tuple as GetPatch()."""
1124
1125    tf = self.tf
1126    sf = self.sf
1127
1128    if self.diff_program:
1129      diff_program = self.diff_program
1130    else:
1131      ext = os.path.splitext(tf.name)[1]
1132      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
1133
1134    ttemp = tf.WriteToTemp()
1135    stemp = sf.WriteToTemp()
1136
1137    ext = os.path.splitext(tf.name)[1]
1138
1139    try:
1140      ptemp = tempfile.NamedTemporaryFile()
1141      if isinstance(diff_program, list):
1142        cmd = copy.copy(diff_program)
1143      else:
1144        cmd = [diff_program]
1145      cmd.append(stemp.name)
1146      cmd.append(ttemp.name)
1147      cmd.append(ptemp.name)
1148      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1149      err = []
1150      def run():
1151        _, e = p.communicate()
1152        if e:
1153          err.append(e)
1154      th = threading.Thread(target=run)
1155      th.start()
1156      th.join(timeout=300)   # 5 mins
1157      if th.is_alive():
1158        print "WARNING: diff command timed out"
1159        p.terminate()
1160        th.join(5)
1161        if th.is_alive():
1162          p.kill()
1163          th.join()
1164
1165      if err or p.returncode != 0:
1166        print "WARNING: failure running %s:\n%s\n" % (
1167            diff_program, "".join(err))
1168        self.patch = None
1169        return None, None, None
1170      diff = ptemp.read()
1171    finally:
1172      ptemp.close()
1173      stemp.close()
1174      ttemp.close()
1175
1176    self.patch = diff
1177    return self.tf, self.sf, self.patch
1178
1179
1180  def GetPatch(self):
1181    """Return a tuple (target_file, source_file, patch_data).
1182    patch_data may be None if ComputePatch hasn't been called, or if
1183    computing the patch failed."""
1184    return self.tf, self.sf, self.patch
1185
1186
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  print len(diffs), "diffs to compute"

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    try:
      # The lock guards diff_iter (and the progress printing).  It is held
      # while fetching the next item and printing results, and deliberately
      # released around the expensive ComputePatch() call so that workers
      # run their diffs concurrently.
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print "patching failed!                                  %s" % (name,)
        else:
          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception as e:
      # Surface the error on this worker and re-raise so the failure is
      # not silently swallowed by the thread.
      print e
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
1231
1232
class BlockDifference(object):
  """Computes a block-based diff for one partition at construction time
  (via blockimgdiff.BlockImageDiff) and emits the edify script fragments
  that verify and apply it."""

  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None):
    """partition is the partition name (used for on-device file names);
    tgt/src are sparse image objects (src=None means a full update).
    version selects the blockimgdiff transfer-list format; when None it
    is taken from info_dict's "blockimgdiff_versions" (default 1)."""
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block

    # Due to http://b/20939131, check_first_block is disabled temporarily.
    assert not self.check_first_block

    if version is None:
      version = 1
      if OPTIONS.info_dict:
        # Use the highest version listed as supported.
        version = max(
            int(i) for i in
            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    # The diff is computed eagerly here; outputs land under a fresh temp
    # dir that is recorded in OPTIONS.tempfiles for later cleanup.
    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version)
    tmpdir = tempfile.mkdtemp()
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)

    # For incremental updates the device path must come from the source
    # build's info dict.
    if src is None:
      _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
    else:
      _, self.device = GetTypeAndDevice("/" + partition,
                                        OPTIONS.source_info_dict)

  def WriteScript(self, script, output_zip, progress=None):
    """Emit the update commands (and post-install verification) for this
    partition into `script`, adding the data files to output_zip."""
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    self._WritePostInstallVerifyScript(script)

  def WriteStrictVerifyScript(self, script):
    """Verify all the blocks in the care_map, including clobbered blocks.

    This differs from the WriteVerifyScript() function: a) it prints different
    error messages; b) it doesn't allow half-way updated images to pass the
    verification."""

    partition = self.partition
    script.Print("Verifying %s..." % (partition,))
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('range_sha1("%s", "%s") == "%s" && '
                       'ui_print("    Verified.") || '
                       'ui_print("\\"%s\\" has unexpected contents.");' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True),
                       self.device))
    script.AppendExtra("")

  def WriteVerifyScript(self, script):
    """Emit the pre-install verification for this partition: checks that
    the source blocks (minus clobbered ones) match the expected SHA-1,
    or -- for versions >= 3 -- that an interrupted update can resume."""
    partition = self.partition
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))
    else:
      # Clobbered blocks (e.g. ones rewritten by the bootloader) are
      # excluded from the pre-install check.
      ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
      ranges_str = ranges.to_string_raw()
      if self.version >= 4:
        # Version 4 additionally tries block_image_recover() before
        # giving up, so a corrupted source may still be repaired.
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat") || '
                            '(block_image_recover("%s", "%s") && '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat"))) then') % (
                            self.device, ranges_str, self.src.TotalSha1(),
                            self.device, partition, partition, partition,
                            self.device, ranges_str,
                            self.device, partition, partition, partition))
      elif self.version == 3:
        # Version 3 accepts either a pristine source or a resumable
        # half-applied update (block_image_verify).
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, self.src.TotalSha1(),
                            self.device, partition, partition, partition))
      else:
        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                           self.device, ranges_str, self.src.TotalSha1()))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      # When generating incrementals for the system and vendor partitions,
      # explicitly check the first block (which contains the superblock) of
      # the partition to see if it's what we expect. If this check fails,
      # give an explicit log message about the partition having been
      # remounted R/W (the most likely explanation) and the need to flash to
      # get OTAs working again.
      if self.check_first_block:
        self._CheckFirstBlock(script)

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      script.AppendExtra(('abort("%s partition has unexpected contents");\n'
                          'endif;') % (partition,))

  def _WritePostInstallVerifyScript(self, script):
    """Emit the post-install check that the updated partition matches the
    target image, and that any extended blocks are zeroed."""
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                         self.device, ranges_str,
                         self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      script.AppendExtra(
          'else\n'
          '  abort("%s partition has unexpected non-zero contents after OTA '
          'update");\n'
          'endif;' % (partition,))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    script.AppendExtra(
        'else\n'
        '  abort("%s partition has unexpected contents after OTA update");\n'
        'endif;' % (partition,))

  def _WriteUpdate(self, script, output_zip):
    """Add the transfer list, new-data and patch-data files to output_zip
    and emit the block_image_update() call that consumes them."""
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    # The patch data is stored uncompressed -- presumably it is already
    # compressed and would not shrink further; TODO confirm.
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
                device=self.device, partition=self.partition))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    """Return the hex SHA-1 of the given block ranges read from source."""
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    # 4096 bytes per block -- assumed to match blockimgdiff's block size;
    # TODO confirm if that ever changes.
    zero_block = '\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()

  # TODO(tbao): Due to http://b/20939131, block 0 may be changed without
  # remounting R/W. Will change the checking to a finer-grained way to
  # mask off those bits.
  def _CheckFirstBlock(self, script):
    """Emit a check that block 0 of the source partition still matches the
    source image (i.e. the partition has not been remounted R/W)."""
    r = rangelib.RangeSet((0, 1))
    srchash = self._HashBlocks(self.src, r)

    script.AppendExtra(('(range_sha1("%s", "%s") == "%s") || '
                        'abort("%s has been remounted R/W; '
                        'reflash device to reenable OTA updates");')
                       % (self.device, r.to_string_raw(), srchash,
                          self.device))
1425
# Re-export blockimgdiff's DataImage so callers can refer to common.DataImage.
DataImage = blockimgdiff.DataImage
1427
1428
# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "yaffs2": "MTD",
    "mtd": "MTD",
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}

def GetTypeAndDevice(mount_point, info):
  """Return (partition_type, device) for the given mount point.

  Looks up `mount_point` in info["fstab"] and maps its fs_type through
  PARTITION_TYPES.

  Raises:
    KeyError: if `info` has no fstab, the mount point is not listed in
      it, or its fs_type is not in PARTITION_TYPES.
  """
  fstab = info["fstab"]
  if fstab:
    return (PARTITION_TYPES[fstab[mount_point].fs_type],
            fstab[mount_point].device)
  else:
    # Include the mount point so callers' error paths have some context.
    raise KeyError(mount_point)
1446
1447
1448def ParseCertificate(data):
1449  """Parse a PEM-format certificate."""
1450  cert = []
1451  save = False
1452  for line in data.split("\n"):
1453    if "--END CERTIFICATE--" in line:
1454      break
1455    if save:
1456      cert.append(line)
1457    if "--BEGIN CERTIFICATE--" in line:
1458      save = True
1459  cert = "".join(cert).decode('base64')
1460  return cert
1461
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.  (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.)  Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
  system_root_image = info_dict.get("system_root_image", None) == "true"

  if full_recovery_image:
    # Ship the complete recovery image; no on-device patching needed.
    output_sink("etc/recovery.img", recovery_img.data)

  else:
    diff_program = ["imgdiff"]
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      # Feed the shared resource file to imgdiff as a bonus input to
      # shrink the patch; the same flag must be passed to applypatch.
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"
    else:
      bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    # Without both entries in the fstab we cannot emit the install script.
    return

  # The generated shell script first checks whether recovery already has the
  # expected contents (applypatch -c) and only installs/patches otherwise.
  if full_recovery_image:
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release.  Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  found = False
  if system_root_image:
    init_rc_dir = os.path.join(input_dir, "ROOT")
  else:
    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):
      continue

    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
      for line in f:
        # The flash_recovery service names the script's on-device path.
        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
        if m:
          sh_location = m.group(1)
          found = True
          break

    if found:
      break

  print "putting script in", sh_location

  output_sink(sh_location, sh)
1564