# common.py revision 6f0b219ac551710c724e3f344023943178cdc217
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import errno
17import getopt
18import getpass
19import imp
20import os
21import platform
22import re
23import shlex
24import shutil
25import subprocess
26import sys
27import tempfile
28import threading
29import time
30import zipfile
31
32import blockimgdiff
33import rangelib
34
35from hashlib import sha1 as sha1
36
37
class Options(object):
  """Global configuration shared by the releasetools scripts.

  A single module-level instance (OPTIONS, below) is filled in by
  ParseOptions() and consulted by the rest of the tools.
  """

  def __init__(self):
    # Default host-tools output directory, keyed by interpreter platform.
    host_out_dirs = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }
    self.search_path = host_out_dirs.get(sys.platform, None)

    # Signing tool configuration.
    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = "-Xmx2048m" # JVM Args
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"
    self.boot_signer_args = []
    self.verity_signer_path = None
    self.verity_signer_args = []

    # Misc runtime state.
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.source_info_dict = None
    self.target_info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8
68
69
# Module-level singleton holding the command-line configuration; mutated by
# ParseOptions() and read throughout the releasetools scripts.
OPTIONS = Options()


# Values for "certificate" in apkcerts that mean special things.
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
75
76
class ExternalError(RuntimeError):
  """Raised when an external tool (unzip, mkbootimg, signapk, ...) fails."""
  pass
79
80
81def Run(args, **kwargs):
82  """Create and return a subprocess.Popen object, printing the command
83  line on the terminal if -v was specified."""
84  if OPTIONS.verbose:
85    print "  running: ", " ".join(args)
86  return subprocess.Popen(args, **kwargs)
87
88
def CloseInheritedPipes():
  """Close leaked pipe file descriptors (fds 3..1024) on Mac OS.

  Gmake on Mac OS leaks pipe fds into its children; close them before
  doing other work.  No-op on every other platform."""
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      fd_stat = os.fstat(fd)
      # S_IFIFO bit (0x1000) in st_mode marks a pipe/FIFO.
      if fd_stat is not None and (fd_stat[0] & 0x1000) != 0:
        os.close(fd)
    except OSError:
      # fd isn't open (or close failed); nothing to do.
      pass
103
104
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  Args:
    input_file: an opened zipfile.ZipFile of a target-files zip, or the
        path of an unzipped target-files directory.
    input_dir: if not None, the unzipped target-files directory; used to
        redirect properties that the build system stores as links into
        out/ (e.g. "selinux_fc") at the actual files in the unzipped tree.

  Returns:
    The misc_info dict, augmented with parsed "fstab" and "build.prop"
    entries and with known size keys converted to int.

  Raises:
    ValueError: if the recovery API version cannot be determined.
  """

  def read_helper(fn):
    # Return the contents of entry 'fn'; raise KeyError when it is absent,
    # mirroring zipfile.ZipFile.read()'s behavior for missing members.
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)
        # Fix: any other IOError (e.g. EACCES) used to be swallowed here,
        # making read_helper return None and fail confusingly downstream.
        raise

  d = {}
  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    # ok if misc_info.txt doesn't exist
    pass

  # backwards compatibility: These values used to be in their own
  # files.  Look for them, in case we're processing an old
  # target_files zip.

  if "mkyaffs2_extra_flags" not in d:
    try:
      d["mkyaffs2_extra_flags"] = read_helper(
          "META/mkyaffs2-extra-flags.txt").strip()
    except KeyError:
      # ok if flags don't exist
      pass

  if "recovery_api_version" not in d:
    try:
      d["recovery_api_version"] = read_helper(
          "META/recovery-api-version.txt").strip()
    except KeyError:
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    try:
      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
    except KeyError:
      # ok if extensions don't exist
      pass

  if "fstab_version" not in d:
    d["fstab_version"] = "1"

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

  # Legacy per-image size file: "<name> <value>" per line.  "blocksize"
  # keeps its name; everything else is stored as "<name>_size".
  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    pass

  def makeint(key):
    # Convert d[key] to int in place if present; base auto-detected (0).
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                 d.get("system_root_image", False))
  d["build.prop"] = LoadBuildProp(read_helper)
  return d
218
def LoadBuildProp(read_helper):
  """Read SYSTEM/build.prop via read_helper and parse it into a dict.

  Returns an empty dict (after printing a warning) when the file is
  missing (read_helper raises KeyError)."""
  try:
    data = read_helper("SYSTEM/build.prop")
  except KeyError:
    # Fix: the old message interpolated the *builtin* `zip` ("% zip"),
    # printing "<built-in function zip>".  No file name is in scope here,
    # so warn without one.
    print("Warning: could not find SYSTEM/build.prop in input target-files")
    data = ""
  return LoadDictionaryFromLines(data.split("\n"))
226
def LoadDictionaryFromLines(lines):
  """Parse "name=value" lines into a dict.

  Blank lines, "#" comments, and lines without "=" are skipped; only the
  first "=" splits the pair, so values may themselves contain "="."""
  result = {}
  for raw in lines:
    entry = raw.strip()
    if not entry or entry.startswith("#"):
      continue
    if "=" not in entry:
      continue
    key, _, value = entry.partition("=")
    result[key] = value
  return result
237
def LoadRecoveryFSTab(read_helper, fstab_version, system_root_image=False):
  """Parse RECOVERY/RAMDISK/etc/recovery.fstab into {mount_point: Partition}.

  Args:
    read_helper: callable taking an archive-relative path and returning its
        contents; must raise KeyError when the entry is missing.
    fstab_version: 1 or 2, the fstab format to expect.
    system_root_image: if True, the root directory is part of the system
        image; an alias entry mapping "/system" to "/" is added.

  Raises:
    ValueError: on a malformed fstab line or an unknown fstab_version.
  """
  class Partition(object):
    # Record describing one fstab entry.  'context' is the SELinux context
    # (v2 fstabs only); v1 entries leave it as None.
    def __init__(self, mount_point, fs_type, device, length, device2,
                 context=None):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.device2 = device2
      self.context = context

  try:
    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
  except KeyError:
    print("Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab")
    data = ""

  if fstab_version == 1:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # <mount_point> <fs_type> <device> [<device2>|<options>]
      pieces = line.split()
      if not 3 <= len(pieces) <= 4:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
      options = None
      if len(pieces) >= 4:
        if pieces[3].startswith("/"):
          device2 = pieces[3]
          if len(pieces) >= 5:
            options = pieces[4]
        else:
          device2 = None
          options = pieces[3]
      else:
        device2 = None

      mount_point = pieces[0]
      length = 0
      if options:
        options = options.split(",")
        for i in options:
          if i.startswith("length="):
            length = int(i[7:])
          else:
            print("%s: unknown option \"%s\"" % (mount_point, i))

      # Fix: Partition.__init__ requires 'context', but it was omitted here,
      # raising TypeError for any v1 fstab.  v1 carries no SELinux context,
      # so pass None explicitly (the parameter also defaults to None now).
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
                                 device=pieces[2], length=length,
                                 device2=device2, context=None)

  elif fstab_version == 2:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
      pieces = line.split()
      if len(pieces) != 5:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

      # Ignore entries that are managed by vold
      options = pieces[4]
      if "voldmanaged=" in options:
        continue

      # It's a good line, parse it
      length = 0
      options = options.split(",")
      for i in options:
        if i.startswith("length="):
          length = int(i[7:])
        else:
          # Ignore all unknown options in the unified fstab
          continue

      mount_flags = pieces[3]
      # Honor the SELinux context if present.
      context = None
      for i in mount_flags.split(","):
        if i.startswith("context="):
          context = i

      mount_point = pieces[1]
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                                 device=pieces[0], length=length,
                                 device2=None, context=context)

  else:
    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))

  # / is used for the system mount point when the root directory is included
  # in system.  Other areas assume system is always at "/system" so point
  # /system at /.  ("in" replaces the Python-2-only dict.has_key().)
  if system_root_image:
    assert "/system" not in d and "/" in d
    d["/system"] = d["/"]
  return d
337
338
def DumpInfoDict(d):
  """Print each key of d (sorted) with its value's type name and value."""
  for key, value in sorted(d.items()):
    print("%-25s = (%s) %s" % (key, type(value).__name__, value))
342
343
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input
  (in 'sourcedir'), and turn them into a boot image.  Return the image data,
  or None if sourcedir does not appear to contain files for building the
  requested image.

  Args:
    sourcedir: directory holding the unpacked inputs ("kernel", "cmdline",
        and optionally "RAMDISK/", "second", "base", "pagesize").
    fs_config_file: filesystem config passed to mkbootfs -f; ignored if it
        does not exist.
    info_dict: build properties dict; defaults to OPTIONS.info_dict.
    has_ramdisk: whether a ramdisk should be packed into the image.
  """

  def make_ramdisk():
    # cpio up RAMDISK/ (with fs_config_file, when present) and gzip it into
    # a temp file; returns the open NamedTemporaryFile.
    ramdisk_img = tempfile.NamedTemporaryFile()

    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
    else:
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

    p2.wait()
    p1.wait()
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

    return ramdisk_img

  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    return None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_img = make_ramdisk()

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  # Optional single-value inputs, each stored as a small file in sourcedir.
  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  # With vboot, mkbootimg writes to a separate unsigned temp file which the
  # signer then turns into the final image.
  img_unsigned = None
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
  else:
    cmd.extend(["--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  if (info_dict.get("boot_signer", None) == "true" and
      info_dict.get("verity_key", None)):
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path]
    cmd.extend(OPTIONS.boot_signer_args)
    cmd.extend([path, img.name,
                info_dict["verity_key"] + ".pk8",
                info_dict["verity_key"] + ".x509.pem", img.name])
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk", img_keyblock.name,
           img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # Fix: seek() takes (offset, whence); the arguments were swapped as
  # seek(os.SEEK_SET, 0).  It only worked because os.SEEK_SET == 0.
  img.seek(0, os.SEEK_SET)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data
465
466
def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
                     info_dict=None):
  """Return a File object with the desired bootable image.

  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
  the source files in 'unpack_dir'/'tree_subdir'."""

  # Prefer prebuilts, checking the two conventional locations in order.
  prebuilt = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
  if os.path.exists(prebuilt):
    print("using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,))
    return File.FromLocalFile(name, prebuilt)

  prebuilt = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
  if os.path.exists(prebuilt):
    print("using prebuilt %s from IMAGES..." % (prebuilt_name,))
    return File.FromLocalFile(name, prebuilt)

  print("building image from target_files %s..." % (tree_subdir,))

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # With system_root_image == "true", we don't pack ramdisk into the boot
  # image (the root dir lives in the system image instead).
  has_ramdisk = (info_dict.get("system_root_image", None) != "true" or
                 prebuilt_name != "boot.img")

  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
                             os.path.join(unpack_dir, fs_config),
                             info_dict, has_ramdisk)
  return File(name, data) if data else None
501
502
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """

  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(zip_path, dest_dir):
    # Shell out to unzip; raise ExternalError on any failure.
    cmd = ["unzip", "-o", "-q", zip_path, "-d", dest_dir]
    if pattern is not None:
      cmd.append(pattern)
    proc = Run(cmd, stdout=subprocess.PIPE)
    proc.communicate()
    if proc.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (zip_path,))

  joined = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if joined:
    unzip_to_dir(joined.group(1), tmp)
    unzip_to_dir(joined.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    filename = joined.group(1)
  else:
    unzip_to_dir(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")
535
536
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password.

  Each .pk8 key is probed with openssl: keys that parse with -nocrypt
  need no password; keys that decrypt with the empty password map to '';
  keys that fail to decrypt are collected and resolved interactively via
  PasswordManager.
  """

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    # First probe: does the key parse as an unencrypted DER key?
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Second probe: encrypted key with the empty string as password?
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords
584
585
def SignFile(input_name, output_name, key, password, align=None,
             whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.  Use the
  given key and password (the latter may be None if the key does not
  have a password.

  If align is an integer > 1, zipalign is run to align stored files in
  the output zip on 'align'-byte boundaries.

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  Raises:
    ExternalError: if signapk.jar or zipalign exits non-zero.
  """

  # An alignment of 0 or 1 is a no-op; normalize to "no alignment".
  if align == 0 or align == 1:
    align = None

  if align:
    # Sign into a temp file first; zipalign writes the real output below.
    temp = tempfile.NamedTemporaryFile()
    sign_name = temp.name
  else:
    sign_name = output_name

  cmd = [OPTIONS.java_path, OPTIONS.java_args, "-jar",
         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
  cmd.extend(OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")
  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, sign_name])

  # Feed the password (if any) to signapk on stdin.
  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  if password is not None:
    password += "\n"
  p.communicate(password)
  if p.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))

  if align:
    p = Run(["zipalign", "-f", "-p", str(align), sign_name, output_name])
    p.communicate()
    if p.returncode != 0:
      raise ExternalError("zipalign failed: return code %s" % (p.returncode,))
    temp.close()
631
632
def CheckSize(data, target, info_dict):
  """Check the data string passed against the max size limit, if
  any, for the given target.  Raise exception if the data is too big.
  Print a warning if the data is nearing the maximum size.

  The limit comes from info_dict["<device>_size"], where <device> is the
  basename of the partition's block device from the fstab.  Silently
  returns when no fstab entry or no limit is known.
  """

  # Map "foo.img" -> mount point "/foo".
  if target.endswith(".img"):
    target = target[:-4]
  mount_point = "/" + target

  fs_type = None
  limit = None
  if info_dict["fstab"]:
    if mount_point == "/userdata":
      # The userdata image is mounted at /data.
      mount_point = "/data"
    p = info_dict["fstab"][mount_point]
    fs_type = p.fs_type
    device = p.device
    if "/" in device:
      # Use only the basename of the block device as the size key.
      device = device[device.rfind("/")+1:]
    limit = info_dict.get(device + "_size", None)
  if not fs_type or not limit:
    return

  if fs_type == "yaffs2":
    # image size should be increased by 1/64th to account for the
    # spare area (64 bytes per 2k page)
    # (note: integer/floor division under Python 2)
    limit = limit / 2048 * (2048+64)
  size = len(data)
  pct = float(size) * 100.0 / limit
  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
  if pct >= 99.0:
    raise ExternalError(msg)
  elif pct >= 95.0:
    print
    print "  WARNING: ", msg
    print
  elif OPTIONS.verbose:
    print "  ", msg
671
672
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict."""
  certmap = {}
  line_re = re.compile(r'^name="(.*)"\s+certificate="(.*)"\s+'
                       r'private_key="(.*)"$')
  for raw_line in tf_zip.read("META/apkcerts.txt").split("\n"):
    raw_line = raw_line.strip()
    if not raw_line:
      continue
    m = line_re.match(raw_line)
    if not m:
      # Non-matching lines are silently ignored.
      continue
    name, cert, privkey = m.groups()
    pub_suffix_len = len(OPTIONS.public_key_suffix)
    priv_suffix_len = len(OPTIONS.private_key_suffix)
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      # "PRESIGNED"/"EXTERNAL" entries keep the special string itself.
      certmap[name] = cert
    elif (cert.endswith(OPTIONS.public_key_suffix) and
          privkey.endswith(OPTIONS.private_key_suffix) and
          cert[:-pub_suffix_len] == privkey[:-priv_suffix_len]):
      # Store the shared key basename, without either suffix.
      certmap[name] = cert[:-pub_suffix_len]
    else:
      raise ValueError("failed to parse line from apkcerts.txt:\n" + raw_line)
  return certmap
696
697
# Usage text for the options handled here by ParseOptions(); appended to
# each tool's own docstring by Usage().
COMMON_DOCSTRING = """
  -p  (--path)  <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the python module containing device-specific
      releasetools code.

  -x  (--extra)  <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""
717
def Usage(docstring):
  """Print the tool's usage: its module docstring plus the common options."""
  print(docstring.rstrip("\n"))
  print(COMMON_DOCSTRING)
721
722
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler.

  Side effects: fills in the global OPTIONS from the parsed flags, and
  prepends OPTIONS.search_path/bin to $PATH when a search path is set.
  Exits the process on -h/--help or on a getopt error.
  """

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
         "verity_signer_path=", "verity_signer_args=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print "**", str(err), "**"
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("--boot_signer_args",):
      OPTIONS.boot_signer_args = shlex.split(a)
    elif o in ("--verity_signer_path",):
      OPTIONS.verity_signer_path = a
    elif o in ("--verity_signer_args",):
      OPTIONS.verity_signer_args = shlex.split(a)
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      # Unknown to us: give the caller's handler a chance before failing.
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  if OPTIONS.search_path:
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
789
790
def MakeTempFile(prefix=None, suffix=None):
  """Create a temp file, register it for deletion by Cleanup(), and
  return its filename."""
  handle, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  os.close(handle)
  OPTIONS.tempfiles.append(path)
  return path
798
799
def Cleanup():
  """Delete every temp file/dir registered in OPTIONS.tempfiles.

  Fix: the list is cleared afterwards, so calling Cleanup() a second
  time is a no-op instead of raising OSError on already-deleted paths.
  """
  for path in OPTIONS.tempfiles:
    if os.path.isdir(path):
      shutil.rmtree(path)
    else:
      os.remove(path)
  del OPTIONS.tempfiles[:]
806
807
class PasswordManager(object):
  """Collects key passwords, preferring an editable password file.

  When $ANDROID_PW_FILE and $EDITOR are both set, the user edits the
  password file in place; otherwise each password is prompted for
  interactively with getpass.
  """

  def __init__(self):
    # Either may be None; UpdateAndReadFile() then falls back to prompting.
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      # Seed blank entries so the missing keys appear in the edited file.
      for i in missing:
        current[i] = ""

      if not first:
        print "key file %s still missing some passwords." % (self.pwfile,)
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.iteritems()):
      if v:
        result[k] = v
      else:
        # Re-prompt until a non-empty password is entered.
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    # Without an editor and a password file, fall back to plain prompting.
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    os.chmod(self.pwfile, 0o600)  # the file holds passwords: owner-only
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    # Sort so that keys still missing a password appear first in the file.
    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        first_line = i + 4
    f.close()

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  def ReadFile(self):
    # Parse the password file into {key: password}; a missing file is fine.
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          print "failed to parse password file: ", line
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      if e.errno != errno.ENOENT:
        print "error reading password file: ", str(e)
    return result
908
909
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Add 'filename' to 'zip_file' with normalized, repeatable metadata.

  The file's permissions are forced to 'perms' and its mtime to a fixed
  date (2009-01-01) while it is being added, so the archive contents are
  deterministic; the original mode and times are restored afterwards.
  """
  import datetime

  # http://b/18015246
  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB, so raise the limit for the duration of the
  # call.  (zipfile.writestr() has its own large-string problems; see
  # ZipWriteStr() for details.)  Avoidable once we port to python3.
  limit_backup = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  if compress_type is None:
    compress_type = zip_file.compression
  if arcname is None:
    arcname = filename

  stat_backup = os.stat(filename)

  try:
    # zipfile.write() doesn't let us pass a ZipInfo, so temporarily rewrite
    # the on-disk metadata it reads, restoring it when done.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable.
    epoch_start = datetime.datetime.fromtimestamp(0)
    fixed_mtime = (datetime.datetime(2009, 1, 1) - epoch_start).total_seconds()
    os.utime(filename, (fixed_mtime, fixed_mtime))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    os.chmod(filename, stat_backup.st_mode)
    os.utime(filename, (stat_backup.st_atime, stat_backup.st_mtime))
    zipfile.ZIP64_LIMIT = limit_backup
948
949
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.

  Args:
    zip_file: The ZipFile instance to write into.
    zinfo_or_arcname: Either a zipfile.ZipInfo used as-is, or an archive
        name string from which a ZipInfo is constructed.
    data: The contents to store.
    perms: Optional Unix permission bits; defaults to 0o100644 when an
        arcname string is given, otherwise the ZipInfo is left untouched.
    compress_type: Optional compression type; overrides the ZipInfo's.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  try:
    if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
      zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
      zinfo.compress_type = zip_file.compression
      if perms is None:
        perms = 0o100644
    else:
      zinfo = zinfo_or_arcname

    # If compress_type is given, it overrides the value in zinfo.
    if compress_type is not None:
      zinfo.compress_type = compress_type

    # If perms is given, it has a priority.
    if perms is not None:
      # If perms doesn't set the file type, mark it as a regular file.
      if perms & 0o770000 == 0:
        perms |= 0o100000
      zinfo.external_attr = perms << 16

    # Use a fixed timestamp so the output is repeatable.
    zinfo.date_time = (2009, 1, 1, 0, 0, 0)

    zip_file.writestr(zinfo, data)
  finally:
    # Restore the limit even if writestr() throws (e.g. OverflowError on
    # oversized input); previously a failure here leaked the raised limit
    # to every later zipfile operation.
    zipfile.ZIP64_LIMIT = saved_zip64_limit
990
991
def ZipClose(zip_file):
  """Close zip_file with the zip64 limit raised (http://b/18015246).

  zipfile also refers to ZIP64_LIMIT during close() when it writes out
  the central directory, so the same workaround as in ZipWrite() and
  ZipWriteStr() is needed here.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  try:
    zip_file.close()
  finally:
    # Restore the limit even if close() raises; previously a failure
    # here leaked the raised limit to every later zipfile operation.
    zipfile.ZIP64_LIMIT = saved_zip64_limit
1002
1003
class DeviceSpecificParams(object):
  """Loads and dispatches to an optional device-specific extensions
  module (configured via OPTIONS.device_specific), giving devices hooks
  into OTA package generation.
  """
  # The device-specific module; None until successfully loaded (or if no
  # module is configured / loading fails).  May also be preset via a
  # "module" keyword argument to the constructor.
  module = None
  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.iteritems():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        if os.path.isdir(path):
          # A directory: look for a "releasetools" module inside it.
          info = imp.find_module("releasetools", [path])
        else:
          # A file path: strip a ".py" extension (if any) and import the
          # module from its containing directory.
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        print "loaded device-specific extensions from", path
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        # Best-effort: a missing/broken module just means no hooks run.
        print "unable to load device-specific module; assuming none"

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")
1085
class File(object):
  """An in-memory file: a name plus its contents, with the size and
  SHA-1 digest computed up front."""

  def __init__(self, name, data):
    self.name = name
    self.data = data
    self.size = len(data)
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Build a File named `name` from the on-disk file `diskname`."""
    with open(diskname, "rb") as fp:
      contents = fp.read()
    return File(name, contents)

  def WriteToTemp(self):
    """Dump the contents to a NamedTemporaryFile and return it; the
    caller is responsible for closing it."""
    tmp = tempfile.NamedTemporaryFile()
    tmp.write(self.data)
    tmp.flush()
    return tmp

  def AddToZip(self, z, compression=None):
    """Store this file in the open zip `z` via ZipWriteStr()."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1108
# Map from target-file extension to the diff tool (argv prefix) used for
# that file type.  Archive/image formats get imgdiff (with -z for
# zip-based containers); extensions not listed fall back to bsdiff (see
# Difference.ComputePatch).
DIFF_PROGRAM_BY_EXT = {
    ".gz" : "imgdiff",
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    ".img" : "imgdiff",
    }
1116
1117class Difference(object):
1118  def __init__(self, tf, sf, diff_program=None):
1119    self.tf = tf
1120    self.sf = sf
1121    self.patch = None
1122    self.diff_program = diff_program
1123
1124  def ComputePatch(self):
1125    """Compute the patch (as a string of data) needed to turn sf into
1126    tf.  Returns the same tuple as GetPatch()."""
1127
1128    tf = self.tf
1129    sf = self.sf
1130
1131    if self.diff_program:
1132      diff_program = self.diff_program
1133    else:
1134      ext = os.path.splitext(tf.name)[1]
1135      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
1136
1137    ttemp = tf.WriteToTemp()
1138    stemp = sf.WriteToTemp()
1139
1140    ext = os.path.splitext(tf.name)[1]
1141
1142    try:
1143      ptemp = tempfile.NamedTemporaryFile()
1144      if isinstance(diff_program, list):
1145        cmd = copy.copy(diff_program)
1146      else:
1147        cmd = [diff_program]
1148      cmd.append(stemp.name)
1149      cmd.append(ttemp.name)
1150      cmd.append(ptemp.name)
1151      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1152      err = []
1153      def run():
1154        _, e = p.communicate()
1155        if e:
1156          err.append(e)
1157      th = threading.Thread(target=run)
1158      th.start()
1159      th.join(timeout=300)   # 5 mins
1160      if th.is_alive():
1161        print "WARNING: diff command timed out"
1162        p.terminate()
1163        th.join(5)
1164        if th.is_alive():
1165          p.kill()
1166          th.join()
1167
1168      if err or p.returncode != 0:
1169        print "WARNING: failure running %s:\n%s\n" % (
1170            diff_program, "".join(err))
1171        self.patch = None
1172        return None, None, None
1173      diff = ptemp.read()
1174    finally:
1175      ptemp.close()
1176      stemp.close()
1177      ttemp.close()
1178
1179    self.patch = diff
1180    return self.tf, self.sf, self.patch
1181
1182
1183  def GetPatch(self):
1184    """Return a tuple (target_file, source_file, patch_data).
1185    patch_data may be None if ComputePatch hasn't been called, or if
1186    computing the patch failed."""
1187    return self.tf, self.sf, self.patch
1188
1189
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'.

  Runs OPTIONS.worker_threads threads in parallel and returns once every
  patch has been computed.  Progress and failures are reported on stdout.
  """
  print len(diffs), "diffs to compute"

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    # Each worker repeatedly pulls the next Difference off the shared
    # iterator.  The lock is held while touching diff_iter and while
    # printing results, and released around the (slow) ComputePatch
    # call so other workers can proceed.
    try:
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print "patching failed!                                  %s" % (name,)
        else:
          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception as e:
      # Report and re-raise so the failure isn't silently lost in the
      # worker thread.
      print e
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
1234
1235
class BlockDifference(object):
  """Generates and emits a block-based (transfer-list) update for one
  partition.

  On construction, the transfer list and data files are computed via
  blockimgdiff.BlockImageDiff into a temporary directory; the
  Write*Script methods then emit the edify script fragments that
  verify, apply, and post-verify the update.
  """
  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None):
    """Compute the block diff from src to tgt for `partition`.

    If src is None, a full (unconditional) image write is generated.
    If version is None, it defaults to the highest value listed in
    info_dict's "blockimgdiff_versions" (or 1 if unavailable).
    """
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block

    # Due to http://b/20939131, check_first_block is disabled temporarily.
    assert not self.check_first_block

    if version is None:
      version = 1
      if OPTIONS.info_dict:
        version = max(
            int(i) for i in
            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version)
    tmpdir = tempfile.mkdtemp()
    # Register the temp dir so the caller's cleanup removes it.
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)

    # NOTE(review): the block device is looked up in source_info_dict
    # even when src is None (full writes) -- confirm this is intended
    # rather than using the target info dict.
    _, self.device = GetTypeAndDevice("/" + partition,
                                      OPTIONS.source_info_dict)

  def WriteScript(self, script, output_zip, progress=None):
    """Emit the update (plus post-install verification) for this
    partition into `script`, adding the data files to output_zip."""
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    self._WritePostInstallVerifyScript(script)

  def WriteVerifyScript(self, script):
    """Emit the pre-install check that the source partition still has
    the expected contents (no-op for full, src-less writes)."""
    partition = self.partition
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))
    else:
      # Clobbered blocks are expected to differ, so exclude them from
      # the source hash check.
      ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
      ranges_str = ranges.to_string_raw()
      if self.version >= 3:
        # v3+ also accepts a partially/fully updated partition:
        # block_image_verify checks whether the update can be (or has
        # already been) applied, which allows resuming.
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, self.src.TotalSha1(),
                            self.device, partition, partition, partition))
      else:
        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                           self.device, ranges_str, self.src.TotalSha1()))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      # When generating incrementals for the system and vendor partitions,
      # explicitly check the first block (which contains the superblock) of
      # the partition to see if it's what we expect. If this check fails,
      # give an explicit log message about the partition having been
      # remounted R/W (the most likely explanation) and the need to flash to
      # get OTAs working again.
      if self.check_first_block:
        self._CheckFirstBlock(script)

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      script.AppendExtra(('abort("%s partition has unexpected contents");\n'
                          'endif;') % (partition,))

  def _WritePostInstallVerifyScript(self, script):
    """Emit the check that, after applying the update, the partition
    hashes to the expected target contents (and that any extended
    blocks are zeroed)."""
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                         self.device, ranges_str,
                         self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      script.AppendExtra(
          'else\n'
          '  abort("%s partition has unexpected non-zero contents after OTA '
          'update");\n'
          'endif;' % (partition,))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    script.AppendExtra(
        'else\n'
        '  abort("%s partition has unexpected contents after OTA update");\n'
        'endif;' % (partition,))

  def _WriteUpdate(self, script, output_zip):
    """Add the transfer list and data files to output_zip and emit the
    block_image_update() call that applies them."""
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    # The patch data is already compressed; store it uncompressed.
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
                device=self.device, partition=self.partition))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    """Return the SHA-1 (hex) of the given block ranges read from
    `source`."""
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    # 4096 bytes per block, matching the hashes emitted above.
    zero_block = '\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()

  # TODO(tbao): Due to http://b/20939131, block 0 may be changed without
  # remounting R/W. Will change the checking to a finer-grained way to
  # mask off those bits.
  def _CheckFirstBlock(self, script):
    """Emit a check that block 0 of the source partition (superblock)
    is unchanged, aborting with a remounted-R/W message otherwise."""
    r = rangelib.RangeSet((0, 1))
    srchash = self._HashBlocks(self.src, r)

    script.AppendExtra(('(range_sha1("%s", "%s") == "%s") || '
                        'abort("%s has been remounted R/W; '
                        'reflash device to reenable OTA updates");')
                       % (self.device, r.to_string_raw(), srchash,
                          self.device))
1393
# Re-export: DataImage lives in blockimgdiff but is reachable here as
# common.DataImage.
DataImage = blockimgdiff.DataImage


# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "yaffs2": "MTD",
    "mtd": "MTD",
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}
1406
def GetTypeAndDevice(mount_point, info):
  """Look up mount_point in info["fstab"].

  Args:
    mount_point: The mount point to look up, e.g. "/system".
    info: An info dict containing an "fstab" entry (a mapping from
        mount points to objects with fs_type and device attributes).

  Returns:
    A (partition_type, device) tuple, where partition_type is one of
    the values in PARTITION_TYPES (e.g. "EMMC" or "MTD").

  Raises:
    KeyError: If there is no fstab, or the mount point (or its fs_type)
        is not known.
  """
  fstab = info["fstab"]
  if not fstab:
    # Include the mount point so callers' error paths have context; the
    # previous bare `raise KeyError` carried no information.
    raise KeyError(mount_point)
  return (PARTITION_TYPES[fstab[mount_point].fs_type],
          fstab[mount_point].device)
1414
1415
def ParseCertificate(data):
  """Parse a PEM-format certificate and return its DER contents.

  Collects the base64 payload between the BEGIN CERTIFICATE and END
  CERTIFICATE markers and decodes it.  Text before the BEGIN marker
  (and the markers themselves) is ignored.
  """
  import base64

  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  # str.decode('base64') only exists on Python 2; base64.b64decode is
  # equivalent (same binascii backend) and works on Python 3 as well.
  return base64.b64decode("".join(cert))
1429
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.  (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.)  Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  # "full_recovery_image" devices ship the complete recovery image
  # instead of a boot->recovery patch; "system_root_image" affects
  # where the init rc files are found below.
  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
  system_root_image = info_dict.get("system_root_image", None) == "true"

  if full_recovery_image:
    output_sink("etc/recovery.img", recovery_img.data)

  else:
    diff_program = ["imgdiff"]
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      # Let imgdiff borrow data from the shared resource file; the
      # install script passes the same file to applypatch via the
      # bonus_args below.
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"
    else:
      bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    # The following GetTypeAndDevice()s need to use the path in the target
    # info_dict instead of source_info_dict.
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    # No fstab info for these partitions; skip emitting the script.
    return

  if full_recovery_image:
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release.  Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  found = False
  if system_root_image:
    init_rc_dir = os.path.join(input_dir, "ROOT")
  else:
    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):
      continue

    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
      for line in f:
        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
        if m:
          # Use the location the flash_recovery service expects.
          sh_location = m.group(1)
          found = True
          break

    if found:
      break

  print "putting script in", sh_location

  output_sink(sh_location, sh)
1532