common.py revision 79735a6a82f7f3d60cd86bf293b8a6b4e1d92768
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import errno
17import getopt
18import getpass
19import imp
20import os
21import platform
22import re
23import shlex
24import shutil
25import subprocess
26import sys
27import tempfile
28import threading
29import time
30import zipfile
31
32import blockimgdiff
33import rangelib
34
35from hashlib import sha1 as sha1
36
37
class Options(object):
  """Global configuration shared by the releasetools scripts.

  A single module-level instance (OPTIONS, below) is mutated by
  ParseOptions() and read everywhere else.
  """

  def __init__(self):
    # Default host-tools output directory, keyed by interpreter platform.
    search_paths = {
        "linux2": "out/host/linux-x86",
        "darwin": "out/host/darwin-x86",
    }
    self.search_path = search_paths.get(sys.platform, None)

    # Signing configuration.
    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
    self.extra_signapk_args = []
    self.java_path = "java"  # Use the one on the path by default.
    self.java_args = "-Xmx2048m" # JVM Args
    self.public_key_suffix = ".x509.pem"
    self.private_key_suffix = ".pk8"
    # use otatools built boot_signer by default
    self.boot_signer_path = "boot_signer"

    # General behavior.
    self.verbose = False
    self.tempfiles = []
    self.device_specific = None
    self.extras = {}
    self.info_dict = None
    self.worker_threads = None
    # Stash size cannot exceed cache_size * threshold.
    self.cache_size = None
    self.stash_threshold = 0.8


OPTIONS = Options()
66
67
# Values for "certificate" in apkcerts that mean special things.  These
# entries are not real signing keys: GetKeyPasswords() skips them when
# collecting passwords, and ReadApkCerts() maps the package name straight
# to the string itself (see the `cert in SPECIAL_CERT_STRINGS` branch).
SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
70
71
class ExternalError(RuntimeError):
  """Raised when an external tool (unzip, signapk.jar, zipalign, ...)
  exits with a non-zero return code."""
74
75
76def Run(args, **kwargs):
77  """Create and return a subprocess.Popen object, printing the command
78  line on the terminal if -v was specified."""
79  if OPTIONS.verbose:
80    print "  running: ", " ".join(args)
81  return subprocess.Popen(args, **kwargs)
82
83
def CloseInheritedPipes():
  """Close leaked pipe file descriptors inherited from gmake on Mac OS.

  Gmake in MAC OS has a file descriptor (PIPE) leak; close those fds
  before doing other work.  No-op on every other platform."""
  if platform.system() != "Darwin":
    return
  for fd in range(3, 1025):
    try:
      # st_mode bit 0x1000 is the FIFO (pipe) file-type bit; only close
      # descriptors that are pipes.
      if os.fstat(fd).st_mode & 0x1000:
        os.close(fd)
    except OSError:
      # fd not open (or already closed) -- ignore and keep scanning.
      pass
98
99
def LoadInfoDict(input_file, input_dir=None):
  """Read and parse the META/misc_info.txt key/value pairs from the
  input target files and return a dict.

  Args:
    input_file: an open zipfile.ZipFile of a target-files zip, or the
        path of an unzipped target-files directory.
    input_dir: optional path to the unzipped target-files directory.
        When not None we are repacking, and the file-path properties
        ("selinux_fc", "ramdisk_dir", "ramdisk_fs_config") are redirected
        to the actual files inside it.

  Returns:
    The info dict, including the parsed "fstab" and "build.prop" entries.

  Raises:
    ValueError: if no recovery API version can be found.
  """

  # Reads an archive-relative path from either the zip or the extracted
  # tree; raises KeyError when the entry is missing, mirroring
  # zipfile.ZipFile.read()'s behavior for absent members.
  def read_helper(fn):
    if isinstance(input_file, zipfile.ZipFile):
      return input_file.read(fn)
    else:
      path = os.path.join(input_file, *fn.split("/"))
      try:
        with open(path) as f:
          return f.read()
      except IOError as e:
        if e.errno == errno.ENOENT:
          raise KeyError(fn)
        # NOTE(review): any other IOError falls through and returns None;
        # callers expect a string or a KeyError -- confirm this is intended.
  d = {}
  try:
    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
  except KeyError:
    # ok if misc_info.txt doesn't exist
    pass

  # backwards compatibility: These values used to be in their own
  # files.  Look for them, in case we're processing an old
  # target_files zip.

  if "mkyaffs2_extra_flags" not in d:
    try:
      d["mkyaffs2_extra_flags"] = read_helper(
          "META/mkyaffs2-extra-flags.txt").strip()
    except KeyError:
      # ok if flags don't exist
      pass

  if "recovery_api_version" not in d:
    try:
      d["recovery_api_version"] = read_helper(
          "META/recovery-api-version.txt").strip()
    except KeyError:
      raise ValueError("can't find recovery API version in input target-files")

  if "tool_extensions" not in d:
    try:
      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
    except KeyError:
      # ok if extensions don't exist
      pass

  # Default to the v1 fstab format when unspecified.
  if "fstab_version" not in d:
    d["fstab_version"] = "1"

  # A few properties are stored as links to the files in the out/ directory.
  # It works fine with the build system. However, they are no longer available
  # when (re)generating from target_files zip. If input_dir is not None, we
  # are doing repacking. Redirect those properties to the actual files in the
  # unzipped directory.
  if input_dir is not None:
    # We carry a copy of file_contexts.bin under META/. If not available,
    # search BOOT/RAMDISK/. Note that sometimes we may need a different file
    # to build images than the one running on device, such as when enabling
    # system_root_image. In that case, we must have the one for image
    # generation copied to META/.
    fc_basename = os.path.basename(d.get("selinux_fc", "file_contexts"))
    fc_config = os.path.join(input_dir, "META", fc_basename)
    if d.get("system_root_image") == "true":
      assert os.path.exists(fc_config)
    if not os.path.exists(fc_config):
      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", fc_basename)
      if not os.path.exists(fc_config):
        fc_config = None

    if fc_config:
      d["selinux_fc"] = fc_config

    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
    if d.get("system_root_image") == "true":
      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
      d["ramdisk_fs_config"] = os.path.join(
          input_dir, "META", "root_filesystem_config.txt")

  # Legacy per-image size file: "name value" pairs, where "blocksize" is
  # stored as-is and every other name gets a "_size" suffix.
  try:
    data = read_helper("META/imagesizes.txt")
    for line in data.split("\n"):
      if not line:
        continue
      name, value = line.split(" ", 1)
      if not value:
        continue
      if name == "blocksize":
        d[name] = value
      else:
        d[name + "_size"] = value
  except KeyError:
    pass

  # Convert numeric properties from string to int.  Base 0 lets int()
  # auto-detect hex ("0x...") and octal prefixes.
  def makeint(key):
    if key in d:
      d[key] = int(d[key], 0)

  makeint("recovery_api_version")
  makeint("blocksize")
  makeint("system_size")
  makeint("vendor_size")
  makeint("userdata_size")
  makeint("cache_size")
  makeint("recovery_size")
  makeint("boot_size")
  makeint("fstab_version")

  d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
                                 d.get("system_root_image", False))
  d["build.prop"] = LoadBuildProp(read_helper)
  return d
213
def LoadBuildProp(read_helper):
  """Parse SYSTEM/build.prop into a {property: value} dict.

  Args:
    read_helper: a callable mapping an archive-relative path to its file
        contents, raising KeyError when the entry does not exist.

  Returns:
    The parsed property dict; empty if SYSTEM/build.prop is absent.
  """
  try:
    data = read_helper("SYSTEM/build.prop")
  except KeyError:
    # BUG FIX: the message used to interpolate the *builtin* `zip`
    # function ("%s" % zip -> "<built-in function zip>").  No file name
    # is in scope here, so just warn without one.
    print("Warning: could not find SYSTEM/build.prop")
    data = ""
  return LoadDictionaryFromLines(data.split("\n"))
221
def LoadDictionaryFromLines(lines):
  """Parse an iterable of "name=value" lines into a dict.

  Blank lines and lines starting with '#' (after stripping surrounding
  whitespace) are skipped, as are lines with no '='.  Only the first '='
  separates name from value, so values may themselves contain '='."""
  result = {}
  for raw in lines:
    stripped = raw.strip()
    if not stripped or stripped.startswith("#"):
      continue
    if "=" not in stripped:
      continue
    name, _, value = stripped.partition("=")
    result[name] = value
  return result
232
def LoadRecoveryFSTab(read_helper, fstab_version, system_root_image=False):
  """Parse RECOVERY/RAMDISK/etc/recovery.fstab into {mount_point: Partition}.

  Args:
    read_helper: a callable mapping an archive-relative path to its file
        contents, raising KeyError when the entry does not exist.
    fstab_version: 1 or 2, selecting the fstab line format.
    system_root_image: if True, alias "/system" to the "/" entry, since
        other code assumes system is always mounted at "/system".

  Returns:
    A dict mapping each mount point to a Partition record.

  Raises:
    ValueError: on a malformed fstab line or an unknown fstab_version.
  """
  class Partition(object):
    # BUG FIX: `context` now defaults to None.  The version-1 parser
    # below never passes it, which previously made every v1 fstab raise
    # TypeError when constructing Partition.
    def __init__(self, mount_point, fs_type, device, length, device2,
                 context=None):
      self.mount_point = mount_point
      self.fs_type = fs_type
      self.device = device
      self.length = length
      self.device2 = device2
      self.context = context

  try:
    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
  except KeyError:
    print("Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab")
    data = ""

  if fstab_version == 1:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      pieces = line.split()
      if not 3 <= len(pieces) <= 4:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
      options = None
      if len(pieces) >= 4:
        if pieces[3].startswith("/"):
          device2 = pieces[3]
          # NOTE(review): unreachable given the <= 4 length check above;
          # kept for fidelity with the original parser.
          if len(pieces) >= 5:
            options = pieces[4]
        else:
          device2 = None
          options = pieces[3]
      else:
        device2 = None

      mount_point = pieces[0]
      length = 0
      if options:
        options = options.split(",")
        for i in options:
          if i.startswith("length="):
            length = int(i[7:])
          else:
            print("%s: unknown option \"%s\"" % (mount_point, i))

      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
                                 device=pieces[2], length=length,
                                 device2=device2)

  elif fstab_version == 2:
    d = {}
    for line in data.split("\n"):
      line = line.strip()
      if not line or line.startswith("#"):
        continue
      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
      pieces = line.split()
      if len(pieces) != 5:
        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))

      # Ignore entries that are managed by vold
      options = pieces[4]
      if "voldmanaged=" in options:
        continue

      # It's a good line, parse it
      length = 0
      options = options.split(",")
      for i in options:
        if i.startswith("length="):
          length = int(i[7:])
        else:
          # Ignore all unknown options in the unified fstab
          continue

      mount_flags = pieces[3]
      # Honor the SELinux context if present.
      context = None
      for i in mount_flags.split(","):
        if i.startswith("context="):
          context = i

      mount_point = pieces[1]
      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
                                 device=pieces[0], length=length,
                                 device2=None, context=context)

  else:
    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))

  # / is used for the system mount point when the root directory is included in
  # system. Other areas assume system is always at "/system" so point /system
  # at /.  (has_key() replaced with "in" -- same behavior, py3-compatible.)
  if system_root_image:
    assert "/system" not in d and "/" in d
    d["/system"] = d["/"]
  return d
332
333
def DumpInfoDict(d):
  """Print the info dict: one "key = (type) value" line per entry,
  sorted by key.

  Uses the function-call form of print, which emits identical output
  under Python 2 for a single argument and also parses under Python 3.
  """
  for k, v in sorted(d.items()):
    print("%-25s = (%s) %s" % (k, type(v).__name__, v))
337
338
def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
                        has_ramdisk=False):
  """Build a bootable image from the specified sourcedir.

  Take a kernel, cmdline, and optionally a ramdisk directory from the input
  (in 'sourcedir'), and turn them into a boot image.

  Args:
    sourcedir: directory containing a "kernel" file, optional "second",
        "cmdline", "base" and "pagesize" files, and (when has_ramdisk)
        a "RAMDISK" subdirectory.
    fs_config_file: path passed to mkbootfs via -f when it exists.
    info_dict: build info dict; defaults to OPTIONS.info_dict.  Read for
        "mkbootimg_args", "vboot*" and "verity_key" settings.
    has_ramdisk: if True, build a gzipped ramdisk and add it to the image.

  Returns:
    The image data as a string, or None if sourcedir does not appear to
    contain the files needed for the requested image.
  """

  def make_ramdisk():
    # mkbootfs | minigzip -> gzipped cpio ramdisk in a temp file.
    ramdisk_img = tempfile.NamedTemporaryFile()

    if os.access(fs_config_file, os.F_OK):
      cmd = ["mkbootfs", "-f", fs_config_file,
             os.path.join(sourcedir, "RAMDISK")]
    else:
      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
    p1 = Run(cmd, stdout=subprocess.PIPE)
    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())

    p2.wait()
    p1.wait()
    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)

    return ramdisk_img

  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
    return None

  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
    return None

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  img = tempfile.NamedTemporaryFile()

  if has_ramdisk:
    ramdisk_img = make_ramdisk()

  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"

  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]

  # Optional inputs: each is appended only when present in sourcedir.
  fn = os.path.join(sourcedir, "second")
  if os.access(fn, os.F_OK):
    cmd.append("--second")
    cmd.append(fn)

  fn = os.path.join(sourcedir, "cmdline")
  if os.access(fn, os.F_OK):
    cmd.append("--cmdline")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "base")
  if os.access(fn, os.F_OK):
    cmd.append("--base")
    cmd.append(open(fn).read().rstrip("\n"))

  fn = os.path.join(sourcedir, "pagesize")
  if os.access(fn, os.F_OK):
    cmd.append("--pagesize")
    cmd.append(open(fn).read().rstrip("\n"))

  args = info_dict.get("mkbootimg_args", None)
  if args and args.strip():
    cmd.extend(shlex.split(args))

  if has_ramdisk:
    cmd.extend(["--ramdisk", ramdisk_img.name])

  # With vboot, mkbootimg writes to an intermediate file that the signer
  # consumes; otherwise it writes the final image directly.
  img_unsigned = None
  if info_dict.get("vboot", None):
    img_unsigned = tempfile.NamedTemporaryFile()
    cmd.extend(["--output", img_unsigned.name])
  else:
    cmd.extend(["--output", img.name])

  p = Run(cmd, stdout=subprocess.PIPE)
  p.communicate()
  assert p.returncode == 0, "mkbootimg of %s image failed" % (
      os.path.basename(sourcedir),)

  if info_dict.get("verity_key", None):
    path = "/" + os.path.basename(sourcedir).lower()
    cmd = [OPTIONS.boot_signer_path, path, img.name,
           info_dict["verity_key"] + ".pk8",
           info_dict["verity_key"] + ".x509.pem", img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "boot_signer of %s image failed" % path

  # Sign the image if vboot is non-empty.
  elif info_dict.get("vboot", None):
    path = "/" + os.path.basename(sourcedir).lower()
    img_keyblock = tempfile.NamedTemporaryFile()
    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
           info_dict["vboot_key"] + ".vbprivk", img_keyblock.name,
           img.name]
    p = Run(cmd, stdout=subprocess.PIPE)
    p.communicate()
    assert p.returncode == 0, "vboot_signer of %s image failed" % path

    # Clean up the temp files.
    img_unsigned.close()
    img_keyblock.close()

  # BUG FIX: this was img.seek(os.SEEK_SET, 0) -- the arguments were
  # swapped (seek() takes (offset, whence)); it only worked because
  # os.SEEK_SET happens to be 0.
  img.seek(0, os.SEEK_SET)
  data = img.read()

  if has_ramdisk:
    ramdisk_img.close()
  img.close()

  return data
457
458
459def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
460                     info_dict=None):
461  """Return a File object with the desired bootable image.
462
463  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
464  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
465  the source files in 'unpack_dir'/'tree_subdir'."""
466
467  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
468  if os.path.exists(prebuilt_path):
469    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
470    return File.FromLocalFile(name, prebuilt_path)
471
472  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
473  if os.path.exists(prebuilt_path):
474    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
475    return File.FromLocalFile(name, prebuilt_path)
476
477  print "building image from target_files %s..." % (tree_subdir,)
478
479  if info_dict is None:
480    info_dict = OPTIONS.info_dict
481
482  # With system_root_image == "true", we don't pack ramdisk into the boot image.
483  has_ramdisk = (info_dict.get("system_root_image", None) != "true" or
484                 prebuilt_name != "boot.img")
485
486  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
487  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
488                             os.path.join(unpack_dir, fs_config),
489                             info_dict, has_ramdisk)
490  if data:
491    return File(name, data)
492  return None
493
494
def UnzipTemp(filename, pattern=None):
  """Unzip the given archive into a temporary directory and return the name.

  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.

  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
  main file), open for reading.
  """

  tmp = tempfile.mkdtemp(prefix="targetfiles-")
  # Register the directory so Cleanup() removes it later.
  OPTIONS.tempfiles.append(tmp)

  def unzip_to_dir(zip_path, dest_dir):
    cmd = ["unzip", "-o", "-q", zip_path, "-d", dest_dir]
    if pattern is not None:
      cmd.append(pattern)
    proc = Run(cmd, stdout=subprocess.PIPE)
    proc.communicate()
    if proc.returncode != 0:
      raise ExternalError("failed to unzip input target-files \"%s\"" %
                          (zip_path,))

  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
  if m:
    # "foo.zip+bar.zip" form: the second archive lands in BOOTABLE_IMAGES.
    unzip_to_dir(m.group(1), tmp)
    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
    filename = m.group(1)
  else:
    unzip_to_dir(filename, tmp)

  return tmp, zipfile.ZipFile(filename, "r")
527
528
def GetKeyPasswords(keylist):
  """Given a list of keys, prompt the user to enter passwords for
  those which require them.  Return a {key: password} dict.  password
  will be None if the key has no password.

  Each key is probed with openssl to classify it as unencrypted,
  encrypted with an empty password, or encrypted with a real password;
  only the last group is actually prompted for (via PasswordManager).
  """

  no_passwords = []
  need_passwords = []
  key_passwords = {}
  devnull = open("/dev/null", "w+b")
  for k in sorted(keylist):
    # We don't need a password for things that aren't really keys.
    if k in SPECIAL_CERT_STRINGS:
      no_passwords.append(k)
      continue

    # First attempt: read the key with no decryption at all.
    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
             "-inform", "DER", "-nocrypt"],
            stdin=devnull.fileno(),
            stdout=devnull.fileno(),
            stderr=subprocess.STDOUT)
    p.communicate()
    if p.returncode == 0:
      # Definitely an unencrypted key.
      no_passwords.append(k)
    else:
      # Second attempt: empty password, to distinguish "encrypted with
      # empty password" from "needs a real password".
      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
               "-inform", "DER", "-passin", "pass:"],
              stdin=devnull.fileno(),
              stdout=devnull.fileno(),
              stderr=subprocess.PIPE)
      _, stderr = p.communicate()
      if p.returncode == 0:
        # Encrypted key with empty string as password.
        key_passwords[k] = ''
      elif stderr.startswith('Error decrypting key'):
        # Definitely encrypted key.
        # It would have said "Error reading key" if it didn't parse correctly.
        need_passwords.append(k)
      else:
        # Potentially, a type of key that openssl doesn't understand.
        # We'll let the routines in signapk.jar handle it.
        no_passwords.append(k)
  devnull.close()

  # Prompt (or read the password file) for the keys that need one;
  # everything else maps to None.
  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
  key_passwords.update(dict.fromkeys(no_passwords, None))
  return key_passwords
576
577
def SignFile(input_name, output_name, key, password, align=None,
             whole_file=False):
  """Sign the input_name zip/jar/apk, producing output_name.

  Use the given key and password (the latter may be None if the key does
  not have a password).

  If align is an integer > 1, zipalign is run to align stored files in
  the output zip on 'align'-byte boundaries.

  If whole_file is true, use the "-w" option to SignApk to embed a
  signature that covers the whole file in the archive comment of the
  zip file.

  Raises:
    ExternalError: if signapk.jar or zipalign exits non-zero.
  """

  # 0 and 1 mean "no alignment requested".
  if align in (0, 1):
    align = None

  # When aligning, sign into a temp file first, then zipalign into place.
  if align:
    temp = tempfile.NamedTemporaryFile()
    sign_name = temp.name
  else:
    sign_name = output_name

  cmd = [OPTIONS.java_path, OPTIONS.java_args, "-jar",
         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
  cmd.extend(OPTIONS.extra_signapk_args)
  if whole_file:
    cmd.append("-w")
  cmd.extend([key + OPTIONS.public_key_suffix,
              key + OPTIONS.private_key_suffix,
              input_name, sign_name])

  proc = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
  # The password (if any) is fed to signapk.jar on stdin.
  proc.communicate(password + "\n" if password is not None else None)
  if proc.returncode != 0:
    raise ExternalError("signapk.jar failed: return code %s" %
                        (proc.returncode,))

  if align:
    proc = Run(["zipalign", "-f", "-p", str(align), sign_name, output_name])
    proc.communicate()
    if proc.returncode != 0:
      raise ExternalError("zipalign failed: return code %s" %
                          (proc.returncode,))
    temp.close()
623
624
625def CheckSize(data, target, info_dict):
626  """Check the data string passed against the max size limit, if
627  any, for the given target.  Raise exception if the data is too big.
628  Print a warning if the data is nearing the maximum size."""
629
630  if target.endswith(".img"):
631    target = target[:-4]
632  mount_point = "/" + target
633
634  fs_type = None
635  limit = None
636  if info_dict["fstab"]:
637    if mount_point == "/userdata":
638      mount_point = "/data"
639    p = info_dict["fstab"][mount_point]
640    fs_type = p.fs_type
641    device = p.device
642    if "/" in device:
643      device = device[device.rfind("/")+1:]
644    limit = info_dict.get(device + "_size", None)
645  if not fs_type or not limit:
646    return
647
648  if fs_type == "yaffs2":
649    # image size should be increased by 1/64th to account for the
650    # spare area (64 bytes per 2k page)
651    limit = limit / 2048 * (2048+64)
652  size = len(data)
653  pct = float(size) * 100.0 / limit
654  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
655  if pct >= 99.0:
656    raise ExternalError(msg)
657  elif pct >= 95.0:
658    print
659    print "  WARNING: ", msg
660    print
661  elif OPTIONS.verbose:
662    print "  ", msg
663
664
def ReadApkCerts(tf_zip):
  """Given a target_files ZipFile, parse the META/apkcerts.txt file
  and return a {package: cert} dict.

  Raises:
    ValueError: when a matching line has inconsistent cert/key names.
  """
  line_re = re.compile(r'^name="(.*)"\s+certificate="(.*)"\s+'
                       r'private_key="(.*)"$')
  pub_suffix = OPTIONS.public_key_suffix
  priv_suffix = OPTIONS.private_key_suffix

  certmap = {}
  for raw in tf_zip.read("META/apkcerts.txt").split("\n"):
    line = raw.strip()
    if not line:
      continue
    m = line_re.match(line)
    if not m:
      continue

    name, cert, privkey = m.groups()
    if cert in SPECIAL_CERT_STRINGS and not privkey:
      # Not a real key; store the marker string itself.
      certmap[name] = cert
    elif (cert.endswith(pub_suffix) and privkey.endswith(priv_suffix) and
          cert[:-len(pub_suffix)] == privkey[:-len(priv_suffix)]):
      # Cert and key must share the same stem; store it without suffix.
      certmap[name] = cert[:-len(pub_suffix)]
    else:
      raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
  return certmap
688
689
# Usage text shared by every releasetools script; Usage() prints it after
# the calling module's own docstring.  These are the flags ParseOptions()
# itself always accepts.
COMMON_DOCSTRING = """
  -p  (--path)  <dir>
      Prepend <dir>/bin to the list of places to search for binaries
      run by this script, and expect to find jars in <dir>/framework.

  -s  (--device_specific) <file>
      Path to the python module containing device-specific
      releasetools code.

  -x  (--extra)  <key=value>
      Add a key/value pair to the 'extras' dict, which device-specific
      extension code may look at.

  -v  (--verbose)
      Show command lines being executed.

  -h  (--help)
      Display this usage message and exit.
"""
709
710def Usage(docstring):
711  print docstring.rstrip("\n")
712  print COMMON_DOCSTRING
713
714
def ParseOptions(argv,
                 docstring,
                 extra_opts="", extra_long_opts=(),
                 extra_option_handler=None):
  """Parse the options in argv and return any arguments that aren't
  flags.  docstring is the calling module's docstring, to be displayed
  for errors and -h.  extra_opts and extra_long_opts are for flags
  defined by the caller, which are processed by passing them to
  extra_option_handler.

  Side effects: mutates the module-level OPTIONS instance, and prepends
  OPTIONS.search_path/bin to $PATH when a search path is set.  Exits the
  process on -h/--help or on a getopt parse error.
  """

  try:
    opts, args = getopt.getopt(
        argv, "hvp:s:x:" + extra_opts,
        ["help", "verbose", "path=", "signapk_path=", "extra_signapk_args=",
         "java_path=", "java_args=", "public_key_suffix=",
         "private_key_suffix=", "boot_signer_path=", "device_specific=",
         "extra="] +
        list(extra_long_opts))
  except getopt.GetoptError as err:
    Usage(docstring)
    print "**", str(err), "**"
    sys.exit(2)

  for o, a in opts:
    if o in ("-h", "--help"):
      Usage(docstring)
      sys.exit()
    elif o in ("-v", "--verbose"):
      OPTIONS.verbose = True
    elif o in ("-p", "--path"):
      OPTIONS.search_path = a
    elif o in ("--signapk_path",):
      OPTIONS.signapk_path = a
    elif o in ("--extra_signapk_args",):
      OPTIONS.extra_signapk_args = shlex.split(a)
    elif o in ("--java_path",):
      OPTIONS.java_path = a
    elif o in ("--java_args",):
      OPTIONS.java_args = a
    elif o in ("--public_key_suffix",):
      OPTIONS.public_key_suffix = a
    elif o in ("--private_key_suffix",):
      OPTIONS.private_key_suffix = a
    elif o in ("--boot_signer_path",):
      OPTIONS.boot_signer_path = a
    elif o in ("-s", "--device_specific"):
      OPTIONS.device_specific = a
    elif o in ("-x", "--extra"):
      key, value = a.split("=", 1)
      OPTIONS.extras[key] = value
    else:
      # Flags not handled above fall through to the caller's handler;
      # anything still unrecognized is a programming error.
      if extra_option_handler is None or not extra_option_handler(o, a):
        assert False, "unknown option \"%s\"" % (o,)

  if OPTIONS.search_path:
    # Make tools under <search_path>/bin take precedence on $PATH.
    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
                          os.pathsep + os.environ["PATH"])

  return args
774
775
def MakeTempFile(prefix=None, suffix=None):
  """Make a temp file and add it to the list of things to be deleted
  when Cleanup() is called.  Return the filename."""
  fd, path = tempfile.mkstemp(prefix=prefix, suffix=suffix)
  # Close the descriptor immediately; callers reopen the file by name.
  os.close(fd)
  OPTIONS.tempfiles.append(path)
  return path
783
784
def Cleanup():
  """Delete every temp file and directory registered in
  OPTIONS.tempfiles (see MakeTempFile / UnzipTemp)."""
  for path in OPTIONS.tempfiles:
    if os.path.isdir(path):
      shutil.rmtree(path)
    else:
      os.remove(path)
791
792
class PasswordManager(object):
  """Collects key passwords, preferring an editable password file.

  If $ANDROID_PW_FILE is set and $EDITOR is available, passwords are
  gathered by letting the user edit that file; otherwise each one is
  prompted for interactively via getpass.
  """

  def __init__(self):
    self.editor = os.getenv("EDITOR", None)
    self.pwfile = os.getenv("ANDROID_PW_FILE", None)

  def GetPasswords(self, items):
    """Get passwords corresponding to each string in 'items',
    returning a dict.  (The dict may have keys in addition to the
    values in 'items'.)

    Uses the passwords in $ANDROID_PW_FILE if available, letting the
    user edit that file to add more needed passwords.  If no editor is
    available, or $ANDROID_PW_FILE isn't defined, prompts the user
    interactively in the ordinary way.

    Raises:
      RuntimeError: if the user declines to re-edit a file that is
          still missing passwords.
    """

    current = self.ReadFile()

    first = True
    while True:
      missing = []
      for i in items:
        if i not in current or not current[i]:
          missing.append(i)
      # Are all the passwords already in the file?
      if not missing:
        return current

      # Seed empty entries so they show up in the edited file.
      for i in missing:
        current[i] = ""

      if not first:
        print "key file %s still missing some passwords." % (self.pwfile,)
        answer = raw_input("try to edit again? [y]> ").strip()
        if answer and answer[0] not in 'yY':
          raise RuntimeError("key passwords unavailable")
      first = False

      current = self.UpdateAndReadFile(current)

  def PromptResult(self, current): # pylint: disable=no-self-use
    """Prompt the user to enter a value (password) for each key in
    'current' whose value is false.  Returns a new dict with all the
    values.
    """
    result = {}
    for k, v in sorted(current.iteritems()):
      if v:
        result[k] = v
      else:
        # Re-prompt until a non-empty password is entered.
        while True:
          result[k] = getpass.getpass(
              "Enter password for %s key> " % k).strip()
          if result[k]:
            break
    return result

  def UpdateAndReadFile(self, current):
    """Write 'current' to the password file, let the user edit it, and
    return the re-parsed contents.  Falls back to PromptResult() when
    no editor or password file is configured."""
    if not self.editor or not self.pwfile:
      return self.PromptResult(current)

    f = open(self.pwfile, "w")
    # Passwords on disk: restrict the file to its owner.
    os.chmod(self.pwfile, 0o600)
    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
    f.write("# (Additional spaces are harmless.)\n\n")

    first_line = None
    # Sort so entries with missing passwords come first.
    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
    for i, (_, k, v) in enumerate(sorted_list):
      f.write("[[[  %s  ]]] %s\n" % (v, k))
      if not v and first_line is None:
        # position cursor on first line with no password.
        # (entries begin at file line 4, after the two comment lines
        # and the blank line written above)
        first_line = i + 4
    f.close()

    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
    _, _ = p.communicate()

    return self.ReadFile()

  def ReadFile(self):
    """Parse the password file into a {key: password} dict.  Returns {}
    when the file is unset or absent; malformed lines are reported and
    skipped."""
    result = {}
    if self.pwfile is None:
      return result
    try:
      f = open(self.pwfile, "r")
      for line in f:
        line = line.strip()
        if not line or line[0] == '#':
          continue
        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
        if not m:
          print "failed to parse password file: ", line
        else:
          result[m.group(2)] = m.group(1)
      f.close()
    except IOError as e:
      # Missing file is fine (no passwords saved yet); report anything else.
      if e.errno != errno.ENOENT:
        print "error reading password file: ", str(e)
    return result
893
894
def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
             compress_type=None):
  """Add the file at 'filename' to zip_file with deterministic metadata.

  Temporarily chmods the on-disk file to 'perms' and sets a fixed mtime
  (2009-01-01) so zipfile.write() records repeatable metadata, then
  restores the original mode and times.
  """
  import datetime

  # http://b/18015246
  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
  # for files larger than 2GiB. We can work around this by adjusting their
  # limit. Note that `zipfile.writestr()` will not work for strings larger than
  # 2GiB. The Python interpreter sometimes rejects strings that large (though
  # it isn't clear to me exactly what circumstances cause this).
  # `zipfile.write()` must be used directly to work around this.
  #
  # This mess can be avoided if we port to python3.
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  compress_type = (zip_file.compression if compress_type is None
                   else compress_type)
  arcname = filename if arcname is None else arcname

  original_stat = os.stat(filename)

  try:
    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
    # file to be zipped and reset it when we're done.
    os.chmod(filename, perms)

    # Use a fixed timestamp so the output is repeatable.
    epoch = datetime.datetime.fromtimestamp(0)
    timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
    os.utime(filename, (timestamp, timestamp))

    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
  finally:
    # Always restore the original mode/times and the zip64 limit.
    os.chmod(filename, original_stat.st_mode)
    os.utime(filename, (original_stat.st_atime, original_stat.st_mtime))
    zipfile.ZIP64_LIMIT = saved_zip64_limit
933
934
def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
                compress_type=None):
  """Wrap zipfile.writestr() function to work around the zip64 limit.

  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
  when calling crc32(bytes).

  But it still works fine to write a shorter string into a large zip file.
  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
  when we know the string won't be too long.

  Args:
    zip_file: The ZipFile to add the entry to.
    zinfo_or_arcname: Either a zipfile.ZipInfo (used as-is) or the archive
        name for the entry; in the latter case a ZipInfo is built with the
        zip's default compression and (unless perms overrides it) 0o100644.
    data: The contents of the entry.
    perms: Optional permission bits; takes priority over any ZipInfo value.
    compress_type: Optional compression method; overrides the ZipInfo value.
  """

  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Restore ZIP64_LIMIT even if writestr() throws; previously a failed
  # write left the module-level limit permanently altered.  ZipWrite()
  # already guards it with finally in the same way.
  try:
    if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
      zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
      zinfo.compress_type = zip_file.compression
      if perms is None:
        perms = 0o100644
    else:
      zinfo = zinfo_or_arcname

    # If compress_type is given, it overrides the value in zinfo.
    if compress_type is not None:
      zinfo.compress_type = compress_type

    # If perms is given, it has a priority.
    if perms is not None:
      # If perms doesn't set the file type, mark it as a regular file.
      if perms & 0o770000 == 0:
        perms |= 0o100000
      zinfo.external_attr = perms << 16

    # Use a fixed timestamp so the output is repeatable.
    zinfo.date_time = (2009, 1, 1, 0, 0, 0)

    zip_file.writestr(zinfo, data)
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
975
976
def ZipClose(zip_file):
  """Close zip_file with the zip64 limit raised.

  http://b/18015246
  zipfile also refers to ZIP64_LIMIT during close() when it writes out the
  central directory.
  """
  saved_zip64_limit = zipfile.ZIP64_LIMIT
  zipfile.ZIP64_LIMIT = (1 << 32) - 1

  # Restore the limit even if close() throws; previously a failure here
  # left the module-level limit altered for all later zipfile operations.
  try:
    zip_file.close()
  finally:
    zipfile.ZIP64_LIMIT = saved_zip64_limit
987
988
class DeviceSpecificParams(object):
  """Loads the optional device-specific releasetools extension module
  (from OPTIONS.device_specific) and forwards OTA-generation hook calls
  into it."""
  # Class-level default; set per-instance once a module is loaded (or if
  # a 'module' kwarg is supplied).
  module = None
  def __init__(self, **kwargs):
    """Keyword arguments to the constructor become attributes of this
    object, which is passed to all functions in the device-specific
    module."""
    for k, v in kwargs.iteritems():
      setattr(self, k, v)
    self.extras = OPTIONS.extras

    if self.module is None:
      path = OPTIONS.device_specific
      if not path:
        return
      try:
        if os.path.isdir(path):
          # A directory: look for a "releasetools" module inside it.
          info = imp.find_module("releasetools", [path])
        else:
          # A file: import it by name from its directory, stripping a
          # ".py" extension if present.
          d, f = os.path.split(path)
          b, x = os.path.splitext(f)
          if x == ".py":
            f = b
          info = imp.find_module(f, [d])
        print "loaded device-specific extensions from", path
        self.module = imp.load_module("device_specific", *info)
      except ImportError:
        # Running without extensions is supported; all hooks become no-ops.
        print "unable to load device-specific module; assuming none"

  def _DoCall(self, function_name, *args, **kwargs):
    """Call the named function in the device-specific module, passing
    the given args and kwargs.  The first argument to the call will be
    the DeviceSpecific object itself.  If there is no module, or the
    module does not define the function, return the value of the
    'default' kwarg (which itself defaults to None)."""
    if self.module is None or not hasattr(self.module, function_name):
      return kwargs.get("default", None)
    return getattr(self.module, function_name)(*((self,) + args), **kwargs)

  def FullOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of a
    full OTA package.  Implementations can add whatever additional
    assertions they like."""
    return self._DoCall("FullOTA_Assertions")

  def FullOTA_InstallBegin(self):
    """Called at the start of full OTA installation."""
    return self._DoCall("FullOTA_InstallBegin")

  def FullOTA_InstallEnd(self):
    """Called at the end of full OTA installation; typically this is
    used to install the image for the device's baseband processor."""
    return self._DoCall("FullOTA_InstallEnd")

  def IncrementalOTA_Assertions(self):
    """Called after emitting the block of assertions at the top of an
    incremental OTA package.  Implementations can add whatever
    additional assertions they like."""
    return self._DoCall("IncrementalOTA_Assertions")

  def IncrementalOTA_VerifyBegin(self):
    """Called at the start of the verification phase of incremental
    OTA installation; additional checks can be placed here to abort
    the script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyBegin")

  def IncrementalOTA_VerifyEnd(self):
    """Called at the end of the verification phase of incremental OTA
    installation; additional checks can be placed here to abort the
    script before any changes are made."""
    return self._DoCall("IncrementalOTA_VerifyEnd")

  def IncrementalOTA_InstallBegin(self):
    """Called at the start of incremental OTA installation (after
    verification is complete)."""
    return self._DoCall("IncrementalOTA_InstallBegin")

  def IncrementalOTA_InstallEnd(self):
    """Called at the end of incremental OTA installation; typically
    this is used to install the image for the device's baseband
    processor."""
    return self._DoCall("IncrementalOTA_InstallEnd")
1070
class File(object):
  """An in-memory file: a name plus its data, with the size and SHA-1
  digest computed up front."""

  def __init__(self, name, data):
    self.name = name
    self.data = data
    self.size = len(data)
    self.sha1 = sha1(data).hexdigest()

  @classmethod
  def FromLocalFile(cls, name, diskname):
    """Build a File named `name` from the contents of `diskname` on disk."""
    fp = open(diskname, "rb")
    contents = fp.read()
    fp.close()
    return File(name, contents)

  def WriteToTemp(self):
    """Dump the data into a NamedTemporaryFile and return it (the caller
    is responsible for closing it)."""
    tmp = tempfile.NamedTemporaryFile()
    tmp.write(self.data)
    tmp.flush()
    return tmp

  def AddToZip(self, z, compression=None):
    """Store this file's contents in zip `z` via ZipWriteStr()."""
    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1093
# Maps a filename extension to the diff tool (a program name, or an argv
# prefix list) best suited to that file type.  Extensions not listed here
# fall back to "bsdiff" (see Difference.ComputePatch).
DIFF_PROGRAM_BY_EXT = {
    ".gz" : "imgdiff",
    ".zip" : ["imgdiff", "-z"],
    ".jar" : ["imgdiff", "-z"],
    ".apk" : ["imgdiff", "-z"],
    ".img" : "imgdiff",
    }
1101
class Difference(object):
  """Computes a binary patch (via imgdiff/bsdiff) that turns source file
  `sf` into target file `tf`."""
  def __init__(self, tf, sf, diff_program=None):
    # tf/sf are File objects; diff_program optionally overrides the
    # extension-based tool selection in ComputePatch().
    self.tf = tf
    self.sf = sf
    self.patch = None
    self.diff_program = diff_program

  def ComputePatch(self):
    """Compute the patch (as a string of data) needed to turn sf into
    tf.  Returns the same tuple as GetPatch()."""

    tf = self.tf
    sf = self.sf

    if self.diff_program:
      diff_program = self.diff_program
    else:
      # Pick the tool by target extension; default to bsdiff.
      ext = os.path.splitext(tf.name)[1]
      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")

    ttemp = tf.WriteToTemp()
    stemp = sf.WriteToTemp()

    # NOTE(review): ext is recomputed here but never used below.
    ext = os.path.splitext(tf.name)[1]

    try:
      ptemp = tempfile.NamedTemporaryFile()
      if isinstance(diff_program, list):
        cmd = copy.copy(diff_program)
      else:
        cmd = [diff_program]
      cmd.append(stemp.name)
      cmd.append(ttemp.name)
      cmd.append(ptemp.name)
      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
      err = []
      # Drain the child's output on a helper thread so we can enforce a
      # timeout on the diff command from this thread.
      def run():
        _, e = p.communicate()
        if e:
          err.append(e)
      th = threading.Thread(target=run)
      th.start()
      th.join(timeout=300)   # 5 mins
      if th.is_alive():
        # Escalate: terminate, then kill if it still won't die.
        print "WARNING: diff command timed out"
        p.terminate()
        th.join(5)
        if th.is_alive():
          p.kill()
          th.join()

      if err or p.returncode != 0:
        print "WARNING: failure running %s:\n%s\n" % (
            diff_program, "".join(err))
        self.patch = None
        return None, None, None
      diff = ptemp.read()
    finally:
      ptemp.close()
      stemp.close()
      ttemp.close()

    self.patch = diff
    return self.tf, self.sf, self.patch


  def GetPatch(self):
    """Return a tuple (target_file, source_file, patch_data).
    patch_data may be None if ComputePatch hasn't been called, or if
    computing the patch failed."""
    return self.tf, self.sf, self.patch
1173
1174
def ComputeDifferences(diffs):
  """Call ComputePatch on all the Difference objects in 'diffs'."""
  print len(diffs), "diffs to compute"

  # Do the largest files first, to try and reduce the long-pole effect.
  by_size = [(i.tf.size, i) for i in diffs]
  by_size.sort(reverse=True)
  by_size = [i[1] for i in by_size]

  lock = threading.Lock()
  diff_iter = iter(by_size)   # accessed under lock

  def worker():
    # Lock protocol: the lock is held whenever the shared iterator is
    # advanced or results are printed, and released around the (slow)
    # ComputePatch() call so workers can diff in parallel.
    try:
      lock.acquire()
      for d in diff_iter:
        lock.release()
        start = time.time()
        d.ComputePatch()
        dur = time.time() - start
        lock.acquire()

        tf, sf, patch = d.GetPatch()
        if sf.name == tf.name:
          name = tf.name
        else:
          name = "%s (%s)" % (tf.name, sf.name)
        if patch is None:
          print "patching failed!                                  %s" % (name,)
        else:
          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
      lock.release()
    except Exception as e:
      print e
      raise

  # start worker threads; wait for them all to finish.
  threads = [threading.Thread(target=worker)
             for i in range(OPTIONS.worker_threads)]
  for th in threads:
    th.start()
  while threads:
    threads.pop().join()
1219
1220
class BlockDifference(object):
  """Wraps blockimgdiff.BlockImageDiff for one partition: computes the
  transfer list / data files up front, then emits the edify script
  fragments that verify and apply the block-based update."""
  def __init__(self, partition, tgt, src=None, check_first_block=False,
               version=None):
    # tgt/src are image objects (src=None means a full, unconditional
    # update rather than an incremental one).
    self.tgt = tgt
    self.src = src
    self.partition = partition
    self.check_first_block = check_first_block

    # Due to http://b/20939131, check_first_block is disabled temporarily.
    assert not self.check_first_block

    if version is None:
      # Default to the highest transfer-list version listed in the info
      # dict's "blockimgdiff_versions" (or 1 if unspecified).
      version = 1
      if OPTIONS.info_dict:
        version = max(
            int(i) for i in
            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
    self.version = version

    # Compute the diff now; the outputs land in a temp dir recorded in
    # OPTIONS.tempfiles for later cleanup.
    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
                                    version=self.version)
    tmpdir = tempfile.mkdtemp()
    OPTIONS.tempfiles.append(tmpdir)
    self.path = os.path.join(tmpdir, partition)
    b.Compute(self.path)

    _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)

  def WriteScript(self, script, output_zip, progress=None):
    # Emit the commands that apply this update and verify the result.
    if not self.src:
      # write the output unconditionally
      script.Print("Patching %s image unconditionally..." % (self.partition,))
    else:
      script.Print("Patching %s image after verification." % (self.partition,))

    if progress:
      script.ShowProgress(progress, 0)
    self._WriteUpdate(script, output_zip)
    self._WritePostInstallVerifyScript(script)

  def WriteVerifyScript(self, script):
    # Emit the pre-install check that the source partition still hashes
    # to what the incremental was built against (no-op for full updates).
    partition = self.partition
    if not self.src:
      script.Print("Image %s will be patched unconditionally." % (partition,))
    else:
      # Pre-install verification skips clobbered_blocks (contrast with
      # _WritePostInstallVerifyScript below).
      ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
      ranges_str = ranges.to_string_raw()
      if self.version >= 3:
        # v3+ also accepts a partially-updated partition that
        # block_image_verify() can prove is resumable.
        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
                            'block_image_verify("%s", '
                            'package_extract_file("%s.transfer.list"), '
                            '"%s.new.dat", "%s.patch.dat")) then') % (
                            self.device, ranges_str, self.src.TotalSha1(),
                            self.device, partition, partition, partition))
      else:
        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                           self.device, ranges_str, self.src.TotalSha1()))
      script.Print('Verified %s image...' % (partition,))
      script.AppendExtra('else')

      # When generating incrementals for the system and vendor partitions,
      # explicitly check the first block (which contains the superblock) of
      # the partition to see if it's what we expect. If this check fails,
      # give an explicit log message about the partition having been
      # remounted R/W (the most likely explanation) and the need to flash to
      # get OTAs working again.
      if self.check_first_block:
        self._CheckFirstBlock(script)

      # Abort the OTA update. Note that the incremental OTA cannot be applied
      # even if it may match the checksum of the target partition.
      # a) If version < 3, operations like move and erase will make changes
      #    unconditionally and damage the partition.
      # b) If version >= 3, it won't even reach here.
      script.AppendExtra(('abort("%s partition has unexpected contents");\n'
                          'endif;') % (partition,))

  def _WritePostInstallVerifyScript(self, script):
    # Emit checks that the freshly-written partition hashes to the target
    # image, and that any extended blocks are zeroed.
    partition = self.partition
    script.Print('Verifying the updated %s image...' % (partition,))
    # Unlike pre-install verification, clobbered_blocks should not be ignored.
    ranges = self.tgt.care_map
    ranges_str = ranges.to_string_raw()
    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                       self.device, ranges_str,
                       self.tgt.TotalSha1(include_clobbered_blocks=True)))

    # Bug: 20881595
    # Verify that extended blocks are really zeroed out.
    if self.tgt.extended:
      ranges_str = self.tgt.extended.to_string_raw()
      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
                         self.device, ranges_str,
                         self._HashZeroBlocks(self.tgt.extended.size())))
      script.Print('Verified the updated %s image.' % (partition,))
      script.AppendExtra(
          'else\n'
          '  abort("%s partition has unexpected non-zero contents after OTA '
          'update");\n'
          'endif;' % (partition,))
    else:
      script.Print('Verified the updated %s image.' % (partition,))

    script.AppendExtra(
        'else\n'
        '  abort("%s partition has unexpected contents after OTA update");\n'
        'endif;' % (partition,))

  def _WriteUpdate(self, script, output_zip):
    # Pack the transfer list and data files into the OTA zip, then emit
    # the block_image_update() call that consumes them.
    ZipWrite(output_zip,
             '{}.transfer.list'.format(self.path),
             '{}.transfer.list'.format(self.partition))
    ZipWrite(output_zip,
             '{}.new.dat'.format(self.path),
             '{}.new.dat'.format(self.partition))
    # NOTE(review): the patch data is intentionally stored uncompressed.
    ZipWrite(output_zip,
             '{}.patch.dat'.format(self.path),
             '{}.patch.dat'.format(self.partition),
             compress_type=zipfile.ZIP_STORED)

    call = ('block_image_update("{device}", '
            'package_extract_file("{partition}.transfer.list"), '
            '"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
                device=self.device, partition=self.partition))
    script.AppendExtra(script.WordWrap(call))

  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
    # Hex SHA-1 over the given block ranges read from `source`.
    data = source.ReadRangeSet(ranges)
    ctx = sha1()

    for p in data:
      ctx.update(p)

    return ctx.hexdigest()

  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
    """Return the hash value for all zero blocks."""
    # Assumes 4096-byte blocks.
    zero_block = '\x00' * 4096
    ctx = sha1()
    for _ in range(num_blocks):
      ctx.update(zero_block)

    return ctx.hexdigest()

  # TODO(tbao): Due to http://b/20939131, block 0 may be changed without
  # remounting R/W. Will change the checking to a finer-grained way to
  # mask off those bits.
  def _CheckFirstBlock(self, script):
    # Emit a check that block 0 (the superblock) still matches the source
    # image; abort with a "remounted R/W" diagnostic otherwise.
    r = rangelib.RangeSet((0, 1))
    srchash = self._HashBlocks(self.src, r)

    script.AppendExtra(('(range_sha1("%s", "%s") == "%s") || '
                        'abort("%s has been remounted R/W; '
                        'reflash device to reenable OTA updates");')
                       % (self.device, r.to_string_raw(), srchash,
                          self.device))
1377
1378DataImage = blockimgdiff.DataImage
1379
1380
# map recovery.fstab's fs_types to mount/format "partition types"
PARTITION_TYPES = {
    "yaffs2": "MTD",
    "mtd": "MTD",
    "ext4": "EMMC",
    "emmc": "EMMC",
    "f2fs": "EMMC",
    "squashfs": "EMMC"
}

def GetTypeAndDevice(mount_point, info):
  """Look up mount_point in the fstab carried in an info dict.

  Args:
    mount_point: The mount point to query, e.g. "/system".
    info: An info dict (e.g. OPTIONS.info_dict) with an "fstab" entry.

  Returns:
    A (partition_type, device) tuple; partition_type is one of the
    PARTITION_TYPES values ("MTD" or "EMMC").

  Raises:
    KeyError: If there is no fstab, the mount point is not in it, or its
        fs_type has no PARTITION_TYPES mapping.
  """
  fstab = info["fstab"]
  if not fstab:
    # Include the mount point in the error (instead of a bare KeyError)
    # so failures are diagnosable; callers catching KeyError still work.
    raise KeyError(mount_point)
  # Look the entry up once instead of twice.
  entry = fstab[mount_point]
  return (PARTITION_TYPES[entry.fs_type], entry.device)
1398
1399
def ParseCertificate(data):
  """Parse a PEM-format certificate and return its raw (DER) contents.

  Collects the base64 payload between the first BEGIN/END CERTIFICATE
  markers and decodes it; only the first certificate in `data` is parsed.
  """
  import base64

  cert = []
  save = False
  for line in data.split("\n"):
    if "--END CERTIFICATE--" in line:
      break
    if save:
      cert.append(line)
    if "--BEGIN CERTIFICATE--" in line:
      save = True
  # base64.b64decode() works on both Python 2 and 3; the former
  # str.decode('base64') codec doesn't exist in Python 3.
  cert = base64.b64decode("".join(cert))
  return cert
1413
def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
                      info_dict=None):
  """Generate a binary patch that creates the recovery image starting
  with the boot image.  (Most of the space in these images is just the
  kernel, which is identical for the two, so the resulting patch
  should be efficient.)  Add it to the output zip, along with a shell
  script that is run from init.rc on first boot to actually do the
  patching and install the new recovery image.

  recovery_img and boot_img should be File objects for the
  corresponding images.  info should be the dictionary returned by
  common.LoadInfoDict() on the input target_files.
  """

  if info_dict is None:
    info_dict = OPTIONS.info_dict

  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
  system_root_image = info_dict.get("system_root_image", None) == "true"

  if full_recovery_image:
    # Ship the whole recovery image; the generated script below installs
    # it directly instead of patching.
    output_sink("etc/recovery.img", recovery_img.data)

  else:
    # Ship a boot->recovery patch.  recovery-resource.dat, if present, is
    # passed to imgdiff as a shared-resource "bonus" file (-b).
    diff_program = ["imgdiff"]
    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
    if os.path.exists(path):
      diff_program.append("-b")
      diff_program.append(path)
      bonus_args = "-b /system/etc/recovery-resource.dat"
    else:
      bonus_args = ""

    d = Difference(recovery_img, boot_img, diff_program=diff_program)
    _, _, patch = d.ComputePatch()
    output_sink("recovery-from-boot.p", patch)

  try:
    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
  except KeyError:
    # No fstab entries for boot/recovery: nothing to install on-device.
    return

  if full_recovery_image:
    sh = """#!/system/bin/sh
if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'type': recovery_type,
       'device': recovery_device,
       'sha1': recovery_img.sha1,
       'size': recovery_img.size}
  else:
    sh = """#!/system/bin/sh
if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
else
  log -t recovery "Recovery image already installed"
fi
""" % {'boot_size': boot_img.size,
       'boot_sha1': boot_img.sha1,
       'recovery_size': recovery_img.size,
       'recovery_sha1': recovery_img.sha1,
       'boot_type': boot_type,
       'boot_device': boot_device,
       'recovery_type': recovery_type,
       'recovery_device': recovery_device,
       'bonus_args': bonus_args}

  # The install script location moved from /system/etc to /system/bin
  # in the L release.  Parse init.*.rc files to find out where the
  # target-files expects it to be, and put it there.
  sh_location = "etc/install-recovery.sh"
  found = False
  if system_root_image:
    init_rc_dir = os.path.join(input_dir, "ROOT")
  else:
    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
  init_rc_files = os.listdir(init_rc_dir)
  for init_rc_file in init_rc_files:
    if (not init_rc_file.startswith('init.') or
        not init_rc_file.endswith('.rc')):
      continue

    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
      for line in f:
        # e.g. "service flash_recovery /system/bin/install-recovery.sh"
        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
        if m:
          sh_location = m.group(1)
          found = True
          break

    if found:
      break

  print "putting script in", sh_location

  output_sink(sh_location, sh)
1514