common.py revision 575d68a48edc90d655509f2980dacc69958948de
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import errno
17import getopt
18import getpass
19import imp
20import os
21import platform
22import re
23import shlex
24import shutil
25import subprocess
26import sys
27import tempfile
28import threading
29import time
30import zipfile
31
32import blockimgdiff
33import rangelib
34
35from hashlib import sha1 as sha1
36
37
38class Options(object):
39  def __init__(self):
40    platform_search_path = {
41        "linux2": "out/host/linux-x86",
42        "darwin": "out/host/darwin-x86",
43    }
44
45    self.search_path = platform_search_path.get(sys.platform, None)
46    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
47    self.extra_signapk_args = []
48    self.java_path = "java"  # Use the one on the path by default.
49    self.java_args = "-Xmx2048m" # JVM Args
50    self.public_key_suffix = ".x509.pem"
51    self.private_key_suffix = ".pk8"
52    # Use the boot_signer binary built by otatools by default.
53    self.boot_signer_path = "boot_signer"
54    self.verbose = False
55    self.tempfiles = []
56    self.device_specific = None
57    self.extras = {}
58    self.info_dict = None
59    self.worker_threads = None
60    # Stash size cannot exceed cache_size * threshold.
61    self.cache_size = None
62    self.stash_threshold = 0.8
63
64
65OPTIONS = Options()
66
67
68# Values for "certificate" in apkcerts that mean special things.
69SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
70
71
72class ExternalError(RuntimeError):
73  pass
74
75
76def Run(args, **kwargs):
77  """Create and return a subprocess.Popen object, printing the command
78  line on the terminal if -v was specified."""
79  if OPTIONS.verbose:
80    print "  running: ", " ".join(args)
81  return subprocess.Popen(args, **kwargs)
82
83
84def CloseInheritedPipes():
85  """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
86  before doing other work."""
87  if platform.system() != "Darwin":
88    return
89  for d in range(3, 1025):
90    try:
91      stat = os.fstat(d)
92      if stat is not None:
93        pipebit = stat[0] & 0x1000
94        if pipebit != 0:
95          os.close(d)
96    except OSError:
97      pass
98
99
100def LoadInfoDict(input_file, input_dir=None):
101  """Read and parse the META/misc_info.txt key/value pairs from the
102  input target files and return a dict."""
103
104  def read_helper(fn):
105    if isinstance(input_file, zipfile.ZipFile):
106      return input_file.read(fn)
107    else:
108      path = os.path.join(input_file, *fn.split("/"))
109      try:
110        with open(path) as f:
111          return f.read()
112      except IOError as e:
113        if e.errno == errno.ENOENT:
114          raise KeyError(fn)
115  d = {}
116  try:
117    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
118  except KeyError:
119    # ok if misc_info.txt doesn't exist
120    pass
121
122  # backwards compatibility: These values used to be in their own
123  # files.  Look for them, in case we're processing an old
124  # target_files zip.
125
126  if "mkyaffs2_extra_flags" not in d:
127    try:
128      d["mkyaffs2_extra_flags"] = read_helper(
129          "META/mkyaffs2-extra-flags.txt").strip()
130    except KeyError:
131      # ok if flags don't exist
132      pass
133
134  if "recovery_api_version" not in d:
135    try:
136      d["recovery_api_version"] = read_helper(
137          "META/recovery-api-version.txt").strip()
138    except KeyError:
139      raise ValueError("can't find recovery API version in input target-files")
140
141  if "tool_extensions" not in d:
142    try:
143      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
144    except KeyError:
145      # ok if extensions don't exist
146      pass
147
148  if "fstab_version" not in d:
149    d["fstab_version"] = "1"
150
151  # A few properties are stored as links to the files in the out/ directory.
152  # It works fine with the build system. However, they are no longer available
153  # when (re)generating from target_files zip. If input_dir is not None, we
154  # are doing repacking. Redirect those properties to the actual files in the
155  # unzipped directory.
156  if input_dir is not None:
157    # We carry a copy of file_contexts under META/. If not available, search
158    # BOOT/RAMDISK/. Note that sometimes we may need a different file_contexts
159    # to build images than the one running on device, such as when enabling
160    # system_root_image. In that case, we must have the one for image
161    # generation copied to META/.
162    fc_config = os.path.join(input_dir, "META", "file_contexts")
163    if d.get("system_root_image") == "true":
164      assert os.path.exists(fc_config)
165    if not os.path.exists(fc_config):
166      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", "file_contexts")
167      if not os.path.exists(fc_config):
168        fc_config = None
169
170    if fc_config:
171      d["selinux_fc"] = fc_config
172
173    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
174    if d.get("system_root_image") == "true":
175      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
176      d["ramdisk_fs_config"] = os.path.join(
177          input_dir, "META", "root_filesystem_config.txt")
178
179  try:
180    data = read_helper("META/imagesizes.txt")
181    for line in data.split("\n"):
182      if not line:
183        continue
184      name, value = line.split(" ", 1)
185      if not value:
186        continue
187      if name == "blocksize":
188        d[name] = value
189      else:
190        d[name + "_size"] = value
191  except KeyError:
192    pass
193
194  def makeint(key):
195    if key in d:
196      d[key] = int(d[key], 0)
197
198  makeint("recovery_api_version")
199  makeint("blocksize")
200  makeint("system_size")
201  makeint("vendor_size")
202  makeint("userdata_size")
203  makeint("cache_size")
204  makeint("recovery_size")
205  makeint("boot_size")
206  makeint("fstab_version")
207
208  d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"],
209                                 d.get("system_root_image") == "true")
210  d["build.prop"] = LoadBuildProp(read_helper)
211  return d
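

# Illustrative usage sketch (the path argument is hypothetical): LoadInfoDict()
# accepts either an open target-files ZipFile or an unzipped directory.
def _example_load_info_dict(target_files_zip_path):
  input_zip = zipfile.ZipFile(target_files_zip_path, "r")
  try:
    info = LoadInfoDict(input_zip)
  finally:
    input_zip.close()
  return info["recovery_api_version"], info["fstab_version"]
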
212
213def LoadBuildProp(read_helper):
214  try:
215    data = read_helper("SYSTEM/build.prop")
216  except KeyError:
217    print "Warning: could not find SYSTEM/build.prop in %s" % zip
218    data = ""
219  return LoadDictionaryFromLines(data.split("\n"))
220
221def LoadDictionaryFromLines(lines):
222  d = {}
223  for line in lines:
224    line = line.strip()
225    if not line or line.startswith("#"):
226      continue
227    if "=" in line:
228      name, value = line.split("=", 1)
229      d[name] = value
230  return d
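

# Illustrative sketch (property values are made up): LoadDictionaryFromLines()
# is the shared parser behind the misc_info.txt and build.prop handling above.
def _example_load_dictionary_from_lines():
  lines = ["# comment", "",
           "ro.build.id=LMY47D", "ro.product.device=hammerhead"]
  props = LoadDictionaryFromLines(lines)
  assert props == {"ro.build.id": "LMY47D", "ro.product.device": "hammerhead"}
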
231
232def LoadRecoveryFSTab(read_helper, fstab_version, system_root_image=False):
233  class Partition(object):
234    def __init__(self, mount_point, fs_type, device, length, device2, context):
235      self.mount_point = mount_point
236      self.fs_type = fs_type
237      self.device = device
238      self.length = length
239      self.device2 = device2
240      self.context = context
241
242  try:
243    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
244  except KeyError:
245    print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab"
246    data = ""
247
248  if fstab_version == 1:
249    d = {}
250    for line in data.split("\n"):
251      line = line.strip()
252      if not line or line.startswith("#"):
253        continue
254      pieces = line.split()
255      if not 3 <= len(pieces) <= 4:
256        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
257      options = None
258      if len(pieces) >= 4:
259        if pieces[3].startswith("/"):
260          device2 = pieces[3]
261          if len(pieces) >= 5:
262            options = pieces[4]
263        else:
264          device2 = None
265          options = pieces[3]
266      else:
267        device2 = None
268
269      mount_point = pieces[0]
270      length = 0
271      if options:
272        options = options.split(",")
273        for i in options:
274          if i.startswith("length="):
275            length = int(i[7:])
276          else:
277            print "%s: unknown option \"%s\"" % (mount_point, i)
278
279      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
280                                 device=pieces[2], length=length,
281                                 device2=device2, context=None)
282
283  elif fstab_version == 2:
284    d = {}
285    for line in data.split("\n"):
286      line = line.strip()
287      if not line or line.startswith("#"):
288        continue
289      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
290      pieces = line.split()
291      if len(pieces) != 5:
292        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
293
294      # Ignore entries that are managed by vold
295      options = pieces[4]
296      if "voldmanaged=" in options:
297        continue
298
299      # It's a good line, parse it
300      length = 0
301      options = options.split(",")
302      for i in options:
303        if i.startswith("length="):
304          length = int(i[7:])
305        else:
306          # Ignore all unknown options in the unified fstab
307          continue
308
309      mount_flags = pieces[3]
310      # Honor the SELinux context if present.
311      context = None
312      for i in mount_flags.split(","):
313        if i.startswith("context="):
314          context = i
315
316      mount_point = pieces[1]
317      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
318                                 device=pieces[0], length=length,
319                                 device2=None, context=context)
320
321  else:
322    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
323
324  # / is used for the system mount point when the root directory is included in
325  # system. Other areas assume system is always at "/system" so point /system
326  # at /.
327  if system_root_image:
328    assert "/system" not in d and "/" in d
329    d["/system"] = d["/"]
330  return d
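

# Illustrative sketch (the device path is an example): parsing one fstab v2
# line through the same code path used for RECOVERY/RAMDISK/etc/recovery.fstab.
def _example_load_recovery_fstab():
  sample = "/dev/block/by-name/system /system ext4 ro wait\n"
  def read_helper(fn):
    assert fn == "RECOVERY/RAMDISK/etc/recovery.fstab"
    return sample
  fstab = LoadRecoveryFSTab(read_helper, fstab_version=2)
  p = fstab["/system"]
  assert p.fs_type == "ext4" and p.device.endswith("by-name/system")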
331
332
333def DumpInfoDict(d):
334  for k, v in sorted(d.items()):
335    print "%-25s = (%s) %s" % (k, type(v).__name__, v)
336
337
338def _BuildBootableImage(sourcedir, fs_config_file, info_dict=None,
339                        has_ramdisk=False):
340  """Build a bootable image from the specified sourcedir.
341
342  Take a kernel, cmdline, and optionally a ramdisk directory from the input (in
343  'sourcedir'), and turn them into a boot image.  Return the image data, or
344  None if sourcedir does not appear to contain files for building the
345  requested image."""
346
347  def make_ramdisk():
348    ramdisk_img = tempfile.NamedTemporaryFile()
349
350    if os.access(fs_config_file, os.F_OK):
351      cmd = ["mkbootfs", "-f", fs_config_file,
352             os.path.join(sourcedir, "RAMDISK")]
353    else:
354      cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
355    p1 = Run(cmd, stdout=subprocess.PIPE)
356    p2 = Run(["minigzip"], stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
357
358    p2.wait()
359    p1.wait()
360    assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
361    assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)
362
363    return ramdisk_img
364
365  if not os.access(os.path.join(sourcedir, "kernel"), os.F_OK):
366    return None
367
368  if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
369    return None
370
371  if info_dict is None:
372    info_dict = OPTIONS.info_dict
373
374  img = tempfile.NamedTemporaryFile()
375
376  if has_ramdisk:
377    ramdisk_img = make_ramdisk()
378
379  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
380  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
381
382  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]
383
384  fn = os.path.join(sourcedir, "second")
385  if os.access(fn, os.F_OK):
386    cmd.append("--second")
387    cmd.append(fn)
388
389  fn = os.path.join(sourcedir, "cmdline")
390  if os.access(fn, os.F_OK):
391    cmd.append("--cmdline")
392    cmd.append(open(fn).read().rstrip("\n"))
393
394  fn = os.path.join(sourcedir, "base")
395  if os.access(fn, os.F_OK):
396    cmd.append("--base")
397    cmd.append(open(fn).read().rstrip("\n"))
398
399  fn = os.path.join(sourcedir, "pagesize")
400  if os.access(fn, os.F_OK):
401    cmd.append("--pagesize")
402    cmd.append(open(fn).read().rstrip("\n"))
403
404  args = info_dict.get("mkbootimg_args", None)
405  if args and args.strip():
406    cmd.extend(shlex.split(args))
407
408  if has_ramdisk:
409    cmd.extend(["--ramdisk", ramdisk_img.name])
410
411  img_unsigned = None
412  if info_dict.get("vboot", None):
413    img_unsigned = tempfile.NamedTemporaryFile()
414    cmd.extend(["--output", img_unsigned.name])
415  else:
416    cmd.extend(["--output", img.name])
417
418  p = Run(cmd, stdout=subprocess.PIPE)
419  p.communicate()
420  assert p.returncode == 0, "mkbootimg of %s image failed" % (
421      os.path.basename(sourcedir),)
422
423  if info_dict.get("verity_key", None):
424    path = "/" + os.path.basename(sourcedir).lower()
425    cmd = [OPTIONS.boot_signer_path, path, img.name,
426           info_dict["verity_key"] + ".pk8",
427           info_dict["verity_key"] + ".x509.pem", img.name]
428    p = Run(cmd, stdout=subprocess.PIPE)
429    p.communicate()
430    assert p.returncode == 0, "boot_signer of %s image failed" % path
431
432  # Sign the image if vboot is non-empty.
433  elif info_dict.get("vboot", None):
434    path = "/" + os.path.basename(sourcedir).lower()
435    img_keyblock = tempfile.NamedTemporaryFile()
436    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
437           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
438           info_dict["vboot_key"] + ".vbprivk", img_keyblock.name,
439           img.name]
440    p = Run(cmd, stdout=subprocess.PIPE)
441    p.communicate()
442    assert p.returncode == 0, "vboot_signer of %s image failed" % path
443
444    # Clean up the temp files.
445    img_unsigned.close()
446    img_keyblock.close()
447
448  img.seek(0, os.SEEK_SET)
449  data = img.read()
450
451  if has_ramdisk:
452    ramdisk_img.close()
453  img.close()
454
455  return data
456
457
458def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
459                     info_dict=None):
460  """Return a File object with the desired bootable image.
461
462  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name 'prebuilt_name',
463  otherwise look for it under 'unpack_dir'/IMAGES, otherwise construct it from
464  the source files in 'unpack_dir'/'tree_subdir'."""
465
466  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
467  if os.path.exists(prebuilt_path):
468    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
469    return File.FromLocalFile(name, prebuilt_path)
470
471  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
472  if os.path.exists(prebuilt_path):
473    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
474    return File.FromLocalFile(name, prebuilt_path)
475
476  print "building image from target_files %s..." % (tree_subdir,)
477
478  if info_dict is None:
479    info_dict = OPTIONS.info_dict
480
481  # With system_root_image == "true", we don't pack ramdisk into the boot image.
482  has_ramdisk = (info_dict.get("system_root_image", None) != "true" or
483                 prebuilt_name != "boot.img")
484
485  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
486  data = _BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
487                             os.path.join(unpack_dir, fs_config),
488                             info_dict, has_ramdisk)
489  if data:
490    return File(name, data)
491  return None
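

# Illustrative sketch: how the OTA scripts typically request the recovery image
# from an unpacked target-files tree (prebuilts are preferred, otherwise the
# image is rebuilt from the RECOVERY/ subtree).
def _example_get_recovery_image(unpack_dir, info_dict):
  return GetBootableImage("recovery.img", "recovery.img", unpack_dir,
                          "RECOVERY", info_dict=info_dict)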
492
493
494def UnzipTemp(filename, pattern=None):
495  """Unzip the given archive into a temporary directory and return the name.
496
497  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
498  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
499
500  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
501  main file), open for reading.
502  """
503
504  tmp = tempfile.mkdtemp(prefix="targetfiles-")
505  OPTIONS.tempfiles.append(tmp)
506
507  def unzip_to_dir(filename, dirname):
508    cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
509    if pattern is not None:
510      cmd.append(pattern)
511    p = Run(cmd, stdout=subprocess.PIPE)
512    p.communicate()
513    if p.returncode != 0:
514      raise ExternalError("failed to unzip input target-files \"%s\"" %
515                          (filename,))
516
517  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
518  if m:
519    unzip_to_dir(m.group(1), tmp)
520    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
521    filename = m.group(1)
522  else:
523    unzip_to_dir(filename, tmp)
524
525  return tmp, zipfile.ZipFile(filename, "r")
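

# Illustrative sketch (the argument is a hypothetical path): UnzipTemp()
# registers its temp directory in OPTIONS.tempfiles, so pair it with Cleanup().
def _example_unzip_and_load(target_files_path):
  tmp, input_zip = UnzipTemp(target_files_path)
  try:
    return LoadInfoDict(input_zip, tmp)
  finally:
    input_zip.close()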
526
527
528def GetKeyPasswords(keylist):
529  """Given a list of keys, prompt the user to enter passwords for
530  those which require them.  Return a {key: password} dict.  password
531  will be None if the key has no password."""
532
533  no_passwords = []
534  need_passwords = []
535  key_passwords = {}
536  devnull = open("/dev/null", "w+b")
537  for k in sorted(keylist):
538    # We don't need a password for things that aren't really keys.
539    if k in SPECIAL_CERT_STRINGS:
540      no_passwords.append(k)
541      continue
542
543    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
544             "-inform", "DER", "-nocrypt"],
545            stdin=devnull.fileno(),
546            stdout=devnull.fileno(),
547            stderr=subprocess.STDOUT)
548    p.communicate()
549    if p.returncode == 0:
550      # Definitely an unencrypted key.
551      no_passwords.append(k)
552    else:
553      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
554               "-inform", "DER", "-passin", "pass:"],
555              stdin=devnull.fileno(),
556              stdout=devnull.fileno(),
557              stderr=subprocess.PIPE)
558      _, stderr = p.communicate()
559      if p.returncode == 0:
560        # Encrypted key with empty string as password.
561        key_passwords[k] = ''
562      elif stderr.startswith('Error decrypting key'):
563        # Definitely encrypted key.
564        # It would have said "Error reading key" if it didn't parse correctly.
565        need_passwords.append(k)
566      else:
567        # Potentially, a type of key that openssl doesn't understand.
568        # We'll let the routines in signapk.jar handle it.
569        no_passwords.append(k)
570  devnull.close()
571
572  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
573  key_passwords.update(dict.fromkeys(no_passwords, None))
574  return key_passwords
575
576
577def SignFile(input_name, output_name, key, password, align=None,
578             whole_file=False):
579  """Sign the input_name zip/jar/apk, producing output_name.  Use the
580  given key and password (the latter may be None if the key does not
581  have a password).
582
583  If align is an integer > 1, zipalign is run to align stored files in
584  the output zip on 'align'-byte boundaries.
585
586  If whole_file is true, use the "-w" option to SignApk to embed a
587  signature that covers the whole file in the archive comment of the
588  zip file.
589  """
590
591  if align == 0 or align == 1:
592    align = None
593
594  if align:
595    temp = tempfile.NamedTemporaryFile()
596    sign_name = temp.name
597  else:
598    sign_name = output_name
599
600  cmd = [OPTIONS.java_path, OPTIONS.java_args, "-jar",
601         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
602  cmd.extend(OPTIONS.extra_signapk_args)
603  if whole_file:
604    cmd.append("-w")
605  cmd.extend([key + OPTIONS.public_key_suffix,
606              key + OPTIONS.private_key_suffix,
607              input_name, sign_name])
608
609  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
610  if password is not None:
611    password += "\n"
612  p.communicate(password)
613  if p.returncode != 0:
614    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
615
616  if align:
617    p = Run(["zipalign", "-f", "-p", str(align), sign_name, output_name])
618    p.communicate()
619    if p.returncode != 0:
620      raise ExternalError("zipalign failed: return code %s" % (p.returncode,))
621    temp.close()
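

# Illustrative sketch (the key name is hypothetical): signing a whole-file OTA
# package with a password collected via GetKeyPasswords().
def _example_sign_package(unsigned_path, signed_path):
  key = "build/target/product/security/testkey"
  passwords = GetKeyPasswords([key])
  SignFile(unsigned_path, signed_path, key, passwords[key], whole_file=True)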
622
623
624def CheckSize(data, target, info_dict):
625  """Check the data string passed against the max size limit, if
626  any, for the given target.  Raise exception if the data is too big.
627  Print a warning if the data is nearing the maximum size."""
628
629  if target.endswith(".img"):
630    target = target[:-4]
631  mount_point = "/" + target
632
633  fs_type = None
634  limit = None
635  if info_dict["fstab"]:
636    if mount_point == "/userdata":
637      mount_point = "/data"
638    p = info_dict["fstab"][mount_point]
639    fs_type = p.fs_type
640    device = p.device
641    if "/" in device:
642      device = device[device.rfind("/")+1:]
643    limit = info_dict.get(device + "_size", None)
644  if not fs_type or not limit:
645    return
646
647  if fs_type == "yaffs2":
648    # image size should be increased by 1/64th to account for the
649    # spare area (64 bytes per 2k page)
650    limit = limit / 2048 * (2048+64)
651  size = len(data)
652  pct = float(size) * 100.0 / limit
653  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
654  if pct >= 99.0:
655    raise ExternalError(msg)
656  elif pct >= 95.0:
657    print
658    print "  WARNING: ", msg
659    print
660  elif OPTIONS.verbose:
661    print "  ", msg
662
663
664def ReadApkCerts(tf_zip):
665  """Given a target_files ZipFile, parse the META/apkcerts.txt file
666  and return a {package: cert} dict."""
667  certmap = {}
668  for line in tf_zip.read("META/apkcerts.txt").split("\n"):
669    line = line.strip()
670    if not line:
671      continue
672    m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
673                 r'private_key="(.*)"$', line)
674    if m:
675      name, cert, privkey = m.groups()
676      public_key_suffix_len = len(OPTIONS.public_key_suffix)
677      private_key_suffix_len = len(OPTIONS.private_key_suffix)
678      if cert in SPECIAL_CERT_STRINGS and not privkey:
679        certmap[name] = cert
680      elif (cert.endswith(OPTIONS.public_key_suffix) and
681            privkey.endswith(OPTIONS.private_key_suffix) and
682            cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
683        certmap[name] = cert[:-public_key_suffix_len]
684      else:
685        raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
686  return certmap
687
688
689COMMON_DOCSTRING = """
690  -p  (--path)  <dir>
691      Prepend <dir>/bin to the list of places to search for binaries
692      run by this script, and expect to find jars in <dir>/framework.
693
694  -s  (--device_specific) <file>
695      Path to the python module containing device-specific
696      releasetools code.
697
698  -x  (--extra)  <key=value>
699      Add a key/value pair to the 'extras' dict, which device-specific
700      extension code may look at.
701
702  -v  (--verbose)
703      Show command lines being executed.
704
705  -h  (--help)
706      Display this usage message and exit.
707"""
708
709def Usage(docstring):
710  print docstring.rstrip("\n")
711  print COMMON_DOCSTRING
712
713
714def ParseOptions(argv,
715                 docstring,
716                 extra_opts="", extra_long_opts=(),
717                 extra_option_handler=None):
718  """Parse the options in argv and return any arguments that aren't
719  flags.  docstring is the calling module's docstring, to be displayed
720  for errors and -h.  extra_opts and extra_long_opts are for flags
721  defined by the caller, which are processed by passing them to
722  extra_option_handler."""
723
724  try:
725    opts, args = getopt.getopt(
726        argv, "hvp:s:x:" + extra_opts,
727        ["help", "verbose", "path=", "signapk_path=", "extra_signapk_args=",
728         "java_path=", "java_args=", "public_key_suffix=",
729         "private_key_suffix=", "boot_signer_path=", "device_specific=",
730         "extra="] +
731        list(extra_long_opts))
732  except getopt.GetoptError as err:
733    Usage(docstring)
734    print "**", str(err), "**"
735    sys.exit(2)
736
737  for o, a in opts:
738    if o in ("-h", "--help"):
739      Usage(docstring)
740      sys.exit()
741    elif o in ("-v", "--verbose"):
742      OPTIONS.verbose = True
743    elif o in ("-p", "--path"):
744      OPTIONS.search_path = a
745    elif o in ("--signapk_path",):
746      OPTIONS.signapk_path = a
747    elif o in ("--extra_signapk_args",):
748      OPTIONS.extra_signapk_args = shlex.split(a)
749    elif o in ("--java_path",):
750      OPTIONS.java_path = a
751    elif o in ("--java_args",):
752      OPTIONS.java_args = a
753    elif o in ("--public_key_suffix",):
754      OPTIONS.public_key_suffix = a
755    elif o in ("--private_key_suffix",):
756      OPTIONS.private_key_suffix = a
757    elif o in ("--boot_signer_path",):
758      OPTIONS.boot_signer_path = a
759    elif o in ("-s", "--device_specific"):
760      OPTIONS.device_specific = a
761    elif o in ("-x", "--extra"):
762      key, value = a.split("=", 1)
763      OPTIONS.extras[key] = value
764    else:
765      if extra_option_handler is None or not extra_option_handler(o, a):
766        assert False, "unknown option \"%s\"" % (o,)
767
768  if OPTIONS.search_path:
769    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
770                          os.pathsep + os.environ["PATH"])
771
772  return args
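

# Illustrative sketch (the extra flag is made up): the calling convention used
# by the release scripts to mix their own flags with the common ones above.
def _example_parse_options(argv):
  def option_handler(o, a):
    if o in ("--extra_flag",):
      OPTIONS.extras["extra_flag"] = a
    else:
      return False
    return True
  return ParseOptions(argv, "usage: example_tool [flags] <target_files>",
                      extra_long_opts=["extra_flag="],
                      extra_option_handler=option_handler)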
773
774
775def MakeTempFile(prefix=None, suffix=None):
776  """Make a temp file and add it to the list of things to be deleted
777  when Cleanup() is called.  Return the filename."""
778  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
779  os.close(fd)
780  OPTIONS.tempfiles.append(fn)
781  return fn
782
783
784def Cleanup():
785  for i in OPTIONS.tempfiles:
786    if os.path.isdir(i):
787      shutil.rmtree(i)
788    else:
789      os.remove(i)
790
791
792class PasswordManager(object):
793  def __init__(self):
794    self.editor = os.getenv("EDITOR", None)
795    self.pwfile = os.getenv("ANDROID_PW_FILE", None)
796
797  def GetPasswords(self, items):
798    """Get passwords corresponding to each string in 'items',
799    returning a dict.  (The dict may have keys in addition to the
800    values in 'items'.)
801
802    Uses the passwords in $ANDROID_PW_FILE if available, letting the
803    user edit that file to add more needed passwords.  If no editor is
804    available, or $ANDROID_PW_FILE isn't defined, prompts the user
805    interactively in the ordinary way.
806    """
807
808    current = self.ReadFile()
809
810    first = True
811    while True:
812      missing = []
813      for i in items:
814        if i not in current or not current[i]:
815          missing.append(i)
816      # Are all the passwords already in the file?
817      if not missing:
818        return current
819
820      for i in missing:
821        current[i] = ""
822
823      if not first:
824        print "key file %s still missing some passwords." % (self.pwfile,)
825        answer = raw_input("try to edit again? [y]> ").strip()
826        if answer and answer[0] not in 'yY':
827          raise RuntimeError("key passwords unavailable")
828      first = False
829
830      current = self.UpdateAndReadFile(current)
831
832  def PromptResult(self, current): # pylint: disable=no-self-use
833    """Prompt the user to enter a value (password) for each key in
834    'current' whose value is false.  Returns a new dict with all the
835    values.
836    """
837    result = {}
838    for k, v in sorted(current.iteritems()):
839      if v:
840        result[k] = v
841      else:
842        while True:
843          result[k] = getpass.getpass(
844              "Enter password for %s key> " % k).strip()
845          if result[k]:
846            break
847    return result
848
849  def UpdateAndReadFile(self, current):
850    if not self.editor or not self.pwfile:
851      return self.PromptResult(current)
852
853    f = open(self.pwfile, "w")
854    os.chmod(self.pwfile, 0o600)
855    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
856    f.write("# (Additional spaces are harmless.)\n\n")
857
858    first_line = None
859    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
860    for i, (_, k, v) in enumerate(sorted_list):
861      f.write("[[[  %s  ]]] %s\n" % (v, k))
862      if not v and first_line is None:
863        # position cursor on first line with no password.
864        first_line = i + 4
865    f.close()
866
867    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
868    _, _ = p.communicate()
869
870    return self.ReadFile()
871
872  def ReadFile(self):
873    result = {}
874    if self.pwfile is None:
875      return result
876    try:
877      f = open(self.pwfile, "r")
878      for line in f:
879        line = line.strip()
880        if not line or line[0] == '#':
881          continue
882        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
883        if not m:
884          print "failed to parse password file: ", line
885        else:
886          result[m.group(2)] = m.group(1)
887      f.close()
888    except IOError as e:
889      if e.errno != errno.ENOENT:
890        print "error reading password file: ", str(e)
891    return result
892
893
894def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
895             compress_type=None):
896  import datetime
897
898  # http://b/18015246
899  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
900  # for files larger than 2GiB. We can work around this by adjusting their
901  # limit. Note that `zipfile.writestr()` will not work for strings larger than
902  # 2GiB. The Python interpreter sometimes rejects strings that large (though
903  # it isn't clear to me exactly what circumstances cause this).
904  # `zipfile.write()` must be used directly to work around this.
905  #
906  # This mess can be avoided if we port to python3.
907  saved_zip64_limit = zipfile.ZIP64_LIMIT
908  zipfile.ZIP64_LIMIT = (1 << 32) - 1
909
910  if compress_type is None:
911    compress_type = zip_file.compression
912  if arcname is None:
913    arcname = filename
914
915  saved_stat = os.stat(filename)
916
917  try:
918    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
919    # file to be zipped and reset it when we're done.
920    os.chmod(filename, perms)
921
922    # Use a fixed timestamp so the output is repeatable.
923    epoch = datetime.datetime.fromtimestamp(0)
924    timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
925    os.utime(filename, (timestamp, timestamp))
926
927    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
928  finally:
929    os.chmod(filename, saved_stat.st_mode)
930    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
931    zipfile.ZIP64_LIMIT = saved_zip64_limit
932
933
934def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
935                compress_type=None):
936  """Wrap zipfile.writestr() function to work around the zip64 limit.
937
938  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
939  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
940  when calling crc32(bytes).
941
942  But it still works fine to write a shorter string into a large zip file.
943  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
944  when we know the string won't be too long.
945  """
946
947  saved_zip64_limit = zipfile.ZIP64_LIMIT
948  zipfile.ZIP64_LIMIT = (1 << 32) - 1
949
950  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
951    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
952    zinfo.compress_type = zip_file.compression
953    if perms is None:
954      perms = 0o100644
955  else:
956    zinfo = zinfo_or_arcname
957
958  # If compress_type is given, it overrides the value in zinfo.
959  if compress_type is not None:
960    zinfo.compress_type = compress_type
961
962  # If perms is given, it has a priority.
963  if perms is not None:
964    # If perms doesn't set the file type, mark it as a regular file.
965    if perms & 0o770000 == 0:
966      perms |= 0o100000
967    zinfo.external_attr = perms << 16
968
969  # Use a fixed timestamp so the output is repeatable.
970  zinfo.date_time = (2009, 1, 1, 0, 0, 0)
971
972  zip_file.writestr(zinfo, data)
973  zipfile.ZIP64_LIMIT = saved_zip64_limit
974
975
976def ZipClose(zip_file):
977  # http://b/18015246
978  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
979  # central directory.
980  saved_zip64_limit = zipfile.ZIP64_LIMIT
981  zipfile.ZIP64_LIMIT = (1 << 32) - 1
982
983  zip_file.close()
984
985  zipfile.ZIP64_LIMIT = saved_zip64_limit
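

# Illustrative sketch (the member name is made up): the Zip* wrappers above
# keep output zips repeatable (fixed timestamps) and avoid the Python 2.7
# zip64 limit, including during close().
def _example_zip_helpers(output_path, payload):
  out_zip = zipfile.ZipFile(output_path, "w", compression=zipfile.ZIP_DEFLATED)
  ZipWriteStr(out_zip, "META-INF/com/android/metadata", payload)
  ZipClose(out_zip)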
986
987
988class DeviceSpecificParams(object):
989  module = None
990  def __init__(self, **kwargs):
991    """Keyword arguments to the constructor become attributes of this
992    object, which is passed to all functions in the device-specific
993    module."""
994    for k, v in kwargs.iteritems():
995      setattr(self, k, v)
996    self.extras = OPTIONS.extras
997
998    if self.module is None:
999      path = OPTIONS.device_specific
1000      if not path:
1001        return
1002      try:
1003        if os.path.isdir(path):
1004          info = imp.find_module("releasetools", [path])
1005        else:
1006          d, f = os.path.split(path)
1007          b, x = os.path.splitext(f)
1008          if x == ".py":
1009            f = b
1010          info = imp.find_module(f, [d])
1011        print "loaded device-specific extensions from", path
1012        self.module = imp.load_module("device_specific", *info)
1013      except ImportError:
1014        print "unable to load device-specific module; assuming none"
1015
1016  def _DoCall(self, function_name, *args, **kwargs):
1017    """Call the named function in the device-specific module, passing
1018    the given args and kwargs.  The first argument to the call will be
1019    the DeviceSpecific object itself.  If there is no module, or the
1020    module does not define the function, return the value of the
1021    'default' kwarg (which itself defaults to None)."""
1022    if self.module is None or not hasattr(self.module, function_name):
1023      return kwargs.get("default", None)
1024    return getattr(self.module, function_name)(*((self,) + args), **kwargs)
1025
1026  def FullOTA_Assertions(self):
1027    """Called after emitting the block of assertions at the top of a
1028    full OTA package.  Implementations can add whatever additional
1029    assertions they like."""
1030    return self._DoCall("FullOTA_Assertions")
1031
1032  def FullOTA_InstallBegin(self):
1033    """Called at the start of full OTA installation."""
1034    return self._DoCall("FullOTA_InstallBegin")
1035
1036  def FullOTA_InstallEnd(self):
1037    """Called at the end of full OTA installation; typically this is
1038    used to install the image for the device's baseband processor."""
1039    return self._DoCall("FullOTA_InstallEnd")
1040
1041  def IncrementalOTA_Assertions(self):
1042    """Called after emitting the block of assertions at the top of an
1043    incremental OTA package.  Implementations can add whatever
1044    additional assertions they like."""
1045    return self._DoCall("IncrementalOTA_Assertions")
1046
1047  def IncrementalOTA_VerifyBegin(self):
1048    """Called at the start of the verification phase of incremental
1049    OTA installation; additional checks can be placed here to abort
1050    the script before any changes are made."""
1051    return self._DoCall("IncrementalOTA_VerifyBegin")
1052
1053  def IncrementalOTA_VerifyEnd(self):
1054    """Called at the end of the verification phase of incremental OTA
1055    installation; additional checks can be placed here to abort the
1056    script before any changes are made."""
1057    return self._DoCall("IncrementalOTA_VerifyEnd")
1058
1059  def IncrementalOTA_InstallBegin(self):
1060    """Called at the start of incremental OTA installation (after
1061    verification is complete)."""
1062    return self._DoCall("IncrementalOTA_InstallBegin")
1063
1064  def IncrementalOTA_InstallEnd(self):
1065    """Called at the end of incremental OTA installation; typically
1066    this is used to install the image for the device's baseband
1067    processor."""
1068    return self._DoCall("IncrementalOTA_InstallEnd")
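

# Illustrative sketch (keyword names are typical, not exhaustive): the OTA
# scripts hand their state to the device-specific hooks like this; with no
# module loaded every hook is a harmless no-op.
def _example_device_specific_hooks(script, output_zip, info_dict):
  device_specific = DeviceSpecificParams(
      script=script, output_zip=output_zip, info_dict=info_dict)
  device_specific.FullOTA_Assertions()
  device_specific.FullOTA_InstallBegin()
  device_specific.FullOTA_InstallEnd()
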
1069
1070class File(object):
1071  def __init__(self, name, data):
1072    self.name = name
1073    self.data = data
1074    self.size = len(data)
1075    self.sha1 = sha1(data).hexdigest()
1076
1077  @classmethod
1078  def FromLocalFile(cls, name, diskname):
1079    f = open(diskname, "rb")
1080    data = f.read()
1081    f.close()
1082    return File(name, data)
1083
1084  def WriteToTemp(self):
1085    t = tempfile.NamedTemporaryFile()
1086    t.write(self.data)
1087    t.flush()
1088    return t
1089
1090  def AddToZip(self, z, compression=None):
1091    ZipWriteStr(z, self.name, self.data, compress_type=compression)
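

# Illustrative sketch (the archive name is made up): File wraps raw bytes with
# the size and sha1 fields the patching code keys on.
def _example_file_usage(output_zip, local_path):
  f = File.FromLocalFile("system/app/Example.apk", local_path)
  assert f.size == len(f.data) and len(f.sha1) == 40
  f.AddToZip(output_zip, compression=zipfile.ZIP_DEFLATED)
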
1092
1093DIFF_PROGRAM_BY_EXT = {
1094    ".gz" : "imgdiff",
1095    ".zip" : ["imgdiff", "-z"],
1096    ".jar" : ["imgdiff", "-z"],
1097    ".apk" : ["imgdiff", "-z"],
1098    ".img" : "imgdiff",
1099    }
1100
1101class Difference(object):
1102  def __init__(self, tf, sf, diff_program=None):
1103    self.tf = tf
1104    self.sf = sf
1105    self.patch = None
1106    self.diff_program = diff_program
1107
1108  def ComputePatch(self):
1109    """Compute the patch (as a string of data) needed to turn sf into
1110    tf.  Returns the same tuple as GetPatch()."""
1111
1112    tf = self.tf
1113    sf = self.sf
1114
1115    if self.diff_program:
1116      diff_program = self.diff_program
1117    else:
1118      ext = os.path.splitext(tf.name)[1]
1119      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
1120
1121    ttemp = tf.WriteToTemp()
1122    stemp = sf.WriteToTemp()
1123
1124    ext = os.path.splitext(tf.name)[1]
1125
1126    try:
1127      ptemp = tempfile.NamedTemporaryFile()
1128      if isinstance(diff_program, list):
1129        cmd = copy.copy(diff_program)
1130      else:
1131        cmd = [diff_program]
1132      cmd.append(stemp.name)
1133      cmd.append(ttemp.name)
1134      cmd.append(ptemp.name)
1135      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1136      err = []
1137      def run():
1138        _, e = p.communicate()
1139        if e:
1140          err.append(e)
1141      th = threading.Thread(target=run)
1142      th.start()
1143      th.join(timeout=300)   # 5 mins
1144      if th.is_alive():
1145        print "WARNING: diff command timed out"
1146        p.terminate()
1147        th.join(5)
1148        if th.is_alive():
1149          p.kill()
1150          th.join()
1151
1152      if err or p.returncode != 0:
1153        print "WARNING: failure running %s:\n%s\n" % (
1154            diff_program, "".join(err))
1155        self.patch = None
1156        return None, None, None
1157      diff = ptemp.read()
1158    finally:
1159      ptemp.close()
1160      stemp.close()
1161      ttemp.close()
1162
1163    self.patch = diff
1164    return self.tf, self.sf, self.patch
1165
1166
1167  def GetPatch(self):
1168    """Return a tuple (target_file, source_file, patch_data).
1169    patch_data may be None if ComputePatch hasn't been called, or if
1170    computing the patch failed."""
1171    return self.tf, self.sf, self.patch
1172
1173
1174def ComputeDifferences(diffs):
1175  """Call ComputePatch on all the Difference objects in 'diffs'."""
1176  print len(diffs), "diffs to compute"
1177
1178  # Do the largest files first, to try and reduce the long-pole effect.
1179  by_size = [(i.tf.size, i) for i in diffs]
1180  by_size.sort(reverse=True)
1181  by_size = [i[1] for i in by_size]
1182
1183  lock = threading.Lock()
1184  diff_iter = iter(by_size)   # accessed under lock
1185
1186  def worker():
1187    try:
1188      lock.acquire()
1189      for d in diff_iter:
1190        lock.release()
1191        start = time.time()
1192        d.ComputePatch()
1193        dur = time.time() - start
1194        lock.acquire()
1195
1196        tf, sf, patch = d.GetPatch()
1197        if sf.name == tf.name:
1198          name = tf.name
1199        else:
1200          name = "%s (%s)" % (tf.name, sf.name)
1201        if patch is None:
1202          print "patching failed!                                  %s" % (name,)
1203        else:
1204          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
1205              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
1206      lock.release()
1207    except Exception as e:
1208      print e
1209      raise
1210
1211  # start worker threads; wait for them all to finish.
1212  threads = [threading.Thread(target=worker)
1213             for i in range(OPTIONS.worker_threads)]
1214  for th in threads:
1215    th.start()
1216  while threads:
1217    threads.pop().join()
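

# Illustrative sketch (file names are hypothetical): computing one patch
# serially; the bsdiff/imgdiff host tools must be on PATH (see the -p option).
def _example_compute_one_patch(source_path, target_path):
  OPTIONS.worker_threads = 1
  tf = File.FromLocalFile("boot.img", target_path)
  sf = File.FromLocalFile("boot.img", source_path)
  d = Difference(tf, sf)
  ComputeDifferences([d])
  return d.GetPatch()[2]   # patch data, or None if diffing failed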
1218
1219
1220class BlockDifference(object):
1221  def __init__(self, partition, tgt, src=None, check_first_block=False,
1222               version=None):
1223    self.tgt = tgt
1224    self.src = src
1225    self.partition = partition
1226    self.check_first_block = check_first_block
1227
1228    # Due to http://b/20939131, check_first_block is disabled temporarily.
1229    assert not self.check_first_block
1230
1231    if version is None:
1232      version = 1
1233      if OPTIONS.info_dict:
1234        version = max(
1235            int(i) for i in
1236            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
1237    self.version = version
1238
1239    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
1240                                    version=self.version)
1241    tmpdir = tempfile.mkdtemp()
1242    OPTIONS.tempfiles.append(tmpdir)
1243    self.path = os.path.join(tmpdir, partition)
1244    b.Compute(self.path)
1245
1246    _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
1247
1248  def WriteScript(self, script, output_zip, progress=None):
1249    if not self.src:
1250      # write the output unconditionally
1251      script.Print("Patching %s image unconditionally..." % (self.partition,))
1252    else:
1253      script.Print("Patching %s image after verification." % (self.partition,))
1254
1255    if progress:
1256      script.ShowProgress(progress, 0)
1257    self._WriteUpdate(script, output_zip)
1258    self._WritePostInstallVerifyScript(script)
1259
1260  def WriteVerifyScript(self, script):
1261    partition = self.partition
1262    if not self.src:
1263      script.Print("Image %s will be patched unconditionally." % (partition,))
1264    else:
1265      ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
1266      ranges_str = ranges.to_string_raw()
1267      if self.version >= 3:
1268        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
1269                            'block_image_verify("%s", '
1270                            'package_extract_file("%s.transfer.list"), '
1271                            '"%s.new.dat", "%s.patch.dat")) then') % (
1272                            self.device, ranges_str, self.src.TotalSha1(),
1273                            self.device, partition, partition, partition))
1274      else:
1275        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
1276                           self.device, ranges_str, self.src.TotalSha1()))
1277      script.Print('Verified %s image...' % (partition,))
1278      script.AppendExtra('else')
1279
1280      # When generating incrementals for the system and vendor partitions,
1281      # explicitly check the first block (which contains the superblock) of
1282      # the partition to see if it's what we expect. If this check fails,
1283      # give an explicit log message about the partition having been
1284      # remounted R/W (the most likely explanation) and the need to flash to
1285      # get OTAs working again.
1286      if self.check_first_block:
1287        self._CheckFirstBlock(script)
1288
1289      # Abort the OTA update. Note that the incremental OTA cannot be applied
1290      # even if it may match the checksum of the target partition.
1291      # a) If version < 3, operations like move and erase will make changes
1292      #    unconditionally and damage the partition.
1293      # b) If version >= 3, it won't even reach here.
1294      script.AppendExtra(('abort("%s partition has unexpected contents");\n'
1295                          'endif;') % (partition,))
1296
1297  def _WritePostInstallVerifyScript(self, script):
1298    partition = self.partition
1299    script.Print('Verifying the updated %s image...' % (partition,))
1300    # Unlike pre-install verification, clobbered_blocks should not be ignored.
1301    ranges = self.tgt.care_map
1302    ranges_str = ranges.to_string_raw()
1303    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
1304                       self.device, ranges_str,
1305                       self.tgt.TotalSha1(include_clobbered_blocks=True)))
1306
1307    # Bug: 20881595
1308    # Verify that extended blocks are really zeroed out.
1309    if self.tgt.extended:
1310      ranges_str = self.tgt.extended.to_string_raw()
1311      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
1312                         self.device, ranges_str,
1313                         self._HashZeroBlocks(self.tgt.extended.size())))
1314      script.Print('Verified the updated %s image.' % (partition,))
1315      script.AppendExtra(
1316          'else\n'
1317          '  abort("%s partition has unexpected non-zero contents after OTA '
1318          'update");\n'
1319          'endif;' % (partition,))
1320    else:
1321      script.Print('Verified the updated %s image.' % (partition,))
1322
1323    script.AppendExtra(
1324        'else\n'
1325        '  abort("%s partition has unexpected contents after OTA update");\n'
1326        'endif;' % (partition,))
1327
1328  def _WriteUpdate(self, script, output_zip):
1329    ZipWrite(output_zip,
1330             '{}.transfer.list'.format(self.path),
1331             '{}.transfer.list'.format(self.partition))
1332    ZipWrite(output_zip,
1333             '{}.new.dat'.format(self.path),
1334             '{}.new.dat'.format(self.partition))
1335    ZipWrite(output_zip,
1336             '{}.patch.dat'.format(self.path),
1337             '{}.patch.dat'.format(self.partition),
1338             compress_type=zipfile.ZIP_STORED)
1339
1340    call = ('block_image_update("{device}", '
1341            'package_extract_file("{partition}.transfer.list"), '
1342            '"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
1343                device=self.device, partition=self.partition))
1344    script.AppendExtra(script.WordWrap(call))
1345
1346  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
1347    data = source.ReadRangeSet(ranges)
1348    ctx = sha1()
1349
1350    for p in data:
1351      ctx.update(p)
1352
1353    return ctx.hexdigest()
1354
1355  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
1356    """Return the hash value for all zero blocks."""
1357    zero_block = '\x00' * 4096
1358    ctx = sha1()
1359    for _ in range(num_blocks):
1360      ctx.update(zero_block)
1361
1362    return ctx.hexdigest()
1363
1364  # TODO(tbao): Due to http://b/20939131, block 0 may be changed without
1365  # remounting R/W. Will change the checking to a finer-grained way to
1366  # mask off those bits.
1367  def _CheckFirstBlock(self, script):
1368    r = rangelib.RangeSet((0, 1))
1369    srchash = self._HashBlocks(self.src, r)
1370
1371    script.AppendExtra(('(range_sha1("%s", "%s") == "%s") || '
1372                        'abort("%s has been remounted R/W; '
1373                        'reflash device to reenable OTA updates");')
1374                       % (self.device, r.to_string_raw(), srchash,
1375                          self.device))
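

# Illustrative sketch (object types are assumptions): system_tgt/system_src are
# sparse image objects exposing the care_map/TotalSha1 interface used above
# (e.g. sparse_img.SparseImage), and OPTIONS.info_dict must already be loaded.
def _example_block_difference(system_tgt, system_src, script, output_zip):
  block_diff = BlockDifference("system", system_tgt, src=system_src, version=3)
  block_diff.WriteVerifyScript(script)
  block_diff.WriteScript(script, output_zip, progress=0.9)
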
1376
1377DataImage = blockimgdiff.DataImage
1378
1379
1380# map recovery.fstab's fs_types to mount/format "partition types"
1381PARTITION_TYPES = {
1382    "yaffs2": "MTD",
1383    "mtd": "MTD",
1384    "ext4": "EMMC",
1385    "emmc": "EMMC",
1386    "f2fs": "EMMC",
1387    "squashfs": "EMMC"
1388}
1389
1390def GetTypeAndDevice(mount_point, info):
1391  fstab = info["fstab"]
1392  if fstab:
1393    return (PARTITION_TYPES[fstab[mount_point].fs_type],
1394            fstab[mount_point].device)
1395  else:
1396    raise KeyError
1397
1398
1399def ParseCertificate(data):
1400  """Parse a PEM-format certificate."""
1401  cert = []
1402  save = False
1403  for line in data.split("\n"):
1404    if "--END CERTIFICATE--" in line:
1405      break
1406    if save:
1407      cert.append(line)
1408    if "--BEGIN CERTIFICATE--" in line:
1409      save = True
1410  cert = "".join(cert).decode('base64')
1411  return cert
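

# Illustrative sketch: ParseCertificate() simply strips the PEM armor and
# base64-decodes the body (the payload below is a stand-in, not a real cert).
def _example_parse_certificate():
  pem = ("-----BEGIN CERTIFICATE-----\n"
         "aGVsbG8=\n"
         "-----END CERTIFICATE-----\n")
  assert ParseCertificate(pem) == "hello"
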
1412
1413def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
1414                      info_dict=None):
1415  """Generate a binary patch that creates the recovery image starting
1416  with the boot image.  (Most of the space in these images is just the
1417  kernel, which is identical for the two, so the resulting patch
1418  should be efficient.)  Add it to the output zip, along with a shell
1419  script that is run from init.rc on first boot to actually do the
1420  patching and install the new recovery image.
1421
1422  recovery_img and boot_img should be File objects for the
1423  corresponding images.  info should be the dictionary returned by
1424  common.LoadInfoDict() on the input target_files.
1425  """
1426
1427  if info_dict is None:
1428    info_dict = OPTIONS.info_dict
1429
1430  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
1431  system_root_image = info_dict.get("system_root_image", None) == "true"
1432
1433  if full_recovery_image:
1434    output_sink("etc/recovery.img", recovery_img.data)
1435
1436  else:
1437    diff_program = ["imgdiff"]
1438    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
1439    if os.path.exists(path):
1440      diff_program.append("-b")
1441      diff_program.append(path)
1442      bonus_args = "-b /system/etc/recovery-resource.dat"
1443    else:
1444      bonus_args = ""
1445
1446    d = Difference(recovery_img, boot_img, diff_program=diff_program)
1447    _, _, patch = d.ComputePatch()
1448    output_sink("recovery-from-boot.p", patch)
1449
1450  try:
1451    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
1452    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
1453  except KeyError:
1454    return
1455
1456  if full_recovery_image:
1457    sh = """#!/system/bin/sh
1458if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
1459  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
1460else
1461  log -t recovery "Recovery image already installed"
1462fi
1463""" % {'type': recovery_type,
1464       'device': recovery_device,
1465       'sha1': recovery_img.sha1,
1466       'size': recovery_img.size}
1467  else:
1468    sh = """#!/system/bin/sh
1469if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
1470  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
1471else
1472  log -t recovery "Recovery image already installed"
1473fi
1474""" % {'boot_size': boot_img.size,
1475       'boot_sha1': boot_img.sha1,
1476       'recovery_size': recovery_img.size,
1477       'recovery_sha1': recovery_img.sha1,
1478       'boot_type': boot_type,
1479       'boot_device': boot_device,
1480       'recovery_type': recovery_type,
1481       'recovery_device': recovery_device,
1482       'bonus_args': bonus_args}
1483
1484  # The install script location moved from /system/etc to /system/bin
1485  # in the L release.  Parse init.*.rc files to find out where the
1486  # target-files expects it to be, and put it there.
1487  sh_location = "etc/install-recovery.sh"
1488  found = False
1489  if system_root_image:
1490    init_rc_dir = os.path.join(input_dir, "ROOT")
1491  else:
1492    init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
1493  init_rc_files = os.listdir(init_rc_dir)
1494  for init_rc_file in init_rc_files:
1495    if (not init_rc_file.startswith('init.') or
1496        not init_rc_file.endswith('.rc')):
1497      continue
1498
1499    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
1500      for line in f:
1501        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
1502        if m:
1503          sh_location = m.group(1)
1504          found = True
1505          break
1506
1507    if found:
1508      break
1509
1510  print "putting script in", sh_location
1511
1512  output_sink(sh_location, sh)
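

# Illustrative sketch (the directory layout is an assumption): the output_sink
# callback passed to MakeRecoveryPatch() just receives a path relative to the
# system image and the bytes to store there.
def _example_output_sink(system_dir):
  def output_sink(fn, data):
    path = os.path.join(system_dir, *fn.split("/"))
    dirname = os.path.dirname(path)
    if not os.path.isdir(dirname):
      os.makedirs(dirname)
    with open(path, "wb") as f:
      f.write(data)
  return output_sink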
1513