common.py revision 8beab69bd5d728810aca55536017912e65777bb8
1# Copyright (C) 2008 The Android Open Source Project
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7#      http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import copy
16import errno
17import getopt
18import getpass
19import imp
20import os
21import platform
22import re
23import shlex
24import shutil
25import subprocess
26import sys
27import tempfile
28import threading
29import time
30import zipfile
31
32import blockimgdiff
33import rangelib
34
35from hashlib import sha1
36
37
38class Options(object):
39  def __init__(self):
40    platform_search_path = {
41        "linux2": "out/host/linux-x86",
42        "darwin": "out/host/darwin-x86",
43    }
44
45    self.search_path = platform_search_path.get(sys.platform, None)
46    self.signapk_path = "framework/signapk.jar"  # Relative to search_path
47    self.extra_signapk_args = []
48    self.java_path = "java"  # Use the one on the path by default.
49    self.java_args = "-Xmx2048m" # JVM Args
50    self.public_key_suffix = ".x509.pem"
51    self.private_key_suffix = ".pk8"
52    # Use the boot_signer built by otatools by default.
53    self.boot_signer_path = "boot_signer"
54    self.boot_signer_args = []
55    self.verity_signer_path = None
56    self.verity_signer_args = []
57    self.verbose = False
58    self.tempfiles = []
59    self.device_specific = None
60    self.extras = {}
61    self.info_dict = None
62    self.worker_threads = None
63
64
65OPTIONS = Options()
66
67
68# Values for "certificate" in apkcerts that mean special things.
69SPECIAL_CERT_STRINGS = ("PRESIGNED", "EXTERNAL")
70
71
72class ExternalError(RuntimeError):
73  pass
74
75
76def Run(args, **kwargs):
77  """Create and return a subprocess.Popen object, printing the command
78  line on the terminal if -v was specified."""
79  if OPTIONS.verbose:
80    print "  running: ", " ".join(args)
81  return subprocess.Popen(args, **kwargs)
82
83
84def CloseInheritedPipes():
85  """ Gmake in MAC OS has file descriptor (PIPE) leak. We close those fds
86  before doing other work."""
87  if platform.system() != "Darwin":
88    return
89  for d in range(3, 1025):
90    try:
91      stat = os.fstat(d)
92      if stat is not None:
93        pipebit = stat[0] & 0x1000
94        if pipebit != 0:
95          os.close(d)
96    except OSError:
97      pass
98
99
100def LoadInfoDict(input_file, input_dir=None):
101  """Read and parse the META/misc_info.txt key/value pairs from the
102  input target files and return a dict."""
103
104  def read_helper(fn):
105    if isinstance(input_file, zipfile.ZipFile):
106      return input_file.read(fn)
107    else:
108      path = os.path.join(input_file, *fn.split("/"))
109      try:
110        with open(path) as f:
111          return f.read()
112      except IOError as e:
113        if e.errno == errno.ENOENT:
114          raise KeyError(fn)
115  d = {}
116  try:
117    d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
118  except KeyError:
119    # ok if misc_info.txt doesn't exist
120    pass
121
122  # backwards compatibility: These values used to be in their own
123  # files.  Look for them, in case we're processing an old
124  # target_files zip.
125
126  if "mkyaffs2_extra_flags" not in d:
127    try:
128      d["mkyaffs2_extra_flags"] = read_helper(
129          "META/mkyaffs2-extra-flags.txt").strip()
130    except KeyError:
131      # ok if flags don't exist
132      pass
133
134  if "recovery_api_version" not in d:
135    try:
136      d["recovery_api_version"] = read_helper(
137          "META/recovery-api-version.txt").strip()
138    except KeyError:
139      raise ValueError("can't find recovery API version in input target-files")
140
141  if "tool_extensions" not in d:
142    try:
143      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
144    except KeyError:
145      # ok if extensions don't exist
146      pass
147
148  if "fstab_version" not in d:
149    d["fstab_version"] = "1"
150
151  # A few properties are stored as links to the files in the out/ directory.
152  # It works fine with the build system. However, they are no longer available
153  # when (re)generating from target_files zip. If input_dir is not None, we
154  # are doing repacking. Redirect those properties to the actual files in the
155  # unzipped directory.
156  if input_dir is not None:
157    # We carry a copy of file_contexts under META/. If not available, search
158    # BOOT/RAMDISK/. Note that sometimes we may need a different file_contexts
159    # to build images than the one running on device, such as when enabling
160    # system_root_image. In that case, we must have the one for image
161    # generation copied to META/.
162    fc_config = os.path.join(input_dir, "META", "file_contexts")
163    if d.get("system_root_image") == "true":
164      assert os.path.exists(fc_config)
165    if not os.path.exists(fc_config):
166      fc_config = os.path.join(input_dir, "BOOT", "RAMDISK", "file_contexts")
167      if not os.path.exists(fc_config):
168        fc_config = None
169
170    if fc_config:
171      d["selinux_fc"] = fc_config
172
173    # Similarly we need to redirect "ramdisk_dir" and "ramdisk_fs_config".
174    if d.get("system_root_image") == "true":
175      d["ramdisk_dir"] = os.path.join(input_dir, "ROOT")
176      d["ramdisk_fs_config"] = os.path.join(
177          input_dir, "META", "root_filesystem_config.txt")
178
179  try:
180    data = read_helper("META/imagesizes.txt")
181    for line in data.split("\n"):
182      if not line:
183        continue
184      name, value = line.split(" ", 1)
185      if not value:
186        continue
187      if name == "blocksize":
188        d[name] = value
189      else:
190        d[name + "_size"] = value
191  except KeyError:
192    pass
193
194  def makeint(key):
195    if key in d:
196      d[key] = int(d[key], 0)
197
198  makeint("recovery_api_version")
199  makeint("blocksize")
200  makeint("system_size")
201  makeint("vendor_size")
202  makeint("userdata_size")
203  makeint("cache_size")
204  makeint("recovery_size")
205  makeint("boot_size")
206  makeint("fstab_version")
207
208  d["fstab"] = LoadRecoveryFSTab(read_helper, d["fstab_version"])
209  d["build.prop"] = LoadBuildProp(read_helper)
210  return d
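# Illustrative usage (the zip path is hypothetical): callers normally open the
# target-files zip, cache the result on OPTIONS.info_dict, and then consult
# keys such as "fstab" or "recovery_api_version" via the helpers below.
#
#   input_zip = zipfile.ZipFile("target_files.zip", "r")
#   OPTIONS.info_dict = LoadInfoDict(input_zip)
#   fstab = OPTIONS.info_dict["fstab"]               # {mount_point: Partition}
#   api = OPTIONS.info_dict["recovery_api_version"]  # int, via makeint()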
211
212def LoadBuildProp(read_helper):
213  try:
214    data = read_helper("SYSTEM/build.prop")
215  except KeyError:
216    print "Warning: could not find SYSTEM/build.prop in %s" % zip
217    data = ""
218  return LoadDictionaryFromLines(data.split("\n"))
219
220def LoadDictionaryFromLines(lines):
221  d = {}
222  for line in lines:
223    line = line.strip()
224    if not line or line.startswith("#"):
225      continue
226    if "=" in line:
227      name, value = line.split("=", 1)
228      d[name] = value
229  return d
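# Example (illustrative input): blank lines and "#" comments are skipped, and
# values are split on the first "=" only.
#
#   LoadDictionaryFromLines(["ro.build.id=ABC", "# note", "", "a=b=c"])
#   => {"ro.build.id": "ABC", "a": "b=c"}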
230
231def LoadRecoveryFSTab(read_helper, fstab_version):
232  class Partition(object):
233    def __init__(self, mount_point, fs_type, device, length, device2, context):
234      self.mount_point = mount_point
235      self.fs_type = fs_type
236      self.device = device
237      self.length = length
238      self.device2 = device2
239      self.context = context
240
241  try:
242    data = read_helper("RECOVERY/RAMDISK/etc/recovery.fstab")
243  except KeyError:
244    print "Warning: could not find RECOVERY/RAMDISK/etc/recovery.fstab"
245    data = ""
246
247  if fstab_version == 1:
248    d = {}
249    for line in data.split("\n"):
250      line = line.strip()
251      if not line or line.startswith("#"):
252        continue
253      pieces = line.split()
254      if not 3 <= len(pieces) <= 5:
255        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
256      options = None
257      if len(pieces) >= 4:
258        if pieces[3].startswith("/"):
259          device2 = pieces[3]
260          if len(pieces) >= 5:
261            options = pieces[4]
262        else:
263          device2 = None
264          options = pieces[3]
265      else:
266        device2 = None
267
268      mount_point = pieces[0]
269      length = 0
270      if options:
271        options = options.split(",")
272        for i in options:
273          if i.startswith("length="):
274            length = int(i[7:])
275          else:
276            print "%s: unknown option \"%s\"" % (mount_point, i)
277
278      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
279                                 device=pieces[2], length=length,
280                                 device2=device2, context=None)
281
282  elif fstab_version == 2:
283    d = {}
284    for line in data.split("\n"):
285      line = line.strip()
286      if not line or line.startswith("#"):
287        continue
288      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
289      pieces = line.split()
290      if len(pieces) != 5:
291        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
292
293      # Ignore entries that are managed by vold
294      options = pieces[4]
295      if "voldmanaged=" in options:
296        continue
297
298      # It's a good line, parse it
299      length = 0
300      options = options.split(",")
301      for i in options:
302        if i.startswith("length="):
303          length = int(i[7:])
304        else:
305          # Ignore all unknown options in the unified fstab
306          continue
307
308      mount_flags = pieces[3]
309      # Honor the SELinux context if present.
310      context = None
311      for i in mount_flags.split(","):
312        if i.startswith("context="):
313          context = i
314
315      mount_point = pieces[1]
316      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
317                                 device=pieces[0], length=length,
318                                 device2=None, context=context)
319
320  else:
321    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
322
323  return d
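# Example (illustrative fstab v2 line; the block device path is made up):
#
#   /dev/block/bootdevice/by-name/system /system ext4 ro,context=u:object_r:system_block_device:s0 wait
#
# yields d["/system"] with fs_type="ext4",
# device="/dev/block/bootdevice/by-name/system", length=0, device2=None and
# context="context=u:object_r:system_block_device:s0".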
324
325
326def DumpInfoDict(d):
327  for k, v in sorted(d.items()):
328    print "%-25s = (%s) %s" % (k, type(v).__name__, v)
329
330
331def BuildBootableImage(sourcedir, fs_config_file, info_dict=None):
332  """Take a kernel, cmdline, and ramdisk directory from the input (in
333  'sourcedir'), and turn them into a boot image.  Return the image
334  data, or None if sourcedir does not appear to contain files for
335  building the requested image."""
336
337  if (not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK) or
338      not os.access(os.path.join(sourcedir, "kernel"), os.F_OK)):
339    return None
340
341  if info_dict is None:
342    info_dict = OPTIONS.info_dict
343
344  ramdisk_img = tempfile.NamedTemporaryFile()
345  img = tempfile.NamedTemporaryFile()
346
347  if os.access(fs_config_file, os.F_OK):
348    cmd = ["mkbootfs", "-f", fs_config_file, os.path.join(sourcedir, "RAMDISK")]
349  else:
350    cmd = ["mkbootfs", os.path.join(sourcedir, "RAMDISK")]
351  p1 = Run(cmd, stdout=subprocess.PIPE)
352  p2 = Run(["minigzip"],
353           stdin=p1.stdout, stdout=ramdisk_img.file.fileno())
354
355  p2.wait()
356  p1.wait()
357  assert p1.returncode == 0, "mkbootfs of %s ramdisk failed" % (sourcedir,)
358  assert p2.returncode == 0, "minigzip of %s ramdisk failed" % (sourcedir,)
359
360  # use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
361  mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
362
363  cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, "kernel")]
364
365  fn = os.path.join(sourcedir, "second")
366  if os.access(fn, os.F_OK):
367    cmd.append("--second")
368    cmd.append(fn)
369
370  fn = os.path.join(sourcedir, "cmdline")
371  if os.access(fn, os.F_OK):
372    cmd.append("--cmdline")
373    cmd.append(open(fn).read().rstrip("\n"))
374
375  fn = os.path.join(sourcedir, "base")
376  if os.access(fn, os.F_OK):
377    cmd.append("--base")
378    cmd.append(open(fn).read().rstrip("\n"))
379
380  fn = os.path.join(sourcedir, "pagesize")
381  if os.access(fn, os.F_OK):
382    cmd.append("--pagesize")
383    cmd.append(open(fn).read().rstrip("\n"))
384
385  args = info_dict.get("mkbootimg_args", None)
386  if args and args.strip():
387    cmd.extend(shlex.split(args))
388
389  img_unsigned = None
390  if info_dict.get("vboot", None):
391    img_unsigned = tempfile.NamedTemporaryFile()
392    cmd.extend(["--ramdisk", ramdisk_img.name,
393                "--output", img_unsigned.name])
394  else:
395    cmd.extend(["--ramdisk", ramdisk_img.name,
396                "--output", img.name])
397
398  p = Run(cmd, stdout=subprocess.PIPE)
399  p.communicate()
400  assert p.returncode == 0, "mkbootimg of %s image failed" % (
401      os.path.basename(sourcedir),)
402
403  if (info_dict.get("boot_signer", None) == "true" and
404      info_dict.get("verity_key", None)):
405    path = "/" + os.path.basename(sourcedir).lower()
406    cmd = [OPTIONS.boot_signer_path]
407    cmd.extend(OPTIONS.boot_signer_args)
408    cmd.extend([path, img.name,
409                info_dict["verity_key"] + ".pk8",
410                info_dict["verity_key"] + ".x509.pem", img.name])
411    p = Run(cmd, stdout=subprocess.PIPE)
412    p.communicate()
413    assert p.returncode == 0, "boot_signer of %s image failed" % path
414
415  # Sign the image if vboot is non-empty.
416  elif info_dict.get("vboot", None):
417    path = "/" + os.path.basename(sourcedir).lower()
418    img_keyblock = tempfile.NamedTemporaryFile()
419    cmd = [info_dict["vboot_signer_cmd"], info_dict["futility"],
420           img_unsigned.name, info_dict["vboot_key"] + ".vbpubk",
421           info_dict["vboot_key"] + ".vbprivk", img_keyblock.name,
422           img.name]
423    p = Run(cmd, stdout=subprocess.PIPE)
424    p.communicate()
425    assert p.returncode == 0, "vboot_signer of %s image failed" % path
426
427    # Clean up the temp files.
428    img_unsigned.close()
429    img_keyblock.close()
430
431  img.seek(0, os.SEEK_SET)
432  data = img.read()
433
434  ramdisk_img.close()
435  img.close()
436
437  return data
438
439
440def GetBootableImage(name, prebuilt_name, unpack_dir, tree_subdir,
441                     info_dict=None):
442  """Return a File object (with name 'name') with the desired bootable
443  image.  Look for it in 'unpack_dir'/BOOTABLE_IMAGES under the name
444  'prebuilt_name', otherwise look for it under 'unpack_dir'/IMAGES,
445  otherwise construct it from the source files in
446  'unpack_dir'/'tree_subdir'."""
447
448  prebuilt_path = os.path.join(unpack_dir, "BOOTABLE_IMAGES", prebuilt_name)
449  if os.path.exists(prebuilt_path):
450    print "using prebuilt %s from BOOTABLE_IMAGES..." % (prebuilt_name,)
451    return File.FromLocalFile(name, prebuilt_path)
452
453  prebuilt_path = os.path.join(unpack_dir, "IMAGES", prebuilt_name)
454  if os.path.exists(prebuilt_path):
455    print "using prebuilt %s from IMAGES..." % (prebuilt_name,)
456    return File.FromLocalFile(name, prebuilt_path)
457
458  print "building image from target_files %s..." % (tree_subdir,)
459  fs_config = "META/" + tree_subdir.lower() + "_filesystem_config.txt"
460  data = BuildBootableImage(os.path.join(unpack_dir, tree_subdir),
461                            os.path.join(unpack_dir, fs_config),
462                            info_dict)
463  if data:
464    return File(name, data)
465  return None
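# Illustrative call (as used by the OTA/image-building scripts; unpack_dir is
# whatever UnzipTemp returned):
#
#   boot_img = GetBootableImage("boot.img", "boot.img", unpack_dir, "BOOT")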
466
467
468def UnzipTemp(filename, pattern=None):
469  """Unzip the given archive into a temporary directory and return the name.
470
471  If filename is of the form "foo.zip+bar.zip", unzip foo.zip into a
472  temp dir, then unzip bar.zip into that_dir/BOOTABLE_IMAGES.
473
474  Returns (tempdir, zipobj) where zipobj is a zipfile.ZipFile (of the
475  main file), open for reading.
476  """
477
478  tmp = tempfile.mkdtemp(prefix="targetfiles-")
479  OPTIONS.tempfiles.append(tmp)
480
481  def unzip_to_dir(filename, dirname):
482    cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
483    if pattern is not None:
484      cmd.append(pattern)
485    p = Run(cmd, stdout=subprocess.PIPE)
486    p.communicate()
487    if p.returncode != 0:
488      raise ExternalError("failed to unzip input target-files \"%s\"" %
489                          (filename,))
490
491  m = re.match(r"^(.*[.]zip)\+(.*[.]zip)$", filename, re.IGNORECASE)
492  if m:
493    unzip_to_dir(m.group(1), tmp)
494    unzip_to_dir(m.group(2), os.path.join(tmp, "BOOTABLE_IMAGES"))
495    filename = m.group(1)
496  else:
497    unzip_to_dir(filename, tmp)
498
499  return tmp, zipfile.ZipFile(filename, "r")
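# Illustrative call (archive name hypothetical): unzip only META/* into a temp
# dir that Cleanup() will later remove, and also get an open ZipFile handle on
# the whole archive.
#
#   tmp, input_zip = UnzipTemp("target_files.zip", pattern="META/*")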
500
501
502def GetKeyPasswords(keylist):
503  """Given a list of keys, prompt the user to enter passwords for
504  those which require them.  Return a {key: password} dict.  password
505  will be None if the key has no password."""
506
507  no_passwords = []
508  need_passwords = []
509  key_passwords = {}
510  devnull = open("/dev/null", "w+b")
511  for k in sorted(keylist):
512    # We don't need a password for things that aren't really keys.
513    if k in SPECIAL_CERT_STRINGS:
514      no_passwords.append(k)
515      continue
516
517    p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
518             "-inform", "DER", "-nocrypt"],
519            stdin=devnull.fileno(),
520            stdout=devnull.fileno(),
521            stderr=subprocess.STDOUT)
522    p.communicate()
523    if p.returncode == 0:
524      # Definitely an unencrypted key.
525      no_passwords.append(k)
526    else:
527      p = Run(["openssl", "pkcs8", "-in", k+OPTIONS.private_key_suffix,
528               "-inform", "DER", "-passin", "pass:"],
529              stdin=devnull.fileno(),
530              stdout=devnull.fileno(),
531              stderr=subprocess.PIPE)
532      _, stderr = p.communicate()
533      if p.returncode == 0:
534        # Encrypted key with empty string as password.
535        key_passwords[k] = ''
536      elif stderr.startswith('Error decrypting key'):
537        # Definitely encrypted key.
538        # It would have said "Error reading key" if it didn't parse correctly.
539        need_passwords.append(k)
540      else:
541        # Potentially, a type of key that openssl doesn't understand.
542        # We'll let the routines in signapk.jar handle it.
543        no_passwords.append(k)
544  devnull.close()
545
546  key_passwords.update(PasswordManager().GetPasswords(need_passwords))
547  key_passwords.update(dict.fromkeys(no_passwords, None))
548  return key_passwords
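# Illustrative flow (the key name is hypothetical): passwords are gathered once
# and then fed to SignFile() per package.
#
#   key = "build/target/product/security/testkey"
#   passwords = GetKeyPasswords([key])
#   SignFile("unsigned.apk", "signed.apk", key, passwords[key], align=4)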
549
550
551def SignFile(input_name, output_name, key, password, align=None,
552             whole_file=False):
553  """Sign the input_name zip/jar/apk, producing output_name.  Use the
554  given key and password (the latter may be None if the key does not
555  have a password.
556
557  If align is an integer > 1, zipalign is run to align stored files in
558  the output zip on 'align'-byte boundaries.
559
560  If whole_file is true, use the "-w" option to SignApk to embed a
561  signature that covers the whole file in the archive comment of the
562  zip file.
563  """
564
565  if align == 0 or align == 1:
566    align = None
567
568  if align:
569    temp = tempfile.NamedTemporaryFile()
570    sign_name = temp.name
571  else:
572    sign_name = output_name
573
574  cmd = [OPTIONS.java_path, OPTIONS.java_args, "-jar",
575         os.path.join(OPTIONS.search_path, OPTIONS.signapk_path)]
576  cmd.extend(OPTIONS.extra_signapk_args)
577  if whole_file:
578    cmd.append("-w")
579  cmd.extend([key + OPTIONS.public_key_suffix,
580              key + OPTIONS.private_key_suffix,
581              input_name, sign_name])
582
583  p = Run(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
584  if password is not None:
585    password += "\n"
586  p.communicate(password)
587  if p.returncode != 0:
588    raise ExternalError("signapk.jar failed: return code %s" % (p.returncode,))
589
590  if align:
591    p = Run(["zipalign", "-f", "-p", str(align), sign_name, output_name])
592    p.communicate()
593    if p.returncode != 0:
594      raise ExternalError("zipalign failed: return code %s" % (p.returncode,))
595    temp.close()
596
597
598def CheckSize(data, target, info_dict):
599  """Check the data string passed against the max size limit, if
600  any, for the given target.  Raise exception if the data is too big.
601  Print a warning if the data is nearing the maximum size."""
602
603  if target.endswith(".img"):
604    target = target[:-4]
605  mount_point = "/" + target
606
607  fs_type = None
608  limit = None
609  if info_dict["fstab"]:
610    if mount_point == "/userdata":
611      mount_point = "/data"
612    p = info_dict["fstab"][mount_point]
613    fs_type = p.fs_type
614    device = p.device
615    if "/" in device:
616      device = device[device.rfind("/")+1:]
617    limit = info_dict.get(device + "_size", None)
618  if not fs_type or not limit:
619    return
620
621  if fs_type == "yaffs2":
622    # image size should be increased by 1/64th to account for the
623    # spare area (64 bytes per 2k page)
624    limit = limit / 2048 * (2048+64)
625  size = len(data)
626  pct = float(size) * 100.0 / limit
627  msg = "%s size (%d) is %.2f%% of limit (%d)" % (target, size, pct, limit)
628  if pct >= 99.0:
629    raise ExternalError(msg)
630  elif pct >= 95.0:
631    print
632    print "  WARNING: ", msg
633    print
634  elif OPTIONS.verbose:
635    print "  ", msg
636
637
638def ReadApkCerts(tf_zip):
639  """Given a target_files ZipFile, parse the META/apkcerts.txt file
640  and return a {package: cert} dict."""
641  certmap = {}
642  for line in tf_zip.read("META/apkcerts.txt").split("\n"):
643    line = line.strip()
644    if not line:
645      continue
646    m = re.match(r'^name="(.*)"\s+certificate="(.*)"\s+'
647                 r'private_key="(.*)"$', line)
648    if m:
649      name, cert, privkey = m.groups()
650      public_key_suffix_len = len(OPTIONS.public_key_suffix)
651      private_key_suffix_len = len(OPTIONS.private_key_suffix)
652      if cert in SPECIAL_CERT_STRINGS and not privkey:
653        certmap[name] = cert
654      elif (cert.endswith(OPTIONS.public_key_suffix) and
655            privkey.endswith(OPTIONS.private_key_suffix) and
656            cert[:-public_key_suffix_len] == privkey[:-private_key_suffix_len]):
657        certmap[name] = cert[:-public_key_suffix_len]
658      else:
659        raise ValueError("failed to parse line from apkcerts.txt:\n" + line)
660  return certmap
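# Example apkcerts.txt line (illustrative) and its mapping:
#
#   name="Foo.apk" certificate="build/target/product/security/platform.x509.pem" private_key="build/target/product/security/platform.pk8"
#
# => certmap["Foo.apk"] == "build/target/product/security/platform"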
661
662
663COMMON_DOCSTRING = """
664  -p  (--path)  <dir>
665      Prepend <dir>/bin to the list of places to search for binaries
666      run by this script, and expect to find jars in <dir>/framework.
667
668  -s  (--device_specific) <file>
669      Path to the python module containing device-specific
670      releasetools code.
671
672  -x  (--extra)  <key=value>
673      Add a key/value pair to the 'extras' dict, which device-specific
674      extension code may look at.
675
676  -v  (--verbose)
677      Show command lines being executed.
678
679  -h  (--help)
680      Display this usage message and exit.
681"""
682
683def Usage(docstring):
684  print docstring.rstrip("\n")
685  print COMMON_DOCSTRING
686
687
688def ParseOptions(argv,
689                 docstring,
690                 extra_opts="", extra_long_opts=(),
691                 extra_option_handler=None):
692  """Parse the options in argv and return any arguments that aren't
693  flags.  docstring is the calling module's docstring, to be displayed
694  for errors and -h.  extra_opts and extra_long_opts are for flags
695  defined by the caller, which are processed by passing them to
696  extra_option_handler."""
697
698  try:
699    opts, args = getopt.getopt(
700        argv, "hvp:s:x:" + extra_opts,
701        ["help", "verbose", "path=", "signapk_path=", "extra_signapk_args=",
702         "java_path=", "java_args=", "public_key_suffix=",
703         "private_key_suffix=", "boot_signer_path=", "boot_signer_args=",
704         "verity_signer_path=", "verity_signer_args=", "device_specific=",
705         "extra="] +
706        list(extra_long_opts))
707  except getopt.GetoptError as err:
708    Usage(docstring)
709    print "**", str(err), "**"
710    sys.exit(2)
711
712  for o, a in opts:
713    if o in ("-h", "--help"):
714      Usage(docstring)
715      sys.exit()
716    elif o in ("-v", "--verbose"):
717      OPTIONS.verbose = True
718    elif o in ("-p", "--path"):
719      OPTIONS.search_path = a
720    elif o in ("--signapk_path",):
721      OPTIONS.signapk_path = a
722    elif o in ("--extra_signapk_args",):
723      OPTIONS.extra_signapk_args = shlex.split(a)
724    elif o in ("--java_path",):
725      OPTIONS.java_path = a
726    elif o in ("--java_args",):
727      OPTIONS.java_args = a
728    elif o in ("--public_key_suffix",):
729      OPTIONS.public_key_suffix = a
730    elif o in ("--private_key_suffix",):
731      OPTIONS.private_key_suffix = a
732    elif o in ("--boot_signer_path",):
733      OPTIONS.boot_signer_path = a
734    elif o in ("--boot_signer_args",):
735      OPTIONS.boot_signer_args = shlex.split(a)
736    elif o in ("--verity_signer_path",):
737      OPTIONS.verity_signer_path = a
738    elif o in ("--verity_signer_args",):
739      OPTIONS.verity_signer_args = shlex.split(a)
740    elif o in ("-s", "--device_specific"):
741      OPTIONS.device_specific = a
742    elif o in ("-x", "--extra"):
743      key, value = a.split("=", 1)
744      OPTIONS.extras[key] = value
745    else:
746      if extra_option_handler is None or not extra_option_handler(o, a):
747        assert False, "unknown option \"%s\"" % (o,)
748
749  if OPTIONS.search_path:
750    os.environ["PATH"] = (os.path.join(OPTIONS.search_path, "bin") +
751                          os.pathsep + os.environ["PATH"])
752
753  return args
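# Sketch of a caller (the --board_config flag is hypothetical): extra flags are
# declared via extra_long_opts and consumed by extra_option_handler, which must
# return True for options it recognizes.
#
#   def option_handler(o, a):
#     if o == "--board_config":
#       OPTIONS.extras["board_config"] = a
#     else:
#       return False
#     return True
#
#   args = ParseOptions(sys.argv[1:], __doc__,
#                       extra_long_opts=["board_config="],
#                       extra_option_handler=option_handler)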
754
755
756def MakeTempFile(prefix=None, suffix=None):
757  """Make a temp file and add it to the list of things to be deleted
758  when Cleanup() is called.  Return the filename."""
759  fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
760  os.close(fd)
761  OPTIONS.tempfiles.append(fn)
762  return fn
763
764
765def Cleanup():
766  for i in OPTIONS.tempfiles:
767    if os.path.isdir(i):
768      shutil.rmtree(i)
769    else:
770      os.remove(i)
771
772
773class PasswordManager(object):
774  def __init__(self):
775    self.editor = os.getenv("EDITOR", None)
776    self.pwfile = os.getenv("ANDROID_PW_FILE", None)
777
778  def GetPasswords(self, items):
779    """Get passwords corresponding to each string in 'items',
780    returning a dict.  (The dict may have keys in addition to the
781    values in 'items'.)
782
783    Uses the passwords in $ANDROID_PW_FILE if available, letting the
784    user edit that file to add more needed passwords.  If no editor is
785    available, or $ANDROID_PW_FILE isn't defined, prompts the user
786    interactively in the ordinary way.
787    """
788
789    current = self.ReadFile()
790
791    first = True
792    while True:
793      missing = []
794      for i in items:
795        if i not in current or not current[i]:
796          missing.append(i)
797      # Are all the passwords already in the file?
798      if not missing:
799        return current
800
801      for i in missing:
802        current[i] = ""
803
804      if not first:
805        print "key file %s still missing some passwords." % (self.pwfile,)
806        answer = raw_input("try to edit again? [y]> ").strip()
807        if answer and answer[0] not in 'yY':
808          raise RuntimeError("key passwords unavailable")
809      first = False
810
811      current = self.UpdateAndReadFile(current)
812
813  def PromptResult(self, current): # pylint: disable=no-self-use
814    """Prompt the user to enter a value (password) for each key in
815    'current' whose value is false.  Returns a new dict with all the
816    values.
817    """
818    result = {}
819    for k, v in sorted(current.iteritems()):
820      if v:
821        result[k] = v
822      else:
823        while True:
824          result[k] = getpass.getpass(
825              "Enter password for %s key> " % k).strip()
826          if result[k]:
827            break
828    return result
829
830  def UpdateAndReadFile(self, current):
831    if not self.editor or not self.pwfile:
832      return self.PromptResult(current)
833
834    f = open(self.pwfile, "w")
835    os.chmod(self.pwfile, 0o600)
836    f.write("# Enter key passwords between the [[[ ]]] brackets.\n")
837    f.write("# (Additional spaces are harmless.)\n\n")
838
839    first_line = None
840    sorted_list = sorted([(not v, k, v) for (k, v) in current.iteritems()])
841    for i, (_, k, v) in enumerate(sorted_list):
842      f.write("[[[  %s  ]]] %s\n" % (v, k))
843      if not v and first_line is None:
844        # position cursor on first line with no password.
845        first_line = i + 4
846    f.close()
847
848    p = Run([self.editor, "+%d" % (first_line,), self.pwfile])
849    _, _ = p.communicate()
850
851    return self.ReadFile()
852
853  def ReadFile(self):
854    result = {}
855    if self.pwfile is None:
856      return result
857    try:
858      f = open(self.pwfile, "r")
859      for line in f:
860        line = line.strip()
861        if not line or line[0] == '#':
862          continue
863        m = re.match(r"^\[\[\[\s*(.*?)\s*\]\]\]\s*(\S+)$", line)
864        if not m:
865          print "failed to parse password file: ", line
866        else:
867          result[m.group(2)] = m.group(1)
868      f.close()
869    except IOError as e:
870      if e.errno != errno.ENOENT:
871        print "error reading password file: ", str(e)
872    return result
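# The $ANDROID_PW_FILE format written by UpdateAndReadFile() and parsed by
# ReadFile() looks like (password and key name illustrative):
#
#   [[[  secret-password  ]]] build/target/product/security/releasekey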
873
874
875def ZipWrite(zip_file, filename, arcname=None, perms=0o644,
876             compress_type=None):
877  import datetime
878
879  # http://b/18015246
880  # Python 2.7's zipfile implementation wrongly thinks that zip64 is required
881  # for files larger than 2GiB. We can work around this by adjusting their
882  # limit. Note that `zipfile.writestr()` will not work for strings larger than
883  # 2GiB. The Python interpreter sometimes rejects strings that large (though
884  # it isn't clear to me exactly what circumstances cause this).
885  # `zipfile.write()` must be used directly to work around this.
886  #
887  # This mess can be avoided if we port to python3.
888  saved_zip64_limit = zipfile.ZIP64_LIMIT
889  zipfile.ZIP64_LIMIT = (1 << 32) - 1
890
891  if compress_type is None:
892    compress_type = zip_file.compression
893  if arcname is None:
894    arcname = filename
895
896  saved_stat = os.stat(filename)
897
898  try:
899    # `zipfile.write()` doesn't allow us to pass ZipInfo, so just modify the
900    # file to be zipped and reset it when we're done.
901    os.chmod(filename, perms)
902
903    # Use a fixed timestamp so the output is repeatable.
904    epoch = datetime.datetime.fromtimestamp(0)
905    timestamp = (datetime.datetime(2009, 1, 1) - epoch).total_seconds()
906    os.utime(filename, (timestamp, timestamp))
907
908    zip_file.write(filename, arcname=arcname, compress_type=compress_type)
909  finally:
910    os.chmod(filename, saved_stat.st_mode)
911    os.utime(filename, (saved_stat.st_atime, saved_stat.st_mtime))
912    zipfile.ZIP64_LIMIT = saved_zip64_limit
913
914
915def ZipWriteStr(zip_file, zinfo_or_arcname, data, perms=None,
916                compress_type=None):
917  """Wrap zipfile.writestr() function to work around the zip64 limit.
918
919  Even with the ZIP64_LIMIT workaround, it won't allow writing a string
920  longer than 2GiB. It gives 'OverflowError: size does not fit in an int'
921  when calling crc32(bytes).
922
923  But it still works fine to write a shorter string into a large zip file.
924  We should use ZipWrite() whenever possible, and only use ZipWriteStr()
925  when we know the string won't be too long.
926  """
927
928  saved_zip64_limit = zipfile.ZIP64_LIMIT
929  zipfile.ZIP64_LIMIT = (1 << 32) - 1
930
931  if not isinstance(zinfo_or_arcname, zipfile.ZipInfo):
932    zinfo = zipfile.ZipInfo(filename=zinfo_or_arcname)
933    zinfo.compress_type = zip_file.compression
934    if perms is None:
935      perms = 0o100644
936  else:
937    zinfo = zinfo_or_arcname
938
939  # If compress_type is given, it overrides the value in zinfo.
940  if compress_type is not None:
941    zinfo.compress_type = compress_type
942
943  # If perms is given, it overrides the external attributes in zinfo.
944  if perms is not None:
945    # If perms doesn't set the file type, mark it as a regular file.
946    if perms & 0o770000 == 0:
947      perms |= 0o100000
948    zinfo.external_attr = perms << 16
949
950  # Use a fixed timestamp so the output is repeatable.
951  zinfo.date_time = (2009, 1, 1, 0, 0, 0)
952
953  zip_file.writestr(zinfo, data)
954  zipfile.ZIP64_LIMIT = saved_zip64_limit
955
956
957def ZipClose(zip_file):
958  # http://b/18015246
959  # zipfile also refers to ZIP64_LIMIT during close() when it writes out the
960  # central directory.
961  saved_zip64_limit = zipfile.ZIP64_LIMIT
962  zipfile.ZIP64_LIMIT = (1 << 32) - 1
963
964  zip_file.close()
965
966  zipfile.ZIP64_LIMIT = saved_zip64_limit
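# Illustrative usage of the three zip helpers together (file names and contents
# hypothetical); all of them apply the same ZIP64_LIMIT workaround:
#
#   out_zip = zipfile.ZipFile("update.zip", "w", compression=zipfile.ZIP_DEFLATED)
#   ZipWrite(out_zip, "payload.bin", arcname="payload.bin")
#   ZipWriteStr(out_zip, "META-INF/com/android/metadata", "ota-type=BLOCK\n")
#   ZipClose(out_zip)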
967
968
969class DeviceSpecificParams(object):
970  module = None
971  def __init__(self, **kwargs):
972    """Keyword arguments to the constructor become attributes of this
973    object, which is passed to all functions in the device-specific
974    module."""
975    for k, v in kwargs.iteritems():
976      setattr(self, k, v)
977    self.extras = OPTIONS.extras
978
979    if self.module is None:
980      path = OPTIONS.device_specific
981      if not path:
982        return
983      try:
984        if os.path.isdir(path):
985          info = imp.find_module("releasetools", [path])
986        else:
987          d, f = os.path.split(path)
988          b, x = os.path.splitext(f)
989          if x == ".py":
990            f = b
991          info = imp.find_module(f, [d])
992        print "loaded device-specific extensions from", path
993        self.module = imp.load_module("device_specific", *info)
994      except ImportError:
995        print "unable to load device-specific module; assuming none"
996
997  def _DoCall(self, function_name, *args, **kwargs):
998    """Call the named function in the device-specific module, passing
999    the given args and kwargs.  The first argument to the call will be
1000    the DeviceSpecific object itself.  If there is no module, or the
1001    module does not define the function, return the value of the
1002    'default' kwarg (which itself defaults to None)."""
1003    if self.module is None or not hasattr(self.module, function_name):
1004      return kwargs.get("default", None)
1005    return getattr(self.module, function_name)(*((self,) + args), **kwargs)
1006
1007  def FullOTA_Assertions(self):
1008    """Called after emitting the block of assertions at the top of a
1009    full OTA package.  Implementations can add whatever additional
1010    assertions they like."""
1011    return self._DoCall("FullOTA_Assertions")
1012
1013  def FullOTA_InstallBegin(self):
1014    """Called at the start of full OTA installation."""
1015    return self._DoCall("FullOTA_InstallBegin")
1016
1017  def FullOTA_InstallEnd(self):
1018    """Called at the end of full OTA installation; typically this is
1019    used to install the image for the device's baseband processor."""
1020    return self._DoCall("FullOTA_InstallEnd")
1021
1022  def IncrementalOTA_Assertions(self):
1023    """Called after emitting the block of assertions at the top of an
1024    incremental OTA package.  Implementations can add whatever
1025    additional assertions they like."""
1026    return self._DoCall("IncrementalOTA_Assertions")
1027
1028  def IncrementalOTA_VerifyBegin(self):
1029    """Called at the start of the verification phase of incremental
1030    OTA installation; additional checks can be placed here to abort
1031    the script before any changes are made."""
1032    return self._DoCall("IncrementalOTA_VerifyBegin")
1033
1034  def IncrementalOTA_VerifyEnd(self):
1035    """Called at the end of the verification phase of incremental OTA
1036    installation; additional checks can be placed here to abort the
1037    script before any changes are made."""
1038    return self._DoCall("IncrementalOTA_VerifyEnd")
1039
1040  def IncrementalOTA_InstallBegin(self):
1041    """Called at the start of incremental OTA installation (after
1042    verification is complete)."""
1043    return self._DoCall("IncrementalOTA_InstallBegin")
1044
1045  def IncrementalOTA_InstallEnd(self):
1046    """Called at the end of incremental OTA installation; typically
1047    this is used to install the image for the device's baseband
1048    processor."""
1049    return self._DoCall("IncrementalOTA_InstallEnd")
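# Sketch of a device-specific releasetools module that these hooks dispatch to
# (the module path and its body are hypothetical). The hook receives this
# DeviceSpecificParams object, so whatever keyword arguments the caller passed
# (e.g. an edify "script" generator) are available as attributes:
#
#   # device/acme/board/releasetools.py
#   def FullOTA_InstallEnd(info):
#     info.script.Print("Installing vendor baseband...")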
1050
1051class File(object):
1052  def __init__(self, name, data):
1053    self.name = name
1054    self.data = data
1055    self.size = len(data)
1056    self.sha1 = sha1(data).hexdigest()
1057
1058  @classmethod
1059  def FromLocalFile(cls, name, diskname):
1060    f = open(diskname, "rb")
1061    data = f.read()
1062    f.close()
1063    return File(name, data)
1064
1065  def WriteToTemp(self):
1066    t = tempfile.NamedTemporaryFile()
1067    t.write(self.data)
1068    t.flush()
1069    return t
1070
1071  def AddToZip(self, z, compression=None):
1072    ZipWriteStr(z, self.name, self.data, compress_type=compression)
1073
1074DIFF_PROGRAM_BY_EXT = {
1075    ".gz" : "imgdiff",
1076    ".zip" : ["imgdiff", "-z"],
1077    ".jar" : ["imgdiff", "-z"],
1078    ".apk" : ["imgdiff", "-z"],
1079    ".img" : "imgdiff",
1080    }
1081
1082class Difference(object):
1083  def __init__(self, tf, sf, diff_program=None):
1084    self.tf = tf
1085    self.sf = sf
1086    self.patch = None
1087    self.diff_program = diff_program
1088
1089  def ComputePatch(self):
1090    """Compute the patch (as a string of data) needed to turn sf into
1091    tf.  Returns the same tuple as GetPatch()."""
1092
1093    tf = self.tf
1094    sf = self.sf
1095
1096    if self.diff_program:
1097      diff_program = self.diff_program
1098    else:
1099      ext = os.path.splitext(tf.name)[1]
1100      diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
1101
1102    ttemp = tf.WriteToTemp()
1103    stemp = sf.WriteToTemp()
1104
1105    ext = os.path.splitext(tf.name)[1]
1106
1107    try:
1108      ptemp = tempfile.NamedTemporaryFile()
1109      if isinstance(diff_program, list):
1110        cmd = copy.copy(diff_program)
1111      else:
1112        cmd = [diff_program]
1113      cmd.append(stemp.name)
1114      cmd.append(ttemp.name)
1115      cmd.append(ptemp.name)
1116      p = Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
1117      err = []
1118      def run():
1119        _, e = p.communicate()
1120        if e:
1121          err.append(e)
1122      th = threading.Thread(target=run)
1123      th.start()
1124      th.join(timeout=300)   # 5 mins
1125      if th.is_alive():
1126        print "WARNING: diff command timed out"
1127        p.terminate()
1128        th.join(5)
1129        if th.is_alive():
1130          p.kill()
1131          th.join()
1132
1133      if err or p.returncode != 0:
1134        print "WARNING: failure running %s:\n%s\n" % (
1135            diff_program, "".join(err))
1136        self.patch = None
1137        return None, None, None
1138      diff = ptemp.read()
1139    finally:
1140      ptemp.close()
1141      stemp.close()
1142      ttemp.close()
1143
1144    self.patch = diff
1145    return self.tf, self.sf, self.patch
1146
1147
1148  def GetPatch(self):
1149    """Return a tuple (target_file, source_file, patch_data).
1150    patch_data may be None if ComputePatch hasn't been called, or if
1151    computing the patch failed."""
1152    return self.tf, self.sf, self.patch
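# Minimal usage sketch (the image contents are hypothetical); the ".img"
# extension selects imgdiff via DIFF_PROGRAM_BY_EXT:
#
#   d = Difference(File("boot.img", tgt_data), File("boot.img", src_data))
#   tf, sf, patch = d.ComputePatch()   # patch is None if diffing failed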
1153
1154
1155def ComputeDifferences(diffs):
1156  """Call ComputePatch on all the Difference objects in 'diffs'."""
1157  print len(diffs), "diffs to compute"
1158
1159  # Do the largest files first, to try and reduce the long-pole effect.
1160  by_size = [(i.tf.size, i) for i in diffs]
1161  by_size.sort(reverse=True)
1162  by_size = [i[1] for i in by_size]
1163
1164  lock = threading.Lock()
1165  diff_iter = iter(by_size)   # accessed under lock
1166
1167  def worker():
1168    try:
1169      lock.acquire()
1170      for d in diff_iter:
1171        lock.release()
1172        start = time.time()
1173        d.ComputePatch()
1174        dur = time.time() - start
1175        lock.acquire()
1176
1177        tf, sf, patch = d.GetPatch()
1178        if sf.name == tf.name:
1179          name = tf.name
1180        else:
1181          name = "%s (%s)" % (tf.name, sf.name)
1182        if patch is None:
1183          print "patching failed!                                  %s" % (name,)
1184        else:
1185          print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
1186              dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
1187      lock.release()
1188    except Exception as e:
1189      print e
1190      raise
1191
1192  # start worker threads; wait for them all to finish.
1193  threads = [threading.Thread(target=worker)
1194             for i in range(OPTIONS.worker_threads)]
1195  for th in threads:
1196    th.start()
1197  while threads:
1198    threads.pop().join()
1199
1200
1201class BlockDifference(object):
1202  def __init__(self, partition, tgt, src=None, check_first_block=False,
1203               version=None):
1204    self.tgt = tgt
1205    self.src = src
1206    self.partition = partition
1207    self.check_first_block = check_first_block
1208
1209    # Due to http://b/20939131, check_first_block is disabled temporarily.
1210    assert not self.check_first_block
1211
1212    if version is None:
1213      version = 1
1214      if OPTIONS.info_dict:
1215        version = max(
1216            int(i) for i in
1217            OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
1218    self.version = version
1219
1220    b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
1221                                    version=self.version)
1222    tmpdir = tempfile.mkdtemp()
1223    OPTIONS.tempfiles.append(tmpdir)
1224    self.path = os.path.join(tmpdir, partition)
1225    b.Compute(self.path)
1226
1227    _, self.device = GetTypeAndDevice("/" + partition, OPTIONS.info_dict)
1228
1229  def WriteScript(self, script, output_zip, progress=None):
1230    if not self.src:
1231      # write the output unconditionally
1232      script.Print("Patching %s image unconditionally..." % (self.partition,))
1233    else:
1234      script.Print("Patching %s image after verification." % (self.partition,))
1235
1236    if progress:
1237      script.ShowProgress(progress, 0)
1238    self._WriteUpdate(script, output_zip)
1239    self._WritePostInstallVerifyScript(script)
1240
1241  def WriteVerifyScript(self, script):
1242    partition = self.partition
1243    if not self.src:
1244      script.Print("Image %s will be patched unconditionally." % (partition,))
1245    else:
1246      ranges = self.src.care_map.subtract(self.src.clobbered_blocks)
1247      ranges_str = ranges.to_string_raw()
1248      if self.version >= 3:
1249        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
1250                            'block_image_verify("%s", '
1251                            'package_extract_file("%s.transfer.list"), '
1252                            '"%s.new.dat", "%s.patch.dat")) then') % (
1253                            self.device, ranges_str, self.src.TotalSha1(),
1254                            self.device, partition, partition, partition))
1255      else:
1256        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
1257                           self.device, ranges_str, self.src.TotalSha1()))
1258      script.Print('Verified %s image...' % (partition,))
1259      script.AppendExtra('else')
1260
1261      # When generating incrementals for the system and vendor partitions,
1262      # explicitly check the first block (which contains the superblock) of
1263      # the partition to see if it's what we expect. If this check fails,
1264      # give an explicit log message about the partition having been
1265      # remounted R/W (the most likely explanation) and the need to flash to
1266      # get OTAs working again.
1267      if self.check_first_block:
1268        self._CheckFirstBlock(script)
1269
1270      # Abort the OTA update. Note that the incremental OTA cannot be applied
1271      # even if it may match the checksum of the target partition.
1272      # a) If version < 3, operations like move and erase will make changes
1273      #    unconditionally and damage the partition.
1274      # b) If version >= 3, it won't even reach here.
1275      script.AppendExtra(('abort("%s partition has unexpected contents");\n'
1276                          'endif;') % (partition,))
1277
1278  def _WritePostInstallVerifyScript(self, script):
1279    partition = self.partition
1280    script.Print('Verifying the updated %s image...' % (partition,))
1281    # Unlike pre-install verification, clobbered_blocks should not be ignored.
1282    ranges = self.tgt.care_map
1283    ranges_str = ranges.to_string_raw()
1284    script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
1285                       self.device, ranges_str,
1286                       self.tgt.TotalSha1(include_clobbered_blocks=True)))
1287
1288    # Bug: 20881595
1289    # Verify that extended blocks are really zeroed out.
1290    if self.tgt.extended:
1291      ranges_str = self.tgt.extended.to_string_raw()
1292      script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
1293                         self.device, ranges_str,
1294                         self._HashZeroBlocks(self.tgt.extended.size())))
1295      script.Print('Verified the updated %s image.' % (partition,))
1296      script.AppendExtra(
1297          'else\n'
1298          '  abort("%s partition has unexpected non-zero contents after OTA '
1299          'update");\n'
1300          'endif;' % (partition,))
1301    else:
1302      script.Print('Verified the updated %s image.' % (partition,))
1303
1304    script.AppendExtra(
1305        'else\n'
1306        '  abort("%s partition has unexpected contents after OTA update");\n'
1307        'endif;' % (partition,))
1308
1309  def _WriteUpdate(self, script, output_zip):
1310    ZipWrite(output_zip,
1311             '{}.transfer.list'.format(self.path),
1312             '{}.transfer.list'.format(self.partition))
1313    ZipWrite(output_zip,
1314             '{}.new.dat'.format(self.path),
1315             '{}.new.dat'.format(self.partition))
1316    ZipWrite(output_zip,
1317             '{}.patch.dat'.format(self.path),
1318             '{}.patch.dat'.format(self.partition),
1319             compress_type=zipfile.ZIP_STORED)
1320
1321    call = ('block_image_update("{device}", '
1322            'package_extract_file("{partition}.transfer.list"), '
1323            '"{partition}.new.dat", "{partition}.patch.dat");\n'.format(
1324                device=self.device, partition=self.partition))
1325    script.AppendExtra(script.WordWrap(call))
1326
1327  def _HashBlocks(self, source, ranges): # pylint: disable=no-self-use
1328    data = source.ReadRangeSet(ranges)
1329    ctx = sha1()
1330
1331    for p in data:
1332      ctx.update(p)
1333
1334    return ctx.hexdigest()
1335
1336  def _HashZeroBlocks(self, num_blocks): # pylint: disable=no-self-use
1337    """Return the hash value for all zero blocks."""
1338    zero_block = '\x00' * 4096
1339    ctx = sha1()
1340    for _ in range(num_blocks):
1341      ctx.update(zero_block)
1342
1343    return ctx.hexdigest()
1344
1345  # TODO(tbao): Due to http://b/20939131, block 0 may be changed without
1346  # remounting R/W. Will change the checking to a finer-grained way to
1347  # mask off those bits.
1348  def _CheckFirstBlock(self, script):
1349    r = rangelib.RangeSet((0, 1))
1350    srchash = self._HashBlocks(self.src, r)
1351
1352    script.AppendExtra(('(range_sha1("%s", "%s") == "%s") || '
1353                        'abort("%s has been remounted R/W; '
1354                        'reflash device to reenable OTA updates");')
1355                       % (self.device, r.to_string_raw(), srchash,
1356                          self.device))
1357
1358DataImage = blockimgdiff.DataImage
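# Typical BlockDifference flow (illustrative; tgt_image/src_image are block
# image objects such as the DataImage alias above or sparse images prepared by
# the caller):
#
#   bd = BlockDifference("system", tgt_image, src_image, version=3)
#   bd.WriteVerifyScript(script)                      # pre-install checks
#   bd.WriteScript(script, output_zip, progress=0.8)  # emit the update calls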
1359
1360
1361# map recovery.fstab's fs_types to mount/format "partition types"
1362PARTITION_TYPES = {
1363    "yaffs2": "MTD",
1364    "mtd": "MTD",
1365    "ext4": "EMMC",
1366    "emmc": "EMMC",
1367    "f2fs": "EMMC",
1368    "squashfs": "EMMC"
1369}
1370
1371def GetTypeAndDevice(mount_point, info):
1372  fstab = info["fstab"]
1373  if fstab:
1374    return (PARTITION_TYPES[fstab[mount_point].fs_type],
1375            fstab[mount_point].device)
1376  else:
1377    raise KeyError
1378
1379
1380def ParseCertificate(data):
1381  """Parse a PEM-format certificate."""
1382  cert = []
1383  save = False
1384  for line in data.split("\n"):
1385    if "--END CERTIFICATE--" in line:
1386      break
1387    if save:
1388      cert.append(line)
1389    if "--BEGIN CERTIFICATE--" in line:
1390      save = True
1391  cert = "".join(cert).decode('base64')
1392  return cert
1393
1394def MakeRecoveryPatch(input_dir, output_sink, recovery_img, boot_img,
1395                      info_dict=None):
1396  """Generate a binary patch that creates the recovery image starting
1397  with the boot image.  (Most of the space in these images is just the
1398  kernel, which is identical for the two, so the resulting patch
1399  should be efficient.)  Add it to the output zip, along with a shell
1400  script that is run from init.rc on first boot to actually do the
1401  patching and install the new recovery image.
1402
1403  recovery_img and boot_img should be File objects for the
1404  corresponding images.  info_dict should be the dictionary returned by
1405  common.LoadInfoDict() on the input target_files.
1406  """
1407
1408  if info_dict is None:
1409    info_dict = OPTIONS.info_dict
1410
1411  full_recovery_image = info_dict.get("full_recovery_image", None) == "true"
1412
1413  if full_recovery_image:
1414    output_sink("etc/recovery.img", recovery_img.data)
1415
1416  else:
1417    diff_program = ["imgdiff"]
1418    path = os.path.join(input_dir, "SYSTEM", "etc", "recovery-resource.dat")
1419    if os.path.exists(path):
1420      diff_program.append("-b")
1421      diff_program.append(path)
1422      bonus_args = "-b /system/etc/recovery-resource.dat"
1423    else:
1424      bonus_args = ""
1425
1426    d = Difference(recovery_img, boot_img, diff_program=diff_program)
1427    _, _, patch = d.ComputePatch()
1428    output_sink("recovery-from-boot.p", patch)
1429
1430  try:
1431    boot_type, boot_device = GetTypeAndDevice("/boot", info_dict)
1432    recovery_type, recovery_device = GetTypeAndDevice("/recovery", info_dict)
1433  except KeyError:
1434    return
1435
1436  if full_recovery_image:
1437    sh = """#!/system/bin/sh
1438if ! applypatch -c %(type)s:%(device)s:%(size)d:%(sha1)s; then
1439  applypatch /system/etc/recovery.img %(type)s:%(device)s %(sha1)s %(size)d && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
1440else
1441  log -t recovery "Recovery image already installed"
1442fi
1443""" % {'type': recovery_type,
1444       'device': recovery_device,
1445       'sha1': recovery_img.sha1,
1446       'size': recovery_img.size}
1447  else:
1448    sh = """#!/system/bin/sh
1449if ! applypatch -c %(recovery_type)s:%(recovery_device)s:%(recovery_size)d:%(recovery_sha1)s; then
1450  applypatch %(bonus_args)s %(boot_type)s:%(boot_device)s:%(boot_size)d:%(boot_sha1)s %(recovery_type)s:%(recovery_device)s %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p && log -t recovery "Installing new recovery image: succeeded" || log -t recovery "Installing new recovery image: failed"
1451else
1452  log -t recovery "Recovery image already installed"
1453fi
1454""" % {'boot_size': boot_img.size,
1455       'boot_sha1': boot_img.sha1,
1456       'recovery_size': recovery_img.size,
1457       'recovery_sha1': recovery_img.sha1,
1458       'boot_type': boot_type,
1459       'boot_device': boot_device,
1460       'recovery_type': recovery_type,
1461       'recovery_device': recovery_device,
1462       'bonus_args': bonus_args}
1463
1464  # The install script location moved from /system/etc to /system/bin
1465  # in the L release.  Parse init.*.rc files to find out where the
1466  # target-files expects it to be, and put it there.
1467  sh_location = "etc/install-recovery.sh"
1468  found = False
1469  init_rc_dir = os.path.join(input_dir, "BOOT", "RAMDISK")
1470  init_rc_files = os.listdir(init_rc_dir)
1471  for init_rc_file in init_rc_files:
1472    if (not init_rc_file.startswith('init.') or
1473        not init_rc_file.endswith('.rc')):
1474      continue
1475
1476    with open(os.path.join(init_rc_dir, init_rc_file)) as f:
1477      for line in f:
1478        m = re.match(r"^service flash_recovery /system/(\S+)\s*$", line)
1479        if m:
1480          sh_location = m.group(1)
1481          found = True
1482          break
1483
1484    if found:
1485      break
1486
1487  print "putting script in", sh_location
1488
1489  output_sink(sh_location, sh)
1490