#!/usr/bin/env python
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Given a target-files zipfile, produces an OTA package that installs
that build. An incremental OTA is produced if -i is given, otherwise
a full OTA is produced.
Usage: ota_from_target_files [flags] input_target_files output_ota_package
-b (--board_config) <file>
Deprecated.
-k (--package_key) <key>
Key to use to sign the package (default is
"build/target/product/security/testkey").
-i (--incremental_from) <file>
Generate an incremental OTA using the given target-files zip as
the starting build.
-w (--wipe_user_data)
Generate an OTA package that will wipe the user data partition
when installed.
-n (--no_prereq)
Omit the timestamp prereq check normally included at the top of
the build scripts (used for developer OTA packages which
legitimately need to go back and forth).
-e (--extra_script) <file>
Insert the contents of file at the end of the update script.
-a (--aslr_mode) <on|off>
Specify whether to turn on ASLR for the package (on by default).
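  For example (hypothetical file names), a full OTA:
    ota_from_target_files target_files.zip full-ota.zip
  and an incremental OTA built against an older target-files zip:
    ota_from_target_files -i previous-target_files.zip \
        target_files.zip incremental-ota.zip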
"""
import sys
if sys.hexversion < 0x02040000:
print >> sys.stderr, "Python 2.4 or newer is required."
sys.exit(1)
import copy
import errno
import os
import re
import sha
import subprocess
import tempfile
import threading
import time
import zipfile
import common
import edify_generator
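# Global options shared with common.py.  The defaults below may be
# overridden by the command-line flags handled in main().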
OPTIONS = common.OPTIONS
OPTIONS.package_key = "build/target/product/security/testkey"
OPTIONS.incremental_source = None
OPTIONS.require_verbatim = set()
OPTIONS.prohibit_verbatim = set(("system/build.prop",))
OPTIONS.patch_threshold = 0.95
OPTIONS.wipe_user_data = False
OPTIONS.omit_prereq = False
OPTIONS.extra_script = None
OPTIONS.aslr_mode = True
OPTIONS.worker_threads = 3
def MostPopularKey(d, default):
"""Given a dict, return the key corresponding to the largest
value. Returns 'default' if the dict is empty."""
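  # For example, MostPopularKey({"a": 3, "b": 5}, None) returns "b";
  # ties are broken in favor of the largest key.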
x = [(v, k) for (k, v) in d.iteritems()]
if not x: return default
x.sort()
return x[-1][1]
def IsSymlink(info):
"""Return true if the zipfile.ZipInfo object passed in represents a
symlink."""
return (info.external_attr >> 16) == 0120777
def IsRegular(info):
"""Return true if the zipfile.ZipInfo object passed in represents a
symlink."""
return (info.external_attr >> 28) == 010
class Item:
"""Items represent the metadata (user, group, mode) of files and
directories in the system image."""
ITEMS = {}
def __init__(self, name, dir=False):
self.name = name
self.uid = None
self.gid = None
self.mode = None
self.dir = dir
if name:
self.parent = Item.Get(os.path.dirname(name), dir=True)
self.parent.children.append(self)
else:
self.parent = None
if dir:
self.children = []
def Dump(self, indent=0):
if self.uid is not None:
print "%s%s %d %d %o" % (" "*indent, self.name, self.uid, self.gid, self.mode)
else:
print "%s%s %s %s %s" % (" "*indent, self.name, self.uid, self.gid, self.mode)
if self.dir:
print "%s%s" % (" "*indent, self.descendants)
print "%s%s" % (" "*indent, self.best_subtree)
for i in self.children:
i.Dump(indent=indent+1)
@classmethod
def Get(cls, name, dir=False):
if name not in cls.ITEMS:
cls.ITEMS[name] = Item(name, dir=dir)
return cls.ITEMS[name]
@classmethod
def GetMetadata(cls, input_zip):
try:
      # See if the target_files contains a record of what the uid,
      # gid, and mode are supposed to be.
output = input_zip.read("META/filesystem_config.txt")
except KeyError:
# Run the external 'fs_config' program to determine the desired
# uid, gid, and mode for every Item object. Note this uses the
# one in the client now, which might not be the same as the one
# used when this target_files was built.
p = common.Run(["fs_config"], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
suffix = { False: "", True: "/" }
input = "".join(["%s%s\n" % (i.name, suffix[i.dir])
for i in cls.ITEMS.itervalues() if i.name])
output, error = p.communicate(input)
assert not error
for line in output.split("\n"):
if not line: continue
name, uid, gid, mode = line.split()
i = cls.ITEMS.get(name, None)
if i is not None:
i.uid = int(uid)
i.gid = int(gid)
i.mode = int(mode, 8)
if i.dir:
i.children.sort(key=lambda i: i.name)
# set metadata for the files generated by this script.
i = cls.ITEMS.get("system/recovery-from-boot.p", None)
if i: i.uid, i.gid, i.mode = 0, 0, 0644
i = cls.ITEMS.get("system/etc/install-recovery.sh", None)
if i: i.uid, i.gid, i.mode = 0, 0, 0544
def CountChildMetadata(self):
"""Count up the (uid, gid, mode) tuples for all children and
determine the best strategy for using set_perm_recursive and
set_perm to correctly chown/chmod all the files to their desired
values. Recursively calls itself for all descendants.
Returns a dict of {(uid, gid, dmode, fmode): count} counting up
all descendants of this node. (dmode or fmode may be None.) Also
sets the best_subtree of each directory Item to the (uid, gid,
dmode, fmode) tuple that will match the most descendants of that
Item.
"""
assert self.dir
d = self.descendants = {(self.uid, self.gid, self.mode, None): 1}
for i in self.children:
if i.dir:
for k, v in i.CountChildMetadata().iteritems():
d[k] = d.get(k, 0) + v
else:
k = (i.uid, i.gid, None, i.mode)
d[k] = d.get(k, 0) + 1
# Find the (uid, gid, dmode, fmode) tuple that matches the most
# descendants.
# First, find the (uid, gid) pair that matches the most
# descendants.
ug = {}
for (uid, gid, _, _), count in d.iteritems():
ug[(uid, gid)] = ug.get((uid, gid), 0) + count
ug = MostPopularKey(ug, (0, 0))
# Now find the dmode and fmode that match the most descendants
# with that (uid, gid), and choose those.
best_dmode = (0, 0755)
best_fmode = (0, 0644)
for k, count in d.iteritems():
if k[:2] != ug: continue
if k[2] is not None and count >= best_dmode[0]: best_dmode = (count, k[2])
if k[3] is not None and count >= best_fmode[0]: best_fmode = (count, k[3])
self.best_subtree = ug + (best_dmode[1], best_fmode[1])
return d
def SetPermissions(self, script):
"""Append set_perm/set_perm_recursive commands to 'script' to
set all permissions, users, and groups for the tree of files
rooted at 'self'."""
self.CountChildMetadata()
def recurse(item, current):
# current is the (uid, gid, dmode, fmode) tuple that the current
# item (and all its children) have already been set to. We only
# need to issue set_perm/set_perm_recursive commands if we're
# supposed to be something different.
if item.dir:
if current != item.best_subtree:
script.SetPermissionsRecursive("/"+item.name, *item.best_subtree)
current = item.best_subtree
if item.uid != current[0] or item.gid != current[1] or \
item.mode != current[2]:
script.SetPermissions("/"+item.name, item.uid, item.gid, item.mode)
for i in item.children:
recurse(i, current)
else:
if item.uid != current[0] or item.gid != current[1] or \
item.mode != current[3]:
script.SetPermissions("/"+item.name, item.uid, item.gid, item.mode)
recurse(self, (-1, -1, -1, -1))
def CopySystemFiles(input_zip, output_zip=None,
substitute=None):
"""Copies files underneath system/ in the input zip to the output
zip. Populates the Item class with their metadata, and returns a
list of symlinks as well as a list of files that will be retouched.
output_zip may be None, in which case the copy is skipped (but the
other side effects still happen). substitute is an optional dict
of {output filename: contents} to be output instead of certain input
files.
"""
symlinks = []
retouch_files = []
for info in input_zip.infolist():
if info.filename.startswith("SYSTEM/"):
basefilename = info.filename[7:]
if IsSymlink(info):
symlinks.append((input_zip.read(info.filename),
"/system/" + basefilename))
else:
info2 = copy.copy(info)
fn = info2.filename = "system/" + basefilename
if substitute and fn in substitute and substitute[fn] is None:
continue
if output_zip is not None:
if substitute and fn in substitute:
data = substitute[fn]
else:
data = input_zip.read(info.filename)
if info.filename.startswith("SYSTEM/lib/") and IsRegular(info):
retouch_files.append(("/system/" + basefilename,
sha.sha(data).hexdigest()))
output_zip.writestr(info2, data)
if fn.endswith("/"):
Item.Get(fn[:-1], dir=True)
else:
Item.Get(fn, dir=False)
symlinks.sort()
return (symlinks, retouch_files)
def SignOutput(temp_zip_name, output_zip_name):
key_passwords = common.GetKeyPasswords([OPTIONS.package_key])
pw = key_passwords[OPTIONS.package_key]
common.SignFile(temp_zip_name, output_zip_name, OPTIONS.package_key, pw,
whole_file=True)
def AppendAssertions(script, input_zip):
device = GetBuildProp("ro.product.device", input_zip)
script.AssertDevice(device)
def MakeRecoveryPatch(output_zip, recovery_img, boot_img, info):
"""Generate a binary patch that creates the recovery image starting
with the boot image. (Most of the space in these images is just the
kernel, which is identical for the two, so the resulting patch
should be efficient.) Add it to the output zip, along with a shell
script that is run from init.rc on first boot to actually do the
patching and install the new recovery image.
recovery_img and boot_img should be File objects for the
corresponding images. info should be the dictionary returned by
common.LoadInfoDict() on the input target_files.
Returns an Item for the shell script, which must be made
executable.
"""
d = Difference(recovery_img, boot_img)
_, _, patch = d.ComputePatch()
common.ZipWriteStr(output_zip, "recovery/recovery-from-boot.p", patch)
Item.Get("system/recovery-from-boot.p", dir=False)
# Images with different content will have a different first page, so
# we check to see if this recovery has already been installed by
# testing just the first 2k.
HEADER_SIZE = 2048
header_sha1 = sha.sha(recovery_img.data[:HEADER_SIZE]).hexdigest()
sh = """#!/system/bin/sh
if ! applypatch -c %(partition_type)s:%(partition_path)srecovery:%(header_size)d:%(header_sha1)s; then
log -t recovery "Installing new recovery image"
applypatch %(partition_type)s:%(partition_path)sboot:%(boot_size)d:%(boot_sha1)s %(partition_type)s:%(partition_path)srecovery %(recovery_sha1)s %(recovery_size)d %(boot_sha1)s:/system/recovery-from-boot.p
else
log -t recovery "Recovery image already installed"
fi
""" % { 'boot_size': boot_img.size,
'boot_sha1': boot_img.sha1,
'header_size': HEADER_SIZE,
'header_sha1': header_sha1,
'recovery_size': recovery_img.size,
'recovery_sha1': recovery_img.sha1,
'partition_type': info["partition_type"],
'partition_path': info.get("partition_path", ""),
}
common.ZipWriteStr(output_zip, "recovery/etc/install-recovery.sh", sh)
return Item.Get("system/etc/install-recovery.sh", dir=False)
def WriteFullOTAPackage(input_zip, output_zip, info):
# TODO: how to determine this? We don't know what version it will
# be installed on top of. For now, we expect the API just won't
# change very often.
script = edify_generator.EdifyGenerator(3, info)
metadata = {"post-build": GetBuildProp("ro.build.fingerprint", input_zip),
"pre-device": GetBuildProp("ro.product.device", input_zip),
"post-timestamp": GetBuildProp("ro.build.date.utc", input_zip),
}
device_specific = common.DeviceSpecificParams(
input_zip=input_zip,
input_version=GetRecoveryAPIVersion(input_zip),
output_zip=output_zip,
script=script,
input_tmp=OPTIONS.input_tmp,
metadata=metadata)
if not OPTIONS.omit_prereq:
ts = GetBuildProp("ro.build.date.utc", input_zip)
script.AssertOlderBuild(ts)
AppendAssertions(script, input_zip)
device_specific.FullOTA_Assertions()
script.ShowProgress(0.5, 0)
if OPTIONS.wipe_user_data:
script.FormatPartition("userdata")
script.FormatPartition("system")
script.Mount("system", "/system")
script.UnpackPackageDir("recovery", "/system")
script.UnpackPackageDir("system", "/system")
(symlinks, retouch_files) = CopySystemFiles(input_zip, output_zip)
script.MakeSymlinks(symlinks)
if OPTIONS.aslr_mode:
script.RetouchBinaries(retouch_files)
else:
script.UndoRetouchBinaries(retouch_files)
boot_img = File("boot.img", common.BuildBootableImage(
os.path.join(OPTIONS.input_tmp, "BOOT")))
recovery_img = File("recovery.img", common.BuildBootableImage(
os.path.join(OPTIONS.input_tmp, "RECOVERY")))
MakeRecoveryPatch(output_zip, recovery_img, boot_img, info)
Item.GetMetadata(input_zip)
Item.Get("system").SetPermissions(script)
common.CheckSize(boot_img.data, "boot.img")
common.ZipWriteStr(output_zip, "boot.img", boot_img.data)
script.ShowProgress(0.2, 0)
script.ShowProgress(0.2, 10)
script.WriteRawImage("boot", "boot.img")
script.ShowProgress(0.1, 0)
device_specific.FullOTA_InstallEnd()
if OPTIONS.extra_script is not None:
script.AppendExtra(OPTIONS.extra_script)
script.UnmountAll()
script.AddToZip(input_zip, output_zip)
WriteMetadata(metadata, output_zip)
def WriteMetadata(metadata, output_zip):
common.ZipWriteStr(output_zip, "META-INF/com/android/metadata",
"".join(["%s=%s\n" % kv
for kv in sorted(metadata.iteritems())]))
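# A File holds the name and contents of a single file, along with its
# size and SHA-1 digest, and can write itself out to a temp file or
# into a zip.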
class File(object):
def __init__(self, name, data):
self.name = name
self.data = data
self.size = len(data)
self.sha1 = sha.sha(data).hexdigest()
def WriteToTemp(self):
t = tempfile.NamedTemporaryFile()
t.write(self.data)
t.flush()
return t
def AddToZip(self, z):
common.ZipWriteStr(z, self.name, self.data)
def LoadSystemFiles(z):
"""Load all the files from SYSTEM/... in a given target-files
ZipFile, and return a dict of {filename: File object}."""
out = {}
retouch_files = []
for info in z.infolist():
if info.filename.startswith("SYSTEM/") and not IsSymlink(info):
basefilename = info.filename[7:]
fn = "system/" + basefilename
data = z.read(info.filename)
out[fn] = File(fn, data)
if info.filename.startswith("SYSTEM/lib/") and IsRegular(info):
retouch_files.append(("/system/" + basefilename,
out[fn].sha1))
return (out, retouch_files)
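# Which diff program to use for each file extension.  imgdiff
# understands the internal structure of these formats and generally
# produces much smaller patches for them; anything not listed here
# falls back to bsdiff (see Difference.ComputePatch below).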
DIFF_PROGRAM_BY_EXT = {
".gz" : "imgdiff",
".zip" : ["imgdiff", "-z"],
".jar" : ["imgdiff", "-z"],
".apk" : ["imgdiff", "-z"],
".img" : "imgdiff",
}
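# A Difference pairs a target File (tf) with a source File (sf) and,
# once ComputePatch has been called, holds the binary patch that
# turns sf into tf.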
class Difference(object):
def __init__(self, tf, sf):
self.tf = tf
self.sf = sf
self.patch = None
def ComputePatch(self):
"""Compute the patch (as a string of data) needed to turn sf into
tf. Returns the same tuple as GetPatch()."""
tf = self.tf
sf = self.sf
ext = os.path.splitext(tf.name)[1]
diff_program = DIFF_PROGRAM_BY_EXT.get(ext, "bsdiff")
ttemp = tf.WriteToTemp()
stemp = sf.WriteToTemp()
try:
ptemp = tempfile.NamedTemporaryFile()
if isinstance(diff_program, list):
cmd = copy.copy(diff_program)
else:
cmd = [diff_program]
cmd.append(stemp.name)
cmd.append(ttemp.name)
cmd.append(ptemp.name)
p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, err = p.communicate()
if err or p.returncode != 0:
print "WARNING: failure running %s:\n%s\n" % (diff_program, err)
return None
diff = ptemp.read()
finally:
ptemp.close()
stemp.close()
ttemp.close()
self.patch = diff
return self.tf, self.sf, self.patch
def GetPatch(self):
"""Return a tuple (target_file, source_file, patch_data).
patch_data may be None if ComputePatch hasn't been called, or if
computing the patch failed."""
return self.tf, self.sf, self.patch
def ComputeDifferences(diffs):
"""Call ComputePatch on all the Difference objects in 'diffs'."""
print len(diffs), "diffs to compute"
# Do the largest files first, to try and reduce the long-pole effect.
by_size = [(i.tf.size, i) for i in diffs]
by_size.sort(reverse=True)
by_size = [i[1] for i in by_size]
lock = threading.Lock()
diff_iter = iter(by_size) # accessed under lock
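  # Each worker pulls the next Difference from diff_iter.  The lock is
  # held only while fetching the next item and printing results; it is
  # released around the (slow) ComputePatch call so patches are
  # computed in parallel.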
def worker():
try:
lock.acquire()
for d in diff_iter:
lock.release()
start = time.time()
d.ComputePatch()
dur = time.time() - start
lock.acquire()
tf, sf, patch = d.GetPatch()
if sf.name == tf.name:
name = tf.name
else:
name = "%s (%s)" % (tf.name, sf.name)
if patch is None:
print "patching failed! %s" % (name,)
else:
print "%8.2f sec %8d / %8d bytes (%6.2f%%) %s" % (
dur, len(patch), tf.size, 100.0 * len(patch) / tf.size, name)
lock.release()
except Exception, e:
print e
raise
# start worker threads; wait for them all to finish.
threads = [threading.Thread(target=worker)
for i in range(OPTIONS.worker_threads)]
for th in threads:
th.start()
while threads:
threads.pop().join()
def GetBuildProp(property, z):
"""Return the fingerprint of the build of a given target-files
ZipFile object."""
bp = z.read("SYSTEM/build.prop")
if not property:
return bp
m = re.search(re.escape(property) + r"=(.*)\n", bp)
if not m:
raise common.ExternalError("couldn't find %s in build.prop" % (property,))
return m.group(1).strip()
def GetRecoveryAPIVersion(zip):
"""Returns the version of the recovery API. Version 0 is the older
amend code (no separate binary)."""
try:
version = zip.read("META/recovery-api-version.txt")
return int(version)
except KeyError:
try:
# version one didn't have the recovery-api-version.txt file, but
# it did include an updater binary.
zip.getinfo("OTA/bin/updater")
return 1
except KeyError:
return 0
def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip, info):
source_version = GetRecoveryAPIVersion(source_zip)
target_version = GetRecoveryAPIVersion(target_zip)
partition_type = info["partition_type"]
partition_path = info.get("partition_path", "")
if source_version == 0:
print ("WARNING: generating edify script for a source that "
"can't install it.")
script = edify_generator.EdifyGenerator(source_version, info)
metadata = {"pre-device": GetBuildProp("ro.product.device", source_zip),
"post-timestamp": GetBuildProp("ro.build.date.utc", target_zip),
}
device_specific = common.DeviceSpecificParams(
source_zip=source_zip,
source_version=source_version,
target_zip=target_zip,
target_version=target_version,
output_zip=output_zip,
script=script,
metadata=metadata)
print "Loading target..."
(target_data, target_retouch_files) = LoadSystemFiles(target_zip)
print "Loading source..."
(source_data, source_retouch_files) = LoadSystemFiles(source_zip)
verbatim_targets = []
patch_list = []
diffs = []
largest_source_size = 0
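  # Classify each file in the target build: files that are new (or
  # listed in OPTIONS.require_verbatim) are stored whole, files whose
  # contents differ from the source become patch candidates, and
  # unchanged files are omitted from the package.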
for fn in sorted(target_data.keys()):
tf = target_data[fn]
assert fn == tf.name
sf = source_data.get(fn, None)
if sf is None or fn in OPTIONS.require_verbatim:
# This file should be included verbatim
if fn in OPTIONS.prohibit_verbatim:
raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
print "send", fn, "verbatim"
tf.AddToZip(output_zip)
verbatim_targets.append((fn, tf.size))
elif tf.sha1 != sf.sha1:
# File is different; consider sending as a patch
diffs.append(Difference(tf, sf))
else:
# Target file identical to source.
pass
ComputeDifferences(diffs)
for diff in diffs:
tf, sf, d = diff.GetPatch()
if d is None or len(d) > tf.size * OPTIONS.patch_threshold:
# patch is almost as big as the file; don't bother patching
tf.AddToZip(output_zip)
verbatim_targets.append((tf.name, tf.size))
else:
common.ZipWriteStr(output_zip, "patch/" + tf.name + ".p", d)
patch_list.append((tf.name, tf, sf, tf.size, sha.sha(d).hexdigest()))
largest_source_size = max(largest_source_size, sf.size)
source_fp = GetBuildProp("ro.build.fingerprint", source_zip)
target_fp = GetBuildProp("ro.build.fingerprint", target_zip)
metadata["pre-build"] = source_fp
metadata["post-build"] = target_fp
script.Mount("system", "/system")
script.AssertSomeFingerprint(source_fp, target_fp)
source_boot = File("/tmp/boot.img",
common.BuildBootableImage(
os.path.join(OPTIONS.source_tmp, "BOOT")))
target_boot = File("/tmp/boot.img",
common.BuildBootableImage(
os.path.join(OPTIONS.target_tmp, "BOOT")))
updating_boot = (source_boot.data != target_boot.data)
source_recovery = File("system/recovery.img",
common.BuildBootableImage(
os.path.join(OPTIONS.source_tmp, "RECOVERY")))
target_recovery = File("system/recovery.img",
common.BuildBootableImage(
os.path.join(OPTIONS.target_tmp, "RECOVERY")))
updating_recovery = (source_recovery.data != target_recovery.data)
# Here's how we divide up the progress bar:
# 0.1 for verifying the start state (PatchCheck calls)
# 0.8 for applying patches (ApplyPatch calls)
# 0.1 for unpacking verbatim files, symlinking, and doing the
# device-specific commands.
AppendAssertions(script, target_zip)
device_specific.IncrementalOTA_Assertions()
script.Print("Verifying current system...")
script.ShowProgress(0.1, 0)
total_verify_size = float(sum([i[2].size for i in patch_list]) + 1)
if updating_boot:
total_verify_size += source_boot.size
so_far = 0
for fn, tf, sf, size, patch_sha in patch_list:
script.PatchCheck("/"+fn, tf.sha1, sf.sha1)
so_far += sf.size
script.SetProgress(so_far / total_verify_size)
if updating_boot:
d = Difference(target_boot, source_boot)
_, _, d = d.ComputePatch()
print "boot target: %d source: %d diff: %d" % (
target_boot.size, source_boot.size, len(d))
common.ZipWriteStr(output_zip, "patch/boot.img.p", d)
script.PatchCheck("%s:%sboot:%d:%s:%d:%s" %
(partition_type, partition_path,
source_boot.size, source_boot.sha1,
target_boot.size, target_boot.sha1))
so_far += source_boot.size
script.SetProgress(so_far / total_verify_size)
if patch_list or updating_recovery or updating_boot:
script.CacheFreeSpaceCheck(largest_source_size)
device_specific.IncrementalOTA_VerifyEnd()
script.Comment("---- start making changes here ----")
if OPTIONS.wipe_user_data:
script.Print("Erasing user data...")
script.FormatPartition("userdata")
script.Print("Removing unneeded files...")
script.DeleteFiles(["/"+i[0] for i in verbatim_targets] +
["/"+i for i in sorted(source_data)
if i not in target_data] +
["/system/recovery.img"])
script.ShowProgress(0.8, 0)
total_patch_size = float(sum([i[1].size for i in patch_list]) + 1)
if updating_boot:
total_patch_size += target_boot.size
so_far = 0
script.Print("Patching system files...")
for fn, tf, sf, size, _ in patch_list:
script.ApplyPatch("/"+fn, "-", tf.size, tf.sha1, sf.sha1, "patch/"+fn+".p")
so_far += tf.size
script.SetProgress(so_far / total_patch_size)
if updating_boot:
# Produce the boot image by applying a patch to the current
# contents of the boot partition, and write it back to the
# partition.
script.Print("Patching boot image...")
script.ApplyPatch("%s:%sboot:%d:%s:%d:%s"
% (partition_type, partition_path,
source_boot.size, source_boot.sha1,
target_boot.size, target_boot.sha1),
"-",
target_boot.size, target_boot.sha1,
source_boot.sha1, "patch/boot.img.p")
so_far += target_boot.size
script.SetProgress(so_far / total_patch_size)
print "boot image changed; including."
else:
print "boot image unchanged; skipping."
if updating_recovery:
# Is it better to generate recovery as a patch from the current
# boot image, or from the previous recovery image? For large
# updates with significant kernel changes, probably the former.
# For small updates where the kernel hasn't changed, almost
# certainly the latter. We pick the first option. Future
# complicated schemes may let us effectively use both.
#
# A wacky possibility: as long as there is room in the boot
# partition, include the binaries and image files from recovery in
# the boot image (though not in the ramdisk) so they can be used
# as fodder for constructing the recovery image.
MakeRecoveryPatch(output_zip, target_recovery, target_boot, info)
script.DeleteFiles(["/system/recovery-from-boot.p",
"/system/etc/install-recovery.sh"])
print "recovery image changed; including as patch from boot."
else:
print "recovery image unchanged; skipping."
script.ShowProgress(0.1, 10)
(target_symlinks, target_retouch_dummies) = CopySystemFiles(target_zip, None)
target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks])
temp_script = script.MakeTemporary()
Item.GetMetadata(target_zip)
Item.Get("system").SetPermissions(temp_script)
# Note that this call will mess up the tree of Items, so make sure
# we're done with it.
(source_symlinks, source_retouch_dummies) = CopySystemFiles(source_zip, None)
source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks])
# Delete all the symlinks in source that aren't in target. This
# needs to happen before verbatim files are unpacked, in case a
# symlink in the source is replaced by a real file in the target.
to_delete = []
for dest, link in source_symlinks:
if link not in target_symlinks_d:
to_delete.append(link)
script.DeleteFiles(to_delete)
if verbatim_targets:
script.Print("Unpacking new files...")
script.UnpackPackageDir("system", "/system")
if updating_recovery:
script.Print("Unpacking new recovery...")
script.UnpackPackageDir("recovery", "/system")
script.Print("Symlinks and permissions...")
# Create all the symlinks that don't already exist, or point to
# somewhere different than what we want. Delete each symlink before
# creating it, since the 'symlink' command won't overwrite.
to_create = []
for dest, link in target_symlinks:
if link in source_symlinks_d:
if dest != source_symlinks_d[link]:
to_create.append((dest, link))
else:
to_create.append((dest, link))
script.DeleteFiles([i[1] for i in to_create])
script.MakeSymlinks(to_create)
if OPTIONS.aslr_mode:
script.RetouchBinaries(target_retouch_files)
else:
script.UndoRetouchBinaries(target_retouch_files)
# Now that the symlinks are created, we can set all the
# permissions.
script.AppendScript(temp_script)
# Do device-specific installation (eg, write radio image).
device_specific.IncrementalOTA_InstallEnd()
if OPTIONS.extra_script is not None:
script.AppendExtra(OPTIONS.extra_script)
script.AddToZip(target_zip, output_zip)
WriteMetadata(metadata, output_zip)
def main(argv):
def option_handler(o, a):
if o in ("-b", "--board_config"):
pass # deprecated
elif o in ("-k", "--package_key"):
OPTIONS.package_key = a
elif o in ("-i", "--incremental_from"):
OPTIONS.incremental_source = a
elif o in ("-w", "--wipe_user_data"):
OPTIONS.wipe_user_data = True
elif o in ("-n", "--no_prereq"):
OPTIONS.omit_prereq = True
elif o in ("-e", "--extra_script"):
OPTIONS.extra_script = a
elif o in ("-a", "--aslr_mode"):
if a in ("on", "On", "true", "True", "yes", "Yes"):
OPTIONS.aslr_mode = True
else:
OPTIONS.aslr_mode = False
elif o in ("--worker_threads"):
OPTIONS.worker_threads = int(a)
else:
return False
return True
args = common.ParseOptions(argv, __doc__,
extra_opts="b:k:i:d:wne:a:",
extra_long_opts=["board_config=",
"package_key=",
"incremental_from=",
"wipe_user_data",
"no_prereq",
"extra_script=",
"worker_threads=",
"aslr_mode="],
extra_option_handler=option_handler)
if len(args) != 2:
common.Usage(__doc__)
sys.exit(1)
if OPTIONS.extra_script is not None:
OPTIONS.extra_script = open(OPTIONS.extra_script).read()
print "unzipping target target-files..."
OPTIONS.input_tmp = common.UnzipTemp(args[0])
if OPTIONS.device_specific is None:
# look for the device-specific tools extension location in the input
try:
f = open(os.path.join(OPTIONS.input_tmp, "META", "tool-extensions.txt"))
ds = f.read().strip()
f.close()
if ds:
ds = os.path.normpath(ds)
print "using device-specific extensions in", ds
OPTIONS.device_specific = ds
except IOError, e:
if e.errno == errno.ENOENT:
# nothing specified in the file
pass
else:
raise
info = common.LoadInfoDict()
common.LoadMaxSizes(info)
if not OPTIONS.max_image_size:
print
print " WARNING: Failed to load max image sizes; will not enforce"
print " image size limits."
print
OPTIONS.target_tmp = OPTIONS.input_tmp
input_zip = zipfile.ZipFile(args[0], "r")
if OPTIONS.package_key:
temp_zip_file = tempfile.NamedTemporaryFile()
output_zip = zipfile.ZipFile(temp_zip_file, "w",
compression=zipfile.ZIP_DEFLATED)
else:
output_zip = zipfile.ZipFile(args[1], "w",
compression=zipfile.ZIP_DEFLATED)
if OPTIONS.incremental_source is None:
WriteFullOTAPackage(input_zip, output_zip, info)
else:
print "unzipping source target-files..."
OPTIONS.source_tmp = common.UnzipTemp(OPTIONS.incremental_source)
source_zip = zipfile.ZipFile(OPTIONS.incremental_source, "r")
WriteIncrementalOTAPackage(input_zip, source_zip, output_zip, info)
output_zip.close()
if OPTIONS.package_key:
SignOutput(temp_zip_file.name, args[1])
temp_zip_file.close()
common.Cleanup()
print "done."
if __name__ == '__main__':
try:
main(sys.argv[1:])
except common.ExternalError, e:
print
print " ERROR: %s" % (e,)
print
sys.exit(1)