Merge "Add TARGET_OUT_VENDOR_RENDERSCRIPT_BITCODE definition."
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 9d3fc23..e0c826c 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -400,6 +400,11 @@
 
 $(call add-clean-step, rm -rf $(TARGET_OUT_ETC)/init)
 
+# Libraries are moved from {system|vendor}/lib to ./lib/framework, ./lib/vndk, etc.
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/lib*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/vendor/lib*)
+$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/vendor/lib*)
+
 # ************************************************
 # NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
 # ************************************************
diff --git a/core/Makefile b/core/Makefile
index 9e82bfc..cf0aa4a 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -835,7 +835,7 @@
 endif
 endif
 
-SELINUX_FC := $(TARGET_ROOT_OUT)/file_contexts.bin
+SELINUX_FC := $(call intermediates-dir-for,ETC,file_contexts.bin)/file_contexts.bin
 INTERNAL_USERIMAGES_DEPS += $(SELINUX_FC)
 
 INTERNAL_USERIMAGES_DEPS += $(BLK_ALLOC_TO_BASE_FS)
@@ -910,7 +910,15 @@
     $(ALL_DEFAULT_INSTALLED_MODULES))
 
 recovery_initrc := $(call include-path-for, recovery)/etc/init.rc
-recovery_sepolicy := $(call intermediates-dir-for,ETC,sepolicy.recovery)/sepolicy.recovery
+recovery_sepolicy := \
+    $(TARGET_RECOVERY_ROOT_OUT)/sepolicy \
+    $(TARGET_RECOVERY_ROOT_OUT)/file_contexts.bin \
+    $(TARGET_RECOVERY_ROOT_OUT)/plat_property_contexts \
+    $(TARGET_RECOVERY_ROOT_OUT)/nonplat_property_contexts
+# Passed into rsync from non-recovery root to recovery root, to avoid overwriting recovery-specific
+# SELinux files
+IGNORE_RECOVERY_SEPOLICY := $(patsubst $(TARGET_RECOVERY_OUT)/%,--exclude=/%,$(recovery_sepolicy))
+
 recovery_kernel := $(INSTALLED_KERNEL_TARGET) # same as a non-recovery system
 recovery_ramdisk := $(PRODUCT_OUT)/ramdisk-recovery.img
 recovery_build_prop := $(intermediate_system_build_prop)
@@ -1021,18 +1029,37 @@
 	java -jar $(DUMPKEY_JAR) $(PRIVATE_OTA_PUBLIC_KEYS) $(extra_keys) > $@
 
 RECOVERYIMAGE_ID_FILE := $(PRODUCT_OUT)/recovery.id
+
+# $(1): modules list
+# $(2): output dir
+# $(3): mount point
+# $(4): staging dir
+# Depmod requires a well-formed kernel version, so 0.0 is used as a placeholder.
+define build-image-kernel-modules
+    $(hide) rm -rf $(2)/lib/modules
+    $(hide) mkdir -p $(2)/lib/modules
+    $(hide) cp $(1) $(2)/lib/modules/
+    $(hide) rm -rf $(4)
+    $(hide) mkdir -p $(4)/lib/modules/0.0/$(3)lib/modules
+    $(hide) cp $(1) $(4)/lib/modules/0.0/$(3)lib/modules
+    $(hide) $(DEPMOD) -b $(4) 0.0
+    $(hide) sed -e 's/\(.*modules.*\):/\/\1:/g' -e 's/: \(.*modules.*\)/: \/\1/g' -i $(4)/lib/modules/0.0/modules.dep
+    $(hide) cp $(4)/lib/modules/0.0/modules.dep $(2)/lib/modules
+endef
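+# For illustration only (hypothetical module names): with $(3) = vendor/,
+# depmod writes relative entries into modules.dep such as
+#   vendor/lib/modules/wifi.ko: vendor/lib/modules/cfg80211.ko
+# and the sed rewrite above absolutizes both sides to match the on-device
+# layout:
+#   /vendor/lib/modules/wifi.ko: /vendor/lib/modules/cfg80211.ko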
+
 # $(1): output file
 define build-recoveryimage-target
   @echo ----- Making recovery image ------
   $(hide) mkdir -p $(TARGET_RECOVERY_OUT)
   $(hide) mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/etc $(TARGET_RECOVERY_ROOT_OUT)/sdcard $(TARGET_RECOVERY_ROOT_OUT)/tmp
   @echo Copying baseline ramdisk...
-  $(hide) rsync -a --exclude=etc --exclude=sdcard $(IGNORE_CACHE_LINK) $(TARGET_ROOT_OUT) $(TARGET_RECOVERY_OUT) # "cp -Rf" fails to overwrite broken symlinks on Mac.
+  # Use rsync because "cp -Rf" fails to overwrite broken symlinks on Mac.
+  $(hide) rsync -a --exclude=etc --exclude=sdcard $(IGNORE_RECOVERY_SEPOLICY) $(IGNORE_CACHE_LINK) $(TARGET_ROOT_OUT) $(TARGET_RECOVERY_OUT)
   @echo Modifying ramdisk contents...
+  $(if $(BOARD_RECOVERY_KERNEL_MODULES), \
+    $(call build-image-kernel-modules,$(BOARD_RECOVERY_KERNEL_MODULES),$(TARGET_RECOVERY_ROOT_OUT),,$(call intermediates-dir-for,PACKAGING,depmod_recovery)))
   $(hide) rm -f $(TARGET_RECOVERY_ROOT_OUT)/init*.rc
   $(hide) cp -f $(recovery_initrc) $(TARGET_RECOVERY_ROOT_OUT)/
-  $(hide) rm -f $(TARGET_RECOVERY_ROOT_OUT)/sepolicy
-  $(hide) cp -f $(recovery_sepolicy) $(TARGET_RECOVERY_ROOT_OUT)/sepolicy
   $(hide) cp $(TARGET_ROOT_OUT)/init.recovery.*.rc $(TARGET_RECOVERY_ROOT_OUT)/ || true # Ignore error when the src file doesn't exist.
   $(hide) mkdir -p $(TARGET_RECOVERY_ROOT_OUT)/res
   $(hide) rm -rf $(TARGET_RECOVERY_ROOT_OUT)/res/*
@@ -1090,7 +1117,9 @@
 		$(INSTALLED_2NDBOOTLOADER_TARGET) \
 		$(recovery_build_prop) $(recovery_resource_deps) \
 		$(recovery_fstab) \
-		$(RECOVERY_INSTALL_OTA_KEYS)
+		$(RECOVERY_INSTALL_OTA_KEYS) \
+		$(BOARD_RECOVERY_KERNEL_MODULES) \
+		$(DEPMOD)
 		$(call pretty,"Target boot image from recovery: $@")
 		$(call build-recoveryimage-target, $@)
 endif
@@ -1103,7 +1132,9 @@
 		$(INSTALLED_2NDBOOTLOADER_TARGET) \
 		$(recovery_build_prop) $(recovery_resource_deps) \
 		$(recovery_fstab) \
-		$(RECOVERY_INSTALL_OTA_KEYS)
+		$(RECOVERY_INSTALL_OTA_KEYS) \
+		$(BOARD_RECOVERY_KERNEL_MODULES) \
+		$(DEPMOD)
 		$(call build-recoveryimage-target, $@)
 
 ifdef RECOVERY_RESOURCE_ZIP
@@ -1659,12 +1690,13 @@
 vendorimage_intermediates := \
     $(call intermediates-dir-for,PACKAGING,vendor)
 BUILT_VENDORIMAGE_TARGET := $(PRODUCT_OUT)/vendor.img
-
 define build-vendorimage-target
   $(call pretty,"Target vendor fs image: $(INSTALLED_VENDORIMAGE_TARGET)")
   @mkdir -p $(TARGET_OUT_VENDOR)
   @mkdir -p $(vendorimage_intermediates) && rm -rf $(vendorimage_intermediates)/vendor_image_info.txt
   $(call generate-userimage-prop-dictionary, $(vendorimage_intermediates)/vendor_image_info.txt, skip_fsck=true)
+  $(if $(BOARD_VENDOR_KERNEL_MODULES), \
+    $(call build-image-kernel-modules,$(BOARD_VENDOR_KERNEL_MODULES),$(TARGET_OUT_VENDOR),vendor/,$(call intermediates-dir-for,PACKAGING,depmod_vendor)))
   $(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH \
       ./build/tools/releasetools/build_image.py \
       $(TARGET_OUT_VENDOR) $(vendorimage_intermediates)/vendor_image_info.txt $(INSTALLED_VENDORIMAGE_TARGET) $(TARGET_OUT)
@@ -1673,11 +1705,11 @@
 
 # We just build this directly to the install location.
 INSTALLED_VENDORIMAGE_TARGET := $(BUILT_VENDORIMAGE_TARGET)
-$(INSTALLED_VENDORIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_VENDORIMAGE_FILES) $(INSTALLED_FILES_FILE_VENDOR) $(BUILD_IMAGE_SRCS)
+$(INSTALLED_VENDORIMAGE_TARGET): $(INTERNAL_USERIMAGES_DEPS) $(INTERNAL_VENDORIMAGE_FILES) $(INSTALLED_FILES_FILE_VENDOR) $(BUILD_IMAGE_SRCS) $(DEPMOD) $(BOARD_VENDOR_KERNEL_MODULES)
 	$(build-vendorimage-target)
 
-.PHONY: vendorimage-nodeps
-vendorimage-nodeps: | $(INTERNAL_USERIMAGES_DEPS)
+.PHONY: vendorimage-nodeps vnod
+vendorimage-nodeps vnod: | $(INTERNAL_USERIMAGES_DEPS) $(DEPMOD)
 	$(build-vendorimage-target)
 
 else ifdef BOARD_PREBUILT_VENDORIMAGE
@@ -1880,6 +1912,13 @@
 $(BUILT_TARGET_FILES_PACKAGE): PRIVATE_RECOVERY_OUT := RECOVERY
 endif
 
+# Run fs_config while creating the target files package.
+# $1: root directory
+# $2: prefix to add to each path
+define fs_config
+(cd $(1); find . -type d | sed 's,$$,/,'; find . \! -type d) | cut -c 3- | sort | sed 's,^,$(2),' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC)
+endef
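+# For illustration only (hypothetical tree): with $(1) = $(zip_root)/SYSTEM and
+# $(2) = system/, a staged file SYSTEM/bin/sh is listed as system/bin/sh and
+# piped to fs_config, which appends its uid, gid, mode and SELinux label.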
+
 # Depending on the various images guarantees that the underlying
 # directories are up-to-date.
 $(BUILT_TARGET_FILES_PACKAGE): \
@@ -2020,10 +2059,8 @@
 endif
 	$(hide) echo 'mkbootimg_args=$(BOARD_MKBOOTIMG_ARGS)' >> $(zip_root)/META/misc_info.txt
 	$(hide) echo 'mkbootimg_version_args=$(INTERNAL_MKBOOTIMG_VERSION_ARGS)' >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "use_set_metadata=1" >> $(zip_root)/META/misc_info.txt
 	$(hide) echo "multistage_support=1" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "update_rename_support=1" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo "blockimgdiff_versions=1,2,3,4" >> $(zip_root)/META/misc_info.txt
+	$(hide) echo "blockimgdiff_versions=3,4" >> $(zip_root)/META/misc_info.txt
 ifneq ($(OEM_THUMBPRINT_PROPERTIES),)
 	# OTA scripts are only interested in fingerprint related properties
 	$(hide) echo "oem_fingerprint_properties=$(OEM_THUMBPRINT_PROPERTIES)" >> $(zip_root)/META/misc_info.txt
@@ -2090,8 +2127,9 @@
 	done;
 endif
 ifdef OSRELEASED_DIRECTORY
-	$(hide) $(ACP) $(TARGET_OUT_ETC)/$(OSRELEASED_DIRECTORY)/product_id $(zip_root)/META/product_id.txt
-	$(hide) $(ACP) $(TARGET_OUT_ETC)/$(OSRELEASED_DIRECTORY)/product_version $(zip_root)/META/product_version.txt
+	$(hide) $(ACP) $(TARGET_OUT_OEM)/$(OSRELEASED_DIRECTORY)/product_id $(zip_root)/META/product_id.txt
+	$(hide) $(ACP) $(TARGET_OUT_OEM)/$(OSRELEASED_DIRECTORY)/product_version $(zip_root)/META/product_version.txt
+	$(hide) $(ACP) $(TARGET_OUT_ETC)/$(OSRELEASED_DIRECTORY)/system_version $(zip_root)/META/system_version.txt
 endif
 endif
 ifeq ($(BREAKPAD_GENERATE_SYMBOLS),true)
@@ -2102,28 +2140,29 @@
 	$(hide) mkdir -p $(zip_root)/IMAGES
 	$(hide) cp $(INSTALLED_VENDORIMAGE_TARGET) $(zip_root)/IMAGES/
 endif
+	@# Run fs_config on all the system, vendor, boot ramdisk,
+	@# and recovery ramdisk files in the zip, and save the output
+	$(hide) $(call fs_config,$(zip_root)/SYSTEM,system/) > $(zip_root)/META/filesystem_config.txt
+ifdef BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE
+	$(hide) $(call fs_config,$(zip_root)/VENDOR,vendor/) > $(zip_root)/META/vendor_filesystem_config.txt
+endif
+ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
+	$(hide) $(call fs_config,$(zip_root)/ROOT,) > $(zip_root)/META/root_filesystem_config.txt
+endif
+	$(hide) $(call fs_config,$(zip_root)/BOOT/RAMDISK,) > $(zip_root)/META/boot_filesystem_config.txt
+ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
+	$(hide) $(call fs_config,$(zip_root)/RECOVERY/RAMDISK,) > $(zip_root)/META/recovery_filesystem_config.txt
+endif
+ifdef INSTALLED_SYSTEMOTHERIMAGE_TARGET
+	$(hide) $(call fs_config,$(zip_root)/SYSTEM_OTHER,system/) > $(zip_root)/META/system_other_filesystem_config.txt
+endif
+	$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
+	    ./build/tools/releasetools/add_img_to_target_files -a -v -p $(HOST_OUT) $(zip_root)
 	@# Zip everything up, preserving symlinks and placing META/ files first to
 	@# help early validation of the .zip file while uploading it.
 	$(hide) find $(zip_root)/META | sort >$@.list
 	$(hide) find $(zip_root) | grep -v "^$(zip_root)/META/" | sort >>$@.list
 	$(hide) $(SOONG_ZIP) -d -o $@ -C $(zip_root) -l $@.list
-	@# Run fs_config on all the system, vendor, boot ramdisk,
-	@# and recovery ramdisk files in the zip, and save the output
-	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="SYSTEM/" } /^SYSTEM\// {print "system/" $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/filesystem_config.txt
-	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="VENDOR/" } /^VENDOR\// {print "vendor/" $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/vendor_filesystem_config.txt
-ifeq ($(BOARD_BUILD_SYSTEM_ROOT_IMAGE),true)
-	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="ROOT/" } /^ROOT\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/root_filesystem_config.txt
-endif
-	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="BOOT/RAMDISK/" } /^BOOT\/RAMDISK\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/boot_filesystem_config.txt
-ifneq ($(INSTALLED_RECOVERYIMAGE_TARGET),)
-	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="RECOVERY/RAMDISK/" } /^RECOVERY\/RAMDISK\// {print $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/recovery_filesystem_config.txt
-endif
-ifdef INSTALLED_SYSTEMOTHERIMAGE_TARGET
-	$(hide) zipinfo -1 $@ | awk 'BEGIN { FS="SYSTEM_OTHER/" } /^SYSTEM_OTHER\// { print "system/" $$2}' | $(HOST_OUT_EXECUTABLES)/fs_config -C -D $(TARGET_OUT) -S $(SELINUX_FC) > $(zip_root)/META/system_other_filesystem_config.txt
-endif
-	$(hide) (cd $(zip_root) && zip -qX ../$(notdir $@) META/*filesystem_config.txt)
-	$(hide) PATH=$(foreach p,$(INTERNAL_USERIMAGES_BINARY_PATHS),$(p):)$$PATH MKBOOTIMG=$(MKBOOTIMG) \
-	    ./build/tools/releasetools/add_img_to_target_files -a -v -p $(HOST_OUT) $@
 
 .PHONY: target-files-package
 target-files-package: $(BUILT_TARGET_FILES_PACKAGE)
diff --git a/core/base_rules.mk b/core/base_rules.mk
index e90c1bb..1763a06 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -78,6 +78,8 @@
 endif
 endif
 
+my_module_is_soong := $(if $(filter $(OUT_DIR)/soong/%,$(LOCAL_MODULE_MAKEFILE)),true,false)
+
 # Ninja has an implicit dependency on the command being run, and kati will
 # regenerate the ninja manifest if any read makefile changes, so there is no
 # need to have dependencies on makefiles.
@@ -121,7 +123,7 @@
 # makefiles. Anything else is either a typo or a source of unexpected
 # behaviors.
 ifneq ($(filter-out debug eng tests optional samples,$(my_module_tags)),)
-$(warning unusual tags $(my_module_tags) on $(LOCAL_MODULE) at $(LOCAL_PATH))
+$(call pretty-warning,unusual tags $(my_module_tags))
 endif
 
 # Add implicit tags.
@@ -163,6 +165,14 @@
 endif
 my_module_path := $(patsubst %/,%,$(my_module_path))
 my_module_relative_path := $(strip $(LOCAL_MODULE_RELATIVE_PATH))
+
+# my_module_default_path is the path that is automatically chosen according to the attributes of
+# a module. It is used when the module does not explicitly specify an install path via
+# LOCAL_MODULE_PATH. If LOCAL_MODULE_PATH is specified, it is always respected and
+# my_module_default_path is ignored. However, for shared libraries such a conflict generates a
+# warning, so that the module owner can place the library in the correct location, stop using
+# LOCAL_MODULE_PATH, or (better yet) migrate to Soong.
+my_module_default_path :=
 ifdef LOCAL_IS_HOST_MODULE
   partition_tag :=
 else
@@ -180,20 +190,137 @@
   partition_tag := $(if $(call should-install-to-system,$(my_module_tags)),,_DATA)
 endif
 endif
-ifeq ($(my_module_path),)
-  install_path_var := $(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT$(partition_tag)_$(LOCAL_MODULE_CLASS)
-  ifeq (true,$(LOCAL_PRIVILEGED_MODULE))
-    install_path_var := $(install_path_var)_PRIVILEGED
-  endif
+install_path_var := $(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT$(partition_tag)_$(LOCAL_MODULE_CLASS)
+ifeq (true,$(LOCAL_PRIVILEGED_MODULE))
+  install_path_var := $(install_path_var)_PRIVILEGED
+endif
 
-  my_module_path := $($(install_path_var))
-  ifeq ($(strip $(my_module_path)),)
-    $(error $(LOCAL_PATH): unhandled install path "$(install_path_var) for $(LOCAL_MODULE)")
+my_module_default_path := $($(install_path_var))
+ifeq ($(strip $(my_module_path)$(my_module_default_path)),)
+  $(call pretty-error,internal error in base_rules.mk; $(install_path_var) is not defined.)
+endif
+
+# Determine lib_type and do some sanity checks.
+ifeq ($(LOCAL_IS_HOST_MODULE)$(LOCAL_MODULE_CLASS),SHARED_LIBRARIES)
+  ifneq ($(filter $(LOCAL_MODULE),$(addprefix lib,$(NDK_PREBUILT_SHARED_LIBRARIES))),)
+    ifneq ($(partition_tag),)
+      $(call pretty-error,"NDK library must be installed at system partition, where other libraries will look for it. It cannot be moved.")
+    endif
+    lib_type := ndk
+  else ifneq ($(filter $(LOCAL_MODULE),$(VNDK_LIBRARIES) $(VNDK_INDIRECT_LIBRARIES)),)
+    ifneq ($(partition_tag),)
+      $(call pretty-error,"VNDK library must be installed at system partition. DO NOT modify VNDK_LIBRARIES or VNDK_LIBRARIES. \
+If your library needs to be shared between system.img and vendor.img then define it as a VNDK-ext library. Use vndk_ext_library {...} \
+in Android.bp to do so.")
+    endif
+    lib_type := vndk
+  else ifneq ($(filter $(LOCAL_MODULE),$(BOARD_SAME_PROCESS_HAL_DEPS)),)
+    # The list of libraries implementing same-process HALs (and their internal
+    # sub-libraries) is defined by vendors.
+    ifeq ($(partition_tag),)
+      $(call pretty-error,A same-process HAL library must not be installed to the system partition)
+    endif
+    lib_type := sameprocess_hal
+  else ifeq ($(LOCAL_IS_HOST_MODULE)$(partition_tag),)
+    lib_type := framework
+  else ifneq ($(partition_tag),_DATA)
+    # Here, vendor means vendor/oem/odm
+    lib_type := vendor_provided
+  else
+    # Test and samples libs fall into this category. No lib_type is required for them.
+    ifeq ($(filter tests samples,$(LOCAL_MODULE_TAGS)),)
+      $(call pretty-warning,Cannot determine the type of this library)
+    endif
+    lib_type :=
+  endif
+else
+  lib_type :=
+endif
+
+# This is the default path until N. From O, the default path is changed.
+# Let's save the old default path in case we need a symlink to it later.
+my_module_pre_o_default_path := $(my_module_default_path)
+
+# Special case for the pre_o_default_path of Soong-defined modules.
+# For those modules, we guess their pre_o_default_path by removing /ndk, /vndk, etc.
+# from their LOCAL_MODULE_PATH. This is because relative_install_path is already
+# embedded in my_module_path.
+ifeq ($(my_module_is_soong),true)
+ifndef LOCAL_IS_HOST_MODULE
+ifeq ($(LOCAL_MODULE_CLASS),SHARED_LIBRARIES)
+  my_module_pre_o_default_path := $(my_module_path)
+  my_module_pre_o_default_path := $(subst /vndk-ext,,$(my_module_pre_o_default_path))
+  my_module_pre_o_default_path := $(subst /vndk,,$(my_module_pre_o_default_path))
+  my_module_pre_o_default_path := $(subst /ndk,,$(my_module_pre_o_default_path))
+  my_module_pre_o_default_path := $(subst /sameprocess,,$(my_module_pre_o_default_path))
+endif
+endif
+endif
+
+# Amend the default_path once again depending on lib_type. This is new from O.
+ifeq ($(lib_type),vndk)
+  my_module_default_path := $(my_module_default_path)/vndk
+  # TODO(b/35020246): before P, we should support installing two snapshots of VNDK
+  # libraries: one for framework libs and execs, and the other for vendor libs and execs.
+else ifeq ($(lib_type),ndk)
+  my_module_default_path := $(my_module_default_path)/ndk
+else ifeq ($(lib_type),sameprocess_hal)
+  my_module_default_path := $(my_module_default_path)/sameprocess
+endif
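+# For illustration only: a vndk lib defaults to $(TARGET_OUT)/lib/vndk, an ndk
+# lib to $(TARGET_OUT)/lib/ndk, and a same-process HAL lib to (e.g.)
+# $(TARGET_OUT_VENDOR)/lib/sameprocess.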
+
+# Relative path is appended to path resolved so far
+ifneq ($(my_module_relative_path),)
+  my_module_default_path := $(my_module_default_path)/$(my_module_relative_path)
+  my_module_pre_o_default_path := $(my_module_pre_o_default_path)/$(my_module_relative_path)
+  ifneq ($(my_module_path),)
+    my_module_path := $(my_module_path)/$(my_module_relative_path)
   endif
 endif
-ifneq ($(my_module_relative_path),)
-  my_module_path := $(my_module_path)/$(my_module_relative_path)
+
+_lib_moved :=
+ifeq ($(my_module_path),)
+  # If LOCAL_MODULE_PATH is not specified, use the automatically determined path.
+  my_module_path := $(my_module_default_path)
+
+  # Mark if the lib is installed to a different path than before. With this hint,
+  # a symlink is created if BOARD_SYMLINK_FOR_LIBS is true.
+  ifneq ($(my_module_path),$(my_module_pre_o_default_path))
+    _lib_moved := true
+  endif
+else
+  # If LOCAL_MODULE_PATH is specified, we respect it.
+  ifndef LOCAL_IS_HOST_MODULE
+  ifeq ($(LOCAL_MODULE_CLASS),SHARED_LIBRARIES)
+  ifeq ($(filter $(TARGET_OUT_DATA)%,$(my_module_path)),)
+    # However, we are kind enough to warn if it seems to be wrong.
+    # Warn only for Android.mk-defined shared libraries that will be installed
+    # to the system or vendor partition; for other types of files, especially
+    # Soong-defined libs, we don't warn because Soong always gives us correct
+    # paths.
+    ifeq ($(my_module_is_soong),false)
+      ifneq ($(my_module_path),$(my_module_default_path))
+        ifeq ($(SHOW_MODULE_PATH_WARNINGS),true)
+        # TODO(b/35020635): s/warning/error/
+        $(call pretty-warning,$(lib_type) library must be installed to \
+$(subst $(PRODUCT_OUT)/,,$(my_module_default_path)) but was requested to be installed at \
+$(subst $(PRODUCT_OUT)/,,$(my_module_path)). Please fix.)
+        endif
+      endif
+    else
+      # For Soong-defined modules, a symlink is provided if the path has been amended,
+      # except for vndk-ext libraries, because there is already a symlink for the
+      # vndk (unmodified) version of the vndk-ext library.
+      ifneq ($(my_module_path),$(my_module_pre_o_default_path))
+        ifeq ($(filter vndk-ext,$(subst /,$(space),$(my_module_path))),)
+          _lib_moved := true
+        endif
+      endif
+    endif
+  endif
+  endif
+  endif
 endif
+
 endif # not LOCAL_UNINSTALLABLE_MODULE
 
 ifneq ($(strip $(LOCAL_BUILT_MODULE)$(LOCAL_INSTALLED_MODULE)),)
@@ -384,8 +511,30 @@
 
 # Rule to install the module's companion symlinks
 my_installed_symlinks := $(addprefix $(my_module_path)/,$(LOCAL_MODULE_SYMLINKS) $(LOCAL_MODULE_SYMLINKS_$(my_32_64_bit_suffix)))
+
+# If this lib is installed to a different directory than before,
+# make a symlink from the old path to the new path.
+# This symlink is required because many places still expect the old
+# path (e.g. the system property rild.libpath). We keep the symlink
+# until those places are all fixed.
+# TODO(b/34917183): remove the symlinks after everything migrates to the new
+# paths; this should be done before the O launch, or it becomes a security
+# hole in that we can't restrict access to a certain set of libraries via the
+# directory path.
+ifneq ($(BOARD_SYMLINK_FOR_LIBS),false)
+ifeq ($(_lib_moved),true)
+  my_installed_symlinks += $(my_module_pre_o_default_path)/$(my_installed_module_stem)
+endif
+else
+# Symlinks for ndk libs are permanent.
+ifeq ($(lib_type)$(_lib_moved),ndktrue)
+  my_installed_symlinks += $(my_module_pre_o_default_path)/$(my_installed_module_stem)
+endif
+endif
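+# For illustration only (hypothetical library): if libbar.so moved from
+# /system/lib to /system/lib/vndk, this adds a relative symlink
+#   $(TARGET_OUT)/lib/libbar.so -> vndk/libbar.so
+# so loaders that still use the old path keep working.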
+
+# Make a symlink $(symlink) -> $(LOCAL_INSTALLED_MODULE)
 $(foreach symlink,$(my_installed_symlinks),\
-    $(call symlink-file,$(LOCAL_INSTALLED_MODULE),$(my_installed_module_stem),$(symlink)))
+    $(call symlink-file,$(LOCAL_INSTALLED_MODULE),$(LOCAL_INSTALLED_MODULE),$(symlink),true))
 
 $(my_all_targets) : | $(my_installed_symlinks)
 
@@ -417,56 +566,44 @@
 endif
 
 ###########################################################
-## Compatibiliy suite files.
+## Compatibility suite files.
 ###########################################################
 ifdef LOCAL_COMPATIBILITY_SUITE
-ifneq ($(words $(LOCAL_COMPATIBILITY_SUITE)),1)
-$(error $(LOCAL_PATH):$(LOCAL_MODULE) LOCAL_COMPATIBILITY_SUITE can be only one name)
-endif
-
-# Copy this module into its own subdirectory in the common testcases output directory.
-my_testcases_subdir := $($(my_prefix)OUT_TESTCASES)/$(LOCAL_MODULE)
 
 # The module itself.
-my_compat_dist := \
-  $(LOCAL_BUILT_MODULE):$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(my_installed_module_stem) \
-  $(LOCAL_BUILT_MODULE):$(my_testcases_subdir)/$(my_installed_module_stem)
+$(foreach suite, $(LOCAL_COMPATIBILITY_SUITE), \
+  $(eval my_compat_dist_$(suite) := $(foreach dir, $(call compatibility_suite_dirs,$(suite)), \
+    $(LOCAL_BUILT_MODULE):$(dir)/$(my_installed_module_stem))))
 
 # Make sure we only add the files once for multilib modules.
 ifndef $(my_prefix)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_compat_files
 $(my_prefix)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_compat_files := true
 
 # LOCAL_COMPATIBILITY_SUPPORT_FILES is a list of <src>[:<dest>].
-my_compat_dist += $(foreach f, $(LOCAL_COMPATIBILITY_SUPPORT_FILES),\
-  $(eval p := $(subst :,$(space),$(f)))\
-  $(eval s := $(word 1,$(p)))\
-  $(eval n := $(or $(word 2,$(p)),$(notdir $(word 1, $(p))))) \
-  $(eval d := $(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(n)) \
-  $(s):$(d) $(s):$(my_testcases_subdir)/$(n))
+$(foreach suite, $(LOCAL_COMPATIBILITY_SUITE), \
+  $(eval my_compat_dist_$(suite) += $(foreach f, $(LOCAL_COMPATIBILITY_SUPPORT_FILES), \
+    $(eval p := $(subst :,$(space),$(f))) \
+    $(eval s := $(word 1,$(p))) \
+    $(eval n := $(or $(word 2,$(p)),$(notdir $(word 1, $(p))))) \
+    $(foreach dir, $(call compatibility_suite_dirs,$(suite)), \
+      $(s):$(dir)/$(n)))))
+
 
 ifneq (,$(wildcard $(LOCAL_PATH)/AndroidTest.xml))
-my_compat_dist += \
-  $(LOCAL_PATH)/AndroidTest.xml:$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE).config
-my_compat_dist += \
-  $(LOCAL_PATH)/AndroidTest.xml:$(my_testcases_subdir)/$(LOCAL_MODULE).config
+$(foreach suite, $(LOCAL_COMPATIBILITY_SUITE), \
+  $(eval my_compat_dist_$(suite) += $(foreach dir, $(call compatibility_suite_dirs,$(suite)), \
+    $(LOCAL_PATH)/AndroidTest.xml:$(dir)/$(LOCAL_MODULE).config)))
 endif
 
 ifneq (,$(wildcard $(LOCAL_PATH)/DynamicConfig.xml))
-my_compat_dist += \
-  $(LOCAL_PATH)/DynamicConfig.xml:$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE).dynamic
-my_compat_dist += \
-  $(LOCAL_PATH)/DynamicConfig.xml:$(my_testcases_subdir)/$(LOCAL_MODULE).dynamic
+$(foreach suite, $(LOCAL_COMPATIBILITY_SUITE), \
+  $(eval my_compat_dist_$(suite) += $(foreach dir, $(call compatibility_suite_dirs,$(suite)), \
+    $(LOCAL_PATH)/DynamicConfig.xml:$(dir)/$(LOCAL_MODULE).dynamic)))
 endif
 endif # $(my_prefix)$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_compat_files
 
-my_compat_files := $(call copy-many-files, $(my_compat_dist))
+$(call create-suite-dependencies)
 
-COMPATIBILITY.$(LOCAL_COMPATIBILITY_SUITE).FILES := \
-  $(COMPATIBILITY.$(LOCAL_COMPATIBILITY_SUITE).FILES) \
-  $(my_compat_files)
-
-# Copy over the compatibility files when user runs mm/mmm.
-$(my_all_targets) : $(my_compat_files)
 endif  # LOCAL_COMPATIBILITY_SUITE
 
 ###########################################################
diff --git a/core/binary.mk b/core/binary.mk
index f5d9bfc..faae547 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -61,10 +61,14 @@
 my_arflags :=
 
 ifneq (,$(strip $(foreach dir,$(subst $(comma),$(space),$(COVERAGE_PATHS)),$(filter $(dir)%,$(LOCAL_PATH)))))
+ifeq (,$(strip $(foreach dir,$(subst $(comma),$(space),$(COVERAGE_EXCLUDE_PATHS)),$(filter $(dir)%,$(LOCAL_PATH)))))
   my_native_coverage := true
 else
   my_native_coverage := false
 endif
+else
+  my_native_coverage := false
+endif
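+# For illustration only (paths hypothetical):
+#   COVERAGE_PATHS := frameworks
+#   COVERAGE_EXCLUDE_PATHS := frameworks/base/tools
+# instruments modules under frameworks/ except those under
+# frameworks/base/tools, which now get my_native_coverage := false.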
 
 my_allow_undefined_symbols := $(strip $(LOCAL_ALLOW_UNDEFINED_SYMBOLS))
 ifdef SANITIZE_HOST
@@ -934,62 +938,6 @@
 endif  # $(proto_sources) non-empty
 
 ###########################################################
-## Compile the .dbus-xml files to c++ headers
-###########################################################
-dbus_definitions := $(filter %.dbus-xml,$(my_src_files))
-dbus_generated_headers :=
-ifneq ($(dbus_definitions),)
-my_soong_problems += dbus
-
-dbus_definition_paths := $(addprefix $(LOCAL_PATH)/,$(dbus_definitions))
-dbus_service_config := $(filter %dbus-service-config.json,$(my_src_files))
-dbus_service_config_path := $(addprefix $(LOCAL_PATH)/,$(dbus_service_config))
-
-# Mark these source files as not producing objects
-$(call track-src-file-obj,$(dbus_definitions) $(dbus_service_config),)
-
-dbus_gen_dir := $(generated_sources_dir)/dbus_bindings
-
-ifdef LOCAL_DBUS_PROXY_PREFIX
-dbus_header_dir := $(dbus_gen_dir)/include/$(LOCAL_DBUS_PROXY_PREFIX)
-dbus_headers := dbus-proxies.h
-else
-dbus_header_dir := $(dbus_gen_dir)
-dbus_headers := $(patsubst %.dbus-xml,%.h,$(dbus_definitions))
-endif
-dbus_generated_headers := $(addprefix $(dbus_header_dir)/,$(dbus_headers))
-
-# Ensure that we only define build rules once in multilib builds.
-ifndef $(my_prefix)_$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_dbus_bindings_defined
-$(my_prefix)_$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_dbus_bindings_defined := true
-
-$(dbus_generated_headers): PRIVATE_MODULE := $(LOCAL_MODULE)
-$(dbus_generated_headers): PRIVATE_DBUS_SERVICE_CONFIG := $(dbus_service_config_path)
-$(dbus_generated_headers) : $(dbus_service_config_path) $(DBUS_GENERATOR)
-ifdef LOCAL_DBUS_PROXY_PREFIX
-$(dbus_generated_headers) : $(dbus_definition_paths)
-	$(generate-dbus-proxies)
-else
-$(dbus_generated_headers) : $(dbus_header_dir)/%.h : $(LOCAL_PATH)/%.dbus-xml
-	$(generate-dbus-adaptors)
-endif  # $(LOCAL_DBUS_PROXY_PREFIX)
-endif  # $(my_prefix)_$(LOCAL_MODULE_CLASS)_$(LOCAL_MODULE)_dbus_bindings_defined
-
-ifdef LOCAL_DBUS_PROXY_PREFIX
-# Auto-export the generated dbus proxy directory.
-my_export_c_include_dirs += $(dbus_gen_dir)/include
-my_c_includes += $(dbus_gen_dir)/include
-else
-my_export_c_include_dirs += $(dbus_header_dir)
-my_c_includes += $(dbus_header_dir)
-endif  # $(LOCAL_DBUS_PROXY_PREFIX)
-
-my_generated_sources += $(dbus_generated_headers)
-
-endif  # $(dbus_definitions) non-empty
-
-
-###########################################################
 ## AIDL: Compile .aidl files to .cpp and .h files
 ###########################################################
 aidl_src := $(strip $(filter %.aidl,$(my_src_files)))
@@ -1855,6 +1803,14 @@
 # Coverage packaging.
 ###########################################################
 ifeq ($(my_native_coverage),true)
-LOCAL_GCNO_FILES := $(patsubst %.o,%.gcno,$(all_objects))
-$(foreach f,$(all_objects),$(eval $(call gcno-touch-rule,$(f),$(f:.o=.gcno))))
+my_gcno_objects := \
+    $(cpp_objects) \
+    $(gen_cpp_objects) \
+    $(c_objects) \
+    $(gen_c_objects) \
+    $(objc_objects) \
+    $(objcpp_objects)
+
+LOCAL_GCNO_FILES := $(patsubst %.o,%.gcno,$(my_gcno_objects))
+$(foreach f,$(my_gcno_objects),$(eval $(call gcno-touch-rule,$(f),$(f:.o=.gcno))))
 endif
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index ae037b3..f30c4ba 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -44,9 +44,11 @@
 LOCAL_CXX:=
 LOCAL_CXX_STL := default
 LOCAL_DATA_BINDING:=
-LOCAL_DBUS_PROXY_PREFIX:=
+LOCAL_DEX_PREOPT_APP_IMAGE:=
 LOCAL_DEX_PREOPT_FLAGS:=
+LOCAL_DEX_PREOPT_GENERATE_PROFILE:=
 LOCAL_DEX_PREOPT_IMAGE_LOCATION:=
+LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING:=
 LOCAL_DEX_PREOPT:= # '',true,false,nostripping
 LOCAL_DONT_CHECK_MODULE:=
 # Don't delete the META_INF dir when merging static Java libraries.
diff --git a/core/config.mk b/core/config.mk
index 8fa7d98..319a069 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -490,6 +490,7 @@
 SIGNAPK_JNI_LIBRARY_PATH := $(HOST_OUT_SHARED_LIBRARIES)
 LLVM_RS_CC := $(HOST_OUT_EXECUTABLES)/llvm-rs-cc
 BCC_COMPAT := $(HOST_OUT_EXECUTABLES)/bcc_compat
+DEPMOD := $(HOST_OUT_EXECUTABLES)/depmod
 
 DX := $(HOST_OUT_EXECUTABLES)/dx
 MAINDEXCLASSES := $(HOST_OUT_EXECUTABLES)/mainDexClasses
@@ -566,7 +567,6 @@
                external/nanopb-c/generator/google/*.py \
                external/nanopb-c/generator/proto/*.py)
 VTSC := $(HOST_OUT_EXECUTABLES)/vtsc$(HOST_EXECUTABLE_SUFFIX)
-DBUS_GENERATOR := $(HOST_OUT_EXECUTABLES)/dbus-binding-generator
 MKBOOTFS := $(HOST_OUT_EXECUTABLES)/mkbootfs$(HOST_EXECUTABLE_SUFFIX)
 MINIGZIP := $(HOST_OUT_EXECUTABLES)/minigzip$(HOST_EXECUTABLE_SUFFIX)
 ifeq (,$(strip $(BOARD_CUSTOM_MKBOOTIMG)))
@@ -635,6 +635,7 @@
 FEC := $(HOST_OUT_EXECUTABLES)/fec
 
 DEXDUMP := $(HOST_OUT_EXECUTABLES)/dexdump2$(BUILD_EXECUTABLE_SUFFIX)
+PROFMAN := $(HOST_OUT_EXECUTABLES)/profman
 
 # relocation packer
 RELOCATION_PACKER := prebuilts/misc/$(BUILD_OS)-$(HOST_PREBUILT_ARCH)/relocation_packer/relocation_packer
@@ -816,6 +817,7 @@
     art/% \
     bionic/% \
     external/fio/% \
+    hardware/interfaces/% \
 
 define find_warning_disallowed_projects
     $(filter $(ANDROID_WARNING_DISALLOWED_PROJECTS),$(1)/)
@@ -850,7 +852,7 @@
     userdataimage-nodeps userdatatarball-nodeps \
     cacheimage-nodeps \
     bptimage-nodeps \
-    vendorimage-nodeps \
+    vnod vendorimage-nodeps \
     systemotherimage-nodeps \
     ramdisk-nodeps \
     bootimage-nodeps \
diff --git a/core/definitions.mk b/core/definitions.mk
index 556b41f..aae269b 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -1196,31 +1196,6 @@
   $(hide) mv $(basename $@).cc $@)
 endef
 
-
-######################################################################
-## Commands for generating DBus adaptors from .dbus-xml files.
-######################################################################
-define generate-dbus-adaptors
-@echo "Generating DBus adaptors for $(PRIVATE_MODULE)"
-@mkdir -p $(dir $@)
-$(hide) $(DBUS_GENERATOR) \
-	--service-config=$(PRIVATE_DBUS_SERVICE_CONFIG) \
-	--adaptor=$@ \
-	$<
-endef
-
-######################################################################
-## Commands for generating DBus proxies from .dbus-xml files.
-######################################################################
-define generate-dbus-proxies
-@echo "Generating DBus proxies for $(PRIVATE_MODULE)"
-@mkdir -p $(dir $@)
-$(hide) $(DBUS_GENERATOR) \
-	--service-config=$(PRIVATE_DBUS_SERVICE_CONFIG) \
-	--proxy=$@ \
-	$(filter %.dbus-xml,$^)
-endef
-
 ###########################################################
 ## Helper to set include paths form transform-*-to-o
 ###########################################################
@@ -2870,8 +2845,10 @@
 
 # Define a rule to create a symlink to a file.
 # $(1): full path to source
-# $(2): source (may be relative)
-# $(3): full path to destination
+# $(2): target of the link
+# $(3): full path of the symlink
+# $(4): (optional) when set to true, $(2) is interpreted as a path from the build root,
+#       and the symlink at $(3) is made relative to its own location (as with ln -r).
+#       Off by default.
 define symlink-file
 $(eval $(_symlink-file))
 endef
@@ -2883,7 +2860,9 @@
 	@echo "Symlink: $$@ -> $(2)"
 	@mkdir -p $(dir $$@)
 	@rm -rf $$@
-	$(hide) ln -sf $(2) $$@
+	$(if $(filter true,$(4)),\
+            $(hide) python -c "import os.path; import os; os.symlink(os.path.relpath('$(2)','$(dir $(3))'), '$$@')",\
+            $(hide) ln -sf $(2) $$@)
 endef
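+# For illustration only (hypothetical paths): with $(4) = true,
+#   $(call symlink-file,$(TARGET_OUT)/lib/vndk/libbar.so,\
+#       $(TARGET_OUT)/lib/vndk/libbar.so,$(TARGET_OUT)/lib/libbar.so,true)
+# creates $(TARGET_OUT)/lib/libbar.so -> vndk/libbar.so, the target being
+# computed with os.path.relpath.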
 
 ###########################################################
@@ -3144,7 +3123,8 @@
   NOTICE_FILE \
   HOST_DALVIK_JAVA_LIBRARY \
   HOST_DALVIK_STATIC_JAVA_LIBRARY \
-  base_rules
+  base_rules \
+  HEADER_LIBRARY
 
 $(foreach $(s),$(STATS.MODULE_TYPE),$(eval STATS.MODULE_TYPE.$(s) :=))
 define record-module-type
@@ -3243,12 +3223,44 @@
 #$(warning 42 == $(call math_max,5,42))
 #$(warning 42 == $(call math_max,42,5))
 
+define math_gt_or_eq
+$(if $(filter $(1),$(call math_max,$(1),$(2))),true)
+endef
+
+#$(warning $(call math_gt_or_eq, 2, 1))
+#$(warning $(call math_gt_or_eq, 1, 1))
+#$(warning $(if $(call math_gt_or_eq, 1, 2),false,true))
+
 # $1 is the variable name to increment
 define inc_and_print
 $(strip $(eval $(1) := $($(1)) .)$(words $($(1))))
 endef
 
 ###########################################################
+## Compatibility suite tools
+###########################################################
+
+# Return a list of output directories for a given suite and the current LOCAL_MODULE
+define compatibility_suite_dirs
+  $(strip \
+    $(COMPATIBILITY_TESTCASES_OUT_$(1)) \
+    $($(my_prefix)OUT_TESTCASES)/$(LOCAL_MODULE))
+endef
+
+# For each suite:
+# 1. Copy the files to each of the suite's output directories.
+# 2. Add all the files to the suite's dependent-files list.
+# 3. Make my_all_targets depend on the copied files.
+# Requires my_compat_dist_$(suite) to be defined for each suite.
+define create-suite-dependencies
+$(foreach suite, $(LOCAL_COMPATIBILITY_SUITE), \
+  $(eval my_compat_files_$(suite) := $(call copy-many-files, $(my_compat_dist_$(suite)))) \
+  $(eval COMPATIBILITY.$(suite).FILES := \
+    $(COMPATIBILITY.$(suite).FILES) $(my_compat_files_$(suite))) \
+  $(eval $(my_all_targets) : $(my_compat_files_$(suite))))
+endef
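+# Minimal usage sketch (suite name hypothetical): a module with
+# LOCAL_COMPATIBILITY_SUITE := cts fills my_compat_dist_cts with src:dest
+# pairs; $(call create-suite-dependencies) then copies the files, records
+# them in COMPATIBILITY.cts.FILES, and hooks them onto my_all_targets.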
+
+###########################################################
 ## Other includes
 ###########################################################
 
diff --git a/core/dex_preopt_libart.mk b/core/dex_preopt_libart.mk
index 41e5e87..ffb888c 100644
--- a/core/dex_preopt_libart.mk
+++ b/core/dex_preopt_libart.mk
@@ -119,11 +119,12 @@
 	--instruction-set=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_ARCH) \
 	--instruction-set-variant=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_CPU_VARIANT) \
 	--instruction-set-features=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
-	--include-patch-information --runtime-arg -Xnorelocate --compile-pic \
+	--runtime-arg -Xnorelocate --compile-pic \
 	--no-generate-debug-info --generate-build-id \
 	--abort-on-hard-verifier-error \
 	--no-inline-from=core-oj.jar \
 	$(PRIVATE_DEX_PREOPT_FLAGS) \
 	$(PRIVATE_ART_FILE_PREOPT_FLAGS) \
+	$(PRIVATE_PROFILE_PREOPT_FLAGS) \
 	$(GLOBAL_DEXPREOPT_FLAGS)
 endef
diff --git a/core/dex_preopt_libart_boot.mk b/core/dex_preopt_libart_boot.mk
index 54211a4..860a66a 100644
--- a/core/dex_preopt_libart_boot.mk
+++ b/core/dex_preopt_libart_boot.mk
@@ -72,7 +72,7 @@
 		--instruction-set-variant=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_CPU_VARIANT) \
 		--instruction-set-features=$($(PRIVATE_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES) \
 		--android-root=$(PRODUCT_OUT)/system \
-		--include-patch-information --runtime-arg -Xnorelocate --compile-pic \
+		--runtime-arg -Xnorelocate --compile-pic \
 		--no-generate-debug-info --generate-build-id \
 		--multi-image --no-inline-from=core-oj.jar \
 		$(PRODUCT_DEX_PREOPT_BOOT_FLAGS) $(GLOBAL_DEXPREOPT_FLAGS) $(COMPILED_CLASSES_FLAGS) $(ART_BOOT_IMAGE_EXTRA_ARGS)
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index 9a15706..4ffd6f1 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -113,6 +113,38 @@
 installed_art := $(strip $(installed_art))
 
 ifdef built_odex
+
+ifndef LOCAL_DEX_PREOPT_GENERATE_PROFILE
+ifeq (true,$(WITH_DEX_PREOPT_GENERATE_PROFILE))
+  LOCAL_DEX_PREOPT_GENERATE_PROFILE := true
+endif
+endif
+
+ifeq (true,$(LOCAL_DEX_PREOPT_GENERATE_PROFILE))
+ifndef LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING
+$(call pretty-error,Must have specified class listing (LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING))
+endif
+my_built_profile := $(dir $(LOCAL_BUILT_MODULE))/profile.prof
+my_dex_location := $(patsubst $(PRODUCT_OUT)%,%,$(LOCAL_INSTALLED_MODULE))
+$(built_odex): $(my_built_profile)
+$(built_odex): PRIVATE_PROFILE_PREOPT_FLAGS := --profile-file=$(my_built_profile)
+$(my_built_profile): PRIVATE_INSTALLED_MODULE := $(LOCAL_INSTALLED_MODULE)
+$(my_built_profile): PRIVATE_DEX_LOCATION := $(my_dex_location)
+$(my_built_profile): PRIVATE_SOURCE_CLASSES := $(LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING)
+$(my_built_profile): $(LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING)
+$(my_built_profile): $(PROFMAN)
+$(my_built_profile): $(LOCAL_INSTALLED_MODULE)
+$(my_built_profile):
+	$(hide) mkdir -p $(dir $@)
+	ANDROID_LOG_TAGS="*:e" $(PROFMAN) \
+		--create-profile-from=$(PRIVATE_SOURCE_CLASSES) \
+		--apk=$(PRIVATE_INSTALLED_MODULE) \
+		--dex-location=$(PRIVATE_DEX_LOCATION) \
+		--reference-profile-file=$@
+else
+$(built_odex): PRIVATE_PROFILE_PREOPT_FLAGS :=
+endif
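+# Opt-in sketch from a module's Android.mk (listing path hypothetical):
+#   LOCAL_DEX_PREOPT_GENERATE_PROFILE := true
+#   LOCAL_DEX_PREOPT_PROFILE_CLASS_LISTING := $(LOCAL_PATH)/profiles/classes.txt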
+
 ifndef LOCAL_DEX_PREOPT_FLAGS
 LOCAL_DEX_PREOPT_FLAGS := $(DEXPREOPT.$(TARGET_PRODUCT).$(LOCAL_MODULE).CONFIG)
 ifndef LOCAL_DEX_PREOPT_FLAGS
diff --git a/core/help.mk b/core/help.mk
index 6e0b2c0..c034e79 100644
--- a/core/help.mk
+++ b/core/help.mk
@@ -22,6 +22,7 @@
 	@echo "droid                   Default target"
 	@echo "clean                   (aka clobber) equivalent to rm -rf out/"
 	@echo "snod                    Quickly rebuild the system image from built packages"
+	@echo "vnod                    Quickly rebuild the vendor image from built packages"
 	@echo "offline-sdk-docs        Generate the HTML for the developer SDK docs"
 	@echo "doc-comment-check-docs  Check HTML doc links & validity, without generating HTML"
 	@echo "libandroid_runtime      All the JNI framework stuff"
diff --git a/core/main.mk b/core/main.mk
index 85f5d6c..7433f90 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -10,7 +10,7 @@
 endif
 
 ifndef KATI
-USE_SOONG_UI ?= false
+USE_SOONG_UI ?= true
 endif
 ifeq ($(USE_SOONG_UI),true)
 
@@ -343,6 +343,16 @@
   ADDITIONAL_BUILD_PROPERTIES += ro.bionic.ld.warning=1
 endif
 
+# Boolean variable determining if Treble is fully enabled
+PRODUCT_FULL_TREBLE := false
+ifeq ($(PRODUCT_FULL_TREBLE_OVERRIDE),true)
+  PRODUCT_FULL_TREBLE := true
+else ifeq ($(PRODUCT_SHIPPING_API_LEVEL),)
+  #$(warning no product shipping level defined)
+else ifneq ($(call math_gt_or_eq,$(PRODUCT_SHIPPING_API_LEVEL),26),)
+  PRODUCT_FULL_TREBLE := true
+endif
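+# For illustration: PRODUCT_SHIPPING_API_LEVEL := 26 (or higher) yields
+# PRODUCT_FULL_TREBLE := true, and PRODUCT_FULL_TREBLE_OVERRIDE := true forces
+# it on regardless of the shipping API level.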
+
 # -----------------------------------------------------------------
 ###
 ### In this section we set up the things that are different
diff --git a/core/package_internal.mk b/core/package_internal.mk
index c9b6814..ca12437 100644
--- a/core/package_internal.mk
+++ b/core/package_internal.mk
@@ -624,18 +624,14 @@
 $(my_all_targets): $(installed_apk_splits)
 
 ifdef LOCAL_COMPATIBILITY_SUITE
-cts_testcase_file := $(foreach s,$(my_split_suffixes),$(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE)_$(s).apk)
-$(cts_testcase_file) : $(COMPATIBILITY_TESTCASES_OUT_$(LOCAL_COMPATIBILITY_SUITE))/$(LOCAL_MODULE)_%.apk : $(built_module_path)/package_%.apk | $(ACP)
-	$(copy-file-to-new-target)
-common_testcase_file := $(foreach s,$(my_split_suffixes),$($(my_prefix)OUT_TESTCASES)/$(LOCAL_MODULE)/$(LOCAL_MODULE)_$(s).apk)
-$(common_testcase_file) : $($(my_prefix)OUT_TESTCASES)/$(LOCAL_MODULE)/$(LOCAL_MODULE)_%.apk : $(built_module_path)/package_%.apk
-	$(copy-file-to-new-target)
 
-COMPATIBILITY.$(LOCAL_COMPATIBILITY_SUITE).FILES := \
-  $(COMPATIBILITY.$(LOCAL_COMPATIBILITY_SUITE).FILES) \
-  $(cts_testcase_file) $(common_testcase_file)
+$(foreach suite, $(LOCAL_COMPATIBILITY_SUITE), \
+  $(eval my_compat_dist_$(suite) := $(foreach dir, $(call compatibility_suite_dirs,$(suite)), \
+    $(foreach s,$(my_split_suffixes),\
+      $(built_module_path)/package_$(s).apk:$(dir)/$(LOCAL_MODULE)_$(s).apk))))
 
-$(my_all_targets) : $(cts_testcase_file) $(common_testcase_file)
+$(call create-suite-dependencies)
+
 endif # LOCAL_COMPATIBILITY_SUITE
 endif # LOCAL_PACKAGE_SPLITS
 
diff --git a/core/product.mk b/core/product.mk
index 7d7c68b..fffd7c8 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -128,6 +128,7 @@
     VENDOR_EXCEPTION_MODULES \
     VENDOR_EXCEPTION_PATHS \
     PRODUCT_ART_USE_READ_BARRIER \
+    PRODUCT_IOT \
 
 
 
diff --git a/core/product_config.mk b/core/product_config.mk
index 8943429..7a2d9cb 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -204,7 +204,7 @@
     $(eval _cpm_word2 := $(word 2,$(_cpm_words)))\
     $(if $(_cpm_word2),\
         $(eval all_product_makefiles += $(_cpm_word2))\
-        $(eval all_named_products += $(_cpm_word2))\
+        $(eval all_named_products += $(_cpm_word1))\
         $(if $(filter $(TARGET_PRODUCT),$(_cpm_word1)),\
             $(eval current_product_makefile += $(_cpm_word2)),),\
         $(eval all_product_makefiles += $(f))\
@@ -420,3 +420,7 @@
 # Make this art variable visible to soong_config.mk.
 PRODUCT_ART_USE_READ_BARRIER := \
     $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ART_USE_READ_BARRIER))
+
+# Whether the product is an Android Things variant.
+PRODUCT_IOT := \
+    $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_IOT))
diff --git a/core/setup_one_odex.mk b/core/setup_one_odex.mk
index 0afc5b7..f0ffe81 100644
--- a/core/setup_one_odex.mk
+++ b/core/setup_one_odex.mk
@@ -38,7 +38,14 @@
 my_installed_vdex := $(patsubst %.odex,%.vdex,$(my_installed_odex))
 my_installed_art := $(patsubst %.odex,%.art,$(my_installed_odex))
 
-ifeq (true,$(WITH_DEXPREOPT_APP_IMAGE))
+ifndef LOCAL_DEX_PREOPT_APP_IMAGE
+# Local override not defined, use the global one.
+ifeq (true,$(WITH_DEX_PREOPT_APP_IMAGE))
+  LOCAL_DEX_PREOPT_APP_IMAGE := true
+endif
+endif
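+# Per-module sketch (hypothetical): setting LOCAL_DEX_PREOPT_APP_IMAGE := true
+# in an Android.mk forces an app image for that module even when the global
+# WITH_DEX_PREOPT_APP_IMAGE is unset.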
+
+ifeq (true,$(LOCAL_DEX_PREOPT_APP_IMAGE))
 my_built_art := $(patsubst %.odex,%.art,$(my_built_odex))
 $(my_built_odex): PRIVATE_ART_FILE_PREOPT_FLAGS := --app-image-file=$(my_built_art) \
     --image-format=lz4
diff --git a/core/soong_config.mk b/core/soong_config.mk
index 92e66ae..ff8a51d 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -43,6 +43,7 @@
 	echo ''; \
 	echo '    "NativeCoverage": $(if $(filter true,$(NATIVE_COVERAGE)),true,false),'; \
 	echo '    "CoveragePaths": [$(if $(COVERAGE_PATHS),"$(subst $(space),"$(comma)",$(subst $(comma),$(space),$(COVERAGE_PATHS)))")],'; \
+	echo '    "CoverageExcludePaths": [$(if $(COVERAGE_EXCLUDE_PATHS),"$(subst $(space),"$(comma)",$(subst $(comma),$(space),$(COVERAGE_EXCLUDE_PATHS)))")],'; \
 	echo ''; \
 	echo '    "DeviceName": "$(TARGET_DEVICE)",'; \
 	echo '    "DeviceArch": "$(TARGET_ARCH)",'; \
@@ -68,7 +69,9 @@
 	echo ''; \
 	echo '    "ArtUseReadBarrier": $(if $(filter false,$(PRODUCT_ART_USE_READ_BARRIER)),false,true),'; \
 	echo ''; \
-	echo '    "BtConfigIncludeDir": "$(BOARD_BLUETOOTH_BDROID_BUILDCFG_INCLUDE_DIR)"'; \
+	echo '    "BtConfigIncludeDir": "$(BOARD_BLUETOOTH_BDROID_BUILDCFG_INCLUDE_DIR)",'; \
+	echo ''; \
+	echo '    "SameProcessHalDeps": [$(if $(BOARD_SAME_PROCESS_HAL_DEPS),"$(subst $(space),"$(comma)",$(BOARD_SAME_PROCESS_HAL_DEPS))")]'; \
 	echo '}') > $(SOONG_VARIABLES_TMP); \
 	if ! cmp -s $(SOONG_VARIABLES_TMP) $(SOONG_VARIABLES); then \
 	  mv $(SOONG_VARIABLES_TMP) $(SOONG_VARIABLES); \
diff --git a/core/tasks/tools/compatibility.mk b/core/tasks/tools/compatibility.mk
index d8f900e..1455a44 100644
--- a/core/tasks/tools/compatibility.mk
+++ b/core/tasks/tools/compatibility.mk
@@ -37,13 +37,14 @@
 $(compatibility_zip): PRIVATE_TOOLS := $(test_tools)
 $(compatibility_zip): PRIVATE_SUITE_NAME := $(test_suite_name)
 $(compatibility_zip): PRIVATE_DYNAMIC_CONFIG := $(test_suite_dynamic_config)
-$(compatibility_zip): $(test_artifacts) $(test_tools) $(test_suite_dynamic_config) | $(ADB) $(ACP)
+$(compatibility_zip): $(test_artifacts) $(test_tools) $(test_suite_dynamic_config) $(SOONG_ZIP) | $(ADB) $(ACP)
 # Make dir structure
 	$(hide) mkdir -p $(PRIVATE_OUT_DIR)/tools $(PRIVATE_OUT_DIR)/testcases
 # Copy tools
 	$(hide) $(ACP) -fp $(PRIVATE_TOOLS) $(PRIVATE_OUT_DIR)/tools
 	$(if $(PRIVATE_DYNAMIC_CONFIG),$(hide) $(ACP) -fp $(PRIVATE_DYNAMIC_CONFIG) $(PRIVATE_OUT_DIR)/testcases/$(PRIVATE_SUITE_NAME).dynamic)
-	$(hide) cd $(dir $@) && zip -rq $(notdir $@) $(PRIVATE_NAME)
+	$(hide) find $(dir $@)/$(PRIVATE_NAME) | sort >$@.list
+	$(hide) $(SOONG_ZIP) -d -o $@ -C $(dir $@) -l $@.list
 
 # Reset all input variables
 test_suite_name :=
diff --git a/core/tasks/tools/package-modules.mk b/core/tasks/tools/package-modules.mk
index ef49d90..4dde9fd 100644
--- a/core/tasks/tools/package-modules.mk
+++ b/core/tasks/tools/package-modules.mk
@@ -9,6 +9,7 @@
 #
 #
 
+my_makefile := $(lastword $(filter-out $(lastword $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
 my_staging_dir := $(call intermediates-dir-for,PACKAGING,$(my_package_name))
 my_built_modules :=
 my_copy_pairs :=
@@ -33,7 +34,7 @@
   $(eval _built_files := $(strip $(ALL_MODULES.$(m).BUILT_INSTALLED)\
     $(ALL_MODULES.$(m)$(TARGET_2ND_ARCH_MODULE_SUFFIX).BUILT_INSTALLED)))\
   $(if $(_pickup_files)$(_built_files),,\
-    $(warning Unknown installed file for module '$(m)'))\
+    $(shell $(call echo-warning,$(my_makefile),$(my_package_name): Unknown installed file for module '$(m)')))\
   $(eval my_pickup_files += $(_pickup_files))\
   $(foreach i, $(_built_files),\
     $(eval bui_ins := $(subst :,$(space),$(i)))\
@@ -47,26 +48,16 @@
       $(eval my_copy_pairs += $(bui):$(my_staging_dir)/$(my_copy_dest)))\
   ))
 
-define copy-tests-in-batch
-$(hide) $(foreach p, $(1),\
-  $(eval pair := $(subst :,$(space),$(p)))\
-  mkdir -p $(dir $(word 2,$(pair)));\
-  cp -Rf $(word 1,$(pair)) $(word 2,$(pair));)
-endef
-
 my_package_zip := $(my_staging_dir)/$(my_package_name).zip
 $(my_package_zip): PRIVATE_COPY_PAIRS := $(my_copy_pairs)
 $(my_package_zip): PRIVATE_PICKUP_FILES := $(my_pickup_files)
 $(my_package_zip) : $(my_built_modules)
 	@echo "Package $@"
 	@rm -rf $(dir $@) && mkdir -p $(dir $@)
-	$(call copy-tests-in-batch,$(wordlist 1,200,$(PRIVATE_COPY_PAIRS)))
-	$(call copy-tests-in-batch,$(wordlist 201,400,$(PRIVATE_COPY_PAIRS)))
-	$(call copy-tests-in-batch,$(wordlist 401,600,$(PRIVATE_COPY_PAIRS)))
-	$(call copy-tests-in-batch,$(wordlist 601,800,$(PRIVATE_COPY_PAIRS)))
-	$(call copy-tests-in-batch,$(wordlist 801,1000,$(PRIVATE_COPY_PAIRS)))
-	$(call copy-tests-in-batch,$(wordlist 1001,1200,$(PRIVATE_COPY_PAIRS)))
-	$(call copy-tests-in-batch,$(wordlist 1201,9999,$(PRIVATE_COPY_PAIRS)))
+	$(foreach p, $(PRIVATE_COPY_PAIRS),\
+	  $(eval pair := $(subst :,$(space),$(p)))\
+	  mkdir -p $(dir $(word 2,$(pair))) && \
+	  cp -Rf $(word 1,$(pair)) $(word 2,$(pair)) && ) true
 	$(hide) $(foreach f, $(PRIVATE_PICKUP_FILES),\
-	  cp -RfL $(f) $(dir $@);)
+	  cp -RfL $(f) $(dir $@) && ) true
 	$(hide) cd $(dir $@) && zip -rqX $(notdir $@) *
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index 2245aa2..3332fc7 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -76,12 +76,24 @@
 $(foreach v,$(ENABLED_VERSIONS), \
   $(eval IS_AT_LEAST_$(v) := true))
 
+# Default versions for each TARGET_PLATFORM_VERSION
+
+# This is the canonical definition of the platform version,
+# which is the version that we reveal to the end user.
+# Update this value when the platform version changes (rather
+# than overriding it somewhere else).  Can be an arbitrary string.
+PLATFORM_VERSION.O := O
+
+# This is the current development code-name, if the build is not a final
+# release build.  If this is a final release build, it is simply "REL".
+PLATFORM_VERSION_CODENAME.O := O
+
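+# Lookup sketch: with TARGET_PLATFORM_VERSION := O, PLATFORM_VERSION resolves
+# to $(PLATFORM_VERSION.O) = O; an unknown version string falls back to
+# TARGET_PLATFORM_VERSION itself (see below).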
 ifndef PLATFORM_VERSION
-  # This is the canonical definition of the platform version,
-  # which is the version that we reveal to the end user.
-  # Update this value when the platform version changes (rather
-  # than overriding it somewhere else).  Can be an arbitrary string.
-  PLATFORM_VERSION := 7.1.1
+  PLATFORM_VERSION := $(PLATFORM_VERSION.$(TARGET_PLATFORM_VERSION))
+  ifndef PLATFORM_VERSION
+    # PLATFORM_VERSION falls back to TARGET_PLATFORM_VERSION
+    PLATFORM_VERSION := $(TARGET_PLATFORM_VERSION)
+  endif
 endif
 
 ifndef PLATFORM_SDK_VERSION
@@ -106,9 +118,11 @@
 endif
 
 ifndef PLATFORM_VERSION_CODENAME
-  # This is the current development code-name, if the build is not a final
-  # release build.  If this is a final release build, it is simply "REL".
-  PLATFORM_VERSION_CODENAME := REL
+  PLATFORM_VERSION_CODENAME := $(PLATFORM_VERSION_CODENAME.$(TARGET_PLATFORM_VERSION))
+  ifndef PLATFORM_VERSION_CODENAME
+    # PLATFORM_VERSION_CODENAME falls back to TARGET_PLATFORM_VERSION
+    PLATFORM_VERSION_CODENAME := $(TARGET_PLATFORM_VERSION)
+  endif
 
   # This is all of the development codenames that are active.  Should be either
   # the same as PLATFORM_VERSION_CODENAME or a comma-separated list of additional
diff --git a/envsetup.sh b/envsetup.sh
index b239441..9680780 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -1148,8 +1148,7 @@
             adb shell cat $TMP
         else
             # Dump stacks of native process
-            local USE64BIT="$(is64bit $PID)"
-            adb shell debuggerd$USE64BIT -b $PID
+            adb shell debuggerd -b $PID
         fi
     fi
 }
diff --git a/target/product/core.mk b/target/product/core.mk
index 10b2c9e..c4c7cab 100644
--- a/target/product/core.mk
+++ b/target/product/core.mk
@@ -25,6 +25,7 @@
     BlockedNumberProvider \
     BookmarkProvider \
     Browser2 \
+    BuiltInPrintService \
     Calendar \
     CalendarProvider \
     CaptivePortalLogin \
diff --git a/target/product/embedded.mk b/target/product/embedded.mk
index ed7b8b7..c77bce2 100644
--- a/target/product/embedded.mk
+++ b/target/product/embedded.mk
@@ -20,6 +20,8 @@
 PRODUCT_PACKAGES += \
     adb \
     adbd \
+    android.hardware.configstore@1.0-impl \
+    android.hardware.configstore@1.0-service \
     android.hidl.allocator@1.0-service \
     android.hidl.memory@1.0-impl \
     atrace \
@@ -84,20 +86,14 @@
 
 # SELinux packages
 PRODUCT_PACKAGES += \
-    secilc \
-    file_contexts.bin \
-    nonplat_file_contexts \
     nonplat_mac_permissions.xml \
     nonplat_property_contexts \
     nonplat_seapp_contexts \
     nonplat_service_contexts \
-    plat_file_contexts \
     plat_mac_permissions.xml \
     plat_property_contexts \
     plat_seapp_contexts \
-    plat_service_contexts \
-    selinux_version \
-    sepolicy
+    plat_service_contexts
 
 # AID Generation for
 # <pwd.h> and <grp.h>
@@ -117,3 +113,7 @@
     system/core/rootdir/init.usb.configfs.rc:root/init.usb.configfs.rc \
     system/core/rootdir/ueventd.rc:root/ueventd.rc \
     system/core/rootdir/etc/hosts:system/etc/hosts
+
+# Framework Manifest
+PRODUCT_COPY_FILES += \
+    system/libhidl/manifest.xml:system/manifest.xml
diff --git a/target/product/runtime_libart.mk b/target/product/runtime_libart.mk
index 0f42c27..fb52d67 100644
--- a/target/product/runtime_libart.mk
+++ b/target/product/runtime_libart.mk
@@ -80,4 +80,5 @@
     ro.dalvik.vm.native.bridge=0 \
     dalvik.vm.usejit=true \
     dalvik.vm.usejitprofiles=true \
+    dalvik.vm.dexopt.secondary=true \
     dalvik.vm.appimageformat=lz4
diff --git a/tools/kati_all_products.sh b/tools/kati_all_products.sh
deleted file mode 100755
index 4567dbd..0000000
--- a/tools/kati_all_products.sh
+++ /dev/null
@@ -1,7 +0,0 @@
-#!/bin/bash -e
-
-cd $ANDROID_BUILD_TOP
-mkdir -p out.kati
-source build/envsetup.sh
-
-get_build_var all_named_products | sed "s/ /\n/g" | parallel "$@" --progress "(source build/envsetup.sh; lunch {}-eng && m -j OUT_DIR=out.kati/{} out.kati/{}/build-{}.ninja) >out.kati/log.{} 2>&1"
diff --git a/tools/post_process_props.py b/tools/post_process_props.py
index 9dcaadf..295f8f6 100755
--- a/tools/post_process_props.py
+++ b/tools/post_process_props.py
@@ -19,10 +19,9 @@
 # Usage: post_process_props.py file.prop [blacklist_key, ...]
 # Blacklisted keys are removed from the property file, if present
 
-# See PROP_NAME_MAX and PROP_VALUE_MAX system_properties.h.
-# The constants in system_properties.h includes the termination NUL,
-# so we decrease the values by 1 here.
-PROP_NAME_MAX = 31
+# See PROP_VALUE_MAX in system_properties.h.
+# The constant in system_properties.h includes the terminating NUL,
+# so we decrease the value by 1 here.
 PROP_VALUE_MAX = 91
 
 # Put the modifications that you need to make into the /system/build.prop into this
@@ -59,11 +58,6 @@
   buildprops = prop.to_dict()
   for key, value in buildprops.iteritems():
     # Check build properties' length.
-    if len(key) > PROP_NAME_MAX:
-      check_pass = False
-      sys.stderr.write("error: %s cannot exceed %d bytes: " %
-                       (key, PROP_NAME_MAX))
-      sys.stderr.write("%s (%d)\n" % (key, len(key)))
     if len(value) > PROP_VALUE_MAX:
       check_pass = False
       sys.stderr.write("error: %s cannot exceed %d bytes: " %
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index 28fd474..0b8f3a0 100755
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -73,15 +73,33 @@
 OPTIONS.replace_verity_private_key = False
 OPTIONS.is_signing = False
 
+
+class OutputFile(object):
+  """An image file that is written either to a temp file (when packing into an
+  output zip) or directly to its final location under the input directory."""
+  def __init__(self, output_zip, input_dir, prefix, name):
+    self._output_zip = output_zip
+    self.input_name = os.path.join(input_dir, prefix, name)
+
+    if self._output_zip:
+      self._zip_name = os.path.join(prefix, name)
+
+      root, suffix = os.path.splitext(name)
+      self.name = common.MakeTempFile(prefix=root + '-', suffix=suffix)
+    else:
+      self.name = self.input_name
+
+  def Write(self):
+    if self._output_zip:
+      common.ZipWrite(self._output_zip, self.name, self._zip_name)
+
+
 def GetCareMap(which, imgname):
   """Generate care_map of system (or vendor) partition"""
 
   assert which in ("system", "vendor")
-  _, blk_device = common.GetTypeAndDevice("/" + which, OPTIONS.info_dict)
 
   simg = sparse_img.SparseImage(imgname)
   care_map_list = []
-  care_map_list.append(blk_device)
+  care_map_list.append(which)
 
   care_map_ranges = simg.care_map
   key = which + "_adjusted_partition_size"
@@ -99,10 +117,10 @@
   """Turn the contents of SYSTEM into a system image and store it in
   output_zip. Returns the name of the system image file."""
 
-  prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "system.img")
-  if os.path.exists(prebuilt_path):
+  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "system.img")
+  if os.path.exists(img.input_name):
     print("system.img already exists in %s, no need to rebuild..." % (prefix,))
-    return prebuilt_path
+    return img.input_name
 
   def output_sink(fn, data):
     ofile = open(os.path.join(OPTIONS.input_tmp, "SYSTEM", fn), "w")
@@ -114,74 +132,52 @@
     common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink, recovery_img,
                              boot_img, info_dict=OPTIONS.info_dict)
 
-  block_list = common.MakeTempFile(prefix="system-blocklist-", suffix=".map")
-  imgname = BuildSystem(OPTIONS.input_tmp, OPTIONS.info_dict,
-                        block_list=block_list)
+  block_list = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "system.map")
+  CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system", img,
+              block_list=block_list)
 
-  common.ZipWrite(output_zip, imgname, prefix + "system.img")
-  common.ZipWrite(output_zip, block_list, prefix + "system.map")
-  return imgname
-
-
-def BuildSystem(input_dir, info_dict, block_list=None):
-  """Build the (sparse) system image and return the name of a temp
-  file containing it."""
-  return CreateImage(input_dir, info_dict, "system", block_list=block_list)
+  return img.name
 
 
 def AddSystemOther(output_zip, prefix="IMAGES/"):
   """Turn the contents of SYSTEM_OTHER into a system_other image
   and store it in output_zip."""
 
-  prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "system_other.img")
-  if os.path.exists(prebuilt_path):
+  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "system_other.img")
+  if os.path.exists(img.input_name):
     print("system_other.img already exists in %s, no need to rebuild..." % (
         prefix,))
     return
 
-  imgname = BuildSystemOther(OPTIONS.input_tmp, OPTIONS.info_dict)
-  common.ZipWrite(output_zip, imgname, prefix + "system_other.img")
-
-def BuildSystemOther(input_dir, info_dict):
-  """Build the (sparse) system_other image and return the name of a temp
-  file containing it."""
-  return CreateImage(input_dir, info_dict, "system_other", block_list=None)
+  CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system_other", img)
 
 
 def AddVendor(output_zip, prefix="IMAGES/"):
   """Turn the contents of VENDOR into a vendor image and store in it
   output_zip."""
 
-  prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "vendor.img")
-  if os.path.exists(prebuilt_path):
+  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "vendor.img")
+  if os.path.exists(img.input_name):
     print("vendor.img already exists in %s, no need to rebuild..." % (prefix,))
-    return prebuilt_path
+    return img.input_name
 
-  block_list = common.MakeTempFile(prefix="vendor-blocklist-", suffix=".map")
-  imgname = BuildVendor(OPTIONS.input_tmp, OPTIONS.info_dict,
-                        block_list=block_list)
-  common.ZipWrite(output_zip, imgname, prefix + "vendor.img")
-  common.ZipWrite(output_zip, block_list, prefix + "vendor.map")
-  return imgname
+  block_list = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "vendor.map")
+  CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "vendor", img,
+              block_list=block_list)
+  return img.name
 
 
-def BuildVendor(input_dir, info_dict, block_list=None):
-  """Build the (sparse) vendor image and return the name of a temp
-  file containing it."""
-  return CreateImage(input_dir, info_dict, "vendor", block_list=block_list)
-
-
-def CreateImage(input_dir, info_dict, what, block_list=None):
+def CreateImage(input_dir, info_dict, what, output_file, block_list=None):
   print("creating " + what + ".img...")
 
-  img = common.MakeTempFile(prefix=what + "-", suffix=".img")
-
   # The name of the directory it is making an image out of matters to
   # mkyaffs2image.  It wants "system" but we have a directory named
   # "SYSTEM", so create a symlink.
+  temp_dir = tempfile.mkdtemp()
+  OPTIONS.tempfiles.append(temp_dir)
   try:
     os.symlink(os.path.join(input_dir, what.upper()),
-               os.path.join(input_dir, what))
+               os.path.join(temp_dir, what))
   except OSError as e:
     # bogus error on my mac version?
     #   File "./build/tools/releasetools/img_from_target_files"
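
The same make-a-lowercase-alias trick, as a standalone sketch: the symlink now
lives in a registered temp dir instead of the input tree, so nothing is left
behind in input_dir (names here are illustrative):

    import os
    import tempfile

    temp_dir = tempfile.mkdtemp()          # tracked for cleanup, as above
    os.symlink(os.path.join(input_dir, "SYSTEM"),
               os.path.join(temp_dir, "system"))
    # build the image from os.path.join(temp_dir, "system")
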
@@ -216,12 +212,16 @@
   if fs_config:
     image_props["fs_config"] = fs_config
   if block_list:
-    image_props["block_list"] = block_list
+    image_props["block_list"] = block_list.name
 
-  succ = build_image.BuildImage(os.path.join(input_dir, what),
-                                image_props, img)
+  succ = build_image.BuildImage(os.path.join(temp_dir, what),
+                                image_props, output_file.name)
   assert succ, "build " + what + ".img image failed"
 
+  output_file.Write()
+  if block_list:
+    block_list.Write()
+
   is_verity_partition = "verity_block_device" in image_props
   verity_supported = image_props.get("verity") == "true"
   if is_verity_partition and verity_supported:
@@ -230,8 +230,6 @@
       adjusted_blocks_key = what + "_adjusted_partition_size"
       info_dict[adjusted_blocks_key] = int(adjusted_blocks_value)/4096 - 1
 
-  return img
-
 
 def AddUserdata(output_zip, prefix="IMAGES/"):
   """Create a userdata image and store it in output_zip.
@@ -242,8 +240,8 @@
   in OPTIONS.info_dict.
   """
 
-  prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "userdata.img")
-  if os.path.exists(prebuilt_path):
+  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "userdata.img")
+  if os.path.exists(img.input_name):
     print("userdata.img already exists in %s, no need to rebuild..." % (
         prefix,))
     return
@@ -266,6 +264,7 @@
   # empty dir named "data", or a symlink to the DATA dir,
   # and build the image from that.
   temp_dir = tempfile.mkdtemp()
+  OPTIONS.tempfiles.append(temp_dir)
   user_dir = os.path.join(temp_dir, "data")
   empty = (OPTIONS.info_dict.get("userdata_img_with_data") != "true")
   if empty:
@@ -276,8 +275,6 @@
     os.symlink(os.path.join(OPTIONS.input_tmp, "DATA"),
                user_dir)
 
-  img = tempfile.NamedTemporaryFile()
-
   fstab = OPTIONS.info_dict["fstab"]
   if fstab:
     image_props["fs_type"] = fstab["/data"].fs_type
@@ -285,17 +282,15 @@
   assert succ, "build userdata.img image failed"
 
   common.CheckSize(img.name, "userdata.img", OPTIONS.info_dict)
-  common.ZipWrite(output_zip, img.name, prefix + "userdata.img")
-  img.close()
-  shutil.rmtree(temp_dir)
+  img.Write()
 
 
 def AddVBMeta(output_zip, boot_img_path, system_img_path, prefix="IMAGES/"):
   """Create a VBMeta image and store it in output_zip."""
-  _, img_file_name = tempfile.mkstemp()
+  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "vbmeta.img")
   avbtool = os.getenv('AVBTOOL') or "avbtool"
   cmd = [avbtool, "make_vbmeta_image",
-         "--output", img_file_name,
+         "--output", img.name,
          "--include_descriptors_from_image", boot_img_path,
          "--include_descriptors_from_image", system_img_path,
          "--generate_dm_verity_cmdline_from_hashtree", system_img_path]
@@ -306,19 +301,19 @@
   p = common.Run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
   p.communicate()
   assert p.returncode == 0, "avbtool make_vbmeta_image failed"
-  common.ZipWrite(output_zip, img_file_name, prefix + "vbmeta.img")
+  img.Write()
 
 
 def AddPartitionTable(output_zip, prefix="IMAGES/"):
   """Create a partition table image and store it in output_zip."""
 
-  _, img_file_name = tempfile.mkstemp()
-  _, bpt_file_name = tempfile.mkstemp()
+  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "partition-table.img")
+  bpt = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "partition-table.bpt")
 
   # Use BPTTOOL from the environment, or "bpttool" if empty or unset.
   bpttool = os.getenv("BPTTOOL") or "bpttool"
-  cmd = [bpttool, "make_table", "--output_json", bpt_file_name,
-         "--output_gpt", img_file_name]
+  cmd = [bpttool, "make_table", "--output_json", bpt.name,
+         "--output_gpt", img.name]
   input_files_str = OPTIONS.info_dict["board_bpt_input_files"]
   input_files = input_files_str.split(" ")
   for i in input_files:
@@ -334,15 +329,15 @@
   p.communicate()
   assert p.returncode == 0, "bpttool make_table failed"
 
-  common.ZipWrite(output_zip, img_file_name, prefix + "partition-table.img")
-  common.ZipWrite(output_zip, bpt_file_name, prefix + "partition-table.bpt")
+  img.Write()
+  bpt.Write()
 
 
 def AddCache(output_zip, prefix="IMAGES/"):
   """Create an empty cache image and store it in output_zip."""
 
-  prebuilt_path = os.path.join(OPTIONS.input_tmp, prefix, "cache.img")
-  if os.path.exists(prebuilt_path):
+  img = OutputFile(output_zip, OPTIONS.input_tmp, prefix, "cache.img")
+  if os.path.exists(img.input_name):
     print("cache.img already exists in %s, no need to rebuild..." % (prefix,))
     return
 
@@ -363,9 +358,9 @@
   # mkyaffs2image.  So we create a temp dir, and within it we create an
   # empty dir named "cache", and build the image from that.
   temp_dir = tempfile.mkdtemp()
+  OPTIONS.tempfiles.append(temp_dir)
   user_dir = os.path.join(temp_dir, "cache")
   os.mkdir(user_dir)
-  img = tempfile.NamedTemporaryFile()
 
   fstab = OPTIONS.info_dict["fstab"]
   if fstab:
@@ -374,35 +369,39 @@
   assert succ, "build cache.img image failed"
 
   common.CheckSize(img.name, "cache.img", OPTIONS.info_dict)
-  common.ZipWrite(output_zip, img.name, prefix + "cache.img")
-  img.close()
-  os.rmdir(user_dir)
-  os.rmdir(temp_dir)
+  img.Write()
 
 
 def AddImagesToTargetFiles(filename):
-  OPTIONS.input_tmp, input_zip = common.UnzipTemp(filename)
+  if os.path.isdir(filename):
+    OPTIONS.input_tmp = os.path.abspath(filename)
+    input_zip = None
+  else:
+    OPTIONS.input_tmp, input_zip = common.UnzipTemp(filename)
 
   if not OPTIONS.add_missing:
-    for n in input_zip.namelist():
-      if n.startswith("IMAGES/"):
-        print("target_files appears to already contain images.")
-        sys.exit(1)
+    if os.path.isdir(os.path.join(OPTIONS.input_tmp, "IMAGES")):
+      print("target_files appears to already contain images.")
+      sys.exit(1)
 
-  try:
-    input_zip.getinfo("VENDOR/")
-    has_vendor = True
-  except KeyError:
-    has_vendor = False
+  has_vendor = os.path.isdir(os.path.join(OPTIONS.input_tmp, "VENDOR"))
+  has_system_other = os.path.isdir(os.path.join(OPTIONS.input_tmp,
+                                                "SYSTEM_OTHER"))
 
-  has_system_other = "SYSTEM_OTHER/" in input_zip.namelist()
+  if input_zip:
+    OPTIONS.info_dict = common.LoadInfoDict(input_zip, OPTIONS.input_tmp)
 
-  OPTIONS.info_dict = common.LoadInfoDict(input_zip, OPTIONS.input_tmp)
-
-  common.ZipClose(input_zip)
-  output_zip = zipfile.ZipFile(filename, "a",
-                               compression=zipfile.ZIP_DEFLATED,
-                               allowZip64=True)
+    common.ZipClose(input_zip)
+    output_zip = zipfile.ZipFile(filename, "a",
+                                 compression=zipfile.ZIP_DEFLATED,
+                                 allowZip64=True)
+  else:
+    OPTIONS.info_dict = common.LoadInfoDict(filename, filename)
+    output_zip = None
+    images_dir = os.path.join(OPTIONS.input_tmp, "IMAGES")
+    if not os.path.isdir(images_dir):
+      os.makedirs(images_dir)
+    images_dir = None
 
   has_recovery = (OPTIONS.info_dict.get("no_recovery") != "true")
   system_root_image = (OPTIONS.info_dict.get("system_root_image", None) == "true")
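
With the branching above, the one entry point covers both input forms; a
hedged usage sketch (hypothetical paths):

    # Zip input: images are appended to the archive itself.
    AddImagesToTargetFiles("out/target-files.zip")

    # Directory input: images are written under <dir>/IMAGES/ instead.
    AddImagesToTargetFiles("out/target-files-extracted")
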
@@ -423,7 +422,10 @@
     boot_image = common.GetBootableImage(
         "IMAGES/boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
     if boot_image:
-      boot_image.AddToZip(output_zip)
+      if output_zip:
+        boot_image.AddToZip(output_zip)
+      else:
+        boot_image.WriteToDir(OPTIONS.input_tmp)
 
   recovery_image = None
   if has_recovery:
@@ -439,7 +441,10 @@
       recovery_image = common.GetBootableImage(
           "IMAGES/recovery.img", "recovery.img", OPTIONS.input_tmp, "RECOVERY")
       if recovery_image:
-        recovery_image.AddToZip(output_zip)
+        if output_zip:
+          recovery_image.AddToZip(output_zip)
+        else:
+          recovery_image.WriteToDir(OPTIONS.input_tmp)
 
       banner("recovery (two-step image)")
       # The special recovery.img for two-step package use.
@@ -447,7 +452,10 @@
           "IMAGES/recovery-two-step.img", "recovery-two-step.img",
           OPTIONS.input_tmp, "RECOVERY", two_step_image=True)
       if recovery_two_step_image:
-        recovery_two_step_image.AddToZip(output_zip)
+        if output_zip:
+          recovery_two_step_image.AddToZip(output_zip)
+        else:
+          recovery_two_step_image.WriteToDir(OPTIONS.input_tmp)
 
   banner("system")
   system_img_path = AddSystem(
@@ -503,24 +511,39 @@
       img_vendor_dir = os.path.join(
         OPTIONS.input_tmp, "VENDOR_IMAGES")
       if os.path.exists(img_radio_path):
-        common.ZipWrite(output_zip, img_radio_path,
-                        os.path.join("IMAGES", img_name))
+        if output_zip:
+          common.ZipWrite(output_zip, img_radio_path,
+                          os.path.join("IMAGES", img_name))
+        else:
+          shutil.copy(img_radio_path, prebuilt_path)
       else:
         for root, _, files in os.walk(img_vendor_dir):
           if img_name in files:
-            common.ZipWrite(output_zip, os.path.join(root, img_name),
-              os.path.join("IMAGES", img_name))
+            if output_zip:
+              common.ZipWrite(output_zip, os.path.join(root, img_name),
+                os.path.join("IMAGES", img_name))
+            else:
+              shutil.copy(os.path.join(root, img_name), prebuilt_path)
             break
 
-      # Zip spec says: All slashes MUST be forward slashes.
-      img_path = 'IMAGES/' + img_name
-      assert img_path in output_zip.namelist(), "cannot find " + img_name
+      if output_zip:
+        # Zip spec says: All slashes MUST be forward slashes.
+        img_path = 'IMAGES/' + img_name
+        assert img_path in output_zip.namelist(), "cannot find " + img_name
+      else:
+        img_path = os.path.join(OPTIONS.input_tmp, "IMAGES", img_name)
+        assert os.path.exists(img_path), "cannot find " + img_name
 
     if care_map_list:
       file_path = "META/care_map.txt"
-      common.ZipWriteStr(output_zip, file_path, '\n'.join(care_map_list))
+      if output_zip:
+        common.ZipWriteStr(output_zip, file_path, '\n'.join(care_map_list))
+      else:
+        with open(os.path.join(OPTIONS.input_tmp, file_path), 'w') as fp:
+          fp.write('\n'.join(care_map_list))
 
-  common.ZipClose(output_zip)
+  if output_zip:
+    common.ZipClose(output_zip)
 
 def main(argv):
   def option_handler(o, a):
diff --git a/tools/releasetools/blockimgdiff.py b/tools/releasetools/blockimgdiff.py
index c204c90..d8fcc41 100644
--- a/tools/releasetools/blockimgdiff.py
+++ b/tools/releasetools/blockimgdiff.py
@@ -24,8 +24,8 @@
 import os.path
 import re
 import subprocess
+import sys
 import threading
-import tempfile
 
 from collections import deque, OrderedDict
 from hashlib import sha1
@@ -35,69 +35,67 @@
 __all__ = ["EmptyImage", "DataImage", "BlockImageDiff"]
 
 
-def compute_patch(src, tgt, imgdiff=False):
-  srcfd, srcfile = tempfile.mkstemp(prefix="src-")
-  tgtfd, tgtfile = tempfile.mkstemp(prefix="tgt-")
-  patchfd, patchfile = tempfile.mkstemp(prefix="patch-")
-  os.close(patchfd)
+def compute_patch(srcfile, tgtfile, imgdiff=False):
+  patchfile = common.MakeTempFile(prefix="patch-")
 
-  try:
-    with os.fdopen(srcfd, "wb") as f_src:
-      for p in src:
-        f_src.write(p)
+  if imgdiff:
+    p = subprocess.call(
+        ["imgdiff", "-z", srcfile, tgtfile, patchfile],
+        stdout=open(os.devnull, 'w'),
+        stderr=subprocess.STDOUT)
+  else:
+    p = subprocess.call(
+        ["bsdiff", srcfile, tgtfile, patchfile],
+        stdout=open(os.devnull, 'w'),
+        stderr=subprocess.STDOUT)
 
-    with os.fdopen(tgtfd, "wb") as f_tgt:
-      for p in tgt:
-        f_tgt.write(p)
-    try:
-      os.unlink(patchfile)
-    except OSError:
-      pass
-    if imgdiff:
-      p = subprocess.call(["imgdiff", "-z", srcfile, tgtfile, patchfile],
-                          stdout=open("/dev/null", "a"),
-                          stderr=subprocess.STDOUT)
-    else:
-      p = subprocess.call(["bsdiff", srcfile, tgtfile, patchfile])
+  if p:
+    raise ValueError("diff failed: " + str(p))
 
-    if p:
-      raise ValueError("diff failed: " + str(p))
-
-    with open(patchfile, "rb") as f:
-      return f.read()
-  finally:
-    try:
-      os.unlink(srcfile)
-      os.unlink(tgtfile)
-      os.unlink(patchfile)
-    except OSError:
-      pass
+  with open(patchfile, "rb") as f:
+    return f.read()
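
compute_patch() now takes on-disk source and target files instead of iterables
of data chunks, so callers stage the bytes themselves (see diff_worker below).
A minimal sketch, assuming bsdiff/imgdiff are on PATH and both files exist:

    # Hypothetical paths; the return value is the raw patch bytes.
    patch = compute_patch("/tmp/src-abc123", "/tmp/tgt-def456", imgdiff=False)
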
 
 
 class Image(object):
+  def RangeSha1(self, ranges):
+    raise NotImplementedError
+
   def ReadRangeSet(self, ranges):
     raise NotImplementedError
 
   def TotalSha1(self, include_clobbered_blocks=False):
     raise NotImplementedError
 
+  def WriteRangeDataToFd(self, ranges, fd):
+    raise NotImplementedError
+
 
 class EmptyImage(Image):
   """A zero-length image."""
-  blocksize = 4096
-  care_map = RangeSet()
-  clobbered_blocks = RangeSet()
-  extended = RangeSet()
-  total_blocks = 0
-  file_map = {}
+
+  def __init__(self):
+    self.blocksize = 4096
+    self.care_map = RangeSet()
+    self.clobbered_blocks = RangeSet()
+    self.extended = RangeSet()
+    self.total_blocks = 0
+    self.file_map = {}
+
+  def RangeSha1(self, ranges):
+    return sha1().hexdigest()
+
   def ReadRangeSet(self, ranges):
     return ()
+
   def TotalSha1(self, include_clobbered_blocks=False):
     # EmptyImage always carries empty clobbered_blocks, so
     # include_clobbered_blocks can be ignored.
     assert self.clobbered_blocks.size() == 0
     return sha1().hexdigest()
 
+  def WriteRangeDataToFd(self, ranges, fd):
+    raise ValueError("Can't write data from EmptyImage to file")
+
 
 class DataImage(Image):
   """An image wrapped around a single string of data."""
@@ -160,23 +158,39 @@
     if clobbered_blocks:
       self.file_map["__COPY"] = RangeSet(data=clobbered_blocks)
 
+  def _GetRangeData(self, ranges):
+    for s, e in ranges:
+      yield self.data[s*self.blocksize:e*self.blocksize]
+
+  def RangeSha1(self, ranges):
+    h = sha1()
+    for data in self._GetRangeData(ranges):
+      h.update(data)
+    return h.hexdigest()
+
   def ReadRangeSet(self, ranges):
-    return [self.data[s*self.blocksize:e*self.blocksize] for (s, e) in ranges]
+    return [self._GetRangeData(ranges)]
 
   def TotalSha1(self, include_clobbered_blocks=False):
     if not include_clobbered_blocks:
-      ranges = self.care_map.subtract(self.clobbered_blocks)
-      return sha1(self.ReadRangeSet(ranges)).hexdigest()
+      return self.RangeSha1(self.care_map.subtract(self.clobbered_blocks))
     else:
       return sha1(self.data).hexdigest()
 
+  def WriteRangeDataToFd(self, ranges, fd):
+    for data in self._GetRangeData(ranges):
+      fd.write(data)
+
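
The streaming _GetRangeData()/RangeSha1() pair hashes block ranges without
concatenating tens of megabytes of data. The core idea, as a standalone sketch
assuming ranges iterates as (start, end) block pairs the way RangeSet does:

    from hashlib import sha1

    def range_sha1(data, ranges, blocksize=4096):
      h = sha1()
      for s, e in ranges:                        # (start, end) block pairs
        h.update(data[s*blocksize:e*blocksize])  # hash chunk by chunk
      return h.hexdigest()
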
 
 class Transfer(object):
-  def __init__(self, tgt_name, src_name, tgt_ranges, src_ranges, style, by_id):
+  def __init__(self, tgt_name, src_name, tgt_ranges, src_ranges, tgt_sha1,
+               src_sha1, style, by_id):
     self.tgt_name = tgt_name
     self.src_name = src_name
     self.tgt_ranges = tgt_ranges
     self.src_ranges = src_ranges
+    self.tgt_sha1 = tgt_sha1
+    self.src_sha1 = src_sha1
     self.style = style
     self.intact = (getattr(tgt_ranges, "monotonic", False) and
                    getattr(src_ranges, "monotonic", False))
@@ -251,6 +265,9 @@
 #      Implementations are free to break up the data into list/tuple
 #      elements in any way that is convenient.
 #
+#    RangeSha1(): a function that returns (as a hex string) the SHA-1
+#      hash of all the data in the specified range.
+#
 #    TotalSha1(): a function that returns (as a hex string) the SHA-1
 #      hash of all the data in the image (ie, all the blocks in the
 #      care_map minus clobbered_blocks, or including the clobbered
@@ -277,7 +294,7 @@
     self.touched_src_sha1 = None
     self.disable_imgdiff = disable_imgdiff
 
-    assert version in (1, 2, 3, 4)
+    assert version in (3, 4)
 
     self.tgt = tgt
     if src is None:
@@ -316,14 +333,11 @@
     self.FindVertexSequence()
     # Fix up the ordering dependencies that the sequence didn't
     # satisfy.
-    if self.version == 1:
-      self.RemoveBackwardEdges()
-    else:
-      self.ReverseBackwardEdges()
-      self.ImproveVertexSequence()
+    self.ReverseBackwardEdges()
+    self.ImproveVertexSequence()
 
     # Ensure the runtime stash size is under the limit.
-    if self.version >= 2 and common.OPTIONS.cache_size is not None:
+    if common.OPTIONS.cache_size is not None:
       self.ReviseStashSize()
 
     # Double-check our work.
@@ -332,15 +346,6 @@
     self.ComputePatches(prefix)
     self.WriteTransfers(prefix)
 
-  def HashBlocks(self, source, ranges): # pylint: disable=no-self-use
-    data = source.ReadRangeSet(ranges)
-    ctx = sha1()
-
-    for p in data:
-      ctx.update(p)
-
-    return ctx.hexdigest()
-
   def WriteTransfers(self, prefix):
     def WriteSplitTransfers(out, style, target_blocks):
       """Limit the size of operand in command 'new' and 'zero' to 1024 blocks.
@@ -361,13 +366,6 @@
     out = []
     total = 0
 
-    # In BBOTA v2, 'stashes' records the map from 'stash_raw_id' to 'stash_id'
-    # (aka 'sid', which is the stash slot id). The stash in a 'stash_id' will
-    # be freed immediately after its use. So unlike 'stash_raw_id' (which
-    # uniquely identifies each pair of stashed blocks), the same 'stash_id'
-    # may be reused during the life cycle of an update (maintained by
-    # 'free_stash_ids' heap and 'next_stash_id').
-    #
     # In BBOTA v3+, it uses the hash of the stashed blocks as the stash slot
     # id. 'stashes' records the map from 'hash' to the ref count. The stash
     # will be freed only if the count decrements to zero.
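
A sketch of that ref-counted bookkeeping in isolation (helper names are
hypothetical):

    stashes = {}                      # stash-slot hash -> reference count

    def take_ref(sh):
      stashes[sh] = stashes.get(sh, 0) + 1

    def drop_ref(sh):
      stashes[sh] -= 1
      if stashes[sh] == 0:            # slot is freed only on the last release
        stashes.pop(sh)
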
@@ -375,36 +373,17 @@
     stashed_blocks = 0
     max_stashed_blocks = 0
 
-    if self.version == 2:
-      free_stash_ids = []
-      next_stash_id = 0
-
     for xf in self.transfers:
 
-      if self.version < 2:
-        assert not xf.stash_before
-        assert not xf.use_stash
-
-      for stash_raw_id, sr in xf.stash_before:
-        if self.version == 2:
-          assert stash_raw_id not in stashes
-          if free_stash_ids:
-            sid = heapq.heappop(free_stash_ids)
-          else:
-            sid = next_stash_id
-            next_stash_id += 1
-          stashes[stash_raw_id] = sid
-          stashed_blocks += sr.size()
-          out.append("stash %d %s\n" % (sid, sr.to_string_raw()))
+      for _, sr in xf.stash_before:
+        sh = self.src.RangeSha1(sr)
+        if sh in stashes:
+          stashes[sh] += 1
         else:
-          sh = self.HashBlocks(self.src, sr)
-          if sh in stashes:
-            stashes[sh] += 1
-          else:
-            stashes[sh] = 1
-            stashed_blocks += sr.size()
-            self.touched_src_ranges = self.touched_src_ranges.union(sr)
-            out.append("stash %s %s\n" % (sh, sr.to_string_raw()))
+          stashes[sh] = 1
+          stashed_blocks += sr.size()
+          self.touched_src_ranges = self.touched_src_ranges.union(sr)
+          out.append("stash %s %s\n" % (sh, sr.to_string_raw()))
 
       if stashed_blocks > max_stashed_blocks:
         max_stashed_blocks = stashed_blocks
@@ -412,75 +391,47 @@
       free_string = []
       free_size = 0
 
-      if self.version == 1:
-        src_str = xf.src_ranges.to_string_raw() if xf.src_ranges else ""
-      elif self.version >= 2:
+      #   <# blocks> <src ranges>
+      #     OR
+      #   <# blocks> <src ranges> <src locs> <stash refs...>
+      #     OR
+      #   <# blocks> - <stash refs...>
 
-        #   <# blocks> <src ranges>
-        #     OR
-        #   <# blocks> <src ranges> <src locs> <stash refs...>
-        #     OR
-        #   <# blocks> - <stash refs...>
+      size = xf.src_ranges.size()
+      src_str = [str(size)]
 
-        size = xf.src_ranges.size()
-        src_str = [str(size)]
+      unstashed_src_ranges = xf.src_ranges
+      mapped_stashes = []
+      for _, sr in xf.use_stash:
+        unstashed_src_ranges = unstashed_src_ranges.subtract(sr)
+        sh = self.src.RangeSha1(sr)
+        sr = xf.src_ranges.map_within(sr)
+        mapped_stashes.append(sr)
+        assert sh in stashes
+        src_str.append("%s:%s" % (sh, sr.to_string_raw()))
+        stashes[sh] -= 1
+        if stashes[sh] == 0:
+          free_string.append("free %s\n" % (sh,))
+          free_size += sr.size()
+          stashes.pop(sh)
 
-        unstashed_src_ranges = xf.src_ranges
-        mapped_stashes = []
-        for stash_raw_id, sr in xf.use_stash:
-          unstashed_src_ranges = unstashed_src_ranges.subtract(sr)
-          sh = self.HashBlocks(self.src, sr)
-          sr = xf.src_ranges.map_within(sr)
-          mapped_stashes.append(sr)
-          if self.version == 2:
-            sid = stashes.pop(stash_raw_id)
-            src_str.append("%d:%s" % (sid, sr.to_string_raw()))
-            # A stash will be used only once. We need to free the stash
-            # immediately after the use, instead of waiting for the automatic
-            # clean-up at the end. Because otherwise it may take up extra space
-            # and lead to OTA failures.
-            # Bug: 23119955
-            free_string.append("free %d\n" % (sid,))
-            free_size += sr.size()
-            heapq.heappush(free_stash_ids, sid)
-          else:
-            assert sh in stashes
-            src_str.append("%s:%s" % (sh, sr.to_string_raw()))
-            stashes[sh] -= 1
-            if stashes[sh] == 0:
-              free_string.append("free %s\n" % (sh,))
-              free_size += sr.size()
-              stashes.pop(sh)
-
-        if unstashed_src_ranges:
-          src_str.insert(1, unstashed_src_ranges.to_string_raw())
-          if xf.use_stash:
-            mapped_unstashed = xf.src_ranges.map_within(unstashed_src_ranges)
-            src_str.insert(2, mapped_unstashed.to_string_raw())
-            mapped_stashes.append(mapped_unstashed)
-            self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
-        else:
-          src_str.insert(1, "-")
+      if unstashed_src_ranges:
+        src_str.insert(1, unstashed_src_ranges.to_string_raw())
+        if xf.use_stash:
+          mapped_unstashed = xf.src_ranges.map_within(unstashed_src_ranges)
+          src_str.insert(2, mapped_unstashed.to_string_raw())
+          mapped_stashes.append(mapped_unstashed)
           self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
+      else:
+        src_str.insert(1, "-")
+        self.AssertPartition(RangeSet(data=(0, size)), mapped_stashes)
 
-        src_str = " ".join(src_str)
+      src_str = " ".join(src_str)
 
-      # all versions:
+      # version 3+:
       #   zero <rangeset>
       #   new <rangeset>
       #   erase <rangeset>
-      #
-      # version 1:
-      #   bsdiff patchstart patchlen <src rangeset> <tgt rangeset>
-      #   imgdiff patchstart patchlen <src rangeset> <tgt rangeset>
-      #   move <src rangeset> <tgt rangeset>
-      #
-      # version 2:
-      #   bsdiff patchstart patchlen <tgt rangeset> <src_str>
-      #   imgdiff patchstart patchlen <tgt rangeset> <src_str>
-      #   move <tgt rangeset> <src_str>
-      #
-      # version 3:
       #   bsdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str>
       #   imgdiff patchstart patchlen srchash tgthash <tgt rangeset> <src_str>
       #   move hash <tgt rangeset> <src_str>
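
Putting the grammar together with the header emitted at the end of
WriteTransfers() (version, total blocks written, a literal 0 for the unused
stash-slot count, then the maximum simultaneously stashed blocks), a schematic
v4 transfer list looks like this (placeholders, not real output):

    4
    123456
    0
    512
    stash <hash> <src rangeset>
    move <tgt hash> <tgt rangeset> <# blocks> - <hash>:<mapped ranges>
    free <hash>
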
@@ -495,41 +446,6 @@
         assert xf.tgt_ranges
         assert xf.src_ranges.size() == tgt_size
         if xf.src_ranges != xf.tgt_ranges:
-          if self.version == 1:
-            out.append("%s %s %s\n" % (
-                xf.style,
-                xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw()))
-          elif self.version == 2:
-            out.append("%s %s %s\n" % (
-                xf.style,
-                xf.tgt_ranges.to_string_raw(), src_str))
-          elif self.version >= 3:
-            # take into account automatic stashing of overlapping blocks
-            if xf.src_ranges.overlaps(xf.tgt_ranges):
-              temp_stash_usage = stashed_blocks + xf.src_ranges.size()
-              if temp_stash_usage > max_stashed_blocks:
-                max_stashed_blocks = temp_stash_usage
-
-            self.touched_src_ranges = self.touched_src_ranges.union(
-                xf.src_ranges)
-
-            out.append("%s %s %s %s\n" % (
-                xf.style,
-                self.HashBlocks(self.tgt, xf.tgt_ranges),
-                xf.tgt_ranges.to_string_raw(), src_str))
-          total += tgt_size
-      elif xf.style in ("bsdiff", "imgdiff"):
-        assert xf.tgt_ranges
-        assert xf.src_ranges
-        if self.version == 1:
-          out.append("%s %d %d %s %s\n" % (
-              xf.style, xf.patch_start, xf.patch_len,
-              xf.src_ranges.to_string_raw(), xf.tgt_ranges.to_string_raw()))
-        elif self.version == 2:
-          out.append("%s %d %d %s %s\n" % (
-              xf.style, xf.patch_start, xf.patch_len,
-              xf.tgt_ranges.to_string_raw(), src_str))
-        elif self.version >= 3:
           # take into account automatic stashing of overlapping blocks
           if xf.src_ranges.overlaps(xf.tgt_ranges):
             temp_stash_usage = stashed_blocks + xf.src_ranges.size()
@@ -539,12 +455,28 @@
           self.touched_src_ranges = self.touched_src_ranges.union(
               xf.src_ranges)
 
-          out.append("%s %d %d %s %s %s %s\n" % (
+          out.append("%s %s %s %s\n" % (
               xf.style,
-              xf.patch_start, xf.patch_len,
-              self.HashBlocks(self.src, xf.src_ranges),
-              self.HashBlocks(self.tgt, xf.tgt_ranges),
+              xf.tgt_sha1,
               xf.tgt_ranges.to_string_raw(), src_str))
+          total += tgt_size
+      elif xf.style in ("bsdiff", "imgdiff"):
+        assert xf.tgt_ranges
+        assert xf.src_ranges
+        # take into account automatic stashing of overlapping blocks
+        if xf.src_ranges.overlaps(xf.tgt_ranges):
+          temp_stash_usage = stashed_blocks + xf.src_ranges.size()
+          if temp_stash_usage > max_stashed_blocks:
+            max_stashed_blocks = temp_stash_usage
+
+        self.touched_src_ranges = self.touched_src_ranges.union(xf.src_ranges)
+
+        out.append("%s %d %d %s %s %s %s\n" % (
+            xf.style,
+            xf.patch_start, xf.patch_len,
+            xf.src_sha1,
+            xf.tgt_sha1,
+            xf.tgt_ranges.to_string_raw(), src_str))
         total += tgt_size
       elif xf.style == "zero":
         assert xf.tgt_ranges
@@ -558,7 +490,7 @@
         out.append("".join(free_string))
         stashed_blocks -= free_size
 
-      if self.version >= 2 and common.OPTIONS.cache_size is not None:
+      if common.OPTIONS.cache_size is not None:
         # Sanity check: abort if we're going to need more stash space than
         # the allowed size (cache_size * threshold). There are two purposes
         # of having a threshold here. a) Part of the cache may have been
@@ -573,9 +505,7 @@
                    self.tgt.blocksize, max_allowed, cache_size,
                    stash_threshold)
 
-    if self.version >= 3:
-      self.touched_src_sha1 = self.HashBlocks(
-          self.src, self.touched_src_ranges)
+    self.touched_src_sha1 = self.src.RangeSha1(self.touched_src_ranges)
 
     # Zero out extended blocks as a workaround for bug 20881595.
     if self.tgt.extended:
@@ -603,32 +533,25 @@
 
     out.insert(0, "%d\n" % (self.version,))   # format version number
     out.insert(1, "%d\n" % (total,))
-    if self.version == 2:
-      # v2 only: after the total block count, we give the number of stash slots
-      # needed, and the maximum size needed (in blocks).
-      out.insert(2, str(next_stash_id) + "\n")
-      out.insert(3, str(max_stashed_blocks) + "\n")
-    elif self.version >= 3:
-      # v3+: the number of stash slots is unused.
-      out.insert(2, "0\n")
-      out.insert(3, str(max_stashed_blocks) + "\n")
+    # v3+: the number of stash slots is unused.
+    out.insert(2, "0\n")
+    out.insert(3, str(max_stashed_blocks) + "\n")
 
     with open(prefix + ".transfer.list", "wb") as f:
       for i in out:
         f.write(i)
 
-    if self.version >= 2:
-      self._max_stashed_size = max_stashed_blocks * self.tgt.blocksize
-      OPTIONS = common.OPTIONS
-      if OPTIONS.cache_size is not None:
-        max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
-        print("max stashed blocks: %d  (%d bytes), "
-              "limit: %d bytes (%.2f%%)\n" % (
-              max_stashed_blocks, self._max_stashed_size, max_allowed,
-              self._max_stashed_size * 100.0 / max_allowed))
-      else:
-        print("max stashed blocks: %d  (%d bytes), limit: <unknown>\n" % (
-              max_stashed_blocks, self._max_stashed_size))
+    self._max_stashed_size = max_stashed_blocks * self.tgt.blocksize
+    OPTIONS = common.OPTIONS
+    if OPTIONS.cache_size is not None:
+      max_allowed = OPTIONS.cache_size * OPTIONS.stash_threshold
+      print("max stashed blocks: %d  (%d bytes), "
+            "limit: %d bytes (%.2f%%)\n" % (
+            max_stashed_blocks, self._max_stashed_size, max_allowed,
+            self._max_stashed_size * 100.0 / max_allowed))
+    else:
+      print("max stashed blocks: %d  (%d bytes), limit: <unknown>\n" % (
+            max_stashed_blocks, self._max_stashed_size))
 
   def ReviseStashSize(self):
     print("Revising stash size...")
@@ -656,10 +579,6 @@
     stashed_blocks = 0
     new_blocks = 0
 
-    if self.version == 2:
-      free_stash_ids = []
-      next_stash_id = 0
-
     # Now go through all the commands. Compute the required stash size on the
     # fly. If a command requires more stash than is available, it deletes the
     # stash by replacing the command that uses the stash with a "new" command
@@ -671,12 +590,9 @@
       for stash_raw_id, sr in xf.stash_before:
         # Check the post-command stashed_blocks.
         stashed_blocks_after = stashed_blocks
-        if self.version == 2:
+        sh = self.src.RangeSha1(sr)
+        if sh not in stashes:
           stashed_blocks_after += sr.size()
-        else:
-          sh = self.HashBlocks(self.src, sr)
-          if sh not in stashes:
-            stashed_blocks_after += sr.size()
 
         if stashed_blocks_after > max_allowed:
           # We cannot stash this one for a later command. Find out the command
@@ -686,24 +602,15 @@
           print("%10d  %9s  %s" % (sr.size(), "explicit", use_cmd))
         else:
           # Update the stashes map.
-          if self.version == 2:
-            assert stash_raw_id not in stashes
-            if free_stash_ids:
-              sid = heapq.heappop(free_stash_ids)
-            else:
-              sid = next_stash_id
-              next_stash_id += 1
-            stashes[stash_raw_id] = sid
+          if sh in stashes:
+            stashes[sh] += 1
           else:
-            if sh in stashes:
-              stashes[sh] += 1
-            else:
-              stashes[sh] = 1
+            stashes[sh] = 1
           stashed_blocks = stashed_blocks_after
 
       # "move" and "diff" may introduce implicit stashes in BBOTA v3. Prior to
       # ComputePatches(), they both have the style of "diff".
-      if xf.style == "diff" and self.version >= 3:
+      if xf.style == "diff":
         assert xf.tgt_ranges and xf.src_ranges
         if xf.src_ranges.overlaps(xf.tgt_ranges):
           if stashed_blocks + xf.src_ranges.size() > max_allowed:
@@ -725,18 +632,13 @@
         cmd.ConvertToNew()
 
       # xf.use_stash may generate free commands.
-      for stash_raw_id, sr in xf.use_stash:
-        if self.version == 2:
-          sid = stashes.pop(stash_raw_id)
+      for _, sr in xf.use_stash:
+        sh = self.src.RangeSha1(sr)
+        assert sh in stashes
+        stashes[sh] -= 1
+        if stashes[sh] == 0:
           stashed_blocks -= sr.size()
-          heapq.heappush(free_stash_ids, sid)
-        else:
-          sh = self.HashBlocks(self.src, sr)
-          assert sh in stashes
-          stashes[sh] -= 1
-          if stashes[sh] == 0:
-            stashed_blocks -= sr.size()
-            stashes.pop(sh)
+          stashes.pop(sh)
 
     num_of_bytes = new_blocks * self.tgt.blocksize
     print("  Total %d blocks (%d bytes) are packed as new blocks due to "
@@ -745,10 +647,10 @@
 
   def ComputePatches(self, prefix):
     print("Reticulating splines...")
-    diff_q = []
+    diff_queue = []
     patch_num = 0
     with open(prefix + ".new.dat", "wb") as new_f:
-      for xf in self.transfers:
+      for index, xf in enumerate(self.transfers):
         if xf.style == "zero":
           tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
           print("%10d %10d (%6.2f%%) %7s %s %s" % (
@@ -756,17 +658,13 @@
               str(xf.tgt_ranges)))
 
         elif xf.style == "new":
-          for piece in self.tgt.ReadRangeSet(xf.tgt_ranges):
-            new_f.write(piece)
+          self.tgt.WriteRangeDataToFd(xf.tgt_ranges, new_f)
           tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
           print("%10d %10d (%6.2f%%) %7s %s %s" % (
               tgt_size, tgt_size, 100.0, xf.style,
               xf.tgt_name, str(xf.tgt_ranges)))
 
         elif xf.style == "diff":
-          src = self.src.ReadRangeSet(xf.src_ranges)
-          tgt = self.tgt.ReadRangeSet(xf.tgt_ranges)
-
           # We can't compare src and tgt directly because they may have
           # the same content but be broken up into blocks differently, eg:
           #
@@ -775,20 +673,11 @@
           # We want those to compare equal, ideally without having to
           # actually concatenate the strings (these may be tens of
           # megabytes).
-
-          src_sha1 = sha1()
-          for p in src:
-            src_sha1.update(p)
-          tgt_sha1 = sha1()
-          tgt_size = 0
-          for p in tgt:
-            tgt_sha1.update(p)
-            tgt_size += len(p)
-
-          if src_sha1.digest() == tgt_sha1.digest():
+          if xf.src_sha1 == xf.tgt_sha1:
             # These are identical; we don't need to generate a patch,
             # just issue copy commands on the device.
             xf.style = "move"
+            tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
             if xf.src_ranges != xf.tgt_ranges:
               print("%10d %10d (%6.2f%%) %7s %s %s (from %s)" % (
                   tgt_size, tgt_size, 100.0, xf.style,
@@ -815,38 +704,69 @@
                        xf.tgt_name.split(".")[-1].lower()
                        in ("apk", "jar", "zip"))
             xf.style = "imgdiff" if imgdiff else "bsdiff"
-            diff_q.append((tgt_size, src, tgt, xf, patch_num))
+            diff_queue.append((index, imgdiff, patch_num))
             patch_num += 1
 
         else:
           assert False, "unknown style " + xf.style
 
-    if diff_q:
+    if diff_queue:
       if self.threads > 1:
         print("Computing patches (using %d threads)..." % (self.threads,))
       else:
         print("Computing patches...")
-      diff_q.sort()
 
-      patches = [None] * patch_num
+      diff_total = len(diff_queue)
+      patches = [None] * diff_total
+      if sys.stdout.isatty():
+        global diff_done
+        diff_done = 0
 
-      # TODO: Rewrite with multiprocessing.ThreadPool?
+      # Using multiprocessing doesn't give additional benefits, due to the
+      # pattern of the code. The diffing work is done by subprocess.call, which
+      # already runs in a separate process (not affected much by the GIL -
+      # Global Interpreter Lock). Using multiprocessing would also require
+      # either a) writing the diff input files in the main process before
+      # forking, or b) reopening the image file (SparseImage) in the worker
+      # processes; neither of these further improves the performance.
       lock = threading.Lock()
       def diff_worker():
         while True:
           with lock:
-            if not diff_q:
+            if not diff_queue:
               return
-            tgt_size, src, tgt, xf, patchnum = diff_q.pop()
-          patch = compute_patch(src, tgt, imgdiff=(xf.style == "imgdiff"))
-          size = len(patch)
+            xf_index, imgdiff, patch_index = diff_queue.pop()
+
+          xf = self.transfers[xf_index]
+          src_ranges = xf.src_ranges
+          tgt_ranges = xf.tgt_ranges
+
+          # Needs lock since WriteRangeDataToFd() is stateful (calling seek).
           with lock:
-            patches[patchnum] = (patch, xf)
-            print("%10d %10d (%6.2f%%) %7s %s %s %s" % (
-                size, tgt_size, size * 100.0 / tgt_size, xf.style,
-                xf.tgt_name if xf.tgt_name == xf.src_name else (
-                    xf.tgt_name + " (from " + xf.src_name + ")"),
-                str(xf.tgt_ranges), str(xf.src_ranges)))
+            src_file = common.MakeTempFile(prefix="src-")
+            with open(src_file, "wb") as fd:
+              self.src.WriteRangeDataToFd(src_ranges, fd)
+
+            tgt_file = common.MakeTempFile(prefix="tgt-")
+            with open(tgt_file, "wb") as fd:
+              self.tgt.WriteRangeDataToFd(tgt_ranges, fd)
+
+          try:
+            patch = compute_patch(src_file, tgt_file, imgdiff)
+          except ValueError as e:
+            raise ValueError(
+                "Failed to generate diff for %s: src=%s, tgt=%s: %s" % (
+                    xf.tgt_name, xf.src_ranges, xf.tgt_ranges, e.message))
+
+          with lock:
+            patches[patch_index] = (xf_index, patch)
+            if sys.stdout.isatty():
+              global diff_done
+              diff_done += 1
+              progress = diff_done * 100 / diff_total
+              # '\033[K' is to clear to EOL.
+              print(' [%d%%] %s\033[K' % (progress, xf.tgt_name), end='\r')
+              sys.stdout.flush()
 
       threads = [threading.Thread(target=diff_worker)
                  for _ in range(self.threads)]
@@ -854,16 +774,29 @@
         th.start()
       while threads:
         threads.pop().join()
+
+      if sys.stdout.isatty():
+        print('\n')
     else:
       patches = []
 
-    p = 0
-    with open(prefix + ".patch.dat", "wb") as patch_f:
-      for patch, xf in patches:
-        xf.patch_start = p
+    offset = 0
+    with open(prefix + ".patch.dat", "wb") as patch_fd:
+      for index, patch in patches:
+        xf = self.transfers[index]
         xf.patch_len = len(patch)
-        patch_f.write(patch)
-        p += len(patch)
+        xf.patch_start = offset
+        offset += xf.patch_len
+        patch_fd.write(patch)
+
+        if common.OPTIONS.verbose:
+          tgt_size = xf.tgt_ranges.size() * self.tgt.blocksize
+          print("%10d %10d (%6.2f%%) %7s %s %s %s" % (
+                xf.patch_len, tgt_size, xf.patch_len * 100.0 / tgt_size,
+                xf.style,
+                xf.tgt_name if xf.tgt_name == xf.src_name else (
+                    xf.tgt_name + " (from " + xf.src_name + ")"),
+                xf.tgt_ranges, xf.src_ranges))
 
   def AssertSequenceGood(self):
     # Simulate the sequences of transfers we will output, and check that:
@@ -878,9 +811,8 @@
       # Check that the input blocks for this transfer haven't yet been touched.
 
       x = xf.src_ranges
-      if self.version >= 2:
-        for _, sr in xf.use_stash:
-          x = x.subtract(sr)
+      for _, sr in xf.use_stash:
+        x = x.subtract(sr)
 
       for s, e in x:
         # Source image could be larger. Don't check the blocks that are in the
@@ -1211,7 +1143,9 @@
       # Change nothing for small files.
       if (tgt_ranges.size() <= max_blocks_per_transfer and
           src_ranges.size() <= max_blocks_per_transfer):
-        Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
+        Transfer(tgt_name, src_name, tgt_ranges, src_ranges,
+                 self.tgt.RangeSha1(tgt_ranges), self.src.RangeSha1(src_ranges),
+                 style, by_id)
         return
 
       while (tgt_ranges.size() > max_blocks_per_transfer and
@@ -1221,8 +1155,9 @@
         tgt_first = tgt_ranges.first(max_blocks_per_transfer)
         src_first = src_ranges.first(max_blocks_per_transfer)
 
-        Transfer(tgt_split_name, src_split_name, tgt_first, src_first, style,
-                 by_id)
+        Transfer(tgt_split_name, src_split_name, tgt_first, src_first,
+                 self.tgt.RangeSha1(tgt_first), self.src.RangeSha1(src_first),
+                 style, by_id)
 
         tgt_ranges = tgt_ranges.subtract(tgt_first)
         src_ranges = src_ranges.subtract(src_first)
@@ -1234,8 +1169,9 @@
         assert tgt_ranges.size() and src_ranges.size()
         tgt_split_name = "%s-%d" % (tgt_name, pieces)
         src_split_name = "%s-%d" % (src_name, pieces)
-        Transfer(tgt_split_name, src_split_name, tgt_ranges, src_ranges, style,
-                 by_id)
+        Transfer(tgt_split_name, src_split_name, tgt_ranges, src_ranges,
+                 self.tgt.RangeSha1(tgt_ranges), self.src.RangeSha1(src_ranges),
+                 style, by_id)
 
     def AddTransfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id,
                     split=False):
@@ -1244,7 +1180,9 @@
       # We specialize diff transfers only (which covers bsdiff/imgdiff/move);
       # otherwise add the Transfer() as is.
       if style != "diff" or not split:
-        Transfer(tgt_name, src_name, tgt_ranges, src_ranges, style, by_id)
+        Transfer(tgt_name, src_name, tgt_ranges, src_ranges,
+                 self.tgt.RangeSha1(tgt_ranges), self.src.RangeSha1(src_ranges),
+                 style, by_id)
         return
 
       # Handle .odex files specially to analyze the block-wise difference. If
@@ -1325,7 +1263,7 @@
       elif tgt_fn in self.src.file_map:
         # Look for an exact pathname match in the source.
         AddTransfer(tgt_fn, tgt_fn, tgt_ranges, self.src.file_map[tgt_fn],
-                    "diff", self.transfers, self.version >= 3)
+                    "diff", self.transfers, True)
         continue
 
       b = os.path.basename(tgt_fn)
@@ -1333,7 +1271,7 @@
         # Look for an exact basename match in the source.
         src_fn = self.src_basenames[b]
         AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
-                    "diff", self.transfers, self.version >= 3)
+                    "diff", self.transfers, True)
         continue
 
       b = re.sub("[0-9]+", "#", b)
@@ -1344,7 +1282,7 @@
         # that get bumped.)
         src_fn = self.src_numpatterns[b]
         AddTransfer(tgt_fn, src_fn, tgt_ranges, self.src.file_map[src_fn],
-                    "diff", self.transfers, self.version >= 3)
+                    "diff", self.transfers, True)
         continue
 
       AddTransfer(tgt_fn, None, tgt_ranges, empty, "new", self.transfers)
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 73cd07e..16c8018 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -25,7 +25,6 @@
 import re
 import subprocess
 import sys
-import commands
 import common
 import shlex
 import shutil
@@ -52,29 +51,24 @@
   return (output, p.returncode)
 
 def GetVerityFECSize(partition_size):
-  cmd = "fec -s %d" % partition_size
-  status, output = commands.getstatusoutput(cmd)
-  if status:
-    print output
+  cmd = ["fec", "-s", str(partition_size)]
+  output, exit_code = RunCommand(cmd)
+  if exit_code != 0:
     return False, 0
   return True, int(output)
 
 def GetVerityTreeSize(partition_size):
-  cmd = "build_verity_tree -s %d"
-  cmd %= partition_size
-  status, output = commands.getstatusoutput(cmd)
-  if status:
-    print output
+  cmd = ["build_verity_tree", "-s", str(partition_size)]
+  output, exit_code = RunCommand(cmd)
+  if exit_code != 0:
     return False, 0
   return True, int(output)
 
 def GetVerityMetadataSize(partition_size):
-  cmd = "system/extras/verity/build_verity_metadata.py size %d"
-  cmd %= partition_size
-
-  status, output = commands.getstatusoutput(cmd)
-  if status:
-    print output
+  cmd = ["system/extras/verity/build_verity_metadata.py", "size",
+         str(partition_size)]
+  output, exit_code = RunCommand(cmd)
+  if exit_code != 0:
     return False, 0
   return True, int(output)
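
All of these call sites now go through RunCommand (defined near the top of
build_image.py), passing an argv list so no shell parsing or quoting is
involved. A sketch of an equivalent helper, assuming it follows the
(output, returncode) contract used above:

    import subprocess

    def run_command(cmd):
      # cmd is an argv list, e.g. ["fec", "-s", "4096"]; no shell involved.
      p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                           stderr=subprocess.STDOUT)
      output, _ = p.communicate()
      return output, p.returncode
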
 
@@ -191,21 +185,19 @@
 
 def BuildVerityFEC(sparse_image_path, verity_path, verity_fec_path,
                    padding_size):
-  cmd = "fec -e -p %d %s %s %s" % (padding_size, sparse_image_path,
-                                   verity_path, verity_fec_path)
-  print cmd
-  status, output = commands.getstatusoutput(cmd)
-  if status:
+  cmd = ["fec", "-e", "-p", str(padding_size), sparse_image_path,
+         verity_path, verity_fec_path]
+  output, exit_code = RunCommand(cmd)
+  if exit_code != 0:
     print "Could not build FEC data! Error: %s" % output
     return False
   return True
 
 def BuildVerityTree(sparse_image_path, verity_image_path, prop_dict):
-  cmd = "build_verity_tree -A %s %s %s" % (
-      FIXED_SALT, sparse_image_path, verity_image_path)
-  print cmd
-  status, output = commands.getstatusoutput(cmd)
-  if status:
+  cmd = ["build_verity_tree", "-A", FIXED_SALT, sparse_image_path,
+         verity_image_path]
+  output, exit_code = RunCommand(cmd)
+  if exit_code != 0:
     print "Could not build verity tree! Error: %s" % output
     return False
   root, salt = output.split()
@@ -215,16 +207,13 @@
 
 def BuildVerityMetadata(image_size, verity_metadata_path, root_hash, salt,
                         block_device, signer_path, key, signer_args):
-  cmd_template = (
-      "system/extras/verity/build_verity_metadata.py build " +
-      "%s %s %s %s %s %s %s")
-  cmd = cmd_template % (image_size, verity_metadata_path, root_hash, salt,
-                        block_device, signer_path, key)
+  cmd = ["system/extras/verity/build_verity_metadata.py", "build",
+         str(image_size), verity_metadata_path, root_hash, salt, block_device,
+         signer_path, key]
   if signer_args:
-    cmd += " --signer_args=\"%s\"" % (' '.join(signer_args),)
-  print cmd
-  status, output = commands.getstatusoutput(cmd)
-  if status:
+    cmd.append("--signer_args=\"%s\"" % (' '.join(signer_args),))
+  output, exit_code = RunCommand(cmd)
+  if exit_code != 0:
     print "Could not build verity metadata! Error: %s" % output
     return False
   return True
@@ -238,22 +227,19 @@
   Returns:
     True on success, False on failure.
   """
-  cmd = "append2simg %s %s"
-  cmd %= (sparse_image_path, unsparse_image_path)
-  print cmd
-  status, output = commands.getstatusoutput(cmd)
-  if status:
+  cmd = ["append2simg", sparse_image_path, unsparse_image_path]
+  output, exit_code = RunCommand(cmd)
+  if exit_code != 0:
     print "%s: %s" % (error_message, output)
     return False
   return True
 
 def Append(target, file_to_append, error_message):
-  cmd = 'cat %s >> %s' % (file_to_append, target)
-  print cmd
-  status, output = commands.getstatusoutput(cmd)
-  if status:
-    print "%s: %s" % (error_message, output)
-    return False
+  print "appending %s to %s" % (file_to_append, target)
+  with open(target, "a") as out_file:
+    with open(file_to_append, "r") as input_file:
+      for line in input_file:
+        out_file.write(line)
   return True
 
 def BuildVerifiedImage(data_image_path, verity_image_path,
diff --git a/tools/releasetools/check_ota_package_signature.py b/tools/releasetools/check_ota_package_signature.py
index 0da61b1..548b619 100755
--- a/tools/releasetools/check_ota_package_signature.py
+++ b/tools/releasetools/check_ota_package_signature.py
@@ -104,7 +104,7 @@
 
   # Get the signature from the input package.
   signature = package_bytes[signature_start:-6]
-  sig_file = common.MakeTempFile(prefix='sig-', suffix='')
+  sig_file = common.MakeTempFile(prefix='sig-')
   with open(sig_file, 'wb') as f:
     f.write(signature)
 
@@ -116,12 +116,12 @@
 
   digest_line = sig.strip().split('\n')[-1]
   digest_string = digest_line.split(':')[3]
-  digest_file = common.MakeTempFile(prefix='digest-', suffix='')
+  digest_file = common.MakeTempFile(prefix='digest-')
   with open(digest_file, 'wb') as f:
     f.write(digest_string.decode('hex'))
 
   # Verify the digest by outputting the decrypted result in ASN.1 structure.
-  decrypted_file = common.MakeTempFile(prefix='decrypted-', suffix='')
+  decrypted_file = common.MakeTempFile(prefix='decrypted-')
   cmd = ['openssl', 'rsautl', '-verify', '-certin', '-inkey', cert,
          '-in', digest_file, '-out', decrypted_file]
   p1 = common.Run(cmd, stdout=subprocess.PIPE)
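
The dropped suffix='' arguments rely on the new MakeTempFile defaults in
common.py below (prefix='tmp', suffix=''), which keep tempfile.mkstemp() from
ever seeing None. Usage stays a one-liner:

    sig_file = common.MakeTempFile(prefix='sig-')  # suffix defaults to ''
    # The file is registered for deletion when common.Cleanup() runs.
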
diff --git a/tools/releasetools/check_target_files_signatures.py b/tools/releasetools/check_target_files_signatures.py
index 3048488..f9aa4fa 100755
--- a/tools/releasetools/check_target_files_signatures.py
+++ b/tools/releasetools/check_target_files_signatures.py
@@ -235,7 +235,7 @@
     self.certmap = None
 
   def LoadZipFile(self, filename):
-    d, z = common.UnzipTemp(filename, '*.apk')
+    d, z = common.UnzipTemp(filename, ['*.apk'])
     try:
       self.apks = {}
       self.apks_by_basename = {}
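
UnzipTemp's pattern parameter is now a list (see the cmd.extend() change in
common.py below), so multiple unzip patterns can be passed naturally; e.g.
(the second call is hypothetical):

    d, z = common.UnzipTemp(filename, ['*.apk'])
    d, z = common.UnzipTemp(filename, ['*.apk', 'META/*'])
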
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index 027e9f5..e200f9f 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -146,33 +146,14 @@
       except IOError as e:
         if e.errno == errno.ENOENT:
           raise KeyError(fn)
-  d = {}
+
   try:
     d = LoadDictionaryFromLines(read_helper("META/misc_info.txt").split("\n"))
   except KeyError:
-    # ok if misc_info.txt doesn't exist
-    pass
+    raise ValueError("can't find META/misc_info.txt in input target-files")
 
-  # backwards compatibility: These values used to be in their own
-  # files.  Look for them, in case we're processing an old
-  # target_files zip.
-
-  if "recovery_api_version" not in d:
-    try:
-      d["recovery_api_version"] = read_helper(
-          "META/recovery-api-version.txt").strip()
-    except KeyError:
-      raise ValueError("can't find recovery API version in input target-files")
-
-  if "tool_extensions" not in d:
-    try:
-      d["tool_extensions"] = read_helper("META/tool-extensions.txt").strip()
-    except KeyError:
-      # ok if extensions don't exist
-      pass
-
-  if "fstab_version" not in d:
-    d["fstab_version"] = "1"
+  assert "recovery_api_version" in d
+  assert "fstab_version" in d
 
   # A few properties are stored as links to the files in the out/ directory.
   # It works fine with the build system. However, they are no longer available
@@ -268,6 +249,7 @@
   d["build.prop"] = LoadBuildProp(read_helper)
   return d
 
+
 def LoadBuildProp(read_helper):
   try:
     data = read_helper("SYSTEM/build.prop")
@@ -276,6 +258,7 @@
     data = ""
   return LoadDictionaryFromLines(data.split("\n"))
 
+
 def LoadDictionaryFromLines(lines):
   d = {}
   for line in lines:
@@ -287,15 +270,15 @@
       d[name] = value
   return d
 
+
 def LoadRecoveryFSTab(read_helper, fstab_version, recovery_fstab_path,
                       system_root_image=False):
   class Partition(object):
-    def __init__(self, mount_point, fs_type, device, length, device2, context):
+    def __init__(self, mount_point, fs_type, device, length, context):
       self.mount_point = mount_point
       self.fs_type = fs_type
       self.device = device
       self.length = length
-      self.device2 = device2
       self.context = context
 
   try:
@@ -304,81 +287,44 @@
     print("Warning: could not find {}".format(recovery_fstab_path))
     data = ""
 
-  if fstab_version == 1:
-    d = {}
-    for line in data.split("\n"):
-      line = line.strip()
-      if not line or line.startswith("#"):
-        continue
-      pieces = line.split()
-      if not 3 <= len(pieces) <= 4:
-        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
-      options = None
-      if len(pieces) >= 4:
-        if pieces[3].startswith("/"):
-          device2 = pieces[3]
-          if len(pieces) >= 5:
-            options = pieces[4]
-        else:
-          device2 = None
-          options = pieces[3]
+  assert fstab_version == 2
+
+  d = {}
+  for line in data.split("\n"):
+    line = line.strip()
+    if not line or line.startswith("#"):
+      continue
+
+    # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
+    pieces = line.split()
+    if len(pieces) != 5:
+      raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
+
+    # Ignore entries that are managed by vold.
+    options = pieces[4]
+    if "voldmanaged=" in options:
+      continue
+
+    # It's a good line, parse it.
+    length = 0
+    options = options.split(",")
+    for i in options:
+      if i.startswith("length="):
+        length = int(i[7:])
       else:
-        device2 = None
-
-      mount_point = pieces[0]
-      length = 0
-      if options:
-        options = options.split(",")
-        for i in options:
-          if i.startswith("length="):
-            length = int(i[7:])
-          else:
-            print("%s: unknown option \"%s\"" % (mount_point, i))
-
-      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[1],
-                                 device=pieces[2], length=length,
-                                 device2=device2)
-
-  elif fstab_version == 2:
-    d = {}
-    for line in data.split("\n"):
-      line = line.strip()
-      if not line or line.startswith("#"):
-        continue
-      # <src> <mnt_point> <type> <mnt_flags and options> <fs_mgr_flags>
-      pieces = line.split()
-      if len(pieces) != 5:
-        raise ValueError("malformed recovery.fstab line: \"%s\"" % (line,))
-
-      # Ignore entries that are managed by vold
-      options = pieces[4]
-      if "voldmanaged=" in options:
+        # Ignore all unknown options in the unified fstab.
         continue
 
-      # It's a good line, parse it
-      length = 0
-      options = options.split(",")
-      for i in options:
-        if i.startswith("length="):
-          length = int(i[7:])
-        else:
-          # Ignore all unknown options in the unified fstab
-          continue
+    mount_flags = pieces[3]
+    # Honor the SELinux context if present.
+    context = None
+    for i in mount_flags.split(","):
+      if i.startswith("context="):
+        context = i
 
-      mount_flags = pieces[3]
-      # Honor the SELinux context if present.
-      context = None
-      for i in mount_flags.split(","):
-        if i.startswith("context="):
-          context = i
-
-      mount_point = pieces[1]
-      d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
-                                 device=pieces[0], length=length,
-                                 device2=None, context=context)
-
-  else:
-    raise ValueError("Unknown fstab_version: \"%d\"" % (fstab_version,))
+    mount_point = pieces[1]
+    d[mount_point] = Partition(mount_point=mount_point, fs_type=pieces[2],
+                               device=pieces[0], length=length, context=context)
 
   # / is used for the system mount point when the root directory is included in
   # system. Other areas assume system is always at "/system" so point /system
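For reference, a minimal sketch (not part of the patch) of how one v2 fstab
line flows through the loop above; the device path and flags are illustrative
only:

    # Illustrative recovery.fstab v2 entry:
    line = ("/dev/block/platform/soc/by-name/userdata /data ext4 "
            "ro,context=u:object_r:system_file:s0 wait,length=-16384")
    device, mount_point, fs_type, mount_flags, fs_mgr_flags = line.split()
    length = 0
    for i in fs_mgr_flags.split(","):
        if i.startswith("length="):
            length = int(i[7:])            # -> -16384
    context = None
    for i in mount_flags.split(","):
        if i.startswith("context="):
            context = i                    # -> "context=u:object_r:..."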
@@ -618,7 +564,7 @@
   def unzip_to_dir(filename, dirname):
     cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
     if pattern is not None:
-      cmd.append(pattern)
+      cmd.extend(pattern)
     p = Run(cmd, stdout=subprocess.PIPE)
     p.communicate()
     if p.returncode != 0:
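Since the pattern argument is now a list (see UNZIP_PATTERN in
ota_from_target_files.py below), it is spliced into the command with
extend(); a sketch with assumed values:

    # Sketch: pattern=['IMAGES/*', 'META/*'] produces, in effect,
    #   unzip -o -q target-files.zip -d /tmp/dir 'IMAGES/*' 'META/*'
    cmd = ["unzip", "-o", "-q", "target-files.zip", "-d", "/tmp/dir"]
    cmd.extend(['IMAGES/*', 'META/*'])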
@@ -926,7 +872,7 @@
   return args
 
 
-def MakeTempFile(prefix=None, suffix=None):
+def MakeTempFile(prefix='tmp', suffix=''):
   """Make a temp file and add it to the list of things to be deleted
   when Cleanup() is called.  Return the filename."""
   fd, fn = tempfile.mkstemp(prefix=prefix, suffix=suffix)
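The defaults change from None to mkstemp()'s own defaults because
tempfile.mkstemp() cannot take None for these arguments on Python 2 (prefix
and suffix are concatenated into the candidate file name); a sketch:

    import tempfile
    fd, fn = tempfile.mkstemp(prefix='tmp', suffix='')   # fine
    # tempfile.mkstemp(prefix=None) raises TypeError on Python 2 while
    # building the temp file name.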
@@ -1245,6 +1191,10 @@
     t.flush()
     return t
 
+  def WriteToDir(self, d):
+    with open(os.path.join(d, self.name), "wb") as fp:
+      fp.write(self.data)
+
   def AddToZip(self, z, compression=None):
     ZipWriteStr(z, self.name, self.data, compress_type=compression)
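A hedged usage sketch for the new WriteToDir() helper (the File constructor
arguments are assumed from its usual usage in this module):

    # Sketch: dump an in-memory File object into a directory on disk.
    f = common.File("boot.img", boot_data)   # boot_data: bytes, assumed
    f.WriteToDir(OPTIONS.input_tmp)          # writes <input_tmp>/boot.img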
 
@@ -1390,6 +1340,7 @@
         version = max(
             int(i) for i in
             OPTIONS.info_dict.get("blockimgdiff_versions", "1").split(","))
+    assert version >= 3
     self.version = version
 
     b = blockimgdiff.BlockImageDiff(tgt, src, threads=OPTIONS.worker_threads,
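The version floor reflects that only v3+ transfer lists are supported from
here on; a sketch of how the version is derived (the misc_info value is
assumed):

    # e.g. blockimgdiff_versions = "1,2,3,4" in META/misc_info.txt
    version = max(int(i) for i in "1,2,3,4".split(","))   # -> 4
    assert version >= 3   # older updaters can no longer apply these packages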
@@ -1454,7 +1405,7 @@
 
     # incremental OTA
     else:
-      if touched_blocks_only and self.version >= 3:
+      if touched_blocks_only:
         ranges = self.touched_src_ranges
         expected_sha1 = self.touched_src_sha1
       else:
@@ -1466,23 +1417,12 @@
         return
 
       ranges_str = ranges.to_string_raw()
-      if self.version >= 4:
-        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
-                            'block_image_verify("%s", '
-                            'package_extract_file("%s.transfer.list"), '
-                            '"%s.new.dat", "%s.patch.dat")) then') % (
-                            self.device, ranges_str, expected_sha1,
-                            self.device, partition, partition, partition))
-      elif self.version == 3:
-        script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
-                            'block_image_verify("%s", '
-                            'package_extract_file("%s.transfer.list"), '
-                            '"%s.new.dat", "%s.patch.dat")) then') % (
-                            self.device, ranges_str, expected_sha1,
-                            self.device, partition, partition, partition))
-      else:
-        script.AppendExtra('if range_sha1("%s", "%s") == "%s" then' % (
-                           self.device, ranges_str, self.src.TotalSha1()))
+      script.AppendExtra(('if (range_sha1("%s", "%s") == "%s" || '
+                          'block_image_verify("%s", '
+                          'package_extract_file("%s.transfer.list"), '
+                          '"%s.new.dat", "%s.patch.dat")) then') % (
+                          self.device, ranges_str, expected_sha1,
+                          self.device, partition, partition, partition))
       script.Print('Verified %s image...' % (partition,))
       script.AppendExtra('else')
 
diff --git a/tools/releasetools/edify_generator.py b/tools/releasetools/edify_generator.py
index 343f344..2a9a417 100644
--- a/tools/releasetools/edify_generator.py
+++ b/tools/releasetools/edify_generator.py
@@ -277,36 +277,6 @@
 
     self.script.append('wipe_block_device("%s", %s);' % (device, size))
 
-  def DeleteFiles(self, file_list):
-    """Delete all files in file_list."""
-    if not file_list:
-      return
-    cmd = "delete(" + ",\0".join(['"%s"' % (i,) for i in file_list]) + ");"
-    self.script.append(self.WordWrap(cmd))
-
-  def DeleteFilesIfNotMatching(self, file_list):
-    """Delete the file in file_list if not matching the checksum."""
-    if not file_list:
-      return
-    for name, sha1 in file_list:
-      cmd = ('sha1_check(read_file("{name}"), "{sha1}") || '
-             'delete("{name}");'.format(name=name, sha1=sha1))
-      self.script.append(self.WordWrap(cmd))
-
-  def RenameFile(self, srcfile, tgtfile):
-    """Moves a file from one location to another."""
-    if self.info.get("update_rename_support", False):
-      self.script.append('rename("%s", "%s");' % (srcfile, tgtfile))
-    else:
-      raise ValueError("Rename not supported by update binary")
-
-  def SkipNextActionIfTargetExists(self, tgtfile, tgtsha1):
-    """Prepend an action with an apply_patch_check in order to
-       skip the action if the file exists.  Used when a patch
-       is later renamed."""
-    cmd = ('sha1_check(read_file("%s"), %s) ||' % (tgtfile, tgtsha1))
-    self.script.append(self.WordWrap(cmd))
-
   def ApplyPatch(self, srcfile, tgtfile, tgtsize, tgtsha1, *patchpairs):
     """Apply binary patches (in *patchpairs) to the given srcfile to
     produce tgtfile (which may be "-" to indicate overwriting the
@@ -343,48 +313,6 @@
         raise ValueError(
             "don't know how to write \"%s\" partitions" % p.fs_type)
 
-  def SetPermissions(self, fn, uid, gid, mode, selabel, capabilities):
-    """Set file ownership and permissions."""
-    if not self.info.get("use_set_metadata", False):
-      self.script.append('set_perm(%d, %d, 0%o, "%s");' % (uid, gid, mode, fn))
-    else:
-      if capabilities is None:
-        capabilities = "0x0"
-      cmd = 'set_metadata("%s", "uid", %d, "gid", %d, "mode", 0%o, ' \
-          '"capabilities", %s' % (fn, uid, gid, mode, capabilities)
-      if selabel is not None:
-        cmd += ', "selabel", "%s"' % selabel
-      cmd += ');'
-      self.script.append(cmd)
-
-  def SetPermissionsRecursive(self, fn, uid, gid, dmode, fmode, selabel,
-                              capabilities):
-    """Recursively set path ownership and permissions."""
-    if not self.info.get("use_set_metadata", False):
-      self.script.append('set_perm_recursive(%d, %d, 0%o, 0%o, "%s");'
-                         % (uid, gid, dmode, fmode, fn))
-    else:
-      if capabilities is None:
-        capabilities = "0x0"
-      cmd = 'set_metadata_recursive("%s", "uid", %d, "gid", %d, ' \
-          '"dmode", 0%o, "fmode", 0%o, "capabilities", %s' \
-          % (fn, uid, gid, dmode, fmode, capabilities)
-      if selabel is not None:
-        cmd += ', "selabel", "%s"' % selabel
-      cmd += ');'
-      self.script.append(cmd)
-
-  def MakeSymlinks(self, symlink_list):
-    """Create symlinks, given a list of (dest, link) pairs."""
-    by_dest = {}
-    for d, l in symlink_list:
-      by_dest.setdefault(d, []).append(l)
-
-    for dest, links in sorted(by_dest.iteritems()):
-      cmd = ('symlink("%s", ' % (dest,) +
-             ",\0".join(['"' + i + '"' for i in sorted(links)]) + ");")
-      self.script.append(self.WordWrap(cmd))
-
   def AppendExtra(self, extra):
     """Append text verbatim to the output script."""
     self.script.append(extra)
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 4b1b3a0..8222377 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -55,7 +55,6 @@
       properties on the OEM partition of the intended device.
       Multiple expected values can be used by providing multiple files.
 
-
   --oem_no_mount
       For devices with OEM-specific properties but without an OEM partition,
       do not mount the OEM partition in the updater-script. This should be
@@ -72,7 +71,19 @@
       will be replaced by "ota-downgrade=yes" in the metadata file. A data
       wipe will always be enforced, so "ota-wipe=yes" will also be included in
       the metadata file. The update-binary in the source build will be used in
-      the OTA package, unless --binary flag is specified.
+      the OTA package, unless the --binary flag is specified. Please also
+      refer to the doc for --override_timestamp below.
+
+  --override_timestamp
+      Intentionally generate an incremental OTA that updates from a newer
+      build to an older one (based on timestamp comparison), by overriding
+      the timestamp in the package metadata. This differs from the
+      --downgrade flag: we know for sure this is NOT an actual downgrade
+      case; the two builds were simply cut in reverse order. A legitimate
+      use case is that we cut a new build C (after having A and B), but want
+      to enforce an update path of A -> C -> B. Specifying --downgrade may
+      not help, since that would enforce a data wipe for the C -> B update.
+      The value of "post-timestamp" will be set to the newer timestamp plus
+      one, so that the package can be pushed and applied.
 
   -e  (--extra_script)  <file>
       Insert the contents of file at the end of the update script.
@@ -83,9 +94,10 @@
       using the new recovery (new kernel, etc.).
 
   --block
-      Generate a block-based OTA if possible.  Will fall back to a
-      file-based OTA if the target_files is older and doesn't support
-      block-based OTAs.
+      Generate a block-based OTA for a non-A/B device. Support for
+      file-based OTAs has been deprecated since O. Block-based OTAs are used
+      by default for all non-A/B devices. This flag is kept only so as not
+      to break existing callers.
 
   -b  (--binary)  <file>
       Use the given binary as the update-binary in the output package,
@@ -144,18 +156,17 @@
 OPTIONS.package_key = None
 OPTIONS.incremental_source = None
 OPTIONS.verify = False
-OPTIONS.require_verbatim = set()
-OPTIONS.prohibit_verbatim = set(("system/build.prop",))
 OPTIONS.patch_threshold = 0.95
 OPTIONS.wipe_user_data = False
 OPTIONS.downgrade = False
+OPTIONS.timestamp = False
 OPTIONS.extra_script = None
 OPTIONS.worker_threads = multiprocessing.cpu_count() // 2
 if OPTIONS.worker_threads == 0:
   OPTIONS.worker_threads = 1
 OPTIONS.two_step = False
 OPTIONS.no_signing = False
-OPTIONS.block_based = False
+OPTIONS.block_based = True
 OPTIONS.updater_binary = None
 OPTIONS.oem_source = None
 OPTIONS.oem_no_mount = False
@@ -171,272 +182,7 @@
 OPTIONS.payload_signer_args = []
 
 METADATA_NAME = 'META-INF/com/android/metadata'
-
-def MostPopularKey(d, default):
-  """Given a dict, return the key corresponding to the largest
-  value.  Returns 'default' if the dict is empty."""
-  x = [(v, k) for (k, v) in d.iteritems()]
-  if not x:
-    return default
-  x.sort()
-  return x[-1][1]
-
-
-def IsSymlink(info):
-  """Return true if the zipfile.ZipInfo object passed in represents a
-  symlink."""
-  return (info.external_attr >> 16) & 0o770000 == 0o120000
-
-def IsRegular(info):
-  """Return true if the zipfile.ZipInfo object passed in represents a
-  regular file."""
-  return (info.external_attr >> 16) & 0o770000 == 0o100000
-
-def ClosestFileMatch(src, tgtfiles, existing):
-  """Returns the closest file match between a source file and list
-     of potential matches.  The exact filename match is preferred,
-     then the sha1 is searched for, and finally a file with the same
-     basename is evaluated.  Rename support in the updater-binary is
-     required for the latter checks to be used."""
-
-  result = tgtfiles.get("path:" + src.name)
-  if result is not None:
-    return result
-
-  if not OPTIONS.target_info_dict.get("update_rename_support", False):
-    return None
-
-  if src.size < 1000:
-    return None
-
-  result = tgtfiles.get("sha1:" + src.sha1)
-  if result is not None and existing.get(result.name) is None:
-    return result
-  result = tgtfiles.get("file:" + src.name.split("/")[-1])
-  if result is not None and existing.get(result.name) is None:
-    return result
-  return None
-
-class ItemSet(object):
-  def __init__(self, partition, fs_config):
-    self.partition = partition
-    self.fs_config = fs_config
-    self.ITEMS = {}
-
-  def Get(self, name, is_dir=False):
-    if name not in self.ITEMS:
-      self.ITEMS[name] = Item(self, name, is_dir=is_dir)
-    return self.ITEMS[name]
-
-  def GetMetadata(self, input_zip):
-    # The target_files contains a record of what the uid,
-    # gid, and mode are supposed to be.
-    output = input_zip.read(self.fs_config)
-
-    for line in output.split("\n"):
-      if not line:
-        continue
-      columns = line.split()
-      name, uid, gid, mode = columns[:4]
-      selabel = None
-      capabilities = None
-
-      # After the first 4 columns, there are a series of key=value
-      # pairs. Extract out the fields we care about.
-      for element in columns[4:]:
-        key, value = element.split("=")
-        if key == "selabel":
-          selabel = value
-        if key == "capabilities":
-          capabilities = value
-
-      i = self.ITEMS.get(name, None)
-      if i is not None:
-        i.uid = int(uid)
-        i.gid = int(gid)
-        i.mode = int(mode, 8)
-        i.selabel = selabel
-        i.capabilities = capabilities
-        if i.is_dir:
-          i.children.sort(key=lambda i: i.name)
-
-    # Set metadata for the files generated by this script. For full recovery
-    # image at system/etc/recovery.img, it will be taken care by fs_config.
-    i = self.ITEMS.get("system/recovery-from-boot.p", None)
-    if i:
-      i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o644, None, None
-    i = self.ITEMS.get("system/etc/install-recovery.sh", None)
-    if i:
-      i.uid, i.gid, i.mode, i.selabel, i.capabilities = 0, 0, 0o544, None, None
-
-
-class Item(object):
-  """Items represent the metadata (user, group, mode) of files and
-  directories in the system image."""
-  def __init__(self, itemset, name, is_dir=False):
-    self.itemset = itemset
-    self.name = name
-    self.uid = None
-    self.gid = None
-    self.mode = None
-    self.selabel = None
-    self.capabilities = None
-    self.is_dir = is_dir
-    self.descendants = None
-    self.best_subtree = None
-
-    if name:
-      self.parent = itemset.Get(os.path.dirname(name), is_dir=True)
-      self.parent.children.append(self)
-    else:
-      self.parent = None
-    if self.is_dir:
-      self.children = []
-
-  def Dump(self, indent=0):
-    if self.uid is not None:
-      print("%s%s %d %d %o" % (
-          "  " * indent, self.name, self.uid, self.gid, self.mode))
-    else:
-      print("%s%s %s %s %s" % (
-          "  " * indent, self.name, self.uid, self.gid, self.mode))
-    if self.is_dir:
-      print("%s%s" % ("  " * indent, self.descendants))
-      print("%s%s" % ("  " * indent, self.best_subtree))
-      for i in self.children:
-        i.Dump(indent=indent+1)
-
-  def CountChildMetadata(self):
-    """Count up the (uid, gid, mode, selabel, capabilities) tuples for
-    all children and determine the best strategy for using set_perm_recursive
-    and set_perm to correctly chown/chmod all the files to their desired
-    values.  Recursively calls itself for all descendants.
-
-    Returns a dict of {(uid, gid, dmode, fmode, selabel, capabilities): count}
-    counting up all descendants of this node.  (dmode or fmode may be None.)
-    Also sets the best_subtree of each directory Item to the (uid, gid, dmode,
-    fmode, selabel, capabilities) tuple that will match the most descendants of
-    that Item.
-    """
-
-    assert self.is_dir
-    key = (self.uid, self.gid, self.mode, None, self.selabel,
-           self.capabilities)
-    self.descendants = {key: 1}
-    d = self.descendants
-    for i in self.children:
-      if i.is_dir:
-        for k, v in i.CountChildMetadata().iteritems():
-          d[k] = d.get(k, 0) + v
-      else:
-        k = (i.uid, i.gid, None, i.mode, i.selabel, i.capabilities)
-        d[k] = d.get(k, 0) + 1
-
-    # Find the (uid, gid, dmode, fmode, selabel, capabilities)
-    # tuple that matches the most descendants.
-
-    # First, find the (uid, gid) pair that matches the most
-    # descendants.
-    ug = {}
-    for (uid, gid, _, _, _, _), count in d.iteritems():
-      ug[(uid, gid)] = ug.get((uid, gid), 0) + count
-    ug = MostPopularKey(ug, (0, 0))
-
-    # Now find the dmode, fmode, selabel, and capabilities that match
-    # the most descendants with that (uid, gid), and choose those.
-    best_dmode = (0, 0o755)
-    best_fmode = (0, 0o644)
-    best_selabel = (0, None)
-    best_capabilities = (0, None)
-    for k, count in d.iteritems():
-      if k[:2] != ug:
-        continue
-      if k[2] is not None and count >= best_dmode[0]:
-        best_dmode = (count, k[2])
-      if k[3] is not None and count >= best_fmode[0]:
-        best_fmode = (count, k[3])
-      if k[4] is not None and count >= best_selabel[0]:
-        best_selabel = (count, k[4])
-      if k[5] is not None and count >= best_capabilities[0]:
-        best_capabilities = (count, k[5])
-    self.best_subtree = ug + (
-        best_dmode[1], best_fmode[1], best_selabel[1], best_capabilities[1])
-
-    return d
-
-  def SetPermissions(self, script):
-    """Append set_perm/set_perm_recursive commands to 'script' to
-    set all permissions, users, and groups for the tree of files
-    rooted at 'self'."""
-
-    self.CountChildMetadata()
-
-    def recurse(item, current):
-      # current is the (uid, gid, dmode, fmode, selabel, capabilities) tuple
-      # that the current item (and all its children) have already been set to.
-      # We only need to issue set_perm/set_perm_recursive commands if we're
-      # supposed to be something different.
-      if item.is_dir:
-        if current != item.best_subtree:
-          script.SetPermissionsRecursive("/"+item.name, *item.best_subtree)
-          current = item.best_subtree
-
-        if item.uid != current[0] or item.gid != current[1] or \
-           item.mode != current[2] or item.selabel != current[4] or \
-           item.capabilities != current[5]:
-          script.SetPermissions("/"+item.name, item.uid, item.gid,
-                                item.mode, item.selabel, item.capabilities)
-
-        for i in item.children:
-          recurse(i, current)
-      else:
-        if item.uid != current[0] or item.gid != current[1] or \
-               item.mode != current[3] or item.selabel != current[4] or \
-               item.capabilities != current[5]:
-          script.SetPermissions("/"+item.name, item.uid, item.gid,
-                                item.mode, item.selabel, item.capabilities)
-
-    recurse(self, (-1, -1, -1, -1, None, None))
-
-
-def CopyPartitionFiles(itemset, input_zip, output_zip=None, substitute=None):
-  """Copies files for the partition in the input zip to the output
-  zip.  Populates the Item class with their metadata, and returns a
-  list of symlinks.  output_zip may be None, in which case the copy is
-  skipped (but the other side effects still happen).  substitute is an
-  optional dict of {output filename: contents} to be output instead of
-  certain input files.
-  """
-
-  symlinks = []
-
-  partition = itemset.partition
-
-  for info in input_zip.infolist():
-    prefix = partition.upper() + "/"
-    if info.filename.startswith(prefix):
-      basefilename = info.filename[len(prefix):]
-      if IsSymlink(info):
-        symlinks.append((input_zip.read(info.filename),
-                         "/" + partition + "/" + basefilename))
-      else:
-        info2 = copy.copy(info)
-        fn = info2.filename = partition + "/" + basefilename
-        if substitute and fn in substitute and substitute[fn] is None:
-          continue
-        if output_zip is not None:
-          if substitute and fn in substitute:
-            data = substitute[fn]
-          else:
-            data = input_zip.read(info.filename)
-          common.ZipWriteStr(output_zip, info2, data)
-        if fn.endswith("/"):
-          itemset.Get(fn[:-1], is_dir=True)
-        else:
-          itemset.Get(fn)
-
-  symlinks.sort()
-  return symlinks
+UNZIP_PATTERN = ['IMAGES/*', 'META/*']
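Only the prebuilt images and build metadata are consumed now, so the tool can
extract just those members; a sketch (file name assumed):

    OPTIONS.input_tmp, input_zip = common.UnzipTemp(
        "target-files.zip", UNZIP_PATTERN)   # unpacks IMAGES/* and META/*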
 
 
 def SignOutput(temp_zip_name, output_zip_name):
@@ -467,12 +213,12 @@
       script.AssertOemProperty(prop, values)
 
 
-def _LoadOemDicts(script, recovery_mount_options):
+def _LoadOemDicts(script, recovery_mount_options=None):
   """Returns the list of loaded OEM properties dict."""
   oem_dicts = None
   if OPTIONS.oem_source is None:
     raise common.ExternalError("OEM source required for this build")
-  if not OPTIONS.oem_no_mount:
+  if not OPTIONS.oem_no_mount and script:
     script.Mount("/oem", recovery_mount_options)
   oem_dicts = []
   for oem_file in OPTIONS.oem_source:
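With the script argument now optional, both updater-script and scriptless
(e.g. A/B) callers can share the helper; illustrative calls:

    oem_dicts = _LoadOemDicts(script, recovery_mount_options)  # mounts /oem
    oem_dicts = _LoadOemDicts(None)    # no edify script, so no mount step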
@@ -519,6 +265,7 @@
   return ("SYSTEM/recovery-from-boot.p" in namelist or
           "SYSTEM/etc/recovery.img" in namelist)
 
+
 def HasVendorPartition(target_files_zip):
   try:
     target_files_zip.getinfo("VENDOR/")
@@ -526,6 +273,7 @@
   except KeyError:
     return False
 
+
 def GetOemProperty(name, oem_props, oem_dict, info_dict):
   if oem_props is not None and name in oem_props:
     return oem_dict[name]
@@ -542,36 +290,21 @@
       GetBuildProp("ro.build.thumbprint", info_dict))
 
 
-def GetImage(which, tmpdir, info_dict):
-  # Return an image object (suitable for passing to BlockImageDiff)
-  # for the 'which' partition (most be "system" or "vendor").  If a
-  # prebuilt image and file map are found in tmpdir they are used,
-  # otherwise they are reconstructed from the individual files.
+def GetImage(which, tmpdir):
+  """Returns an image object suitable for passing to BlockImageDiff.
+
+  The 'which' argument must be "system" or "vendor". A prebuilt image and
+  file map must already exist in tmpdir.
+  """
 
   assert which in ("system", "vendor")
 
   path = os.path.join(tmpdir, "IMAGES", which + ".img")
   mappath = os.path.join(tmpdir, "IMAGES", which + ".map")
-  if os.path.exists(path) and os.path.exists(mappath):
-    print("using %s.img from target-files" % (which,))
-    # This is a 'new' target-files, which already has the image in it.
 
-  else:
-    print("building %s.img from target-files" % (which,))
-
-    # This is an 'old' target-files, which does not contain images
-    # already built.  Build them.
-
-    mappath = tempfile.mkstemp()[1]
-    OPTIONS.tempfiles.append(mappath)
-
-    import add_img_to_target_files
-    if which == "system":
-      path = add_img_to_target_files.BuildSystem(
-          tmpdir, info_dict, block_list=mappath)
-    elif which == "vendor":
-      path = add_img_to_target_files.BuildVendor(
-          tmpdir, info_dict, block_list=mappath)
+  # The image and map files must have been created prior to calling
+  # ota_from_target_files.py (since LMP).
+  assert os.path.exists(path) and os.path.exists(mappath)
 
   # Bug: http://b/20939131
  # In ext4 filesystems, block 0 might be changed even when it is mounted
@@ -598,6 +331,7 @@
   target_fp = CalculateFingerprint(oem_props, oem_dicts and oem_dicts[0],
                                    OPTIONS.info_dict)
   metadata = {
+      "post-build": target_fp,
       "pre-device": GetOemProperty("ro.product.device", oem_props,
                                    oem_dicts and oem_dicts[0],
                                    OPTIONS.info_dict),
@@ -613,10 +347,9 @@
       metadata=metadata,
       info_dict=OPTIONS.info_dict)
 
-  has_recovery_patch = HasRecoveryPatch(input_zip)
-  block_based = OPTIONS.block_based and has_recovery_patch
+  assert HasRecoveryPatch(input_zip)
 
-  metadata["ota-type"] = "BLOCK" if block_based else "FILE"
+  metadata["ota-type"] = "BLOCK"
 
   ts = GetBuildProp("ro.build.date.utc", OPTIONS.info_dict)
   ts_text = GetBuildProp("ro.build.date", OPTIONS.info_dict)
@@ -690,61 +423,27 @@
 
   recovery_mount_options = OPTIONS.info_dict.get("recovery_mount_options")
 
-  system_items = ItemSet("system", "META/filesystem_config.txt")
   script.ShowProgress(system_progress, 0)
 
-  if block_based:
-    # Full OTA is done as an "incremental" against an empty source
-    # image.  This has the effect of writing new data from the package
-    # to the entire partition, but lets us reuse the updater code that
-    # writes incrementals to do it.
-    system_tgt = GetImage("system", OPTIONS.input_tmp, OPTIONS.info_dict)
-    system_tgt.ResetFileMap()
-    system_diff = common.BlockDifference("system", system_tgt, src=None)
-    system_diff.WriteScript(script, output_zip)
-  else:
-    script.FormatPartition("/system")
-    script.Mount("/system", recovery_mount_options)
-    if not has_recovery_patch:
-      script.UnpackPackageDir("recovery", "/system")
-    script.UnpackPackageDir("system", "/system")
-
-    symlinks = CopyPartitionFiles(system_items, input_zip, output_zip)
-    script.MakeSymlinks(symlinks)
+  # Full OTA is done as an "incremental" against an empty source image. This
+  # has the effect of writing new data from the package to the entire
+  # partition, but lets us reuse the updater code that writes incrementals to
+  # do it.
+  system_tgt = GetImage("system", OPTIONS.input_tmp)
+  system_tgt.ResetFileMap()
+  system_diff = common.BlockDifference("system", system_tgt, src=None)
+  system_diff.WriteScript(script, output_zip)
 
   boot_img = common.GetBootableImage(
       "boot.img", "boot.img", OPTIONS.input_tmp, "BOOT")
 
-  if not block_based:
-    def output_sink(fn, data):
-      common.ZipWriteStr(output_zip, "recovery/" + fn, data)
-      system_items.Get("system/" + fn)
-
-    common.MakeRecoveryPatch(OPTIONS.input_tmp, output_sink,
-                             recovery_img, boot_img)
-
-    system_items.GetMetadata(input_zip)
-    system_items.Get("system").SetPermissions(script)
-
   if HasVendorPartition(input_zip):
-    vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt")
     script.ShowProgress(0.1, 0)
 
-    if block_based:
-      vendor_tgt = GetImage("vendor", OPTIONS.input_tmp, OPTIONS.info_dict)
-      vendor_tgt.ResetFileMap()
-      vendor_diff = common.BlockDifference("vendor", vendor_tgt)
-      vendor_diff.WriteScript(script, output_zip)
-    else:
-      script.FormatPartition("/vendor")
-      script.Mount("/vendor", recovery_mount_options)
-      script.UnpackPackageDir("vendor", "/vendor")
-
-      symlinks = CopyPartitionFiles(vendor_items, input_zip, output_zip)
-      script.MakeSymlinks(symlinks)
-
-      vendor_items.GetMetadata(input_zip)
-      vendor_items.Get("vendor").SetPermissions(script)
+    vendor_tgt = GetImage("vendor", OPTIONS.input_tmp)
+    vendor_tgt.ResetFileMap()
+    vendor_diff = common.BlockDifference("vendor", vendor_tgt)
+    vendor_diff.WriteScript(script, output_zip)
 
   common.CheckSize(boot_img.data, "boot.img", OPTIONS.info_dict)
   common.ZipWriteStr(output_zip, "boot.img", boot_img.data)
@@ -797,20 +496,6 @@
                      compress_type=zipfile.ZIP_STORED)
 
 
-def LoadPartitionFiles(z, partition):
-  """Load all the files from the given partition in a given target-files
-  ZipFile, and return a dict of {filename: File object}."""
-  out = {}
-  prefix = partition.upper() + "/"
-  for info in z.infolist():
-    if info.filename.startswith(prefix) and not IsSymlink(info):
-      basefilename = info.filename[len(prefix):]
-      fn = partition + "/" + basefilename
-      data = z.read(info.filename)
-      out[fn] = common.File(fn, data, info.compress_size)
-  return out
-
-
 def GetBuildProp(prop, info_dict):
   """Return the fingerprint of the build of a given target-files info_dict."""
   try:
@@ -819,18 +504,6 @@
     raise common.ExternalError("couldn't find %s in build.prop" % (prop,))
 
 
-def AddToKnownPaths(filename, known_paths):
-  if filename[-1] == "/":
-    return
-  dirs = filename.split("/")[:-1]
-  while len(dirs) > 0:
-    path = "/".join(dirs)
-    if path in known_paths:
-      break
-    known_paths.add(path)
-    dirs.pop()
-
-
 def HandleDowngradeMetadata(metadata):
   # Only incremental OTAs are allowed to reach here.
   assert OPTIONS.incremental_source is not None
@@ -840,26 +513,25 @@
   is_downgrade = long(post_timestamp) < long(pre_timestamp)
 
   if OPTIONS.downgrade:
-    metadata["ota-downgrade"] = "yes"
     if not is_downgrade:
       raise RuntimeError("--downgrade specified but no downgrade detected: "
                          "pre: %s, post: %s" % (pre_timestamp, post_timestamp))
+    metadata["ota-downgrade"] = "yes"
+  elif OPTIONS.timestamp:
+    if not is_downgrade:
+      raise RuntimeError("--timestamp specified but no timestamp hack needed: "
+                         "pre: %s, post: %s" % (pre_timestamp, post_timestamp))
+    metadata["post-timestamp"] = str(long(pre_timestamp) + 1)
   else:
     if is_downgrade:
-      # Non-fatal here to allow generating such a package which may require
-      # manual work to adjust the post-timestamp. A legit use case is that we
-      # cut a new build C (after having A and B), but want to enfore the
-      # update path of A -> C -> B. Specifying --downgrade may not help since
-      # that would enforce a data wipe for C -> B update.
-      print("\nWARNING: downgrade detected: pre: %s, post: %s.\n"
-            "The package may not be deployed properly. "
-            "Try --downgrade?\n" % (pre_timestamp, post_timestamp))
+      raise RuntimeError("Downgrade detected based on timestamp check: "
+                         "pre: %s, post: %s. Need to specify --timestamp OR "
+                         "--downgrade to allow building the incremental." % (
+                             pre_timestamp, post_timestamp))
     metadata["post-timestamp"] = post_timestamp
 
 
 def WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip):
-  # TODO(tbao): We should factor out the common parts between
-  # WriteBlockIncrementalOTAPackage() and WriteIncrementalOTAPackage().
   source_version = OPTIONS.source_info_dict["recovery_api_version"]
   target_version = OPTIONS.target_info_dict["recovery_api_version"]
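A worked example of the timestamp handling above (timestamp values are
illustrative):

    # Source (pre) build is newer than target (post) build:
    pre_timestamp, post_timestamp = "1500000000", "1400000000"
    is_downgrade = long(post_timestamp) < long(pre_timestamp)   # True
    # With --override_timestamp (and without --downgrade), the metadata gets
    # a post-timestamp strictly newer than the source build's:
    metadata = {"post-timestamp": str(long(pre_timestamp) + 1)}  # 1500000001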
 
@@ -919,8 +591,8 @@
   target_recovery = common.GetBootableImage(
       "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
 
-  system_src = GetImage("system", OPTIONS.source_tmp, OPTIONS.source_info_dict)
-  system_tgt = GetImage("system", OPTIONS.target_tmp, OPTIONS.target_info_dict)
+  system_src = GetImage("system", OPTIONS.source_tmp)
+  system_tgt = GetImage("system", OPTIONS.target_tmp)
 
   blockimgdiff_version = 1
   if OPTIONS.info_dict:
@@ -947,10 +619,8 @@
   if HasVendorPartition(target_zip):
     if not HasVendorPartition(source_zip):
       raise RuntimeError("can't generate incremental that adds /vendor")
-    vendor_src = GetImage("vendor", OPTIONS.source_tmp,
-                          OPTIONS.source_info_dict)
-    vendor_tgt = GetImage("vendor", OPTIONS.target_tmp,
-                          OPTIONS.target_info_dict)
+    vendor_src = GetImage("vendor", OPTIONS.source_tmp)
+    vendor_tgt = GetImage("vendor", OPTIONS.target_tmp)
 
     # Check first block of vendor partition for remount R/W only if
     # disk type is ext4
@@ -1176,7 +846,7 @@
       "recovery_mount_options")
   oem_dicts = None
   if oem_props:
-    oem_dicts = _LoadOemDicts(script, oem_props, recovery_mount_options)
+    oem_dicts = _LoadOemDicts(script, recovery_mount_options)
 
   target_fp = CalculateFingerprint(oem_props, oem_dicts and oem_dicts[0],
                                    OPTIONS.info_dict)
@@ -1220,13 +890,13 @@
       recovery_type, recovery_device, recovery_img.size, recovery_img.sha1))
   script.AppendExtra("")
 
-  system_tgt = GetImage("system", OPTIONS.input_tmp, OPTIONS.info_dict)
+  system_tgt = GetImage("system", OPTIONS.input_tmp)
   system_tgt.ResetFileMap()
   system_diff = common.BlockDifference("system", system_tgt, src=None)
   system_diff.WriteStrictVerifyScript(script)
 
   if HasVendorPartition(input_zip):
-    vendor_tgt = GetImage("vendor", OPTIONS.input_tmp, OPTIONS.info_dict)
+    vendor_tgt = GetImage("vendor", OPTIONS.input_tmp)
     vendor_tgt.ResetFileMap()
     vendor_diff = common.BlockDifference("vendor", vendor_tgt, src=None)
     vendor_diff.WriteStrictVerifyScript(script)
@@ -1321,7 +991,7 @@
   oem_props = OPTIONS.info_dict.get("oem_fingerprint_properties", None)
   oem_dicts = None
   if oem_props:
-    oem_dicts = _LoadOemDicts(script, None)
+    oem_dicts = _LoadOemDicts(None)
 
   metadata = {
       "post-build": CalculateFingerprint(oem_props, oem_dicts and oem_dicts[0],
@@ -1507,556 +1177,6 @@
   common.ZipClose(output_zip)
 
 
-class FileDifference(object):
-  def __init__(self, partition, source_zip, target_zip, output_zip):
-    self.deferred_patch_list = None
-    print("Loading target...")
-    self.target_data = target_data = LoadPartitionFiles(target_zip, partition)
-    print("Loading source...")
-    self.source_data = source_data = LoadPartitionFiles(source_zip, partition)
-
-    self.verbatim_targets = verbatim_targets = []
-    self.patch_list = patch_list = []
-    diffs = []
-    self.renames = renames = {}
-    known_paths = set()
-    largest_source_size = 0
-
-    matching_file_cache = {}
-    for fn, sf in source_data.items():
-      assert fn == sf.name
-      matching_file_cache["path:" + fn] = sf
-      if fn in target_data.keys():
-        AddToKnownPaths(fn, known_paths)
-      # Only allow eligibility for filename/sha matching
-      # if there isn't a perfect path match.
-      if target_data.get(sf.name) is None:
-        matching_file_cache["file:" + fn.split("/")[-1]] = sf
-        matching_file_cache["sha:" + sf.sha1] = sf
-
-    for fn in sorted(target_data.keys()):
-      tf = target_data[fn]
-      assert fn == tf.name
-      sf = ClosestFileMatch(tf, matching_file_cache, renames)
-      if sf is not None and sf.name != tf.name:
-        print("File has moved from " + sf.name + " to " + tf.name)
-        renames[sf.name] = tf
-
-      if sf is None or fn in OPTIONS.require_verbatim:
-        # This file should be included verbatim
-        if fn in OPTIONS.prohibit_verbatim:
-          raise common.ExternalError("\"%s\" must be sent verbatim" % (fn,))
-        print("send", fn, "verbatim")
-        tf.AddToZip(output_zip)
-        verbatim_targets.append((fn, tf.size, tf.sha1))
-        if fn in target_data.keys():
-          AddToKnownPaths(fn, known_paths)
-      elif tf.sha1 != sf.sha1:
-        # File is different; consider sending as a patch
-        diffs.append(common.Difference(tf, sf))
-      else:
-        # Target file data identical to source (may still be renamed)
-        pass
-
-    common.ComputeDifferences(diffs)
-
-    for diff in diffs:
-      tf, sf, d = diff.GetPatch()
-      path = "/".join(tf.name.split("/")[:-1])
-      if d is None or len(d) > tf.compress_size * OPTIONS.patch_threshold or \
-          path not in known_paths:
-        # patch is almost as big as the file; don't bother patching
-        # or a patch + rename cannot take place due to the target
-        # directory not existing
-        tf.AddToZip(output_zip)
-        verbatim_targets.append((tf.name, tf.size, tf.sha1))
-        if sf.name in renames:
-          del renames[sf.name]
-        AddToKnownPaths(tf.name, known_paths)
-      else:
-        common.ZipWriteStr(output_zip, "patch/" + sf.name + ".p", d)
-        patch_list.append((tf, sf, tf.size, common.sha1(d).hexdigest()))
-        largest_source_size = max(largest_source_size, sf.size)
-
-    self.largest_source_size = largest_source_size
-
-  def EmitVerification(self, script):
-    so_far = 0
-    for tf, sf, _, _ in self.patch_list:
-      if tf.name != sf.name:
-        script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
-      script.PatchCheck("/"+sf.name, tf.sha1, sf.sha1)
-      so_far += sf.size
-    return so_far
-
-  def EmitExplicitTargetVerification(self, script):
-    for fn, _, sha1 in self.verbatim_targets:
-      if fn[-1] != "/":
-        script.FileCheck("/"+fn, sha1)
-    for tf, _, _, _ in self.patch_list:
-      script.FileCheck(tf.name, tf.sha1)
-
-  def RemoveUnneededFiles(self, script, extras=()):
-    file_list = ["/" + i[0] for i in self.verbatim_targets]
-    file_list += ["/" + i for i in self.source_data
-                  if i not in self.target_data and i not in self.renames]
-    file_list += list(extras)
-    # Sort the list in descending order, which removes all the files first
-    # before attempting to remove the folder. (Bug: 22960996)
-    script.DeleteFiles(sorted(file_list, reverse=True))
-
-  def TotalPatchSize(self):
-    return sum(i[1].size for i in self.patch_list)
-
-  def EmitPatches(self, script, total_patch_size, so_far):
-    self.deferred_patch_list = deferred_patch_list = []
-    for item in self.patch_list:
-      tf, sf, _, _ = item
-      if tf.name == "system/build.prop":
-        deferred_patch_list.append(item)
-        continue
-      if sf.name != tf.name:
-        script.SkipNextActionIfTargetExists(tf.name, tf.sha1)
-      script.ApplyPatch("/" + sf.name, "-", tf.size, tf.sha1, sf.sha1,
-                        "patch/" + sf.name + ".p")
-      so_far += tf.size
-      script.SetProgress(so_far / total_patch_size)
-    return so_far
-
-  def EmitDeferredPatches(self, script):
-    for item in self.deferred_patch_list:
-      tf, sf, _, _ = item
-      script.ApplyPatch("/"+sf.name, "-", tf.size, tf.sha1, sf.sha1,
-                        "patch/" + sf.name + ".p")
-    script.SetPermissions("/system/build.prop", 0, 0, 0o644, None, None)
-
-  def EmitRenames(self, script):
-    if len(self.renames) > 0:
-      script.Print("Renaming files...")
-      for src, tgt in self.renames.iteritems():
-        print("Renaming " + src + " to " + tgt.name)
-        script.RenameFile(src, tgt.name)
-
-
-def WriteIncrementalOTAPackage(target_zip, source_zip, output_zip):
-  target_has_recovery_patch = HasRecoveryPatch(target_zip)
-  source_has_recovery_patch = HasRecoveryPatch(source_zip)
-
-  if (OPTIONS.block_based and
-      target_has_recovery_patch and
-      source_has_recovery_patch):
-    return WriteBlockIncrementalOTAPackage(target_zip, source_zip, output_zip)
-
-  source_version = OPTIONS.source_info_dict["recovery_api_version"]
-  target_version = OPTIONS.target_info_dict["recovery_api_version"]
-
-  if source_version == 0:
-    print("WARNING: generating edify script for a source that "
-          "can't install it.")
-  script = edify_generator.EdifyGenerator(
-      source_version, OPTIONS.target_info_dict,
-      fstab=OPTIONS.source_info_dict["fstab"])
-
-  recovery_mount_options = OPTIONS.source_info_dict.get(
-      "recovery_mount_options")
-  source_oem_props = OPTIONS.source_info_dict.get("oem_fingerprint_properties")
-  target_oem_props = OPTIONS.target_info_dict.get("oem_fingerprint_properties")
-  oem_dicts = None
-  if source_oem_props or target_oem_props:
-    oem_dicts = _LoadOemDicts(script, recovery_mount_options)
-
-  metadata = {
-      "pre-device": GetOemProperty("ro.product.device", source_oem_props,
-                                   oem_dicts and oem_dicts[0],
-                                   OPTIONS.source_info_dict),
-      "ota-type": "FILE",
-  }
-
-  HandleDowngradeMetadata(metadata)
-
-  device_specific = common.DeviceSpecificParams(
-      source_zip=source_zip,
-      source_version=source_version,
-      target_zip=target_zip,
-      target_version=target_version,
-      output_zip=output_zip,
-      script=script,
-      metadata=metadata,
-      info_dict=OPTIONS.source_info_dict)
-
-  system_diff = FileDifference("system", source_zip, target_zip, output_zip)
-  script.Mount("/system", recovery_mount_options)
-  if HasVendorPartition(target_zip):
-    vendor_diff = FileDifference("vendor", source_zip, target_zip, output_zip)
-    script.Mount("/vendor", recovery_mount_options)
-  else:
-    vendor_diff = None
-
-  target_fp = CalculateFingerprint(target_oem_props, oem_dicts and oem_dicts[0],
-                                   OPTIONS.target_info_dict)
-  source_fp = CalculateFingerprint(source_oem_props, oem_dicts and oem_dicts[0],
-                                   OPTIONS.source_info_dict)
-
-  if source_oem_props is None and target_oem_props is None:
-    script.AssertSomeFingerprint(source_fp, target_fp)
-  elif source_oem_props is not None and target_oem_props is not None:
-    script.AssertSomeThumbprint(
-        GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict),
-        GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))
-  elif source_oem_props is None and target_oem_props is not None:
-    script.AssertFingerprintOrThumbprint(
-        source_fp,
-        GetBuildProp("ro.build.thumbprint", OPTIONS.target_info_dict))
-  else:
-    script.AssertFingerprintOrThumbprint(
-        target_fp,
-        GetBuildProp("ro.build.thumbprint", OPTIONS.source_info_dict))
-
-  metadata["pre-build"] = source_fp
-  metadata["post-build"] = target_fp
-  metadata["pre-build-incremental"] = GetBuildProp(
-      "ro.build.version.incremental", OPTIONS.source_info_dict)
-  metadata["post-build-incremental"] = GetBuildProp(
-      "ro.build.version.incremental", OPTIONS.target_info_dict)
-
-  source_boot = common.GetBootableImage(
-      "/tmp/boot.img", "boot.img", OPTIONS.source_tmp, "BOOT",
-      OPTIONS.source_info_dict)
-  target_boot = common.GetBootableImage(
-      "/tmp/boot.img", "boot.img", OPTIONS.target_tmp, "BOOT")
-  updating_boot = (not OPTIONS.two_step and
-                   (source_boot.data != target_boot.data))
-
-  source_recovery = common.GetBootableImage(
-      "/tmp/recovery.img", "recovery.img", OPTIONS.source_tmp, "RECOVERY",
-      OPTIONS.source_info_dict)
-  target_recovery = common.GetBootableImage(
-      "/tmp/recovery.img", "recovery.img", OPTIONS.target_tmp, "RECOVERY")
-  updating_recovery = (source_recovery.data != target_recovery.data)
-
-  # Here's how we divide up the progress bar:
-  #  0.1 for verifying the start state (PatchCheck calls)
-  #  0.8 for applying patches (ApplyPatch calls)
-  #  0.1 for unpacking verbatim files, symlinking, and doing the
-  #      device-specific commands.
-
-  AppendAssertions(script, OPTIONS.target_info_dict, oem_dicts)
-  device_specific.IncrementalOTA_Assertions()
-
-  # Two-step incremental package strategy (in chronological order,
-  # which is *not* the order in which the generated script has
-  # things):
-  #
-  # if stage is not "2/3" or "3/3":
-  #    do verification on current system
-  #    write recovery image to boot partition
-  #    set stage to "2/3"
-  #    reboot to boot partition and restart recovery
-  # else if stage is "2/3":
-  #    write recovery image to recovery partition
-  #    set stage to "3/3"
-  #    reboot to recovery partition and restart recovery
-  # else:
-  #    (stage must be "3/3")
-  #    perform update:
-  #       patch system files, etc.
-  #       force full install of new boot image
-  #       set up system to update recovery partition on first boot
-  #    complete script normally
-  #    (allow recovery to mark itself finished and reboot)
-
-  if OPTIONS.two_step:
-    if not OPTIONS.source_info_dict.get("multistage_support", None):
-      assert False, "two-step packages not supported by this build"
-    fs = OPTIONS.source_info_dict["fstab"]["/misc"]
-    assert fs.fs_type.upper() == "EMMC", \
-        "two-step packages only supported on devices with EMMC /misc partitions"
-    bcb_dev = {"bcb_dev": fs.device}
-    common.ZipWriteStr(output_zip, "recovery.img", target_recovery.data)
-    script.AppendExtra("""
-if get_stage("%(bcb_dev)s") == "2/3" then
-""" % bcb_dev)
-
-    # Stage 2/3: Write recovery image to /recovery (currently running /boot).
-    script.Comment("Stage 2/3")
-    script.AppendExtra("sleep(20);\n")
-    script.WriteRawImage("/recovery", "recovery.img")
-    script.AppendExtra("""
-set_stage("%(bcb_dev)s", "3/3");
-reboot_now("%(bcb_dev)s", "recovery");
-else if get_stage("%(bcb_dev)s") != "3/3" then
-""" % bcb_dev)
-
-    # Stage 1/3: (a) Verify the current system.
-    script.Comment("Stage 1/3")
-
-  # Dump fingerprints
-  script.Print("Source: %s" % (source_fp,))
-  script.Print("Target: %s" % (target_fp,))
-
-  script.Print("Verifying current system...")
-
-  device_specific.IncrementalOTA_VerifyBegin()
-
-  script.ShowProgress(0.1, 0)
-  so_far = system_diff.EmitVerification(script)
-  if vendor_diff:
-    so_far += vendor_diff.EmitVerification(script)
-
-  size = []
-  if system_diff.patch_list:
-    size.append(system_diff.largest_source_size)
-  if vendor_diff:
-    if vendor_diff.patch_list:
-      size.append(vendor_diff.largest_source_size)
-
-  if updating_boot:
-    d = common.Difference(target_boot, source_boot)
-    _, _, d = d.ComputePatch()
-    print("boot      target: %d  source: %d  diff: %d" % (
-        target_boot.size, source_boot.size, len(d)))
-
-    common.ZipWriteStr(output_zip, "patch/boot.img.p", d)
-
-    boot_type, boot_device = common.GetTypeAndDevice(
-        "/boot", OPTIONS.source_info_dict)
-
-    script.PatchCheck("%s:%s:%d:%s:%d:%s" %
-                      (boot_type, boot_device,
-                       source_boot.size, source_boot.sha1,
-                       target_boot.size, target_boot.sha1))
-    so_far += source_boot.size
-    size.append(target_boot.size)
-
-  if size:
-    script.CacheFreeSpaceCheck(max(size))
-
-  device_specific.IncrementalOTA_VerifyEnd()
-
-  if OPTIONS.two_step:
-    # Stage 1/3: (b) Write recovery image to /boot.
-    _WriteRecoveryImageToBoot(script, output_zip)
-
-    script.AppendExtra("""
-set_stage("%(bcb_dev)s", "2/3");
-reboot_now("%(bcb_dev)s", "");
-else
-""" % bcb_dev)
-
-    # Stage 3/3: Make changes.
-    script.Comment("Stage 3/3")
-
-  script.Comment("---- start making changes here ----")
-
-  device_specific.IncrementalOTA_InstallBegin()
-
-  if OPTIONS.two_step:
-    common.ZipWriteStr(output_zip, "boot.img", target_boot.data)
-    script.WriteRawImage("/boot", "boot.img")
-    print("writing full boot image (forced by two-step mode)")
-
-  script.Print("Removing unneeded files...")
-  system_diff.RemoveUnneededFiles(script, ("/system/recovery.img",))
-  if vendor_diff:
-    vendor_diff.RemoveUnneededFiles(script)
-
-  script.ShowProgress(0.8, 0)
-  total_patch_size = 1.0 + system_diff.TotalPatchSize()
-  if vendor_diff:
-    total_patch_size += vendor_diff.TotalPatchSize()
-  if updating_boot:
-    total_patch_size += target_boot.size
-
-  script.Print("Patching system files...")
-  so_far = system_diff.EmitPatches(script, total_patch_size, 0)
-  if vendor_diff:
-    script.Print("Patching vendor files...")
-    so_far = vendor_diff.EmitPatches(script, total_patch_size, so_far)
-
-  if not OPTIONS.two_step:
-    if updating_boot:
-      # Produce the boot image by applying a patch to the current
-      # contents of the boot partition, and write it back to the
-      # partition.
-      script.Print("Patching boot image...")
-      script.ApplyPatch("%s:%s:%d:%s:%d:%s"
-                        % (boot_type, boot_device,
-                           source_boot.size, source_boot.sha1,
-                           target_boot.size, target_boot.sha1),
-                        "-",
-                        target_boot.size, target_boot.sha1,
-                        source_boot.sha1, "patch/boot.img.p")
-      so_far += target_boot.size
-      script.SetProgress(so_far / total_patch_size)
-      print("boot image changed; including.")
-    else:
-      print("boot image unchanged; skipping.")
-
-  system_items = ItemSet("system", "META/filesystem_config.txt")
-  if vendor_diff:
-    vendor_items = ItemSet("vendor", "META/vendor_filesystem_config.txt")
-
-  if updating_recovery:
-    # Recovery is generated as a patch using both the boot image
-    # (which contains the same linux kernel as recovery) and the file
-    # /system/etc/recovery-resource.dat (which contains all the images
-    # used in the recovery UI) as sources.  This lets us minimize the
-    # size of the patch, which must be included in every OTA package.
-    #
-    # For older builds where recovery-resource.dat is not present, we
-    # use only the boot image as the source.
-
-    if not target_has_recovery_patch:
-      def output_sink(fn, data):
-        common.ZipWriteStr(output_zip, "recovery/" + fn, data)
-        system_items.Get("system/" + fn)
-
-      common.MakeRecoveryPatch(OPTIONS.target_tmp, output_sink,
-                               target_recovery, target_boot)
-      script.DeleteFiles(["/system/recovery-from-boot.p",
-                          "/system/etc/recovery.img",
-                          "/system/etc/install-recovery.sh"])
-    print("recovery image changed; including as patch from boot.")
-  else:
-    print("recovery image unchanged; skipping.")
-
-  script.ShowProgress(0.1, 10)
-
-  target_symlinks = CopyPartitionFiles(system_items, target_zip, None)
-  if vendor_diff:
-    target_symlinks.extend(CopyPartitionFiles(vendor_items, target_zip, None))
-
-  temp_script = script.MakeTemporary()
-  system_items.GetMetadata(target_zip)
-  system_items.Get("system").SetPermissions(temp_script)
-  if vendor_diff:
-    vendor_items.GetMetadata(target_zip)
-    vendor_items.Get("vendor").SetPermissions(temp_script)
-
-  # Note that this call will mess up the trees of Items, so make sure
-  # we're done with them.
-  source_symlinks = CopyPartitionFiles(system_items, source_zip, None)
-  if vendor_diff:
-    source_symlinks.extend(CopyPartitionFiles(vendor_items, source_zip, None))
-
-  target_symlinks_d = dict([(i[1], i[0]) for i in target_symlinks])
-  source_symlinks_d = dict([(i[1], i[0]) for i in source_symlinks])
-
-  # Delete all the symlinks in source that aren't in target.  This
-  # needs to happen before verbatim files are unpacked, in case a
-  # symlink in the source is replaced by a real file in the target.
-
-  # If a symlink in the source will be replaced by a regular file, we cannot
-  # delete the symlink/file in case the package gets applied again. For such
-  # a symlink, we prepend a sha1_check() to detect if it has been updated.
-  # (Bug: 23646151)
-  replaced_symlinks = dict()
-  if system_diff:
-    for i in system_diff.verbatim_targets:
-      replaced_symlinks["/%s" % (i[0],)] = i[2]
-  if vendor_diff:
-    for i in vendor_diff.verbatim_targets:
-      replaced_symlinks["/%s" % (i[0],)] = i[2]
-
-  if system_diff:
-    for tf in system_diff.renames.values():
-      replaced_symlinks["/%s" % (tf.name,)] = tf.sha1
-  if vendor_diff:
-    for tf in vendor_diff.renames.values():
-      replaced_symlinks["/%s" % (tf.name,)] = tf.sha1
-
-  always_delete = []
-  may_delete = []
-  for dest, link in source_symlinks:
-    if link not in target_symlinks_d:
-      if link in replaced_symlinks:
-        may_delete.append((link, replaced_symlinks[link]))
-      else:
-        always_delete.append(link)
-  script.DeleteFiles(always_delete)
-  script.DeleteFilesIfNotMatching(may_delete)
-
-  if system_diff.verbatim_targets:
-    script.Print("Unpacking new system files...")
-    script.UnpackPackageDir("system", "/system")
-  if vendor_diff and vendor_diff.verbatim_targets:
-    script.Print("Unpacking new vendor files...")
-    script.UnpackPackageDir("vendor", "/vendor")
-
-  if updating_recovery and not target_has_recovery_patch:
-    script.Print("Unpacking new recovery...")
-    script.UnpackPackageDir("recovery", "/system")
-
-  system_diff.EmitRenames(script)
-  if vendor_diff:
-    vendor_diff.EmitRenames(script)
-
-  script.Print("Symlinks and permissions...")
-
-  # Create all the symlinks that don't already exist, or point to
-  # somewhere different than what we want.  Delete each symlink before
-  # creating it, since the 'symlink' command won't overwrite.
-  to_create = []
-  for dest, link in target_symlinks:
-    if link in source_symlinks_d:
-      if dest != source_symlinks_d[link]:
-        to_create.append((dest, link))
-    else:
-      to_create.append((dest, link))
-  script.DeleteFiles([i[1] for i in to_create])
-  script.MakeSymlinks(to_create)
-
-  # Now that the symlinks are created, we can set all the
-  # permissions.
-  script.AppendScript(temp_script)
-
-  # Do device-specific installation (eg, write radio image).
-  device_specific.IncrementalOTA_InstallEnd()
-
-  if OPTIONS.extra_script is not None:
-    script.AppendExtra(OPTIONS.extra_script)
-
-  # Patch the build.prop file last, so if something fails but the
-  # device can still come up, it appears to be the old build and will
-  # get set the OTA package again to retry.
-  script.Print("Patching remaining system files...")
-  system_diff.EmitDeferredPatches(script)
-
-  if OPTIONS.wipe_user_data:
-    script.Print("Erasing user data...")
-    script.FormatPartition("/data")
-    metadata["ota-wipe"] = "yes"
-
-  if OPTIONS.two_step:
-    script.AppendExtra("""
-set_stage("%(bcb_dev)s", "");
-endif;
-endif;
-""" % bcb_dev)
-
-  if OPTIONS.verify and system_diff:
-    script.Print("Remounting and verifying system partition files...")
-    script.Unmount("/system")
-    script.Mount("/system", recovery_mount_options)
-    system_diff.EmitExplicitTargetVerification(script)
-
-  if OPTIONS.verify and vendor_diff:
-    script.Print("Remounting and verifying vendor partition files...")
-    script.Unmount("/vendor")
-    script.Mount("/vendor", recovery_mount_options)
-    vendor_diff.EmitExplicitTargetVerification(script)
-
-  # For downgrade OTAs, we prefer to use the update-binary in the source
-  # build that is actually newer than the one in the target build.
-  if OPTIONS.downgrade:
-    script.AddToZip(source_zip, output_zip, input_path=OPTIONS.updater_binary)
-  else:
-    script.AddToZip(target_zip, output_zip, input_path=OPTIONS.updater_binary)
-
-  metadata["ota-required-cache"] = str(script.required_cache)
-  WriteMetadata(metadata, output_zip)
-
-
 def main(argv):
 
   def option_handler(o, a):
@@ -2075,6 +1195,8 @@
     elif o == "--downgrade":
       OPTIONS.downgrade = True
       OPTIONS.wipe_user_data = True
+    elif o == "--override_timestamp":
+      OPTIONS.timestamp = True
     elif o in ("-o", "--oem_settings"):
       OPTIONS.oem_source = a.split(',')
     elif o == "--oem_no_mount":
@@ -2127,6 +1249,7 @@
                                  "full_bootloader",
                                  "wipe_user_data",
                                  "downgrade",
+                                 "override_timestamp",
                                  "extra_script=",
                                  "worker_threads=",
                                  "two_step",
@@ -2159,6 +1282,9 @@
     if OPTIONS.incremental_source is None:
       raise ValueError("Cannot generate downgradable full OTAs")
 
+  assert not (OPTIONS.downgrade and OPTIONS.timestamp), \
+      "Cannot specify both --downgrade and --override_timestamp"
+
   # Load the dict file from the zip directly to have a peek at the OTA type.
   # For packages using A/B update, unzipping is not needed.
   input_zip = zipfile.ZipFile(args[0], "r")
@@ -2194,7 +1320,8 @@
     OPTIONS.extra_script = open(OPTIONS.extra_script).read()
 
   print("unzipping target target-files...")
-  OPTIONS.input_tmp, input_zip = common.UnzipTemp(args[0])
+  OPTIONS.input_tmp, input_zip = common.UnzipTemp(
+      args[0], UNZIP_PATTERN)
 
   OPTIONS.target_tmp = OPTIONS.input_tmp
   OPTIONS.info_dict = common.LoadInfoDict(input_zip, OPTIONS.target_tmp)
@@ -2262,7 +1389,8 @@
   else:
     print("unzipping source target-files...")
     OPTIONS.source_tmp, source_zip = common.UnzipTemp(
-        OPTIONS.incremental_source)
+        OPTIONS.incremental_source,
+        UNZIP_PATTERN)
     OPTIONS.target_info_dict = OPTIONS.info_dict
     OPTIONS.source_info_dict = common.LoadInfoDict(source_zip,
                                                    OPTIONS.source_tmp)
@@ -2270,7 +1398,7 @@
       print("--- source info ---")
       common.DumpInfoDict(OPTIONS.source_info_dict)
     try:
-      WriteIncrementalOTAPackage(input_zip, source_zip, output_zip)
+      WriteBlockIncrementalOTAPackage(input_zip, source_zip, output_zip)
       if OPTIONS.log_diff:
         out_file = open(OPTIONS.log_diff, 'w')
         import target_files_diff
diff --git a/tools/releasetools/sparse_img.py b/tools/releasetools/sparse_img.py
index 4ba7560..7eb60d9 100644
--- a/tools/releasetools/sparse_img.py
+++ b/tools/releasetools/sparse_img.py
@@ -144,6 +144,12 @@
     f.seek(16, os.SEEK_SET)
     f.write(struct.pack("<2I", self.total_blocks, self.total_chunks))
 
+  def RangeSha1(self, ranges):
+    h = sha1()
+    for data in self._GetRangeData(ranges):
+      h.update(data)
+    return h.hexdigest()
+
   def ReadRangeSet(self, ranges):
     return [d for d in self._GetRangeData(ranges)]
 
@@ -155,10 +161,11 @@
     ranges = self.care_map
     if not include_clobbered_blocks:
       ranges = ranges.subtract(self.clobbered_blocks)
-    h = sha1()
-    for d in self._GetRangeData(ranges):
-      h.update(d)
-    return h.hexdigest()
+    return self.RangeSha1(ranges)
+
+  def WriteRangeDataToFd(self, ranges, fd):
+    for data in self._GetRangeData(ranges):
+      fd.write(data)
 
   def _GetRangeData(self, ranges):
     """Generator that produces all the image data in 'ranges'.  The
diff --git a/tools/soong_to_convert.py b/tools/soong_to_convert.py
index 379a1ad..3d62d43 100755
--- a/tools/soong_to_convert.py
+++ b/tools/soong_to_convert.py
@@ -42,7 +42,6 @@
  dotdot_incs: LOCAL_C_INCLUDES contains paths including '..'
   srcs_dotarm: LOCAL_SRC_FILES contains source files like <...>.c.arm
   aidl: LOCAL_SRC_FILES contains .aidl sources
-  dbus: LOCAL_SRC_FILES contains .dbus-xml sources
   objc: LOCAL_SRC_FILES contains Objective-C sources
   proto: LOCAL_SRC_FILES contains .proto sources
   rs: LOCAL_SRC_FILES contains renderscript sources