Merge "Revert^2 "Add the Android Runtime APEX module to PRODUCT_PACKAGES.""
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 39441e1..7d42fc9 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -515,6 +515,8 @@
 # Clean up old ninja files
 $(call add-clean-step, rm -f $(OUT_DIR)/build-*-dist*.ninja)
 
+$(call add-clean-step, rm -f $(HOST_OUT)/*ts/host-libprotobuf-java-*.jar)
+
 # ************************************************
 # NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
 # ************************************************
diff --git a/core/Makefile b/core/Makefile
index 6721757..3e0125e 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -407,7 +407,6 @@
 			TARGET_CPU_ABI_LIST_64_BIT="$(TARGET_CPU_ABI_LIST_64_BIT)" \
 			TARGET_CPU_ABI="$(TARGET_CPU_ABI)" \
 			TARGET_CPU_ABI2="$(TARGET_CPU_ABI2)" \
-			TARGET_AAPT_CHARACTERISTICS="$(TARGET_AAPT_CHARACTERISTICS)" \
 	        bash $(BUILDINFO_SH) >> $@
 	$(hide) $(foreach file,$(system_prop_file), \
 		if [ -f "$(file)" ]; then \
@@ -496,7 +495,8 @@
 endif  # BOARD_USES_PRODUCTIMAGE
 	$(hide) echo "#" >> $@; \
 	        echo "# ADDITIONAL PRODUCT PROPERTIES" >> $@; \
-	        echo "#" >> $@;
+	        echo "#" >> $@; \
+          echo "ro.build.characteristics=$(TARGET_AAPT_CHARACTERISTICS)" >> $@;
 	$(hide) $(foreach line,$(FINAL_PRODUCT_PROPERTIES), \
 		echo "$(line)" >> $@;)
 	$(hide) build/make/tools/post_process_props.py $@
@@ -977,7 +977,7 @@
 $(2) : $(3)
 $(3) : $(6) $(BUILD_SYSTEM)/Makefile build/make/tools/generate-notice-files.py
 	build/make/tools/generate-notice-files.py --text-output $(2) \
-		$(if $(filter $(1),xml_excluded_extra_partitions),-e vendor$(comma)product$(comma)product_services --xml-output, \
+		$(if $(filter $(1),xml_excluded_extra_partitions),-e vendor -e product -e product_services --xml-output, \
 		  $(if $(filter $(1),xml_vendor),-i vendor --xml-output, \
 		    $(if $(filter $(1),xml_product),-i product --xml-output, \
 		      $(if $(filter $(1),xml_product_services),-i product_services --xml-output, \
@@ -986,21 +986,37 @@
 notice_files: $(2) $(3)
 endef
 
+# Notice file logic isn't relevant for TARGET_BUILD_APPS
+ifndef TARGET_BUILD_APPS
+
 # TODO These intermediate NOTICE.txt/NOTICE.html files should go into
 # TARGET_OUT_NOTICE_FILES now that the notice files are gathered from
 # the src subdirectory.
-
 target_notice_file_txt := $(TARGET_OUT_INTERMEDIATES)/NOTICE.txt
-target_notice_file_html_or_xml := $(TARGET_OUT_INTERMEDIATES)/NOTICE.html
-target_notice_file_html_or_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE.html.gz
-installed_notice_html_or_xml_gz := $(TARGET_OUT)/etc/NOTICE.html.gz
 tools_notice_file_txt := $(HOST_OUT_INTERMEDIATES)/NOTICE.txt
 tools_notice_file_html := $(HOST_OUT_INTERMEDIATES)/NOTICE.html
+kernel_notice_file := $(TARGET_OUT_NOTICE_FILES)/src/kernel.txt
+winpthreads_notice_file := $(TARGET_OUT_NOTICE_FILES)/src/winpthreads.txt
+pdk_fusion_notice_files := $(filter $(TARGET_OUT_NOTICE_FILES)/%, $(ALL_PDK_FUSION_FILES))
 
 # TODO(b/69865032): Make PRODUCT_NOTICE_SPLIT the default behavior.
-ifeq ($(PRODUCT_NOTICE_SPLIT),true)
-target_notice_file_html_or_xml := $(TARGET_OUT_INTERMEDIATES)/NOTICE.xml
-target_notice_file_html_or_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE.xml.gz
+ifneq ($(PRODUCT_NOTICE_SPLIT),true)
+target_notice_file_html := $(TARGET_OUT_INTERMEDIATES)/NOTICE.html
+target_notice_file_html_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE.html.gz
+installed_notice_html_or_xml_gz := $(TARGET_OUT)/etc/NOTICE.html.gz
+$(eval $(call combine-notice-files, html, \
+			$(target_notice_file_txt), \
+			$(target_notice_file_html), \
+			"Notices for files contained in the filesystem images in this directory:", \
+			$(TARGET_OUT_NOTICE_FILES), \
+			$(ALL_DEFAULT_INSTALLED_MODULES) $(kernel_notice_file) $(pdk_fusion_notice_files)))
+$(target_notice_file_html_gz): $(target_notice_file_html) | $(MINIGZIP)
+	$(hide) $(MINIGZIP) -9 < $< > $@
+$(installed_notice_html_or_xml_gz): $(target_notice_file_html_gz)
+	$(copy-file-to-target)
+else
+target_notice_file_xml := $(TARGET_OUT_INTERMEDIATES)/NOTICE.xml
+target_notice_file_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE.xml.gz
 installed_notice_html_or_xml_gz := $(TARGET_OUT)/etc/NOTICE.xml.gz
 
 target_vendor_notice_file_txt := $(TARGET_OUT_INTERMEDIATES)/NOTICE_VENDOR.txt
@@ -1017,49 +1033,72 @@
 target_product_services_notice_file_xml := $(TARGET_OUT_INTERMEDIATES)/NOTICE_PRODUCT_SERVICES.xml
 target_product_services_notice_file_xml_gz := $(TARGET_OUT_INTERMEDIATES)/NOTICE_PRODUCT_SERVICES.xml.gz
 installed_product_services_notice_xml_gz := $(TARGET_OUT_PRODUCT_SERVICES)/etc/NOTICE.xml.gz
-endif
 
-ifndef TARGET_BUILD_APPS
-kernel_notice_file := $(TARGET_OUT_NOTICE_FILES)/src/kernel.txt
-winpthreads_notice_file := $(TARGET_OUT_NOTICE_FILES)/src/winpthreads.txt
-pdk_fusion_notice_files := $(filter $(TARGET_OUT_NOTICE_FILES)/%, $(ALL_PDK_FUSION_FILES))
+# Notice files are copied to TARGET_OUT_NOTICE_FILES as a side-effect of their module
+# being built. A notice xml file must depend on all modules that could potentially
+# install a license file relevant to it.
+license_modules := $(ALL_DEFAULT_INSTALLED_MODULES) $(kernel_notice_file) $(pdk_fusion_notice_files)
+# Phonys/fakes don't have notice files (though their deps might)
+license_modules := $(filter-out $(TARGET_OUT_FAKE)/%,$(license_modules))
+license_modules_vendor := $(filter $(TARGET_OUT_VENDOR)/%,$(license_modules))
+license_modules_product := $(filter $(TARGET_OUT_PRODUCT)/%,$(license_modules))
+license_modules_product_services := $(filter $(TARGET_OUT_PRODUCT_SERVICES)/%,$(license_modules))
+license_modules_agg := $(license_modules_vendor) $(license_modules_product) $(license_modules_product_services)
+license_modules_rest := $(filter-out $(license_modules_agg),$(license_modules))
 
-ifdef target_vendor_notice_file_xml_gz
 $(eval $(call combine-notice-files, xml_excluded_extra_partitions, \
 			$(target_notice_file_txt), \
-			$(target_notice_file_html_or_xml), \
+			$(target_notice_file_xml), \
 			"Notices for files contained in the filesystem images in this directory:", \
 			$(TARGET_OUT_NOTICE_FILES), \
-			$(ALL_DEFAULT_INSTALLED_MODULES) $(kernel_notice_file) $(pdk_fusion_notice_files)))
+			$(license_modules_rest)))
 $(eval $(call combine-notice-files, xml_vendor, \
 			$(target_vendor_notice_file_txt), \
 			$(target_vendor_notice_file_xml), \
 			"Notices for files contained in the vendor filesystem image in this directory:", \
 			$(TARGET_OUT_NOTICE_FILES), \
-			$(target_notice_file_html_or_xml)))
-ifdef target_product_notice_file_txt
+			$(license_modules_vendor)))
 $(eval $(call combine-notice-files, xml_product, \
 			$(target_product_notice_file_txt), \
 			$(target_product_notice_file_xml), \
 			"Notices for files contained in the product filesystem image in this directory:", \
 			$(TARGET_OUT_NOTICE_FILES), \
-			$(target_notice_file_html_or_xml)))
-endif
-ifdef target_product_services_notice_file_txt
+			$(license_modules_product)))
 $(eval $(call combine-notice-files, xml_product_services, \
 			$(target_product_services_notice_file_txt), \
 			$(target_product_services_notice_file_xml), \
 			"Notices for files contained in the product_services filesystem image in this directory:", \
 			$(TARGET_OUT_NOTICE_FILES), \
-			$(target_notice_file_html_or_xml)))
+			$(license_modules_product_services)))
+
+$(target_notice_file_xml_gz): $(target_notice_file_xml) | $(MINIGZIP)
+	$(hide) $(MINIGZIP) -9 < $< > $@
+$(target_vendor_notice_file_xml_gz): $(target_vendor_notice_file_xml) | $(MINIGZIP)
+	$(hide) $(MINIGZIP) -9 < $< > $@
+$(target_product_notice_file_xml_gz): $(target_product_notice_file_xml) | $(MINIGZIP)
+	$(hide) $(MINIGZIP) -9 < $< > $@
+$(target_product_services_notice_file_xml_gz): $(target_product_services_notice_file_xml) | $(MINIGZIP)
+	$(hide) $(MINIGZIP) -9 < $< > $@
+$(installed_notice_html_or_xml_gz): $(target_notice_file_xml_gz)
+	$(copy-file-to-target)
+$(installed_vendor_notice_xml_gz): $(target_vendor_notice_file_xml_gz)
+	$(copy-file-to-target)
+$(installed_product_notice_xml_gz): $(target_product_notice_file_xml_gz)
+	$(copy-file-to-target)
+$(installed_product_services_notice_xml_gz): $(target_product_services_notice_file_xml_gz)
+	$(copy-file-to-target)
+
+# if we've been run by mm, mmm, etc., don't reinstall this every time
+ifeq ($(ONE_SHOT_MAKEFILE),)
+  ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
+  ALL_DEFAULT_INSTALLED_MODULES += $(installed_vendor_notice_xml_gz)
+  ALL_DEFAULT_INSTALLED_MODULES += $(installed_product_notice_xml_gz)
+  ALL_DEFAULT_INSTALLED_MODULES += $(installed_product_services_notice_xml_gz)
 endif
-else
-$(eval $(call combine-notice-files, html, \
-			$(target_notice_file_txt), \
-			$(target_notice_file_html_or_xml), \
-			"Notices for files contained in the filesystem images in this directory:", \
-			$(TARGET_OUT_NOTICE_FILES), \
-			$(ALL_DEFAULT_INSTALLED_MODULES) $(kernel_notice_file) $(pdk_fusion_notice_files)))
+endif # PRODUCT_NOTICE_SPLIT
+
+ifeq ($(ONE_SHOT_MAKEFILE),)
+  ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
 endif
 
 $(eval $(call combine-notice-files, html, \
@@ -1070,53 +1109,6 @@
 			$(ALL_DEFAULT_INSTALLED_MODULES) \
 			$(winpthreads_notice_file)))
 
-# Install the html file at /system/etc/NOTICE.html.gz.
-# This is not ideal, but this is very late in the game, after a lot of
-# the module processing has already been done -- in fact, we used the
-# fact that all that has been done to get the list of modules that we
-# need notice files for.
-$(target_notice_file_html_or_xml_gz): $(target_notice_file_html_or_xml) | $(MINIGZIP)
-	$(hide) $(MINIGZIP) -9 < $< > $@
-$(installed_notice_html_or_xml_gz): $(target_notice_file_html_or_xml_gz)
-	$(copy-file-to-target)
-
-ifdef target_vendor_notice_file_xml_gz
-# Install the vendor html file at /vendor/etc/NOTICE.xml.gz.
-$(target_vendor_notice_file_xml_gz): $(target_vendor_notice_file_xml) | $(MINIGZIP)
-	$(hide) $(MINIGZIP) -9 < $< > $@
-$(installed_vendor_notice_xml_gz): $(target_vendor_notice_file_xml_gz)
-	$(copy-file-to-target)
-endif
-
-ifdef target_product_notice_file_xml_gz
-# Install the product html file at /product/etc/NOTICE.xml.gz.
-$(target_product_notice_file_xml_gz): $(target_product_notice_file_xml) | $(MINIGZIP)
-	$(hide) $(MINIGZIP) -9 < $< > $@
-$(installed_product_notice_xml_gz): $(target_product_notice_file_xml_gz)
-	$(copy-file-to-target)
-endif
-
-ifdef target_product_services_notice_file_xml_gz
-# Install the product html file at /product_services/etc/NOTICE.xml.gz.
-$(target_product_services_notice_file_xml_gz): $(target_product_services_notice_file_xml) | $(MINIGZIP)
-	$(hide) $(MINIGZIP) -9 < $< > $@
-$(installed_product_services_notice_xml_gz): $(target_product_services_notice_file_xml_gz)
-	$(copy-file-to-target)
-endif
-
-# if we've been run my mm, mmm, etc, don't reinstall this every time
-ifeq ($(ONE_SHOT_MAKEFILE),)
-  ALL_DEFAULT_INSTALLED_MODULES += $(installed_notice_html_or_xml_gz)
-  ifdef target_vendor_notice_file_xml_gz
-    ALL_DEFAULT_INSTALLED_MODULES += $(installed_vendor_notice_xml_gz)
-  endif
-  ifdef target_product_notice_file_xml_gz
-    ALL_DEFAULT_INSTALLED_MODULES += $(installed_product_notice_xml_gz)
-  endif
-  ifdef target_product_services_notice_file_xml_gz
-    ALL_DEFAULT_INSTALLED_MODULES += $(installed_product_services_notice_xml_gz)
-  endif
-endif
 endif  # TARGET_BUILD_APPS
 
 # The kernel isn't really a module, so to get its module file in there, we
@@ -1241,7 +1233,7 @@
 INTERNAL_USERIMAGES_DEPS += $(MKE2FS_CONF)
 endif
 
-ifeq (true,$(PRODUCT_USE_LOGICAL_PARTITIONS))
+ifeq (true,$(PRODUCT_USE_DYNAMIC_PARTITIONS))
 
 ifeq ($(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_SUPPORTS_VERITY),true)
   $(error vboot 1.0 doesn't support logical partition)
@@ -1250,7 +1242,7 @@
 # TODO(b/80195851): Should not define BOARD_AVB_SYSTEM_KEY_PATH without
 # BOARD_AVB_SYSTEM_DETACHED_VBMETA.
 
-endif # PRODUCT_USE_LOGICAL_PARTITIONS
+endif # PRODUCT_USE_DYNAMIC_PARTITIONS
 
 # $(1): the path of the output dictionary file
 # $(2): a subset of "system vendor cache userdata product product_services oem odm"
@@ -1784,12 +1776,12 @@
     $(filter-out $(foreach p,$(overridden_packages),$(p) %/$(p).apk), \
         $(ALL_PDK_FUSION_FILES))
 
-INTERNAL_SYSTEMIMAGE_FILES := $(filter $(TARGET_OUT)/%, \
+INTERNAL_SYSTEMIMAGE_FILES := $(sort $(filter $(TARGET_OUT)/%, \
     $(ALL_GENERATED_SOURCES) \
     $(ALL_DEFAULT_INSTALLED_MODULES) \
     $(PDK_FUSION_SYSIMG_FILES) \
     $(RECOVERY_RESOURCE_ZIP)) \
-    $(PDK_FUSION_SYMLINK_STAMP)
+    $(PDK_FUSION_SYMLINK_STAMP))
 
 FULL_SYSTEMIMAGE_DEPS := $(INTERNAL_SYSTEMIMAGE_FILES) $(INTERNAL_USERIMAGES_DEPS)
 
@@ -1988,8 +1980,8 @@
 endif
 endif
 
-.PHONY: sync
-sync: $(INTERNAL_SYSTEMIMAGE_FILES)
+.PHONY: sync syncsys
+sync syncsys: $(INTERNAL_SYSTEMIMAGE_FILES)
 
 #######
 ## system tarball
@@ -2919,27 +2911,20 @@
 # BOARD_SUPER_PARTITION_SIZE must be defined to build super image.
 ifneq ($(BOARD_SUPER_PARTITION_SIZE),)
 
-INSTALLED_SUPERIMAGE_TARGET := $(PRODUCT_OUT)/super.img
-INSTALLED_SUPERIMAGE_EMPTY_TARGET := $(PRODUCT_OUT)/super_empty.img
-
-$(INSTALLED_SUPERIMAGE_TARGET): $(call images-for-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))
-
-# For A/B devices, super partition always contains sub-partitions in the _a slot, because this
-# image should only be used for bootstrapping / initializing the device. When flashing the image,
-# bootloader fastboot should always mark _a slot as bootable.
-ifeq ($(AB_OTA_UPDATER),true)
-$(INSTALLED_SUPERIMAGE_TARGET) $(INSTALLED_SUPERIMAGE_EMPTY_TARGET): PRIVATE_PARTITION_SUFFIX=_a
-endif # AB_OTA_UPDATER
-
-$(INSTALLED_SUPERIMAGE_TARGET) $(INSTALLED_SUPERIMAGE_EMPTY_TARGET): $(LPMAKE)
+define super-slot-suffix
+$(if $(filter true,$(AB_OTA_UPDATER)),$(if $(filter true,$(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS)),,_a))
+endef
 
 # $(1): slot A suffix (_a or empty)
 # $(2): include images or not (true or empty)
 define build-superimage-target-args
   $(if $(2), --sparse) \
   --metadata-size 65536 \
-  --metadata-slots $(if $(1),2,1) \
-  --device-size $(BOARD_SUPER_PARTITION_SIZE) \
+  --metadata-slots $(if $(filter true,$(AB_OTA_UPDATER)),2,1) \
+  --super-name $(BOARD_SUPER_PARTITION_METADATA_DEVICE) \
+  $(if $(filter true,$(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS)), $(if $(filter true,$(AB_OTA_UPDATER)), --auto-slot-suffixing)) \
+  $(foreach device,$(BOARD_SUPER_PARTITION_BLOCK_DEVICES), \
+    --device $(device):$(BOARD_SUPER_PARTITION_$(call to-upper,$(device))_DEVICE_SIZE)) \
   $(foreach group,$(BOARD_SUPER_PARTITION_GROUPS), \
     --group $(group)$(1):$(BOARD_$(call to-upper,$(group))_SIZE) \
     $(if $(1), --group $(group)_b:$(BOARD_$(call to-upper,$(group))_SIZE)) \
@@ -2959,15 +2944,23 @@
     --output $(1)
 endef
 
-$(INSTALLED_SUPERIMAGE_TARGET):
+# For A/B devices, the super partition always contains sub-partitions in the _a slot, because this
+# image should only be used for bootstrapping / initializing the device. When flashing the image,
+# the bootloader (fastboot) should always mark the _a slot as bootable.
+
+ifneq (true,$(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS))
+INSTALLED_SUPERIMAGE_TARGET := $(PRODUCT_OUT)/super.img
+$(INSTALLED_SUPERIMAGE_TARGET): $(LPMAKE) $(call images-for-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))
 	$(call pretty,"Target super fs image: $@")
-	$(call build-superimage-target,$@,$(PRIVATE_PARTITION_SUFFIX),true)
+	$(call build-superimage-target,$@,$(call super-slot-suffix),true)
+endif
 
 $(call dist-for-goals,dist_files,$(INSTALLED_SUPERIMAGE_TARGET))
 
-$(INSTALLED_SUPERIMAGE_EMPTY_TARGET):
+INSTALLED_SUPERIMAGE_EMPTY_TARGET := $(PRODUCT_OUT)/super_empty.img
+$(INSTALLED_SUPERIMAGE_EMPTY_TARGET): $(LPMAKE)
 	$(call pretty,"Target empty super fs image: $@")
-	$(call build-superimage-target,$@,$(PRIVATE_PARTITION_SUFFIX))
+	$(call build-superimage-target,$@,$(call super-slot-suffix))
 
 $(call dist-for-goals,dist_files,$(INSTALLED_SUPERIMAGE_EMPTY_TARGET))
 
@@ -2989,6 +2982,25 @@
 # Add image dependencies so that generated_*_image_info.txt are written before checking.
 check-all-partition-sizes: $(call images-for-partitions,$(BOARD_SUPER_PARTITION_PARTITION_LIST))
 
+ifeq ($(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS),true)
+# Check that the sum of the super partition block device sizes equals BOARD_SUPER_PARTITION_SIZE (divided by 2 for A/B).
+# Non-retrofit devices already define BOARD_SUPER_PARTITION_SUPER_DEVICE_SIZE = BOARD_SUPER_PARTITION_SIZE.
+define check-super-partition-size
+  size_list="$(foreach device,$(call to-upper,$(BOARD_SUPER_PARTITION_BLOCK_DEVICES)),$(BOARD_SUPER_PARTITION_$(device)_DEVICE_SIZE))"; \
+  sum_sizes_expr=$$(sed -e 's/ /+/g' <<< "$${size_list}"); \
+  max_size_tail=$(if $(filter true,$(AB_OTA_UPDATER))," / 2"); \
+  max_size_expr="$(BOARD_SUPER_PARTITION_SIZE)$${max_size_tail}"; \
+  if [ $$(( $${sum_sizes_expr} )) -ne $$(( $${max_size_expr} )) ]; then \
+    echo "The sum of super partition block device sizes is not equal to BOARD_SUPER_PARTITION_SIZE$${max_size_tail}:"; \
+    echo $${sum_sizes_expr} '!=' $${max_size_expr}; \
+    exit 1; \
+  else \
+    echo "The sum of super partition block device sizes is equal to BOARD_SUPER_PARTITION_SIZE$${max_size_tail}:"; \
+    echo $${sum_sizes_expr} '==' $${max_size_expr}; \
+  fi
+endef
+endif
+
 # $(1): human-readable max size string
 # $(2): max size expression
 # $(3): list of partition names
@@ -3035,6 +3047,7 @@
 
 check-all-partition-sizes check-all-partition-sizes-nodeps:
 	$(call check-all-partition-sizes-target)
+	$(call check-super-partition-size)
 
 endif # PRODUCT_BUILD_SUPER_PARTITION
 
@@ -3658,7 +3671,7 @@
 	$(hide) echo "super_size=$(BOARD_SUPER_PARTITION_SIZE)" >> $(zip_root)/META/misc_info.txt
 	$(hide) echo "lpmake=$(notdir $(LPMAKE))" >> $(zip_root)/META/misc_info.txt
 	$(hide) echo -n "lpmake_args=" >> $(zip_root)/META/misc_info.txt
-	$(hide) echo $(call build-superimage-target-args,$(if $(filter true,$(AB_OTA_UPDATER)),_a,)) \
+	$(hide) echo $(call build-superimage-target-args,$(call super-slot-suffix)) \
 	    >> $(zip_root)/META/misc_info.txt
 endif
 ifneq ($(BOARD_SUPER_PARTITION_GROUPS),)
diff --git a/core/aux_toolchain.mk b/core/aux_toolchain.mk
index de0b139..c710228 100644
--- a/core/aux_toolchain.mk
+++ b/core/aux_toolchain.mk
@@ -50,4 +50,3 @@
 LOCAL_SYSTEM_SHARED_LIBRARIES :=
 LOCAL_CXX_STL := none
 LOCAL_NO_PIC := true
-LOCAL_NO_LIBCOMPILER_RT := true
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 57fd818..9c5c69d 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -508,8 +508,9 @@
       $(eval _src_base := $(call word-colon,1,$(td))), \
       $(eval _src_base := $(LOCAL_PATH)) \
         $(eval _file := $(call word-colon,1,$(td)))) \
-    $(if $(findstring ..,$(_file)),$(error $(LOCAL_MODULE_MAKEFILE): LOCAL_TEST_DATA may not include '..': $(_file))) \
-    $(if $(filter /%,$(_src_base) $(_file)),$(error $(LOCAL_MODULE_MAKEFILE): LOCAL_TEST_DATA may not include absolute paths: $(_src_base) $(_file))) \
+    $(if $(call streq,$(LOCAL_MODULE_MAKEFILE),$(SOONG_ANDROID_MK)),, \
+      $(if $(findstring ..,$(_file)),$(error $(LOCAL_MODULE_MAKEFILE): LOCAL_TEST_DATA may not include '..': $(_file))) \
+      $(if $(filter /%,$(_src_base) $(_file)),$(error $(LOCAL_MODULE_MAKEFILE): LOCAL_TEST_DATA may not include absolute paths: $(_src_base) $(_file)))) \
     $(eval my_test_data_file_pairs := $(my_test_data_file_pairs) $(call append-path,$(_src_base),$(_file)):$(_file)) \
     $(call append-path,$(_src_base),$(_file)):$(call append-path,$(my_module_path),$(_file))))
 
diff --git a/core/binary.mk b/core/binary.mk
index 07fb48a..d7112b6 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -396,13 +396,6 @@
 
 include $(BUILD_SYSTEM)/config_sanitizers.mk
 
-ifneq ($(LOCAL_NO_LIBCOMPILER_RT),true)
-# Add in libcompiler_rt for all regular device builds
-ifeq (,$(WITHOUT_LIBCOMPILER_RT))
-  my_static_libraries += $(COMPILER_RT_CONFIG_EXTRA_STATIC_LIBRARIES)
-endif
-endif
-
 # Statically link libwinpthread when cross compiling win32.
 ifeq ($($(my_prefix)OS),windows)
   my_static_libraries += libwinpthread
@@ -1539,7 +1532,7 @@
 ifeq ($(ONE_SHOT_MAKEFILE),)
 installed_static_library_notice_file_targets := \
     $(foreach lib,$(my_static_libraries) $(my_whole_static_libraries), \
-      NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST,TARGET)-STATIC_LIBRARIES-$(lib))
+      NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST$(if $(my_host_cross),_CROSS,),TARGET)-STATIC_LIBRARIES-$(lib))
 else
 installed_static_library_notice_file_targets :=
 endif
diff --git a/core/clang/HOST_CROSS_x86.mk b/core/clang/HOST_CROSS_x86.mk
index ffd7811..7581353 100644
--- a/core/clang/HOST_CROSS_x86.mk
+++ b/core/clang/HOST_CROSS_x86.mk
@@ -1 +1,2 @@
 $(clang_2nd_arch_prefix)HOST_CROSS_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-i386.a
+$(clang_2nd_arch_prefix)HOST_CROSS_LIBCRT_BUILTINS := $(LLVM_RTLIB_PATH)/libclang_rt.builtins-i386.a
diff --git a/core/clang/HOST_CROSS_x86_64.mk b/core/clang/HOST_CROSS_x86_64.mk
index f921a1c..9a971c7 100644
--- a/core/clang/HOST_CROSS_x86_64.mk
+++ b/core/clang/HOST_CROSS_x86_64.mk
@@ -1 +1,2 @@
 $(clang_2nd_arch_prefix)HOST_CROSS_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-x86_64.a
+$(clang_2nd_arch_prefix)HOST_CROSS_LIBCRT_BUILTINS := $(LLVM_RTLIB_PATH)/libclang_rt.builtins-x86_64.a
diff --git a/core/clang/HOST_x86.mk b/core/clang/HOST_x86.mk
index 2803517..2e0865b 100644
--- a/core/clang/HOST_x86.mk
+++ b/core/clang/HOST_x86.mk
@@ -1 +1,2 @@
 $(clang_2nd_arch_prefix)HOST_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-i386.a
+$(clang_2nd_arch_prefix)HOST_LIBCRT_BUILTINS := $(LLVM_RTLIB_PATH)/libclang_rt.builtins-i386.a
diff --git a/core/clang/HOST_x86_64.mk b/core/clang/HOST_x86_64.mk
index 4fdffd8..3fd0541 100644
--- a/core/clang/HOST_x86_64.mk
+++ b/core/clang/HOST_x86_64.mk
@@ -1 +1,2 @@
 HOST_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-x86_64.a
+HOST_LIBCRT_BUILTINS := $(LLVM_RTLIB_PATH)/libclang_rt.builtins-x86_64.a
diff --git a/core/clang/TARGET_arm.mk b/core/clang/TARGET_arm.mk
index 9c1a836..6140d7c 100644
--- a/core/clang/TARGET_arm.mk
+++ b/core/clang/TARGET_arm.mk
@@ -3,6 +3,7 @@
 $(clang_2nd_arch_prefix)RS_COMPAT_TRIPLE := armv7-none-linux-gnueabi
 
 $(clang_2nd_arch_prefix)TARGET_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-arm-android.a
+$(clang_2nd_arch_prefix)TARGET_LIBCRT_BUILTINS := $(LLVM_RTLIB_PATH)/libclang_rt.builtins-arm-android.a
 
 # Address sanitizer clang config
 $(clang_2nd_arch_prefix)ADDRESS_SANITIZER_LINKER := /system/bin/linker_asan
diff --git a/core/clang/TARGET_arm64.mk b/core/clang/TARGET_arm64.mk
index 9a67b6b..9fe5530 100644
--- a/core/clang/TARGET_arm64.mk
+++ b/core/clang/TARGET_arm64.mk
@@ -3,6 +3,7 @@
 RS_COMPAT_TRIPLE := aarch64-linux-android
 
 TARGET_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-aarch64-android.a
+TARGET_LIBCRT_BUILTINS := $(LLVM_RTLIB_PATH)/libclang_rt.builtins-aarch64-android.a
 
 # Address sanitizer clang config
 ADDRESS_SANITIZER_LINKER := /system/bin/linker_asan64
diff --git a/core/clang/TARGET_x86.mk b/core/clang/TARGET_x86.mk
index 1b9c78c..5e2d57e 100644
--- a/core/clang/TARGET_x86.mk
+++ b/core/clang/TARGET_x86.mk
@@ -3,6 +3,7 @@
 $(clang_2nd_arch_prefix)RS_COMPAT_TRIPLE := i686-linux-android
 
 $(clang_2nd_arch_prefix)TARGET_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-i686-android.a
+$(clang_2nd_arch_prefix)TARGET_LIBCRT_BUILTINS := $(LLVM_RTLIB_PATH)/libclang_rt.builtins-i686-android.a
 
 # Address sanitizer clang config
 $(clang_2nd_arch_prefix)ADDRESS_SANITIZER_LINKER := /system/bin/linker_asan
diff --git a/core/clang/TARGET_x86_64.mk b/core/clang/TARGET_x86_64.mk
index 3161f84..86b3798 100644
--- a/core/clang/TARGET_x86_64.mk
+++ b/core/clang/TARGET_x86_64.mk
@@ -3,6 +3,7 @@
 RS_COMPAT_TRIPLE := x86_64-linux-android
 
 TARGET_LIBPROFILE_RT := $(LLVM_RTLIB_PATH)/libclang_rt.profile-x86_64-android.a
+TARGET_LIBCRT_BUILTINS := $(LLVM_RTLIB_PATH)/libclang_rt.builtins-x86_64-android.a
 
 # Address sanitizer clang config
 ADDRESS_SANITIZER_LINKER := /system/bin/linker_asan64
diff --git a/core/clang/config.mk b/core/clang/config.mk
index 98e01da..ca3a1fa 100644
--- a/core/clang/config.mk
+++ b/core/clang/config.mk
@@ -1,9 +1,6 @@
 ## Clang configurations.
 
-LLVM_RTLIB_PATH := $(LLVM_PREBUILTS_PATH)/../lib64/clang/$(LLVM_RELEASE_VERSION)/lib/linux/
-
-CLANG_TBLGEN := $(BUILD_OUT_EXECUTABLES)/clang-tblgen$(BUILD_EXECUTABLE_SUFFIX)
-LLVM_TBLGEN := $(BUILD_OUT_EXECUTABLES)/llvm-tblgen$(BUILD_EXECUTABLE_SUFFIX)
+LLVM_RTLIB_PATH := $(LLVM_PREBUILTS_BASE)/linux-x86/$(LLVM_PREBUILTS_VERSION)/lib64/clang/$(LLVM_RELEASE_VERSION)/lib/linux/
 
 define convert-to-clang-flags
 $(strip $(filter-out $(CLANG_CONFIG_UNKNOWN_CFLAGS),$(1)))
@@ -64,8 +61,4 @@
 include $(BUILD_SYSTEM)/clang/TARGET_$(TARGET_2ND_ARCH).mk
 endif
 
-# This allows us to use the superset of functionality that compiler-rt
-# provides to Clang (for supporting features like -ftrapv).
-COMPILER_RT_CONFIG_EXTRA_STATIC_LIBRARIES := libcompiler_rt-extras
-
 include $(BUILD_SYSTEM)/clang/tidy.mk
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index 7f8e9c3..64caa18 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -184,8 +184,8 @@
 LOCAL_NO_CRT:=
 LOCAL_NO_DEFAULT_COMPILER_FLAGS:=
 LOCAL_NO_FPIE :=
-LOCAL_NO_LIBCOMPILER_RT:=
 LOCAL_NO_LIBGCC:=
+LOCAL_NO_LIBCRT_BUILTINS:=
 LOCAL_NO_NOTICE_FILE:=
 LOCAL_NO_PIC:=
 LOCAL_NOSANITIZE:=
@@ -254,6 +254,8 @@
 LOCAL_SDK_VERSION:=
 LOCAL_SHARED_ANDROID_LIBRARIES:=
 LOCAL_SHARED_LIBRARIES:=
+LOCAL_SOONG_AAR :=
+LOCAL_SOONG_BUNDLE :=
 LOCAL_SOONG_CLASSES_JAR :=
 LOCAL_SOONG_DEX_JAR :=
 LOCAL_SOONG_EXPORT_PROGUARD_FLAGS :=
diff --git a/core/combo/TARGET_linux-arm.mk b/core/combo/TARGET_linux-arm.mk
index ffb6021..4503062 100644
--- a/core/combo/TARGET_linux-arm.mk
+++ b/core/combo/TARGET_linux-arm.mk
@@ -36,9 +36,21 @@
 KNOWN_ARMv8_CORES := cortex-a53 cortex-a53.a57 cortex-a55 cortex-a73 cortex-a75 cortex-a76
 KNOWN_ARMv8_CORES += kryo denver64 exynos-m1 exynos-m2
 
+KNOWN_ARMv82a_CORES := cortex-a55 cortex-a75
+
+# Check for cores that implement the armv8-2a ISA.
+ifneq (,$(filter $(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT), $(KNOWN_ARMv82a_CORES)))
+  ifneq ($(TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT),armv8-2a)
+    $(warning $(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT) is armv8-2a.)
+    ifneq (,$(TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT))
+      $(warning TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT, $(TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT), ignored! Use armv8-2a instead.)
+    endif
+    # Overwrite TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT
+    TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT := armv8-2a
+  endif
 # Many devices (incorrectly) use armv7-a-neon as the 2nd architecture variant
 # for cores that implement armv8-a ISAs. The following sets it to armv8-a.
-ifneq (,$(filter $(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT), $(KNOWN_ARMv8_CORES)))
+else ifneq (,$(filter $(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT), $(KNOWN_ARMv8_CORES)))
   ifneq ($(TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT),armv8-a)
     $(warning $(TARGET_$(combo_2nd_arch_prefix)CPU_VARIANT) is armv8-a.)
     ifneq (,$(TARGET_$(combo_2nd_arch_prefix)ARCH_VARIANT))
diff --git a/core/combo/arch/arm/armv8-2a.mk b/core/combo/arch/arm/armv8-2a.mk
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/core/combo/arch/arm/armv8-2a.mk
diff --git a/core/config.mk b/core/config.mk
index 3289a89..9eced54 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -204,6 +204,11 @@
 JAVA_TMPDIR_ARG :=
 endif
 
+# A list of the jars that provide information about usages of the hidden API.
+# The core-oj-hiddenapi jar provides information for the core-oj jar.
+HIDDENAPI_EXTRA_APP_USAGE_JARS := \
+    core-oj-hiddenapi \
+
 # Default to remove the org.apache.http.legacy from bootclasspath
 ifeq ($(REMOVE_OAHL_FROM_BCP),)
 REMOVE_OAHL_FROM_BCP := true
@@ -527,8 +532,8 @@
 ifdef PDK_FUSION_PLATFORM_ZIP
 TARGET_BUILD_PDK := true
 ifeq (,$(wildcard $(PDK_FUSION_PLATFORM_ZIP)))
-  ifneq (,$(wildcard $(dir $(PDK_FUSION_PLATFORM_ZIP))/pdk.mk))
-    PDK_FUSION_PLATFORM_DIR := $(dir $(PDK_FUSION_PLATFORM_ZIP))
+  ifneq (,$(wildcard $(patsubst %.zip,%,$(PDK_FUSION_PLATFORM_ZIP))/pdk.mk))
+    PDK_FUSION_PLATFORM_DIR := $(patsubst %.zip,%,$(PDK_FUSION_PLATFORM_ZIP))
     PDK_FUSION_PLATFORM_ZIP :=
   else
     $(error Cannot find file $(PDK_FUSION_PLATFORM_ZIP).)
@@ -932,13 +937,26 @@
 endif
 .KATI_READONLY := PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS
 
-ifeq ($(PRODUCT_USE_LOGICAL_PARTITIONS),true)
+ifeq ($(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS),true)
+  ifneq ($(PRODUCT_USE_DYNAMIC_PARTITIONS),true)
+    $(error PRODUCT_USE_DYNAMIC_PARTITIONS must be true when PRODUCT_RETROFIT_DYNAMIC_PARTITIONS \
+        is set)
+  endif
+  ifdef PRODUCT_SHIPPING_API_LEVEL
+    ifeq (true,$(call math_gt_or_eq,$(PRODUCT_SHIPPING_API_LEVEL),29))
+      $(error Devices with shipping API level $(PRODUCT_SHIPPING_API_LEVEL) must not set \
+          PRODUCT_RETROFIT_DYNAMIC_PARTITIONS)
+    endif
+  endif
+endif
+
+ifeq ($(PRODUCT_USE_DYNAMIC_PARTITIONS),true)
     requirements := \
         PRODUCT_USE_DYNAMIC_PARTITION_SIZE \
         PRODUCT_BUILD_SUPER_PARTITION \
 
     $(foreach req,$(requirements),$(if $(filter false,$($(req))),\
-        $(error PRODUCT_USE_LOGICAL_PARTITIONS requires $(req) to be true)))
+        $(error PRODUCT_USE_DYNAMIC_PARTITIONS requires $(req) to be true)))
 
     requirements :=
 
@@ -1021,6 +1039,48 @@
         $(BOARD_$(group)_PARTITION_LIST))
 .KATI_READONLY := BOARD_SUPER_PARTITION_PARTITION_LIST
 
+ifdef BOARD_SUPER_PARTITION_SIZE
+ifeq ($(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS),true)
+
+# The metadata device must be specified manually for retrofitting.
+ifndef BOARD_SUPER_PARTITION_METADATA_DEVICE
+$(error Must specify BOARD_SUPER_PARTITION_METADATA_DEVICE when PRODUCT_RETROFIT_DYNAMIC_PARTITIONS is true.)
+endif
+
+# The metadata device must be included in the super partition block device list.
+ifeq (,$(filter $(BOARD_SUPER_PARTITION_METADATA_DEVICE),$(BOARD_SUPER_PARTITION_BLOCK_DEVICES)))
+$(error BOARD_SUPER_PARTITION_METADATA_DEVICE is not listed in BOARD_SUPER_PARTITION_BLOCK_DEVICES.)
+endif
+
+# The metadata device must be supplied to init via the kernel command-line.
+BOARD_KERNEL_CMDLINE += androidboot.super_partition=$(BOARD_SUPER_PARTITION_METADATA_DEVICE)
+
+else # PRODUCT_RETROFIT_DYNAMIC_PARTITIONS
+
+# These should not be specified on devices launching with dynamic partition support.
+ifdef BOARD_SUPER_PARTITION_BLOCK_DEVICES
+$(error BOARD_SUPER_PARTITION_BLOCK_DEVICES can only be used if PRODUCT_RETROFIT_DYNAMIC_PARTITIONS is true.)
+endif
+ifdef BOARD_SUPER_PARTITION_METADATA_DEVICE
+$(error BOARD_SUPER_PARTITION_METADATA_DEVICE can only be used if PRODUCT_RETROFIT_DYNAMIC_PARTITIONS is true.)
+endif
+
+# For non-retrofit devices, populate BOARD_SUPER_PARTITION_BLOCK_DEVICES (and the
+# metadata device and size) with "super" so the build handles both cases consistently.
+BOARD_SUPER_PARTITION_BLOCK_DEVICES := super
+BOARD_SUPER_PARTITION_METADATA_DEVICE := super
+BOARD_SUPER_PARTITION_SUPER_DEVICE_SIZE := $(BOARD_SUPER_PARTITION_SIZE)
+
+endif # PRODUCT_RETROFIT_DYNAMIC_PARTITIONS
+endif # BOARD_SUPER_PARTITION_SIZE
+.KATI_READONLY := BOARD_SUPER_PARTITION_BLOCK_DEVICES
+.KATI_READONLY := BOARD_SUPER_PARTITION_METADATA_DEVICE
+
+$(foreach device,$(call to-upper,$(BOARD_SUPER_PARTITION_BLOCK_DEVICES)), \
+    $(if $(BOARD_SUPER_PARTITION_$(device)_DEVICE_SIZE),, \
+        $(error BOARD_SUPER_PARTITION_$(device)_DEVICE_SIZE must not be empty)) \
+    $(eval .KATI_READONLY := BOARD_SUPER_PARTITION_$(device)_DEVICE_SIZE))
+
 endif # PRODUCT_BUILD_SUPER_PARTITION
 
 # ###############################################################
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index be1b124..fcf527e 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -180,7 +180,7 @@
 
 ifneq ($(filter hwaddress,$(my_sanitize)),)
   my_shared_libraries += $($(LOCAL_2ND_ARCH_VAR_PREFIX)HWADDRESS_SANITIZER_RUNTIME_LIBRARY)
-  ifeq ($(LOCAL_MODULE_CLASS),EXECUTABLES)
+  ifneq ($(filter EXECUTABLES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)),)
     ifeq ($(LOCAL_FORCE_STATIC_EXECUTABLE),true)
       my_static_libraries := $(my_static_libraries) $($(LOCAL_2ND_ARCH_VAR_PREFIX)HWADDRESS_SANITIZER_STATIC_LIBRARY)
     endif
diff --git a/core/definitions.mk b/core/definitions.mk
index 362680e..0d4b6c4 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -797,13 +797,13 @@
 # $(1): path (and optionally line) information
 # $(2): message to print
 define echo-warning
-echo -e "$(ESC_BOLD)$(1): $(ESC_WARNING)warning:$(ESC_RESET)$(ESC_BOLD)" $(2) "$(ESC_RESET)" >&2
+echo -e "$(ESC_BOLD)$(1): $(ESC_WARNING)warning:$(ESC_RESET)$(ESC_BOLD)" '$(subst ','\'',$(2))'  "$(ESC_RESET)" >&2
 endef
 
 # $(1): path (and optionally line) information
 # $(2): message to print
 define echo-error
-echo -e "$(ESC_BOLD)$(1): $(ESC_ERROR)error:$(ESC_RESET)$(ESC_BOLD)" $(2) "$(ESC_RESET)" >&2
+echo -e "$(ESC_BOLD)$(1): $(ESC_ERROR)error:$(ESC_RESET)$(ESC_BOLD)" '$(subst ','\'',$(2))'  "$(ESC_RESET)" >&2
 endef
 
 ###########################################################
@@ -1693,6 +1693,7 @@
 	$(PRIVATE_ALL_STATIC_LIBRARIES) \
 	$(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--end-group) \
 	$(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_TARGET_COVERAGE_LIB)) \
+	$(PRIVATE_TARGET_LIBCRT_BUILTINS) \
 	$(PRIVATE_TARGET_LIBATOMIC) \
 	$(PRIVATE_TARGET_LIBGCC) \
 	$(PRIVATE_TARGET_GLOBAL_LDFLAGS) \
@@ -1728,6 +1729,7 @@
 	$(PRIVATE_ALL_STATIC_LIBRARIES) \
 	$(if $(PRIVATE_GROUP_STATIC_LIBRARIES),-Wl$(comma)--end-group) \
 	$(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_TARGET_COVERAGE_LIB)) \
+	$(PRIVATE_TARGET_LIBCRT_BUILTINS) \
 	$(PRIVATE_TARGET_LIBATOMIC) \
 	$(PRIVATE_TARGET_LIBGCC) \
 	$(PRIVATE_TARGET_GLOBAL_LDFLAGS) \
@@ -1775,6 +1777,7 @@
 	$(if $(filter true,$(NATIVE_COVERAGE)),$(PRIVATE_TARGET_COVERAGE_LIB)) \
 	$(PRIVATE_TARGET_LIBATOMIC) \
 	$(filter %libcompiler_rt.a %libcompiler_rt.hwasan.a,$(PRIVATE_ALL_STATIC_LIBRARIES)) \
+	$(PRIVATE_TARGET_LIBCRT_BUILTINS) \
 	$(PRIVATE_TARGET_LIBGCC) \
 	-Wl,--end-group \
 	$(PRIVATE_TARGET_CRTEND_O)
@@ -3007,14 +3010,17 @@
 # 1. Copy the files to the many suite output directories.
 #    And for test config files, we'll check the .xml is well-formed before copy.
 # 2. Add all the files to each suite's dependent files list.
-# 3. Do the dependency addition to my_all_targets
+# 3. Do the dependency addition to my_all_targets.
+# 4. Save the module name to COMPATIBILITY.$(suite).MODULES for each suite.
 # Requires for each suite: use my_compat_dist_config_$(suite) to define the test config.
 #    and use my_compat_dist_$(suite) to define the others.
 define create-suite-dependencies
 $(foreach suite, $(LOCAL_COMPATIBILITY_SUITE), \
   $(eval COMPATIBILITY.$(suite).FILES := \
     $$(COMPATIBILITY.$(suite).FILES) $$(foreach f,$$(my_compat_dist_$(suite)),$$(call word-colon,2,$$(f))) \
-      $$(foreach f,$$(my_compat_dist_config_$(suite)),$$(call word-colon,2,$$(f))))) \
+      $$(foreach f,$$(my_compat_dist_config_$(suite)),$$(call word-colon,2,$$(f)))) \
+  $(eval COMPATIBILITY.$(suite).MODULES := \
+    $$(COMPATIBILITY.$(suite).MODULES) $$(my_register_name))) \
 $(eval $(my_all_targets) : $(call copy-many-files, \
   $(sort $(foreach suite,$(LOCAL_COMPATIBILITY_SUITE),$(my_compat_dist_$(suite))))) \
   $(call copy-many-xml-files-checked, \
diff --git a/core/executable_internal.mk b/core/executable_internal.mk
index 70b2ea8..c28c144 100644
--- a/core/executable_internal.mk
+++ b/core/executable_internal.mk
@@ -36,6 +36,11 @@
 endif
 
 # Define PRIVATE_ variables from global vars
+ifeq ($(LOCAL_NO_LIBCRT_BUILTINS),true)
+my_target_libcrt_builtins :=
+else
+my_target_libcrt_builtins := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)LIBCRT_BUILTINS)
+endif
 ifeq ($(LOCAL_NO_LIBGCC),true)
 my_target_libgcc :=
 else
@@ -60,6 +65,7 @@
 my_target_crtbegin_static_o := $(wildcard $(my_ndk_sysroot_lib)/crtbegin_static.o)
 my_target_crtend_o := $(wildcard $(my_ndk_sysroot_lib)/crtend_android.o)
 endif
+$(linked_module): PRIVATE_TARGET_LIBCRT_BUILTINS := $(my_target_libcrt_builtins)
 $(linked_module): PRIVATE_TARGET_LIBGCC := $(my_target_libgcc)
 $(linked_module): PRIVATE_TARGET_LIBATOMIC := $(my_target_libatomic)
 $(linked_module): PRIVATE_TARGET_CRTBEGIN_DYNAMIC_O := $(my_target_crtbegin_dynamic_o)
@@ -68,11 +74,11 @@
 $(linked_module): PRIVATE_POST_LINK_CMD := $(LOCAL_POST_LINK_CMD)
 
 ifeq ($(LOCAL_FORCE_STATIC_EXECUTABLE),true)
-$(linked_module): $(my_target_crtbegin_static_o) $(all_objects) $(all_libraries) $(my_target_crtend_o) $(my_target_libgcc) $(my_target_libatomic)
+$(linked_module): $(my_target_crtbegin_static_o) $(all_objects) $(all_libraries) $(my_target_crtend_o) $(my_target_libcrt_builtins) $(my_target_libgcc) $(my_target_libatomic)
 	$(transform-o-to-static-executable)
 	$(PRIVATE_POST_LINK_CMD)
 else
-$(linked_module): $(my_target_crtbegin_dynamic_o) $(all_objects) $(all_libraries) $(my_target_crtend_o) $(my_target_libgcc) $(my_target_libatomic)
+$(linked_module): $(my_target_crtbegin_dynamic_o) $(all_objects) $(all_libraries) $(my_target_crtend_o) $(my_target_libcrt_builtins) $(my_target_libgcc) $(my_target_libatomic)
 	$(transform-o-to-executable)
 	$(PRIVATE_POST_LINK_CMD)
 endif
diff --git a/core/generate_enforce_rro.mk b/core/generate_enforce_rro.mk
index 62a8c8d..c88c779 100644
--- a/core/generate_enforce_rro.mk
+++ b/core/generate_enforce_rro.mk
@@ -27,4 +27,10 @@
 LOCAL_AAPT_FLAGS += --auto-add-overlay
 LOCAL_RESOURCE_DIR := $(enforce_rro_source_overlays)
 
+ifeq (framework-res__auto_generated_rro,$(enforce_rro_module))
+LOCAL_PRIVATE_PLATFORM_APIS := true
+else
+LOCAL_SDK_VERSION := current
+endif
+
 include $(BUILD_RRO_PACKAGE)
diff --git a/core/host_java_library_common.mk b/core/host_java_library_common.mk
index 8df4b37..0e62f60 100644
--- a/core/host_java_library_common.mk
+++ b/core/host_java_library_common.mk
@@ -32,15 +32,15 @@
 proto_sources := $(filter %.proto,$(LOCAL_SRC_FILES))
 ifneq ($(proto_sources),)
 ifeq ($(LOCAL_PROTOC_OPTIMIZE_TYPE),micro)
-    LOCAL_JAVA_LIBRARIES += host-libprotobuf-java-micro
+    LOCAL_JAVA_LIBRARIES += libprotobuf-java-micro
 else
   ifeq ($(LOCAL_PROTOC_OPTIMIZE_TYPE),nano)
-    LOCAL_JAVA_LIBRARIES += host-libprotobuf-java-nano
+    LOCAL_JAVA_LIBRARIES += libprotobuf-java-nano
   else
     ifeq ($(LOCAL_PROTOC_OPTIMIZE_TYPE),full)
-      LOCAL_JAVA_LIBRARIES += host-libprotobuf-java-full
+      LOCAL_JAVA_LIBRARIES += libprotobuf-java-full
     else
-      LOCAL_JAVA_LIBRARIES += host-libprotobuf-java-lite
+      LOCAL_JAVA_LIBRARIES += libprotobuf-java-lite
     endif
   endif
 endif
diff --git a/core/java_common.mk b/core/java_common.mk
index 860d73c..f4c47c8 100644
--- a/core/java_common.mk
+++ b/core/java_common.mk
@@ -499,7 +499,7 @@
 ifeq ($(ONE_SHOT_MAKEFILE),)
 installed_static_library_notice_file_targets := \
     $(foreach lib,$(LOCAL_STATIC_JAVA_LIBRARIES), \
-      NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST,TARGET)-JAVA_LIBRARIES-$(lib))
+      NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST$(if $(my_host_cross),_CROSS,),TARGET)-JAVA_LIBRARIES-$(lib))
 else
 installed_static_library_notice_file_targets :=
 endif
diff --git a/core/json.mk b/core/json.mk
new file mode 100644
index 0000000..ba8ffa7
--- /dev/null
+++ b/core/json.mk
@@ -0,0 +1,35 @@
+4space :=$= $(space)$(space)$(space)$(space)
+invert_bool =$= $(if $(strip $(1)),,true)
+
+# Converts a list to a JSON list.
+# $1: List separator.
+# $2: List.
+_json_list =$= [$(if $(2),"$(subst $(1),"$(comma)",$(2))")]
+
+# Converts a space-separated list to a JSON list.
+json_list =$= $(call _json_list,$(space),$(1))
+
+# Converts a comma-separated list to a JSON list.
+csv_to_json_list =$= $(call _json_list,$(comma),$(1))
+
+# Adds or removes 4 spaces from _json_indent
+json_increase_indent =$= $(eval _json_indent := $$(_json_indent)$$(4space))
+json_decrease_indent =$= $(eval _json_indent := $$(subst _,$$(space),$$(patsubst %____,%,$$(subst $$(space),_,$$(_json_indent)))))
+
+# 1: Key name
+# 2: Value
+add_json_val =$= $(eval _json_contents := $$(_json_contents)$$(_json_indent)"$$(strip $$(1))": $$(strip $$(2))$$(comma)$$(newline))
+add_json_str =$= $(call add_json_val,$(1),"$(strip $(2))")
+add_json_list =$= $(call add_json_val,$(1),$(call json_list,$(patsubst %,%,$(2))))
+add_json_csv =$= $(call add_json_val,$(1),$(call csv_to_json_list,$(strip $(2))))
+add_json_bool =$= $(call add_json_val,$(1),$(if $(strip $(2)),true,false))
+add_json_map =$= $(eval _json_contents := $$(_json_contents)$$(_json_indent)"$$(strip $$(1))": {$$(newline))$(json_increase_indent)
+end_json_map =$= $(json_decrease_indent)$(eval _json_contents := $$(_json_contents)$$(if $$(filter %$$(comma),$$(lastword $$(_json_contents))),__SV_END)$$(_json_indent)},$$(newline))
+
+# Clears _json_contents to start a new json file
+json_start =$= $(eval _json_contents := {$$(newline))$(eval _json_indent := $$(4space))
+
+# Adds the trailing close brace to _json_contents, and removes any trailing commas if necessary
+json_end =$= $(eval _json_contents := $$(subst $$(comma)$$(newline)__SV_END,$$(newline),$$(_json_contents)__SV_END}$$(newline)))
+
+json_contents =$= $(_json_contents)
diff --git a/core/local_systemsdk.mk b/core/local_systemsdk.mk
index 49085fd..7acb57a 100644
--- a/core/local_systemsdk.mk
+++ b/core/local_systemsdk.mk
@@ -25,7 +25,7 @@
   ifneq (,$(filter JAVA_LIBRARIES APPS,$(LOCAL_MODULE_CLASS)))
     ifndef LOCAL_SDK_VERSION
       ifeq ($(_is_vendor_app),true)
-        ifeq (,$(findstring __auto_generated_rro,$(LOCAL_MODULE)))
+        ifeq (,$(filter framework-res__auto_generated_rro,$(LOCAL_MODULE)))
           # Runtime resource overlay for framework-res is exempted from building
           # against System SDK.
           # TODO(b/35859726): remove this exception
diff --git a/core/main.mk b/core/main.mk
index 6374be9..84457e0 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -221,13 +221,11 @@
 # Enable dynamic linker and hidden API developer warnings for
 # userdebug, eng and non-REL builds
 ifneq ($(TARGET_BUILD_VARIANT),user)
-  ADDITIONAL_BUILD_PROPERTIES += ro.bionic.ld.warning=1 \
-                                 ro.art.hiddenapi.warning=1
+  ADDITIONAL_BUILD_PROPERTIES += ro.bionic.ld.warning=1
 else
 # Enable it for user builds as long as they are not final.
 ifneq ($(PLATFORM_VERSION_CODENAME),REL)
-  ADDITIONAL_BUILD_PROPERTIES += ro.bionic.ld.warning=1 \
-                                 ro.art.hiddenapi.warning=1
+  ADDITIONAL_BUILD_PROPERTIES += ro.bionic.ld.warning=1
 endif
 endif
 
@@ -246,7 +244,11 @@
 ADDITIONAL_DEFAULT_PROPERTIES += ro.actionable_compatible_property.enabled=${PRODUCT_COMPATIBLE_PROPERTY}
 endif
 
-ADDITIONAL_PRODUCT_PROPERTIES += ro.boot.logical_partitions=$(PRODUCT_USE_LOGICAL_PARTITIONS)
+# TODO(b/119286600): remove ro.boot.logical_partitions
+ADDITIONAL_PRODUCT_PROPERTIES += \
+    ro.boot.logical_partitions=$(PRODUCT_USE_DYNAMIC_PARTITIONS) \
+    ro.boot.dynamic_partitions=$(PRODUCT_USE_DYNAMIC_PARTITIONS) \
+    ro.boot.dynamic_partitions_retrofit=$(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS)
 
 # -----------------------------------------------------------------
 ###
@@ -727,11 +729,44 @@
   $(eval r := $(filter $($(root))/%,$(call module-installed-files,\
     $(deps))))\
   $(if $(filter $(1),HOST_),\
+    $(eval ALL_MODULES.$(mod).HOST_SHARED_LIBRARY_FILES := $$(ALL_MODULES.$(mod).HOST_SHARED_LIBRARY_FILES) $(word 2,$(p)) $(r))\
+    $(eval ALL_MODULES.$(mod).HOST_SHARED_LIBRARIES := $$(ALL_MODULES.$(mod).HOST_SHARED_LIBRARIES) $(deps))\
     $(eval $(call add-required-host-so-deps,$(word 2,$(p)),$(r))),\
     $(eval $(call add-required-deps,$(word 2,$(p)),$(r))))\
   $(eval ALL_MODULES.$(mod).REQUIRED += $(deps)))
 endef
 
+# Recursively resolve the host shared library dependencies of a given module.
+# $(1): module name
+# Returns the transitive shared library dependencies of the module.
+define get-all-shared-libs-deps
+$(if $(_all_deps_for_$(1)_set_),$(_all_deps_for_$(1)_),\
+  $(eval _all_deps_for_$(1)_ :=) \
+  $(foreach dep,$(ALL_MODULES.$(1).HOST_SHARED_LIBRARIES),\
+    $(foreach m,$(call get-all-shared-libs-deps,$(dep)),\
+      $(eval _all_deps_for_$(1)_ := $$(_all_deps_for_$(1)_) $(m))\
+      $(eval _all_deps_for_$(1)_ := $(sort $(_all_deps_for_$(1)_))))\
+    $(eval _all_deps_for_$(1)_ := $$(_all_deps_for_$(1)_) $(dep))\
+    $(eval _all_deps_for_$(1)_ := $(sort $(_all_deps_for_$(1)_) $(dep)))\
+    $(eval _all_deps_for_$(1)_set_ := true))\
+$(_all_deps_for_$(1)_))
+endef
+
+# Scan all modules in the general-tests and device-tests suites and flatten
+# their shared library dependencies.
+define update-host-shared-libs-deps-for-suites
+$(foreach suite,general-tests device-tests,\
+  $(foreach m,$(COMPATIBILITY.$(suite).MODULES),\
+    $(eval my_deps := $(call get-all-shared-libs-deps,$(m)))\
+    $(foreach dep,$(my_deps),\
+      $(foreach f,$(ALL_MODULES.$(dep).HOST_SHARED_LIBRARY_FILES),\
+        $(eval target := $(HOST_OUT_TESTCASES)/$(lastword $(subst /, ,$(dir $(f))))/$(notdir $(f)))\
+        $(eval COMPATIBILITY.$(suite).HOST_SHARED_LIBRARY.FILES := \
+          $$(COMPATIBILITY.$(suite).HOST_SHARED_LIBRARY.FILES) $(f):$(target))\
+        $(eval COMPATIBILITY.$(suite).HOST_SHARED_LIBRARY.FILES := \
+          $(sort $(COMPATIBILITY.$(suite).HOST_SHARED_LIBRARY.FILES)))))))
+endef
+
 $(call resolve-shared-libs-depes,TARGET_)
 ifdef TARGET_2ND_ARCH
 $(call resolve-shared-libs-depes,TARGET_,true)
@@ -740,6 +775,9 @@
 ifdef HOST_2ND_ARCH
 $(call resolve-shared-libs-depes,HOST_,true)
 endif
+# Update host-side shared library dependencies for tests in the device-tests and general-tests suites.
+# This must be called after resolve-shared-libs-depes has been called for HOST_2ND_ARCH.
+$(call update-host-shared-libs-deps-for-suites)
 ifdef HOST_CROSS_OS
 $(call resolve-shared-libs-depes,HOST_CROSS_,,true)
 endif
@@ -1041,7 +1079,8 @@
   product_MODULES := $(_pif_modules)
 
   # Verify the artifact path requirements made by included products.
-
+  is_asan := $(if $(filter address,$(SANITIZE_TARGET)),true)
+  ifneq (true,$(or $(is_asan),$(DISABLE_ARTIFACT_PATH_REQUIREMENTS)))
   # Fakes don't get installed, and host files are irrelevant.
   static_whitelist_patterns := $(TARGET_OUT_FAKE)/% $(HOST_OUT)/%
   # RROs become REQUIRED by the source module, but are always placed on the vendor partition.
@@ -1085,6 +1124,7 @@
 $(PRODUCT_OUT)/offending_artifacts.txt:
 	rm -f $@
 	$(foreach f,$(sort $(all_offending_files)),echo $(f) >> $@;)
+  endif
 else
   # We're not doing a full build, and are probably only including
   # a subset of the module makefiles.  Don't try to build any modules
@@ -1164,6 +1204,9 @@
 modules_to_check += $(foreach m,$(ALL_MODULES),$(ALL_MODULES.$(m).BUILT))
 endif
 
+# Build docs as part of checkbuild to catch more breakages.
+modules_to_check += $(ALL_DOCS)
+
 # for easier debugging
 modules_to_check := $(sort $(modules_to_check))
 #$(error modules_to_check $(modules_to_check))
@@ -1303,6 +1346,12 @@
   # Dist the installed files if they exist.
   apps_only_installed_files := $(foreach m,$(unbundled_build_modules),$(ALL_MODULES.$(m).INSTALLED))
   $(call dist-for-goals,apps_only, $(apps_only_installed_files))
+
+  # Dist the bundle files if they exist.
+  apps_only_bundle_files := $(foreach m,$(unbundled_build_modules),\
+    $(if $(ALL_MODULES.$(m).BUNDLE),$(ALL_MODULES.$(m).BUNDLE):$(m)-base.zip))
+  $(call dist-for-goals,apps_only, $(apps_only_bundle_files))
+
   # For uninstallable modules such as static Java library, we have to dist the built file,
   # as <module_name>.<suffix>
   apps_only_dist_built_files := $(foreach m,$(unbundled_build_modules),$(if $(ALL_MODULES.$(m).INSTALLED),,\
diff --git a/core/notice_files.mk b/core/notice_files.mk
index 08778c5..e687ab2 100644
--- a/core/notice_files.mk
+++ b/core/notice_files.mk
@@ -119,6 +119,6 @@
 # Define it even if the notice file doesn't exist so that other
 # modules can depend on it.
 notice_target := NOTICE-$(if \
-    $(LOCAL_IS_HOST_MODULE),HOST,TARGET)-$(LOCAL_MODULE_CLASS)-$(LOCAL_MODULE)
+    $(LOCAL_IS_HOST_MODULE),HOST$(if $(my_host_cross),_CROSS,),TARGET)-$(LOCAL_MODULE_CLASS)-$(LOCAL_MODULE)
 .PHONY: $(notice_target)
 $(notice_target): $(installed_notice_file)
diff --git a/core/package_internal.mk b/core/package_internal.mk
index 9a52af7..9327587 100644
--- a/core/package_internal.mk
+++ b/core/package_internal.mk
@@ -194,6 +194,11 @@
 my_res_package := $(intermediates)/package-res.apk
 LOCAL_INTERMEDIATE_TARGETS += $(my_res_package)
 
+ifeq ($(LOCAL_USE_AAPT2),true)
+  my_bundle_module := $(intermediates)/base.zip
+  LOCAL_INTERMEDIATE_TARGETS += $(my_bundle_module)
+endif
+
 # Always run aapt2, because we need to at least compile the AndroidManifest.xml.
 need_compile_res := true
 
@@ -674,6 +679,55 @@
 	$(compress-package)
 endif  # LOCAL_COMPRESSED_MODULE
 
+ifeq ($(LOCAL_USE_AAPT2),true)
+  my_package_res_pb := $(intermediates)/package-res.pb.apk
+  $(my_package_res_pb): $(my_res_package) $(AAPT2)
+	$(AAPT2) convert --output-format proto $< -o $@
+
+  $(my_bundle_module): $(my_package_res_pb)
+  $(my_bundle_module): PRIVATE_RES_PACKAGE := $(my_package_res_pb)
+
+  $(my_bundle_module): $(jni_shared_libraries)
+  $(my_bundle_module): PRIVATE_JNI_SHARED_LIBRARIES := $(jni_shared_libraries_with_abis)
+  $(my_bundle_module): PRIVATE_JNI_SHARED_LIBRARIES_ABI := $(jni_shared_libraries_abis)
+
+  ifneq ($(full_classes_jar),)
+    $(my_bundle_module): PRIVATE_DEX_FILE := $(built_dex)
+    # Use the jarjar processed archive as the initial package file.
+    $(my_bundle_module): PRIVATE_SOURCE_ARCHIVE := $(full_classes_pre_proguard_jar)
+    $(my_bundle_module): $(built_dex)
+  else
+    $(my_bundle_module): PRIVATE_DEX_FILE :=
+    $(my_bundle_module): PRIVATE_SOURCE_ARCHIVE :=
+  endif # full_classes_jar
+
+  $(my_bundle_module): $(MERGE_ZIPS) $(SOONG_ZIP) $(ZIP2ZIP)
+	@echo "target Bundle: $(PRIVATE_MODULE) ($@)"
+	rm -rf $@.parts
+	mkdir -p $@.parts
+	$(ZIP2ZIP) -i $(PRIVATE_RES_PACKAGE) -o $@.parts/apk.zip AndroidManifest.xml:manifest/AndroidManifest.xml resources.pb "res/**/*" "assets/**/*"
+        ifneq ($(jni_shared_libraries),)
+	  $(call create-jni-shared-libs-package,$@.parts/jni.zip)
+        endif
+        ifeq ($(full_classes_jar),)
+        # We don't build a jar, so we need to add the Java resources here.
+	  $(if $(PRIVATE_EXTRA_JAR_ARGS),\
+	    $(call create-java-resources-jar,$@.parts/res.zip) && \
+	    $(ZIP2ZIP) -i $@.parts/res.zip -o $@.parts/res.zip.tmp "**/*:root/" && \
+	    mv -f $@.parts/res.zip.tmp $@.parts/res.zip)
+        else  # full_classes_jar
+	  $(call create-dex-jar,$@.parts/dex.zip,$(PRIVATE_DEX_FILE))
+	  $(ZIP2ZIP) -i $@.parts/dex.zip -o $@.parts/dex.zip.tmp "classes*.dex:dex/"
+	  mv -f $@.parts/dex.zip.tmp $@.parts/dex.zip
+	  $(call extract-resources-jar,$@.parts/res.zip,$(PRIVATE_SOURCE_ARCHIVE))
+	  $(ZIP2ZIP) -i $@.parts/res.zip -o $@.parts/res.zip.tmp "**/*:root/"
+	  mv -f $@.parts/res.zip.tmp $@.parts/res.zip
+        endif  # full_classes_jar
+	$(MERGE_ZIPS) $@ $@.parts/*.zip
+	rm -rf $@.parts
+  ALL_MODULES.$(LOCAL_MODULE).BUNDLE := $(my_bundle_module)
+endif
+
 ###############################
 ## Build dpi-specific apks, if it's apps_only build.
 ifdef TARGET_BUILD_APPS
@@ -689,7 +743,7 @@
 ifdef LOCAL_DEX_PREOPT
 $(built_odex): PRIVATE_DEX_FILE := $(built_dex)
 ifeq (true, $(LOCAL_UNCOMPRESS_DEX))
-$(built_odex): $(ZIP2ZIP)
+$(built_odex): $(ZIP2ZIP) $(ZIPALIGN)
 endif
 # Use pattern rule - we may have multiple built odex files.
 $(built_odex) : $(dir $(LOCAL_BUILT_MODULE))% : $(built_dex)
diff --git a/core/pdk_fusion_modules.mk b/core/pdk_fusion_modules.mk
index 9aabd0f..235acf9 100644
--- a/core/pdk_fusion_modules.mk
+++ b/core/pdk_fusion_modules.mk
@@ -37,12 +37,14 @@
 
 # The source prebuilts are extracted in the rule of _pdk_fusion_stamp.
 # Use a touch rule to establish the dependency.
+ifndef PDK_FUSION_PLATFORM_DIR
 $(3) $(11) : $(_pdk_fusion_stamp)
 	$(hide) if [ ! -f $$@ ]; then \
 	  echo 'Error: $$@ does not exist. Check your platform.zip.' 1>&2; \
 	  exit 1; \
 	fi
 	$(hide) touch $$@
+endif
 endef
 
 # We don't have a LOCAL_PATH for the auto-generated modules, so let it be the $(BUILD_SYSTEM).
diff --git a/core/product.mk b/core/product.mk
index f9f8d60..661416c 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -210,6 +210,8 @@
     PRODUCT_USE_DYNAMIC_PARTITION_SIZE \
     PRODUCT_BUILD_SUPER_PARTITION \
     PRODUCT_FORCE_PRODUCT_MODULES_TO_SYSTEM_PARTITION \
+    PRODUCT_USE_DYNAMIC_PARTITIONS \
+    PRODUCT_RETROFIT_DYNAMIC_PARTITIONS \
 
 define dump-product
 $(info ==== $(1) ====)\
diff --git a/core/product_config.mk b/core/product_config.mk
index 577bafe..5a727c4 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -513,20 +513,32 @@
 PRODUCT_ACTIONABLE_COMPATIBLE_PROPERTY_DISABLE := \
     $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_ACTIONABLE_COMPATIBLE_PROPERTY_DISABLE))
 
-# Logical and Resizable Partitions feature flag.
-PRODUCT_USE_LOGICAL_PARTITIONS := \
-    $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_USE_LOGICAL_PARTITIONS))
-.KATI_READONLY := PRODUCT_USE_LOGICAL_PARTITIONS
+# Dynamic partition feature flags.
 
-# All requirements of PRODUCT_USE_LOGICAL_PARTITIONS falls back to
-# PRODUCT_USE_LOGICAL_PARTITIONS if not defined.
+# When this is true, dynamic partition support is retrofitted onto a device that
+# originally launched without dynamic partitions. Otherwise, the device is
+# assumed to have launched with dynamic partitions.
+# This flag implies PRODUCT_USE_DYNAMIC_PARTITIONS.
+PRODUCT_RETROFIT_DYNAMIC_PARTITIONS := \
+    $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_RETROFIT_DYNAMIC_PARTITIONS))
+.KATI_READONLY := PRODUCT_RETROFIT_DYNAMIC_PARTITIONS
+
+# TODO(b/119286600): remove PRODUCT_USE_LOGICAL_PARTITIONS
+PRODUCT_USE_DYNAMIC_PARTITIONS := $(or \
+    $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_USE_DYNAMIC_PARTITIONS)), \
+    $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_USE_LOGICAL_PARTITIONS)), \
+    $(PRODUCT_RETROFIT_DYNAMIC_PARTITIONS))
+.KATI_READONLY := PRODUCT_USE_DYNAMIC_PARTITIONS
+
+# The flags required by PRODUCT_USE_DYNAMIC_PARTITIONS fall back to the value of
+# PRODUCT_USE_DYNAMIC_PARTITIONS if they are not defined explicitly.
 PRODUCT_USE_DYNAMIC_PARTITION_SIZE := $(or \
     $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_USE_DYNAMIC_PARTITION_SIZE)),\
-    $(PRODUCT_USE_LOGICAL_PARTITIONS))
+    $(PRODUCT_USE_DYNAMIC_PARTITIONS))
 .KATI_READONLY := PRODUCT_USE_DYNAMIC_PARTITION_SIZE
 PRODUCT_BUILD_SUPER_PARTITION := $(or \
     $(strip $(PRODUCTS.$(INTERNAL_PRODUCT).PRODUCT_BUILD_SUPER_PARTITION)),\
-    $(PRODUCT_USE_LOGICAL_PARTITIONS))
+    $(PRODUCT_USE_DYNAMIC_PARTITIONS))
 .KATI_READONLY := PRODUCT_BUILD_SUPER_PARTITION
 
 # List of modules that should be forcefully unmarked from being LOCAL_PRODUCT_MODULE, and hence
diff --git a/core/shared_library_internal.mk b/core/shared_library_internal.mk
index 41e6a95..44bb020 100644
--- a/core/shared_library_internal.mk
+++ b/core/shared_library_internal.mk
@@ -34,6 +34,11 @@
 include $(BUILD_SYSTEM)/dynamic_binary.mk
 
 # Define PRIVATE_ variables from global vars
+ifeq ($(LOCAL_NO_LIBCRT_BUILTINS),true)
+my_target_libcrt_builtins :=
+else
+my_target_libcrt_builtins := $($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)LIBCRT_BUILTINS)
+endif
 ifeq ($(LOCAL_NO_LIBGCC),true)
 my_target_libgcc :=
 else
@@ -54,6 +59,7 @@
 my_target_crtbegin_so_o := $(wildcard $(my_ndk_sysroot_lib)/crtbegin_so.o)
 my_target_crtend_so_o := $(wildcard $(my_ndk_sysroot_lib)/crtend_so.o)
 endif
+$(linked_module): PRIVATE_TARGET_LIBCRT_BUILTINS := $(my_target_libcrt_builtins)
 $(linked_module): PRIVATE_TARGET_LIBGCC := $(my_target_libgcc)
 $(linked_module): PRIVATE_TARGET_LIBATOMIC := $(my_target_libatomic)
 $(linked_module): PRIVATE_TARGET_CRTBEGIN_SO_O := $(my_target_crtbegin_so_o)
@@ -64,6 +70,7 @@
         $(all_libraries) \
         $(my_target_crtbegin_so_o) \
         $(my_target_crtend_so_o) \
+        $(my_target_libcrt_builtins) \
         $(my_target_libgcc) \
         $(my_target_libatomic) \
         $(LOCAL_ADDITIONAL_DEPENDENCIES)
diff --git a/core/soong_app_prebuilt.mk b/core/soong_app_prebuilt.mk
index 31cb28e..837920f 100644
--- a/core/soong_app_prebuilt.mk
+++ b/core/soong_app_prebuilt.mk
@@ -102,6 +102,10 @@
 
 PACKAGES.$(LOCAL_MODULE).OVERRIDES := $(strip $(LOCAL_OVERRIDES_PACKAGES))
 
+ifdef LOCAL_SOONG_BUNDLE
+  ALL_MODULES.$(LOCAL_MODULE).BUNDLE := $(LOCAL_SOONG_BUNDLE)
+endif
+
 ifndef LOCAL_IS_HOST_MODULE
 ifeq ($(LOCAL_SDK_VERSION),system_current)
 my_link_type := java:system
diff --git a/core/soong_cc_prebuilt.mk b/core/soong_cc_prebuilt.mk
index f213563..088b076 100644
--- a/core/soong_cc_prebuilt.mk
+++ b/core/soong_cc_prebuilt.mk
@@ -8,7 +8,6 @@
   $(call pretty-error,soong_cc_prebuilt.mk may only be used from Soong)
 endif
 
-skip_module :=
 ifdef LOCAL_IS_HOST_MODULE
   ifneq ($(HOST_OS),$(LOCAL_MODULE_HOST_OS))
     my_prefix := HOST_CROSS_
@@ -31,6 +30,7 @@
   $(call pretty-error,Unsupported LOCAL_MODULE_$(my_prefix)ARCH=$(LOCAL_MODULE_$(my_prefix)ARCH))
 endif
 
+skip_module :=
 ifeq ($(TARGET_TRANSLATE_2ND_ARCH),true)
   ifndef LOCAL_IS_HOST_MODULE
     ifdef LOCAL_2ND_ARCH_VAR_PREFIX
@@ -177,6 +177,22 @@
 
 $(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
 
+# We don't care about installed static libraries, since the libraries have
+# already been linked into the module at that point. We do, however, care
+# about the NOTICE files for any static libraries that we use.
+# (see notice_files.mk)
+#
+# Filter out some NDK libraries that are not being exported.
+my_static_libraries := \
+    $(filter-out ndk_libc++_static ndk_libc++abi ndk_libandroid_support ndk_libunwind, \
+      $(LOCAL_STATIC_LIBRARIES))
+installed_static_library_notice_file_targets := \
+    $(foreach lib,$(my_static_libraries) $(LOCAL_WHOLE_STATIC_LIBRARIES), \
+      NOTICE-$(if $(LOCAL_IS_HOST_MODULE),HOST$(if $(my_host_cross),_CROSS,),TARGET)-STATIC_LIBRARIES-$(lib))
+
+$(notice_target): | $(installed_static_library_notice_file_targets)
+$(LOCAL_INSTALLED_MODULE): | $(notice_target)
+
 endif # !skip_module
 
 skip_module :=
diff --git a/core/soong_config.mk b/core/soong_config.mk
index 7a884e0..4328453 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -14,31 +14,13 @@
 endif
 
 ifeq ($(WRITE_SOONG_VARIABLES),true)
-# Converts a list to a JSON list.
-# $1: List separator.
-# $2: List.
-_json_list = [$(if $(2),"$(subst $(1),"$(comma)",$(2))")]
 
-# Converts a space-separated list to a JSON list.
-json_list = $(call _json_list,$(space),$(1))
-
-# Converts a comma-separated list to a JSON list.
-csv_to_json_list = $(call _json_list,$(comma),$(1))
-
-# 1: Key name
-# 2: Value
-add_json_val = $(eval _contents := $$(_contents)    "$$(strip $$(1))":$$(space)$$(strip $$(2))$$(comma)$$(newline))
-add_json_str = $(call add_json_val,$(1),"$(strip $(2))")
-add_json_list = $(call add_json_val,$(1),$(call json_list,$(patsubst %,%,$(2))))
-add_json_csv = $(call add_json_val,$(1),$(call csv_to_json_list,$(strip $(2))))
-add_json_bool = $(call add_json_val,$(1),$(if $(strip $(2)),true,false))
-
-invert_bool = $(if $(strip $(1)),,true)
+include $(BUILD_SYSTEM)/json.mk
 
 # Create soong.variables with copies of makefile settings.  Runs every build,
 # but only updates soong.variables if it changes
 $(shell mkdir -p $(dir $(SOONG_VARIABLES)))
-_contents := {$(newline)
+$(call json_start)
 
 $(call add_json_str,  Make_suffix, -$(TARGET_PRODUCT))
 
@@ -154,17 +136,19 @@
 $(call add_json_list, BoardPlatPublicSepolicyDirs,       $(BOARD_PLAT_PUBLIC_SEPOLICY_DIR))
 $(call add_json_list, BoardPlatPrivateSepolicyDirs,      $(BOARD_PLAT_PRIVATE_SEPOLICY_DIR))
 
-_contents := $(_contents)    "VendorVars": {$(newline)
+$(call add_json_bool, FlattenApex,                       $(filter true,$(TARGET_FLATTEN_APEX)))
+
+$(call add_json_map, VendorVars)
 $(foreach namespace,$(SOONG_CONFIG_NAMESPACES),\
-  $(eval _contents := $$(_contents)        "$(namespace)": {$$(newline)) \
+  $(call add_json_map, $(namespace))\
   $(foreach key,$(SOONG_CONFIG_$(namespace)),\
-    $(eval _contents := $$(_contents)            "$(key)": "$(SOONG_CONFIG_$(namespace)_$(key))",$$(newline)))\
-  $(eval _contents := $$(_contents)$(if $(strip $(SOONG_CONFIG_$(namespace))),__SV_END)        },$$(newline)))
-_contents := $(_contents)$(if $(strip $(SOONG_CONFIG_NAMESPACES)),__SV_END)    },$(newline)
+    $(call add_json_str,$(key),$(SOONG_CONFIG_$(namespace)_$(key))))\
+  $(call end_json_map))
+$(call end_json_map)
 
-_contents := $(subst $(comma)$(newline)__SV_END,$(newline),$(_contents)__SV_END}$(newline))
+$(call json_end)
 
-$(file >$(SOONG_VARIABLES).tmp,$(_contents))
+$(file >$(SOONG_VARIABLES).tmp,$(json_contents))
 
 $(shell if ! cmp -s $(SOONG_VARIABLES).tmp $(SOONG_VARIABLES); then \
 	  mv $(SOONG_VARIABLES).tmp $(SOONG_VARIABLES); \
@@ -172,15 +156,4 @@
 	  rm $(SOONG_VARIABLES).tmp; \
 	fi)
 
-_json_list :=
-json_list :=
-csv_to_json_list :=
-add_json_val :=
-add_json_str :=
-add_json_list :=
-add_json_csv :=
-add_json_bool :=
-invert_bool :=
-_contents :=
-
 endif # CONFIGURE_SOONG
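
The hand-rolled _contents string building removed above is replaced by the helpers in
core/json.mk (json_start, add_json_map, add_json_str, end_json_map, json_end).
Conceptually they emit the same nested structure for soong.variables; a rough Python
analogue of the VendorVars block (illustrative only; the namespace and key values below
are hypothetical):

    import json

    # Hypothetical stand-ins for SOONG_CONFIG_NAMESPACES and the
    # SOONG_CONFIG_<namespace>_<key> variables.
    soong_config = {"acme": {"board_rev": "p2", "use_widget": "true"}}

    variables = {"Make_suffix": "-aosp_arm64"}  # plus the other add_json_* values
    variables["VendorVars"] = {ns: dict(keys) for ns, keys in soong_config.items()}

    print(json.dumps(variables, indent=4))
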
diff --git a/core/soong_java_prebuilt.mk b/core/soong_java_prebuilt.mk
index 288a763..3e6b261 100644
--- a/core/soong_java_prebuilt.mk
+++ b/core/soong_java_prebuilt.mk
@@ -120,6 +120,15 @@
 
     java-dex : $(LOCAL_BUILT_MODULE)
   else  # LOCAL_UNINSTALLABLE_MODULE
+
+    ifneq ($(filter $(LOCAL_MODULE),$(HIDDENAPI_EXTRA_APP_USAGE_JARS)),)
+      # Derive greylist from classes.jar.
+      # We use full_classes_jar here, which is the post-proguard jar (on the basis that we also
+      # have a full_classes_pre_proguard_jar). This is consistent with the equivalent code in
+      # java.mk.
+      $(eval $(call hiddenapi-generate-greylist-txt,$(full_classes_jar),$(hiddenapi_whitelist_txt),$(hiddenapi_greylist_txt),$(hiddenapi_darkgreylist_txt),$(hiddenapi_greylist_metadata_csv)))
+    endif
+
     $(eval $(call copy-one-file,$(full_classes_jar),$(LOCAL_BUILT_MODULE)))
     $(eval $(call copy-one-file,$(LOCAL_SOONG_DEX_JAR),$(common_javalib.jar)))
     java-dex : $(common_javalib.jar)
@@ -128,6 +137,10 @@
   $(eval $(call copy-one-file,$(full_classes_jar),$(LOCAL_BUILT_MODULE)))
 endif  # LOCAL_SOONG_DEX_JAR
 
+ifdef LOCAL_SOONG_AAR
+  ALL_MODULES.$(LOCAL_MODULE).AAR := $(LOCAL_SOONG_AAR)
+endif
+
 javac-check : $(full_classes_jar)
 javac-check-$(LOCAL_MODULE) : $(full_classes_jar)
 .PHONY: javac-check-$(LOCAL_MODULE)
diff --git a/core/tasks/device-tests.mk b/core/tasks/device-tests.mk
index a2b626e..0028ce4 100644
--- a/core/tasks/device-tests.mk
+++ b/core/tasks/device-tests.mk
@@ -18,12 +18,16 @@
 device-tests-zip := $(PRODUCT_OUT)/device-tests.zip
 # Create an artifact to include a list of test config files in device-tests.
 device-tests-list-zip := $(PRODUCT_OUT)/device-tests_list.zip
+my_host_shared_lib_for_device_tests := $(call copy-many-files,$(COMPATIBILITY.device-tests.HOST_SHARED_LIBRARY.FILES))
 $(device-tests-zip) : .KATI_IMPLICIT_OUTPUTS := $(device-tests-list-zip)
 $(device-tests-zip) : PRIVATE_device_tests_list := $(PRODUCT_OUT)/device-tests_list
-
-$(device-tests-zip) : $(COMPATIBILITY.device-tests.FILES) $(SOONG_ZIP)
+$(device-tests-zip) : PRIVATE_HOST_SHARED_LIBS := $(my_host_shared_lib_for_device_tests)
+$(device-tests-zip) : $(COMPATIBILITY.device-tests.FILES) $(my_host_shared_lib_for_device_tests) $(SOONG_ZIP)
 	echo $(sort $(COMPATIBILITY.device-tests.FILES)) | tr " " "\n" > $@.list
 	grep $(HOST_OUT_TESTCASES) $@.list > $@-host.list || true
+	$(hide) for shared_lib in $(PRIVATE_HOST_SHARED_LIBS); do \
+	  echo $$shared_lib >> $@-host.list; \
+	done
 	grep $(TARGET_OUT_TESTCASES) $@.list > $@-target.list || true
 	$(hide) $(SOONG_ZIP) -d -o $@ -P host -C $(HOST_OUT) -l $@-host.list -P target -C $(PRODUCT_OUT) -l $@-target.list
 	rm -f $(PRIVATE_device_tests_list)
diff --git a/core/tasks/general-tests.mk b/core/tasks/general-tests.mk
index 9619bbe..36ab101 100644
--- a/core/tasks/general-tests.mk
+++ b/core/tasks/general-tests.mk
@@ -22,17 +22,33 @@
 general_tests_zip := $(PRODUCT_OUT)/general-tests.zip
 # Create an artifact to include a list of test config files in general-tests.
 general_tests_list_zip := $(PRODUCT_OUT)/general-tests_list.zip
+
+# Filter out entries that general-tests shares with device-tests in HOST_SHARED_LIBRARY.FILES,
+# to avoid warnings about overriding commands.
+my_host_shared_lib_for_general_tests := \
+  $(foreach m,$(filter $(COMPATIBILITY.device-tests.HOST_SHARED_LIBRARY.FILES),\
+	   $(COMPATIBILITY.general-tests.HOST_SHARED_LIBRARY.FILES)),$(call word-colon,2,$(m)))
+my_general_tests_shared_lib_files := \
+  $(filter-out $(COMPATIBILITY.device-tests.HOST_SHARED_LIBRARY.FILES),\
+	 $(COMPATIBILITY.general-tests.HOST_SHARED_LIBRARY.FILES))
+
+my_host_shared_lib_for_general_tests += $(call copy-many-files,$(my_general_tests_shared_lib_files))
+
 $(general_tests_zip) : PRIVATE_general_tests_list_zip := $(general_tests_list_zip)
 $(general_tests_zip) : .KATI_IMPLICIT_OUTPUTS := $(general_tests_list_zip)
 $(general_tests_zip) : PRIVATE_TOOLS := $(general_tests_tools)
 $(general_tests_zip) : PRIVATE_INTERMEDIATES_DIR := $(intermediates_dir)
-$(general_tests_zip) : $(COMPATIBILITY.general-tests.FILES) $(general_tests_tools) $(SOONG_ZIP)
+$(general_tests_zip) : PRIVATE_HOST_SHARED_LIBS := $(my_host_shared_lib_for_general_tests)
+$(general_tests_zip) : $(COMPATIBILITY.general-tests.FILES) $(general_tests_tools) $(my_host_shared_lib_for_general_tests) $(SOONG_ZIP)
 	rm -rf $(PRIVATE_INTERMEDIATES_DIR)
 	rm -f $@ $(PRIVATE_general_tests_list_zip)
 	mkdir -p $(PRIVATE_INTERMEDIATES_DIR) $(PRIVATE_INTERMEDIATES_DIR)/tools
 	echo $(sort $(COMPATIBILITY.general-tests.FILES)) | tr " " "\n" > $(PRIVATE_INTERMEDIATES_DIR)/list
 	grep $(HOST_OUT_TESTCASES) $(PRIVATE_INTERMEDIATES_DIR)/list > $(PRIVATE_INTERMEDIATES_DIR)/host.list || true
 	grep $(TARGET_OUT_TESTCASES) $(PRIVATE_INTERMEDIATES_DIR)/list > $(PRIVATE_INTERMEDIATES_DIR)/target.list || true
+	$(hide) for shared_lib in $(PRIVATE_HOST_SHARED_LIBS); do \
+	  echo $$shared_lib >> $(PRIVATE_INTERMEDIATES_DIR)/host.list; \
+	done
 	cp -fp $(PRIVATE_TOOLS) $(PRIVATE_INTERMEDIATES_DIR)/tools/
 	$(SOONG_ZIP) -d -o $@ \
 	  -P host -C $(PRIVATE_INTERMEDIATES_DIR) -D $(PRIVATE_INTERMEDIATES_DIR)/tools \
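
The filtering above keeps a single copy rule per destination file: libraries that
device-tests already copies are reused by destination, and only the remaining entries
get new copy rules from general-tests. A small Python sketch of that split
(illustrative only; entries use the src:dest form that word-colon splits):

    def split_host_shared_libs(general_tests, device_tests):
        """Split "src:dest" entries into destinations device-tests already copies
        and entries general-tests still needs to copy itself."""
        already_copied = [e.split(":", 1)[1] for e in general_tests if e in device_tests]
        still_needed = [e for e in general_tests if e not in device_tests]
        return already_copied, still_needed

    shared, new = split_host_shared_libs(
        general_tests=["out/host/libfoo.so:host/testcases/libfoo.so",
                       "out/host/libbar.so:host/testcases/libbar.so"],
        device_tests=["out/host/libfoo.so:host/testcases/libfoo.so"])
    # shared == ["host/testcases/libfoo.so"]
    # new    == ["out/host/libbar.so:host/testcases/libbar.so"]
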
diff --git a/envsetup.sh b/envsetup.sh
index a4d950e..40f6c46 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -1109,7 +1109,7 @@
     Darwin)
         function sgrep()
         {
-            find -E . -name .repo -prune -o -name .git -prune -o  -type f -iregex '.*\.(c|h|cc|cpp|S|java|xml|sh|mk|aidl|vts)' \
+            find -E . -name .repo -prune -o -name .git -prune -o  -type f -iregex '.*\.(c|h|cc|cpp|hpp|S|java|xml|sh|mk|aidl|vts)' \
                 -exec grep --color -n "$@" {} +
         }
 
@@ -1117,7 +1117,7 @@
     *)
         function sgrep()
         {
-            find . -name .repo -prune -o -name .git -prune -o  -type f -iregex '.*\.\(c\|h\|cc\|cpp\|S\|java\|xml\|sh\|mk\|aidl\|vts\)' \
+            find . -name .repo -prune -o -name .git -prune -o  -type f -iregex '.*\.\(c\|h\|cc\|cpp\|hpp\|S\|java\|xml\|sh\|mk\|aidl\|vts\)' \
                 -exec grep --color -n "$@" {} +
         }
         ;;
@@ -1182,7 +1182,7 @@
 
         function treegrep()
         {
-            find -E . -name .repo -prune -o -name .git -prune -o -type f -iregex '.*\.(c|h|cpp|S|java|xml)' \
+            find -E . -name .repo -prune -o -name .git -prune -o -type f -iregex '.*\.(c|h|cpp|hpp|S|java|xml)' \
                 -exec grep --color -n -i "$@" {} +
         }
 
@@ -1196,7 +1196,7 @@
 
         function treegrep()
         {
-            find . -name .repo -prune -o -name .git -prune -o -regextype posix-egrep -iregex '.*\.(c|h|cpp|S|java|xml)' -type f \
+            find . -name .repo -prune -o -name .git -prune -o -regextype posix-egrep -iregex '.*\.(c|h|cpp|hpp|S|java|xml)' -type f \
                 -exec grep --color -n -i "$@" {} +
         }
 
@@ -1567,10 +1567,19 @@
 
 function atest()
 {
-    # TODO (sbasi): Replace this to be a destination in the build out when & if
-    # atest is built by the build system. (This will be necessary if it ever
-    # depends on external pip projects).
-    "$(gettop)"/tools/tradefederation/core/atest/atest.py "$@"
+    # Prefer the locally built atest, then the prebuilt, then the source checkout.
+    local os_arch=$(get_build_var HOST_PREBUILT_TAG)
+    local built_atest=${ANDROID_HOST_OUT}/bin/atest
+    local prebuilt_atest="$(gettop)"/prebuilts/asuite/atest/$os_arch/atest
+    if [[ -x $built_atest ]]; then
+        $built_atest "$@"
+    elif [[ -x $prebuilt_atest ]]; then
+        $prebuilt_atest "$@"
+    else
+        # TODO: once the prebuilt atest is released, remove the source code section
+        # and change the location of atest_completion.sh in addcompletions().
+        "$(gettop)"/tools/tradefederation/core/atest/atest.py "$@"
+    fi
 }
 
 # Zsh needs bashcompinit called to support bash-style completion.
diff --git a/target/product/OWNERS b/target/product/OWNERS
new file mode 100644
index 0000000..1c74859
--- /dev/null
+++ b/target/product/OWNERS
@@ -0,0 +1 @@
+per-file runtime_libart.mk = agampe@google.com, calin@google.com, mast@google.com, ngeoffray@google.com, oth@google.com, rpl@google.com, sehr@google.com, vmarko@google.com
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index a3c9ac7..85bd136 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -68,6 +68,7 @@
     ExtServices \
     ExtShared \
     fastboot \
+    flags_health_check \
     framework \
     framework-res \
     framework-sysconfig.xml \
@@ -77,6 +78,7 @@
     heapprofd \
     heapprofd_client \
     gatekeeperd \
+    gpuservice \
     healthd \
     hid \
     hwservicemanager \
@@ -308,7 +310,7 @@
 # Packages included only for eng or userdebug builds, previously debug tagged
 PRODUCT_PACKAGES_DEBUG := \
     adb_keys \
-    apex_debug_key \
+    apex.test.key \
     iotop \
     logpersist.start \
     perfprofd \
diff --git a/target/product/mainline_system.mk b/target/product/mainline_system.mk
index b0edb56..3a8bd11 100644
--- a/target/product/mainline_system.mk
+++ b/target/product/mainline_system.mk
@@ -31,6 +31,7 @@
     RcsService \
     SafetyRegulatoryInfo \
     Stk \
+    TimeZoneUpdater \
 
 # OTA support
 PRODUCT_PACKAGES += \
diff --git a/target/product/security/Android.mk b/target/product/security/Android.mk
index 73ebd75..4142ea9 100644
--- a/target/product/security/Android.mk
+++ b/target/product/security/Android.mk
@@ -12,19 +12,6 @@
 include $(BUILD_PREBUILT)
 
 #######################################
-# apex_debug_key for eng/userdebug
-ifneq ($(filter eng userdebug,$(TARGET_BUILD_VARIANT)),)
-  include $(CLEAR_VARS)
-
-  LOCAL_MODULE := apex_debug_key
-  LOCAL_SRC_FILES := $(LOCAL_MODULE)
-  LOCAL_MODULE_CLASS := ETC
-  LOCAL_MODULE_PATH := $(TARGET_OUT)/etc/security/apex
-
-  include $(BUILD_PREBUILT)
-endif
-
-#######################################
 # adb key, if configured via PRODUCT_ADB_KEYS
 ifdef PRODUCT_ADB_KEYS
   ifneq ($(filter eng userdebug,$(TARGET_BUILD_VARIANT)),)
diff --git a/target/product/security/apex_debug_key b/target/product/security/apex_debug_key
deleted file mode 100644
index 28bc8f7..0000000
--- a/target/product/security/apex_debug_key
+++ /dev/null
Binary files differ
diff --git a/target/product/security/apex_debug_key.pem b/target/product/security/apex_debug_key.pem
deleted file mode 100644
index bd56778..0000000
--- a/target/product/security/apex_debug_key.pem
+++ /dev/null
@@ -1,51 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIJKgIBAAKCAgEAt4iSfTF+e2khGQf0bUzTMwWFsgaiQbwQB3cvyBlE9XekFXUt
-GdOEhC2J0p+930UoF6gjjRRrgGF+8K5iV1m3oEbB3qGz6UUOurvVkt4tq96e/Q5a
-ogCOZEuWHjZfs2tQUVNJJtptIp9+0cM768vdf+qnK2JNFIhBqSY0FhjVljKevMcM
-w2tWFRZnKPQ3JoRnWqi5CIauQtBcWRFKIApyf41uHGMjpQRd8aTGeLXBRTi/yD73
-HltuKwSF2SXpj1F+9j4stqskQvipjQnid/Wb+nN3CNgyrGuRrtGvz71WWYcK3DLM
-jvGLOl06QrN6a7ZfLUN4qQjJ6Is5SLTSw/sfFE7Fpcbg6/Geh+jSvChuo6EUtzoX
-Qu42HsVXhrJLQ9/AVTWNmGc9IDr4PMtDiQc4FN8MOpUtR6V/zwrZFoeR3PHl9Z7v
-uTxLIcQLIott0mAjPhbNgbFBs5HP1Z8TfFcyZWpShlx+aM1V2mzYQ7sgsWjFKMSQ
-wIUk/YZ9QK/H5WKjC5M0yxueCU0ocvWFaAZ4RyS/r/SUyQpvyNXNwUsdp1a8sNxp
-LP9U7FG64C+T791yoQJ0sKVbts5SEu/Tojw6miYbH6Fspdo2xxfCbrv6SAbkjlct
-afOnEepgTlHet0G+y0N7OZRJ9WRGyLJNgGjmmDy9XSYGAykwwe4Fv348D0cCAwEA
-AQKCAgBuFra/78NNpXbb++CK+20oCqTyb3Y+dd8rizuXDElH8Fb1JA9EkZLIckRc
-mcMbvPDal9mTU29UV6b8Ga4VdVRnCGpb76TqRKkcK3Vlnm3IzUWSx1xoFmtTD9/h
-CX6IMdPApHOZoaWbAg7hJfm4a9XWV9ukc1eG/GBeZPMTWhwr9vsugztNsQG2rnR8
-pVi7eupAADrVOWwn2bG7H1rWM04Q4rXswy7rWd48BzmhyGxA6FRpehNjGzbPCOx8
-n3gkpp7Ad/T8MVYT8fJKDmbQy/ue1EnPfVeQAwok0dRiiNDV7OH/yVzYVVzNSoSa
-4+uH1qHqlbE3u3TZT0GyMfzG38f4scsbvG/AhH1fuPsy4QcWyLlMV6KUnk3KPc3Q
-yOeRR82qndQMTYQ5/PFiilk7cNbTU0OBjuNpu/t1LIE2J2gGZ5Jw+g2NGtM/xsgC
-jOahpRYvZB8fZ/bSjirwwmSSU+v0ZoPDHtt75R/QxqwPG2jai8kaGr7GEXWJfrfv
-CktMnb6LoCyNiiiZSMUgdDHOQEkVNmt9fxiVaxsaIL4BygropwlD4WbuyRMevfYz
-EffvvmaqC24zJi8WzDszCNLgP/piNhXDyxZX+KaQXj0Do/tzWBBkO0OO6mVGOkX2
-6dadXfhOIggWO8K2lKCUKwWMO9LaKwSwZ4gzcc1a+U9rpE8kUQKCAQEA8lBGLzOL
-Ht8+d13SY+NdPbL6qGvoqsKd5BfIhaNbH04Cp2zQs2TWySxmV47df03pGUpQOCKn
-tFRxoczUrf1gfFDCCC95+A/crls8QJHG+MScTBH5U8Q0s9ReUo/0xaa55u77x5uS
-0fAtdnOdqP8/pf1fSXUJvyLW85LWdkge1c7jk7I5MnWVO2Ak9/GkuRgITSSgVdBa
-kr8nU1BCzDY0gOTWo5J1+NqqVH2eYfEI621iD4SAE3n2JrCC4K/Nt2enEJwup2TR
-ym15g9nClicUQP5Y67eDfqTZu1d0I0Ezl1tL8UPxcLI+ucN4V6KL8RvqTVMnGX/R
-s1FwkPVMQ6dKaQKCAQEAweZeggcSFukr+tTbnzDAHxg4YqiR+30wo7i8NadGu6W/
-EiAdcCdmZYMI9KKc+B/N3cuFqBnaSd7VM7XvINdwZRanRj56Ya8LvQMi0S9YPiRn
-T4TXC3EeewN5+SSO0Dkw83tW1PLqgSINy5ijBs5lGoIYMCC+GSA2DuRBiPpcfhqJ
-kmC9uFQvrsge8CC8Sb1wHCr0Wz34qhPoTff6ZV8wm11Jkb5+tT7PMS5Ft0sEBsxV
-R1JFtLNs0k/YpMb4/OrZFZZSIFCTUVPvHQ1/5BwumVnolBC4LORCaSk1xUOydU9h
-bZd4qzIpFteGLGGRT6nEWC1YejLAvcFHVJiKs1F2LwKCAQEAzgnwA8bCLvgIt5rx
-gLod2I7NkFRhPIHLm92VRf0HSHEe1Jo0Q7Yk5F56j00NjmgDItwLpg/hpfZ/wOLY
-nTFrz4kj0636+jESprcxXn4WQAV+GTjXVqDpZ1fW9EEwEriYLoNbV/kzOIwPPD9G
-+iJATrZJRb7dEMdhGy/qaB0fCxKmdDoBZKSSxjAUfzfbpv+GX4IbS5ykx07+81q1
-0crtjgQHdoLdCUN1ve4qtIEt4nHaBfPWq7jy0ycXwlH6jE74wajsCq4xrPy1bKXH
-TcHg+PrNRXF/wDoQYboVKL0ST0r0IixxqjAGIhLRy0KN1/CypBlmj8od12oSW1AZ
-DxW6sQKCAQEAtIMW8M5MVO/2dam8XFMySMBvncl5PjuqEIFnFjwIaaFAZEtpnIPR
-nCeFKtpIb+aL7TQP1hNbWPIOYfm6CUUH6dRRHeAEZvRjZS+KNlxxNkkFtM3itVA2
-JCd0YjFakxbrL4FfsRgEoPtnBGexPiDflvIOOqAA2btXGD3/lNofSXbDJHbTqMsX
-KQw9YSfYon2t5UtH+bmTyiKGXi/B+KXJxpnuZ7SEmY9DrHF7jcxUj0+jBKbfJf70
-DEcxVRW3rx2jw6kSA+t/enM9ZDqxGVfzOeit0UpPa9uEyAoJeQAxH20rMq+VMyub
-fRxgWOjsMtHFbKGqgPjG3uEU2vi4B4CLGQKCAQEA2Mr5f2AXPR8jca1+Id+CxZpU
-bgMML7gW31L4lGX9Teo9z+zSdN7sIwqe42Zla1N9wda8p5ribnJxwRdxcPL8bid5
-LLlls4xXD/jQCQCFL90X59Tm6VD6tm1VyCjL44nRwAqP4vJObSB5rTqJYtkfVmnp
-KERF5P0i5yv4Oox0ZOsThou9jtyl1dS50Td0Urhp4LhPdmpDPUq25K1sDDfnGFm6
-IcMPkVznRPUoKQCG9DSQcQqttkSV9Po+qfLa3aHtdndfe88Gd9uom8bsAMTZAfSZ
-D4YhqBHSLWrxvtQ8GxkaPITJv7hocwssdFRUj5/UJKJBgUXPBXEXh+fxlDaGQQ==
------END RSA PRIVATE KEY-----
diff --git a/target/product/vndk/Android.mk b/target/product/vndk/Android.mk
index 5d009f9..7953db0 100644
--- a/target/product/vndk/Android.mk
+++ b/target/product/vndk/Android.mk
@@ -42,6 +42,10 @@
 ifeq ($(TARGET_IS_64_BIT)|$(TARGET_2ND_ARCH),true|)
 # TODO(b/110429754) remove this condition when we support 64-bit-only device
 check-vndk-list: ;
+else ifeq ($(TARGET_BUILD_PDK),true)
+# b/118634643: don't check VNDK lib list when building PDK. Some libs (libandroid_net.so
+# and some render-script related ones) can't be built in PDK due to missing frameworks/base.
+check-vndk-list: ;
 else
 check-vndk-list: $(check-vndk-list-timestamp)
 endif
diff --git a/target/product/vndk/current.txt b/target/product/vndk/current.txt
index 6120e9d..624102a 100644
--- a/target/product/vndk/current.txt
+++ b/target/product/vndk/current.txt
@@ -121,6 +121,7 @@
 VNDK-core: android.hardware.power@1.2.so
 VNDK-core: android.hardware.power@1.3.so
 VNDK-core: android.hardware.radio.config@1.0.so
+VNDK-core: android.hardware.radio.config@1.1.so
 VNDK-core: android.hardware.radio.deprecated@1.0.so
 VNDK-core: android.hardware.radio@1.0.so
 VNDK-core: android.hardware.radio@1.1.so
diff --git a/tools/buildinfo.sh b/tools/buildinfo.sh
index 5a54462..c2e6f85 100755
--- a/tools/buildinfo.sh
+++ b/tools/buildinfo.sh
@@ -59,6 +59,5 @@
 if [ -n "$BUILD_THUMBPRINT" ] ; then
   echo "ro.build.thumbprint=$BUILD_THUMBPRINT"
 fi
-echo "ro.build.characteristics=$TARGET_AAPT_CHARACTERISTICS"
 
 echo "# end build properties"
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index b083ffd..d32090a 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -221,8 +221,8 @@
             adjusted_blocks))
 
 
-def BuildImage(in_dir, prop_dict, out_file, target_out=None):
-  """Builds an image for the files under in_dir and writes it to out_file.
+def BuildImageMkfs(in_dir, prop_dict, out_file, target_out, fs_config):
+  """Builds a pure image for the files under in_dir and writes it to out_file.
 
   Args:
     in_dir: Path to input directory.
@@ -233,104 +233,15 @@
         points to the /system directory under PRODUCT_OUT. fs_config (the one
         under system/core/libcutils) reads device specific FS config files from
         there.
+    fs_config: The fs_config file that specifies the ownership, permissions and
+        capabilities of the files in the image.
 
   Raises:
     BuildImageError: On build image failures.
   """
-  original_mount_point = prop_dict["mount_point"]
-  in_dir, fs_config = SetUpInDirAndFsConfig(in_dir, prop_dict)
-
   build_command = []
   fs_type = prop_dict.get("fs_type", "")
   run_e2fsck = False
 
-  fs_spans_partition = True
-  if fs_type.startswith("squash"):
-    fs_spans_partition = False
-
-  is_verity_partition = "verity_block_device" in prop_dict
-  verity_supported = prop_dict.get("verity") == "true"
-  verity_fec_supported = prop_dict.get("verity_fec") == "true"
-
-  avb_footer_type = None
-  if prop_dict.get("avb_hash_enable") == "true":
-    avb_footer_type = "hash"
-  elif prop_dict.get("avb_hashtree_enable") == "true":
-    avb_footer_type = "hashtree"
-
-  if avb_footer_type:
-    avbtool = prop_dict.get("avb_avbtool")
-    avb_signing_args = prop_dict.get(
-        "avb_add_" + avb_footer_type + "_footer_args")
-
-  if (prop_dict.get("use_dynamic_partition_size") == "true" and
-      "partition_size" not in prop_dict):
-    # If partition_size is not defined, use output of `du' + reserved_size.
-    size = GetDiskUsage(in_dir)
-    logger.info(
-        "The tree size of %s is %d MB.", in_dir, size // BYTES_IN_MB)
-    # If not specified, give us 16MB margin for GetDiskUsage error ...
-    size += int(prop_dict.get("partition_reserved_size", BYTES_IN_MB * 16))
-    # Round this up to a multiple of 4K so that avbtool works
-    size = common.RoundUpTo4K(size)
-    # Adjust partition_size to add more space for AVB footer, to prevent
-    # it from consuming partition_reserved_size.
-    if avb_footer_type:
-      size = verity_utils.AVBCalcMinPartitionSize(
-          size,
-          lambda x: verity_utils.AVBCalcMaxImageSize(
-              avbtool, avb_footer_type, x, avb_signing_args))
-    prop_dict["partition_size"] = str(size)
-    if fs_type.startswith("ext"):
-      if "extfs_inode_count" not in prop_dict:
-        prop_dict["extfs_inode_count"] = str(GetInodeUsage(in_dir))
-      logger.info(
-          "First Pass based on estimates of %d MB and %s inodes.",
-          size // BYTES_IN_MB, prop_dict["extfs_inode_count"])
-      prop_dict["mount_point"] = original_mount_point
-      BuildImage(in_dir, prop_dict, out_file, target_out)
-      fs_dict = GetFilesystemCharacteristics(out_file)
-      block_size = int(fs_dict.get("Block size", "4096"))
-      free_size = int(fs_dict.get("Free blocks", "0")) * block_size
-      reserved_size = int(prop_dict.get("partition_reserved_size", 0))
-      if free_size <= reserved_size:
-        logger.info(
-            "Not worth reducing image %d <= %d.", free_size, reserved_size)
-      else:
-        size -= free_size + (free_size // 59)
-        size += reserved_size
-        if block_size <= 4096:
-          size = common.RoundUpTo4K(size)
-        else:
-          size = ((size + block_size - 1) // block_size) * block_size
-      extfs_inode_count = prop_dict["extfs_inode_count"]
-      inodes = int(fs_dict.get("Inode count", extfs_inode_count))
-      inodes -= int(fs_dict.get("Free inodes", "0"))
-      prop_dict["extfs_inode_count"] = str(inodes)
-      prop_dict["partition_size"] = str(size)
-      logger.info(
-          "Allocating %d Inodes for %s.", inodes, out_file)
-    logger.info(
-        "Allocating %d MB for %s.", size // BYTES_IN_MB, out_file)
-
-  prop_dict["image_size"] = prop_dict["partition_size"]
-
-  # Adjust the image size to make room for the hashes if this is to be verified.
-  if verity_supported and is_verity_partition:
-    partition_size = int(prop_dict.get("partition_size"))
-    image_size, verity_size = verity_utils.AdjustPartitionSizeForVerity(
-        partition_size, verity_fec_supported)
-    prop_dict["image_size"] = str(image_size)
-    prop_dict["verity_size"] = str(verity_size)
-
-  # Adjust the image size for AVB hash footer or AVB hashtree footer.
-  if avb_footer_type:
-    partition_size = prop_dict["partition_size"]
-    # avb_add_hash_footer_args or avb_add_hashtree_footer_args.
-    max_image_size = verity_utils.AVBCalcMaxImageSize(
-        avbtool, avb_footer_type, partition_size, avb_signing_args)
-    prop_dict["image_size"] = str(max_image_size)
-
   if fs_type.startswith("ext"):
     build_command = [prop_dict["ext_mkuserimg"]]
     if "extfs_sparse_flag" in prop_dict:
@@ -423,8 +334,8 @@
       logger.exception("Failed to compute disk usage with du")
       du_str = "unknown"
     print(
-        "Out of space? The tree size of {} is {}, with reserved space of {} "
-        "bytes ({} MB).".format(
+        "Out of space? Out of inodes? The tree size of {} is {}, "
+        "with reserved space of {} bytes ({} MB).".format(
             in_dir, du_str,
             int(prop_dict.get("partition_reserved_size", 0)),
             int(prop_dict.get("partition_reserved_size", 0)) // BYTES_IN_MB))
@@ -437,38 +348,6 @@
             int(prop_dict["partition_size"]) // BYTES_IN_MB))
     raise
 
-  # Check if there's enough headroom space available for ext4 image.
-  if "partition_headroom" in prop_dict and fs_type.startswith("ext4"):
-    CheckHeadroom(mkfs_output, prop_dict)
-
-  if not fs_spans_partition:
-    mount_point = prop_dict.get("mount_point")
-    image_size = int(prop_dict["image_size"])
-    sparse_image_size = verity_utils.GetSimgSize(out_file)
-    if sparse_image_size > image_size:
-      raise BuildImageError(
-          "Error: {} image size of {} is larger than partition size of "
-          "{}".format(mount_point, sparse_image_size, image_size))
-    if verity_supported and is_verity_partition:
-      verity_utils.ZeroPadSimg(out_file, image_size - sparse_image_size)
-
-  # Create the verified image if this is to be verified.
-  if verity_supported and is_verity_partition:
-    verity_utils.MakeVerityEnabledImage(
-        out_file, verity_fec_supported, prop_dict)
-
-  # Add AVB HASH or HASHTREE footer (metadata).
-  if avb_footer_type:
-    partition_size = prop_dict["partition_size"]
-    partition_name = prop_dict["partition_name"]
-    # key_path and algorithm are only available when chain partition is used.
-    key_path = prop_dict.get("avb_key_path")
-    algorithm = prop_dict.get("avb_algorithm")
-    salt = prop_dict.get("avb_salt")
-    verity_utils.AVBAddFooter(
-        out_file, avbtool, avb_footer_type, partition_size, partition_name,
-        key_path, algorithm, salt, avb_signing_args)
-
   if run_e2fsck and prop_dict.get("skip_fsck") != "true":
     unsparse_image = UnsparseImage(out_file, replace=False)
 
@@ -479,6 +358,112 @@
     finally:
       os.remove(unsparse_image)
 
+  return mkfs_output
+
+
+def BuildImage(in_dir, prop_dict, out_file, target_out=None):
+  """Builds an image for the files under in_dir and writes it to out_file.
+
+  Args:
+    in_dir: Path to input directory.
+    prop_dict: A property dict that contains info like partition size. Values
+        will be updated with computed values.
+    out_file: The output image file.
+    target_out: Path to the TARGET_OUT directory as in Makefile. It actually
+        points to the /system directory under PRODUCT_OUT. fs_config (the one
+        under system/core/libcutils) reads device specific FS config files from
+        there.
+
+  Raises:
+    BuildImageError: On build image failures.
+  """
+  in_dir, fs_config = SetUpInDirAndFsConfig(in_dir, prop_dict)
+
+  build_command = []
+  fs_type = prop_dict.get("fs_type", "")
+
+  fs_spans_partition = True
+  if fs_type.startswith("squash"):
+    fs_spans_partition = False
+
+  # Get a builder for creating an image that's to be verified by Verified Boot,
+  # or None if not applicable.
+  verity_image_builder = verity_utils.CreateVerityImageBuilder(prop_dict)
+
+  if (prop_dict.get("use_dynamic_partition_size") == "true" and
+      "partition_size" not in prop_dict):
+    # If partition_size is not defined, use output of `du' + reserved_size.
+    size = GetDiskUsage(in_dir)
+    logger.info(
+        "The tree size of %s is %d MB.", in_dir, size // BYTES_IN_MB)
+    # If not specified, allow a 16MB margin to absorb GetDiskUsage estimation error.
+    reserved_size = int(prop_dict.get("partition_reserved_size", BYTES_IN_MB * 16))
+    partition_headroom = int(prop_dict.get("partition_headroom", 0))
+    if fs_type.startswith("ext4") and partition_headroom > reserved_size:
+      reserved_size = partition_headroom
+    size += reserved_size
+    # Round this up to a multiple of 4K so that avbtool works
+    size = common.RoundUpTo4K(size)
+    if fs_type.startswith("ext"):
+      prop_dict["partition_size"] = str(size)
+      prop_dict["image_size"] = str(size)
+      if "extfs_inode_count" not in prop_dict:
+        prop_dict["extfs_inode_count"] = str(GetInodeUsage(in_dir))
+      logger.info(
+          "First Pass based on estimates of %d MB and %s inodes.",
+          size // BYTES_IN_MB, prop_dict["extfs_inode_count"])
+      BuildImageMkfs(in_dir, prop_dict, out_file, target_out, fs_config)
+      fs_dict = GetFilesystemCharacteristics(out_file)
+      os.remove(out_file)
+      block_size = int(fs_dict.get("Block size", "4096"))
+      free_size = int(fs_dict.get("Free blocks", "0")) * block_size
+      reserved_size = int(prop_dict.get("partition_reserved_size", 0))
+      partition_headroom = int(fs_dict.get("partition_headroom", 0))
+      if fs_type.startswith("ext4") and partition_headroom > reserved_size:
+        reserved_size = partition_headroom
+      if free_size <= reserved_size:
+        logger.info(
+            "Not worth reducing image %d <= %d.", free_size, reserved_size)
+      else:
+        size -= free_size
+        size += reserved_size
+        if block_size <= 4096:
+          size = common.RoundUpTo4K(size)
+        else:
+          size = ((size + block_size - 1) // block_size) * block_size
+      extfs_inode_count = prop_dict["extfs_inode_count"]
+      inodes = int(fs_dict.get("Inode count", extfs_inode_count))
+      inodes -= int(fs_dict.get("Free inodes", "0"))
+      prop_dict["extfs_inode_count"] = str(inodes)
+      prop_dict["partition_size"] = str(size)
+      logger.info(
+          "Allocating %d Inodes for %s.", inodes, out_file)
+    if verity_image_builder:
+      size = verity_image_builder.CalculateDynamicPartitionSize(size)
+    prop_dict["partition_size"] = str(size)
+    logger.info(
+        "Allocating %d MB for %s.", size // BYTES_IN_MB, out_file)
+
+  prop_dict["image_size"] = prop_dict["partition_size"]
+
+  # Adjust the image size to make room for the hashes if this is to be verified.
+  if verity_image_builder:
+    max_image_size = verity_image_builder.CalculateMaxImageSize()
+    prop_dict["image_size"] = str(max_image_size)
+
+  mkfs_output = BuildImageMkfs(in_dir, prop_dict, out_file, target_out, fs_config)
+
+  # Check if there's enough headroom space available for ext4 image.
+  if "partition_headroom" in prop_dict and fs_type.startswith("ext4"):
+    CheckHeadroom(mkfs_output, prop_dict)
+
+  if not fs_spans_partition and verity_image_builder:
+    verity_image_builder.PadSparseImage(out_file)
+
+  # Create the verified image if this is to be verified.
+  if verity_image_builder:
+    verity_image_builder.Build(out_file)
+
 
 def ImagePropFromGlobalDict(glob_dict, mount_point):
   """Build an image property dictionary from the global dictionary.
diff --git a/tools/releasetools/test_validate_target_files.py b/tools/releasetools/test_validate_target_files.py
index d778d11..a6a8876 100644
--- a/tools/releasetools/test_validate_target_files.py
+++ b/tools/releasetools/test_validate_target_files.py
@@ -24,6 +24,7 @@
 import test_utils
 import verity_utils
 from validate_target_files import ValidateVerifiedBootImages
+from verity_utils import CreateVerityImageBuilder
 
 
 class ValidateTargetFilesTest(test_utils.ReleaseToolsTestCase):
@@ -107,10 +108,16 @@
         options)
 
   def _generate_system_image(self, output_file):
-    verity_fec = True
-    partition_size = 1024 * 1024
-    image_size, verity_size = verity_utils.AdjustPartitionSizeForVerity(
-        partition_size, verity_fec)
+    prop_dict = {
+        'partition_size': str(1024 * 1024),
+        'verity': 'true',
+        'verity_block_device': '/dev/block/system',
+        'verity_key' : os.path.join(self.testdata_dir, 'testkey'),
+        'verity_fec': "true",
+        'verity_signer_cmd': 'verity_signer',
+    }
+    verity_image_builder = CreateVerityImageBuilder(prop_dict)
+    image_size = verity_image_builder.CalculateMaxImageSize()
 
     # Use an empty root directory.
     system_root = common.MakeTempDir()
@@ -124,15 +131,7 @@
             stdoutdata))
 
     # Append the verity metadata.
-    prop_dict = {
-        'partition_size' : str(partition_size),
-        'image_size' : str(image_size),
-        'verity_block_device' : '/dev/block/system',
-        'verity_key' : os.path.join(self.testdata_dir, 'testkey'),
-        'verity_signer_cmd' : 'verity_signer',
-        'verity_size' : str(verity_size),
-    }
-    verity_utils.MakeVerityEnabledImage(output_file, verity_fec, prop_dict)
+    verity_image_builder.Build(output_file)
 
   def test_ValidateVerifiedBootImages_systemImage(self):
     input_tmp = common.MakeTempDir()
diff --git a/tools/releasetools/test_verity_utils.py b/tools/releasetools/test_verity_utils.py
index 0988d8e..e0607c8 100644
--- a/tools/releasetools/test_verity_utils.py
+++ b/tools/releasetools/test_verity_utils.py
@@ -16,6 +16,7 @@
 
 """Unittests for verity_utils.py."""
 
+import copy
 import math
 import os.path
 import random
@@ -25,10 +26,11 @@
 from rangelib import RangeSet
 from test_utils import get_testdata_dir, ReleaseToolsTestCase
 from verity_utils import (
-    AdjustPartitionSizeForVerity, AVBCalcMinPartitionSize, BLOCK_SIZE,
-    CreateHashtreeInfoGenerator, HashtreeInfo, MakeVerityEnabledImage,
+    CreateHashtreeInfoGenerator, CreateVerityImageBuilder, HashtreeInfo,
     VerifiedBootVersion1HashtreeInfoGenerator)
 
+BLOCK_SIZE = common.BLOCK_SIZE
+
 
 class VerifiedBootVersion1HashtreeInfoGeneratorTest(ReleaseToolsTestCase):
 
@@ -64,8 +66,17 @@
 
   def _generate_image(self):
     partition_size = 1024 * 1024
-    adjusted_size, verity_size = AdjustPartitionSizeForVerity(
-        partition_size, True)
+    prop_dict = {
+        'partition_size': str(partition_size),
+        'verity': 'true',
+        'verity_block_device': '/dev/block/system',
+        'verity_key': os.path.join(self.testdata_dir, 'testkey'),
+        'verity_fec': 'true',
+        'verity_signer_cmd': 'verity_signer',
+    }
+    verity_image_builder = CreateVerityImageBuilder(prop_dict)
+    self.assertIsNotNone(verity_image_builder)
+    adjusted_size = verity_image_builder.CalculateMaxImageSize()
 
     raw_image = ""
     for i in range(adjusted_size):
@@ -74,15 +85,7 @@
     output_file = self._create_simg(raw_image)
 
     # Append the verity metadata.
-    prop_dict = {
-        'partition_size': str(partition_size),
-        'image_size': str(adjusted_size),
-        'verity_block_device': '/dev/block/system',
-        'verity_key': os.path.join(self.testdata_dir, 'testkey'),
-        'verity_signer_cmd': 'verity_signer',
-        'verity_size': str(verity_size),
-    }
-    MakeVerityEnabledImage(output_file, True, prop_dict)
+    verity_image_builder.Build(output_file)
 
     return output_file
 
@@ -163,23 +166,174 @@
     self.assertEqual(self.expected_root_hash, info.root_hash)
 
 
-class VerityUtilsTest(ReleaseToolsTestCase):
+class VerifiedBootVersion1VerityImageBuilderTest(ReleaseToolsTestCase):
 
-  def setUp(self):
-    # To test AVBCalcMinPartitionSize(), by using 200MB to 2GB image size.
+  DEFAULT_PARTITION_SIZE = 4096 * 1024
+  DEFAULT_PROP_DICT = {
+      'partition_size': str(DEFAULT_PARTITION_SIZE),
+      'verity': 'true',
+      'verity_block_device': '/dev/block/system',
+      'verity_key': os.path.join(get_testdata_dir(), 'testkey'),
+      'verity_fec': 'true',
+      'verity_signer_cmd': 'verity_signer',
+  }
+
+  def test_init(self):
+    prop_dict = copy.deepcopy(self.DEFAULT_PROP_DICT)
+    verity_image_builder = CreateVerityImageBuilder(prop_dict)
+    self.assertIsNotNone(verity_image_builder)
+    self.assertEqual(1, verity_image_builder.version)
+
+  def test_init_MissingProps(self):
+    prop_dict = copy.deepcopy(self.DEFAULT_PROP_DICT)
+    del prop_dict['verity']
+    self.assertIsNone(CreateVerityImageBuilder(prop_dict))
+
+    prop_dict = copy.deepcopy(self.DEFAULT_PROP_DICT)
+    del prop_dict['verity_block_device']
+    self.assertIsNone(CreateVerityImageBuilder(prop_dict))
+
+  def test_CalculateMaxImageSize(self):
+    verity_image_builder = CreateVerityImageBuilder(self.DEFAULT_PROP_DICT)
+    size = verity_image_builder.CalculateMaxImageSize()
+    self.assertLess(size, self.DEFAULT_PARTITION_SIZE)
+
+    # Same result by explicitly passing the partition size.
+    self.assertEqual(
+        verity_image_builder.CalculateMaxImageSize(),
+        verity_image_builder.CalculateMaxImageSize(
+            self.DEFAULT_PARTITION_SIZE))
+
+  @staticmethod
+  def _BuildAndVerify(prop, verify_key):
+    verity_image_builder = CreateVerityImageBuilder(prop)
+    image_size = verity_image_builder.CalculateMaxImageSize()
+
+    # Build the sparse image with verity metadata.
+    input_dir = common.MakeTempDir()
+    image = common.MakeTempFile(suffix='.img')
+    cmd = ['mkuserimg_mke2fs', input_dir, image, 'ext4', '/system',
+           str(image_size), '-j', '0', '-s']
+    common.RunAndCheckOutput(cmd)
+    verity_image_builder.Build(image)
+
+    # Verify the verity metadata.
+    cmd = ['verity_verifier', image, '-mincrypt', verify_key]
+    common.RunAndCheckOutput(cmd)
+
+  def test_Build(self):
+    self._BuildAndVerify(
+        self.DEFAULT_PROP_DICT,
+        os.path.join(get_testdata_dir(), 'testkey_mincrypt'))
+
+  def test_Build_SanityCheck(self):
+    # A sanity check for the test itself: the image shouldn't be verifiable
+    # with the wrong key.
+    self.assertRaises(
+        common.ExternalError,
+        self._BuildAndVerify,
+        self.DEFAULT_PROP_DICT,
+        os.path.join(get_testdata_dir(), 'verity_mincrypt'))
+
+  def test_Build_FecDisabled(self):
+    prop_dict = copy.deepcopy(self.DEFAULT_PROP_DICT)
+    del prop_dict['verity_fec']
+    self._BuildAndVerify(
+        prop_dict,
+        os.path.join(get_testdata_dir(), 'testkey_mincrypt'))
+
+  def test_Build_SquashFs(self):
+    verity_image_builder = CreateVerityImageBuilder(self.DEFAULT_PROP_DICT)
+    verity_image_builder.CalculateMaxImageSize()
+
+    # Build the sparse image with verity metadata.
+    input_dir = common.MakeTempDir()
+    image = common.MakeTempFile(suffix='.img')
+    cmd = ['mksquashfsimage.sh', input_dir, image, '-s']
+    common.RunAndCheckOutput(cmd)
+    verity_image_builder.PadSparseImage(image)
+    verity_image_builder.Build(image)
+
+    # Verify the verity metadata.
+    cmd = ["verity_verifier", image, '-mincrypt',
+           os.path.join(get_testdata_dir(), 'testkey_mincrypt')]
+    common.RunAndCheckOutput(cmd)
+
+
+class VerifiedBootVersion2VerityImageBuilderTest(ReleaseToolsTestCase):
+
+  DEFAULT_PROP_DICT = {
+      'partition_size': str(4096 * 1024),
+      'partition_name': 'system',
+      'avb_avbtool': 'avbtool',
+      'avb_hashtree_enable': 'true',
+      'avb_add_hashtree_footer_args': '',
+  }
+
+  def test_init(self):
+    prop_dict = copy.deepcopy(self.DEFAULT_PROP_DICT)
+    verity_image_builder = CreateVerityImageBuilder(prop_dict)
+    self.assertIsNotNone(verity_image_builder)
+    self.assertEqual(2, verity_image_builder.version)
+
+  def test_init_MissingProps(self):
+    prop_dict = copy.deepcopy(self.DEFAULT_PROP_DICT)
+    del prop_dict['avb_hashtree_enable']
+    verity_image_builder = CreateVerityImageBuilder(prop_dict)
+    self.assertIsNone(verity_image_builder)
+
+  def test_Build(self):
+    prop_dict = copy.deepcopy(self.DEFAULT_PROP_DICT)
+    verity_image_builder = CreateVerityImageBuilder(prop_dict)
+    self.assertIsNotNone(verity_image_builder)
+    self.assertEqual(2, verity_image_builder.version)
+
+    input_dir = common.MakeTempDir()
+    image_dir = common.MakeTempDir()
+    system_image = os.path.join(image_dir, 'system.img')
+    system_image_size = verity_image_builder.CalculateMaxImageSize()
+    cmd = ['mkuserimg_mke2fs', input_dir, system_image, 'ext4', '/system',
+           str(system_image_size), '-j', '0', '-s']
+    common.RunAndCheckOutput(cmd)
+    verity_image_builder.Build(system_image)
+
+    # Additionally make vbmeta image so that we can verify with avbtool.
+    vbmeta_image = os.path.join(image_dir, 'vbmeta.img')
+    cmd = ['avbtool', 'make_vbmeta_image', '--include_descriptors_from_image',
+           system_image, '--output', vbmeta_image]
+    common.RunAndCheckOutput(cmd)
+
+    # Verify the verity metadata.
+    cmd = ['avbtool', 'verify_image', '--image', vbmeta_image]
+    common.RunAndCheckOutput(cmd)
+
+  def _test_CalculateMinPartitionSize_SetUp(self):
+    # To test CalculateMinPartitionSize(), use image sizes from 200MB to 2GB.
     #   -  51200 = 200MB * 1024 * 1024 / 4096
     #   - 524288 = 2GB * 1024 * 1024 * 1024 / 4096
-    self._image_sizes = [BLOCK_SIZE * random.randint(51200, 524288) + offset
-                         for offset in range(BLOCK_SIZE)]
+    image_sizes = [BLOCK_SIZE * random.randint(51200, 524288) + offset
+                   for offset in range(BLOCK_SIZE)]
 
-  def test_AVBCalcMinPartitionSize_LinearFooterSize(self):
+    prop_dict = {
+        'partition_size': None,
+        'partition_name': 'system',
+        'avb_avbtool': 'avbtool',
+        'avb_hashtree_enable': 'true',
+        'avb_add_hashtree_footer_args': None,
+    }
+    builder = CreateVerityImageBuilder(prop_dict)
+    self.assertEqual(2, builder.version)
+    return image_sizes, builder
+
+  def test_CalculateMinPartitionSize_LinearFooterSize(self):
     """Tests with footer size which is linear to partition size."""
-    for image_size in self._image_sizes:
+    image_sizes, builder = self._test_CalculateMinPartitionSize_SetUp()
+    for image_size in image_sizes:
       for ratio in 0.95, 0.56, 0.22:
         expected_size = common.RoundUpTo4K(int(math.ceil(image_size / ratio)))
         self.assertEqual(
             expected_size,
-            AVBCalcMinPartitionSize(
+            builder.CalculateMinPartitionSize(
                 image_size, lambda x, ratio=ratio: int(x * ratio)))
 
   def test_AVBCalcMinPartitionSize_SlowerGrowthFooterSize(self):
@@ -190,8 +344,10 @@
       # Minus footer size to return max image size.
       return partition_size - int(math.pow(partition_size, 0.95))
 
-    for image_size in self._image_sizes:
-      min_partition_size = AVBCalcMinPartitionSize(image_size, _SizeCalculator)
+    image_sizes, builder = self._test_CalculateMinPartitionSize_SetUp()
+    for image_size in image_sizes:
+      min_partition_size = builder.CalculateMinPartitionSize(
+          image_size, _SizeCalculator)
       # Checks min_partition_size can accommodate image_size.
       self.assertGreaterEqual(
           _SizeCalculator(min_partition_size),
@@ -201,7 +357,7 @@
           _SizeCalculator(min_partition_size - BLOCK_SIZE),
           image_size)
 
-  def test_AVBCalcMinPartitionSize_FasterGrowthFooterSize(self):
+  def test_CalculateMinPartitionSize_FasterGrowthFooterSize(self):
     """Tests with footer size which grows faster than partition size."""
 
     def _SizeCalculator(partition_size):
@@ -210,8 +366,10 @@
       # footer size grows faster than partition size.
       return int(math.pow(partition_size, 0.95))
 
-    for image_size in self._image_sizes:
-      min_partition_size = AVBCalcMinPartitionSize(image_size, _SizeCalculator)
+    image_sizes, builder = self._test_CalculateMinPartitionSize_SetUp()
+    for image_size in image_sizes:
+      min_partition_size = builder.CalculateMinPartitionSize(
+          image_size, _SizeCalculator)
       # Checks min_partition_size can accommodate image_size.
       self.assertGreaterEqual(
           _SizeCalculator(min_partition_size),
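
The tests above exercise the new builder interface the same way build_image.py now
does: create a builder from the property dict, ask it for the maximum filesystem size,
produce the raw image, then let the builder append the verity or AVB metadata. A
condensed sketch of that usage pattern (assumes the releasetools modules are on
PYTHONPATH; the mkuserimg_mke2fs step is elided):

    from verity_utils import CreateVerityImageBuilder

    prop_dict = {
        'partition_size': str(4096 * 1024),
        'partition_name': 'system',
        'avb_avbtool': 'avbtool',
        'avb_hashtree_enable': 'true',
        'avb_add_hashtree_footer_args': '',
    }
    builder = CreateVerityImageBuilder(prop_dict)   # None if verity isn't enabled
    max_image_size = builder.CalculateMaxImageSize()
    # ... build an ext4 image of at most max_image_size into system.img ...
    builder.Build('system.img')                     # appends the hashtree footer
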
diff --git a/tools/releasetools/testdata/verity_mincrypt b/tools/releasetools/testdata/verity_mincrypt
new file mode 100644
index 0000000..31982d9
--- /dev/null
+++ b/tools/releasetools/testdata/verity_mincrypt
Binary files differ
diff --git a/tools/releasetools/verity_utils.py b/tools/releasetools/verity_utils.py
index 00af296..3a58755 100644
--- a/tools/releasetools/verity_utils.py
+++ b/tools/releasetools/verity_utils.py
@@ -39,30 +39,30 @@
     Exception.__init__(self, message)
 
 
-def GetVerityFECSize(partition_size):
-  cmd = ["fec", "-s", str(partition_size)]
+def GetVerityFECSize(image_size):
+  cmd = ["fec", "-s", str(image_size)]
   output = common.RunAndCheckOutput(cmd, verbose=False)
   return int(output)
 
 
-def GetVerityTreeSize(partition_size):
-  cmd = ["build_verity_tree", "-s", str(partition_size)]
+def GetVerityTreeSize(image_size):
+  cmd = ["build_verity_tree", "-s", str(image_size)]
   output = common.RunAndCheckOutput(cmd, verbose=False)
   return int(output)
 
 
-def GetVerityMetadataSize(partition_size):
-  cmd = ["build_verity_metadata.py", "size", str(partition_size)]
+def GetVerityMetadataSize(image_size):
+  cmd = ["build_verity_metadata.py", "size", str(image_size)]
   output = common.RunAndCheckOutput(cmd, verbose=False)
   return int(output)
 
 
-def GetVeritySize(partition_size, fec_supported):
-  verity_tree_size = GetVerityTreeSize(partition_size)
-  verity_metadata_size = GetVerityMetadataSize(partition_size)
+def GetVeritySize(image_size, fec_supported):
+  verity_tree_size = GetVerityTreeSize(image_size)
+  verity_metadata_size = GetVerityMetadataSize(image_size)
   verity_size = verity_tree_size + verity_metadata_size
   if fec_supported:
-    fec_size = GetVerityFECSize(partition_size + verity_size)
+    fec_size = GetVerityFECSize(image_size + verity_size)
     return verity_size + fec_size
   return verity_size
 
@@ -79,54 +79,6 @@
   simg.AppendFillChunk(0, blocks)
 
 
-def AdjustPartitionSizeForVerity(partition_size, fec_supported):
-  """Modifies the provided partition size to account for the verity metadata.
-
-  This information is used to size the created image appropriately.
-
-  Args:
-    partition_size: the size of the partition to be verified.
-
-  Returns:
-    A tuple of the size of the partition adjusted for verity metadata, and
-    the size of verity metadata.
-  """
-  key = "%d %d" % (partition_size, fec_supported)
-  if key in AdjustPartitionSizeForVerity.results:
-    return AdjustPartitionSizeForVerity.results[key]
-
-  hi = partition_size
-  if hi % BLOCK_SIZE != 0:
-    hi = (hi // BLOCK_SIZE) * BLOCK_SIZE
-
-  # verity tree and fec sizes depend on the partition size, which
-  # means this estimate is always going to be unnecessarily small
-  verity_size = GetVeritySize(hi, fec_supported)
-  lo = partition_size - verity_size
-  result = lo
-
-  # do a binary search for the optimal size
-  while lo < hi:
-    i = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
-    v = GetVeritySize(i, fec_supported)
-    if i + v <= partition_size:
-      if result < i:
-        result = i
-        verity_size = v
-      lo = i + BLOCK_SIZE
-    else:
-      hi = i
-
-  logger.info(
-      "Adjusted partition size for verity, partition_size: %s, verity_size: %s",
-      result, verity_size)
-  AdjustPartitionSizeForVerity.results[key] = (result, verity_size)
-  return (result, verity_size)
-
-
-AdjustPartitionSizeForVerity.results = {}
-
-
 def BuildVerityFEC(sparse_image_path, verity_path, verity_fec_path,
                    padding_size):
   cmd = ["fec", "-e", "-p", str(padding_size), sparse_image_path,
@@ -168,6 +120,7 @@
   try:
     common.RunAndCheckOutput(cmd)
   except:
+    logger.exception(error_message)
     raise BuildVerityImageError(error_message)
 
 
@@ -182,190 +135,369 @@
       for line in input_file:
         out_file.write(line)
   except IOError:
+    logger.exception(error_message)
     raise BuildVerityImageError(error_message)
 
 
-def BuildVerifiedImage(data_image_path, verity_image_path,
-                       verity_metadata_path, verity_fec_path,
-                       padding_size, fec_supported):
-  Append(
-      verity_image_path, verity_metadata_path,
-      "Could not append verity metadata!")
-
-  if fec_supported:
-    # Build FEC for the entire partition, including metadata.
-    BuildVerityFEC(
-        data_image_path, verity_image_path, verity_fec_path, padding_size)
-    Append(verity_image_path, verity_fec_path, "Could not append FEC!")
-
-  Append2Simg(
-      data_image_path, verity_image_path, "Could not append verity data!")
-
-
-def MakeVerityEnabledImage(out_file, fec_supported, prop_dict):
-  """Creates an image that is verifiable using dm-verity.
+def CreateVerityImageBuilder(prop_dict):
+  """Returns a verity image builder based on the given build properties.
 
   Args:
-    out_file: the location to write the verifiable image at
-    prop_dict: a dictionary of properties required for image creation and
-               verification
-
-  Raises:
-    AssertionError: On invalid partition sizes.
-  """
-  # get properties
-  image_size = int(prop_dict["image_size"])
-  block_dev = prop_dict["verity_block_device"]
-  signer_key = prop_dict["verity_key"] + ".pk8"
-  if OPTIONS.verity_signer_path is not None:
-    signer_path = OPTIONS.verity_signer_path
-  else:
-    signer_path = prop_dict["verity_signer_cmd"]
-  signer_args = OPTIONS.verity_signer_args
-
-  tempdir_name = common.MakeTempDir(suffix="_verity_images")
-
-  # Get partial image paths.
-  verity_image_path = os.path.join(tempdir_name, "verity.img")
-  verity_metadata_path = os.path.join(tempdir_name, "verity_metadata.img")
-  verity_fec_path = os.path.join(tempdir_name, "verity_fec.img")
-
-  # Build the verity tree and get the root hash and salt.
-  root_hash, salt = BuildVerityTree(out_file, verity_image_path)
-
-  # Build the metadata blocks.
-  verity_disable = "verity_disable" in prop_dict
-  BuildVerityMetadata(
-      image_size, verity_metadata_path, root_hash, salt, block_dev, signer_path,
-      signer_key, signer_args, verity_disable)
-
-  # Build the full verified image.
-  partition_size = int(prop_dict["partition_size"])
-  verity_size = int(prop_dict["verity_size"])
-
-  padding_size = partition_size - image_size - verity_size
-  assert padding_size >= 0
-
-  BuildVerifiedImage(
-      out_file, verity_image_path, verity_metadata_path, verity_fec_path,
-      padding_size, fec_supported)
-
-
-def AVBCalcMaxImageSize(avbtool, footer_type, partition_size, additional_args):
-  """Calculates max image size for a given partition size.
-
-  Args:
-    avbtool: String with path to avbtool.
-    footer_type: 'hash' or 'hashtree' for generating footer.
-    partition_size: The size of the partition in question.
-    additional_args: Additional arguments to pass to "avbtool add_hash_footer"
-        or "avbtool add_hashtree_footer".
+    prop_dict: A dict that contains the build properties. In particular, this
+        function looks for the verity-related property values in it.
 
   Returns:
-    The maximum image size.
-
-  Raises:
-    BuildVerityImageError: On invalid image size.
+    A VerityImageBuilder instance for Verified Boot 1.0 or Verified Boot 2.0; or
+        None if the given build doesn't support Verified Boot.
   """
-  cmd = [avbtool, "add_%s_footer" % footer_type,
-         "--partition_size", str(partition_size), "--calc_max_image_size"]
-  cmd.extend(shlex.split(additional_args))
+  partition_size = prop_dict.get("partition_size")
+  # partition_size could be None at this point, if using dynamic partitions.
+  if partition_size:
+    partition_size = int(partition_size)
 
-  output = common.RunAndCheckOutput(cmd)
-  image_size = int(output)
-  if image_size <= 0:
-    raise BuildVerityImageError(
-        "Invalid max image size: {}".format(output))
-  return image_size
-
-
-def AVBCalcMinPartitionSize(image_size, size_calculator):
-  """Calculates min partition size for a given image size.
-
-  Args:
-    image_size: The size of the image in question.
-    size_calculator: The function to calculate max image size
-        for a given partition size.
-
-  Returns:
-    The minimum partition size required to accommodate the image size.
-  """
-  # Use image size as partition size to approximate final partition size.
-  image_ratio = size_calculator(image_size) / float(image_size)
-
-  # Prepare a binary search for the optimal partition size.
-  lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - BLOCK_SIZE
-
-  # Ensure lo is small enough: max_image_size should <= image_size.
-  delta = BLOCK_SIZE
-  max_image_size = size_calculator(lo)
-  while max_image_size > image_size:
-    image_ratio = max_image_size / float(lo)
-    lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - delta
-    delta *= 2
-    max_image_size = size_calculator(lo)
-
-  hi = lo + BLOCK_SIZE
-
-  # Ensure hi is large enough: max_image_size should >= image_size.
-  delta = BLOCK_SIZE
-  max_image_size = size_calculator(hi)
-  while max_image_size < image_size:
-    image_ratio = max_image_size / float(hi)
-    hi = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE + delta
-    delta *= 2
-    max_image_size = size_calculator(hi)
-
-  partition_size = hi
-
-  # Start to binary search.
-  while lo < hi:
-    mid = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
-    max_image_size = size_calculator(mid)
-    if max_image_size >= image_size:  # if mid can accommodate image_size
-      if mid < partition_size:  # if a smaller partition size is found
-        partition_size = mid
-      hi = mid
+  # Verified Boot 1.0
+  verity_supported = prop_dict.get("verity") == "true"
+  is_verity_partition = "verity_block_device" in prop_dict
+  if verity_supported and is_verity_partition:
+    if OPTIONS.verity_signer_path is not None:
+      signer_path = OPTIONS.verity_signer_path
     else:
-      lo = mid + BLOCK_SIZE
+      signer_path = prop_dict["verity_signer_cmd"]
+    return Version1VerityImageBuilder(
+        partition_size,
+        prop_dict["verity_block_device"],
+        prop_dict.get("verity_fec") == "true",
+        signer_path,
+        prop_dict["verity_key"] + ".pk8",
+        OPTIONS.verity_signer_args,
+        "verity_disable" in prop_dict)
 
-  logger.info(
-      "AVBCalcMinPartitionSize(%d): partition_size: %d.",
-      image_size, partition_size)
+  # Verified Boot 2.0
+  if (prop_dict.get("avb_hash_enable") == "true" or
+      prop_dict.get("avb_hashtree_enable") == "true"):
+    # key_path and algorithm are only available when chain partition is used.
+    key_path = prop_dict.get("avb_key_path")
+    algorithm = prop_dict.get("avb_algorithm")
+    if prop_dict.get("avb_hash_enable") == "true":
+      return VerifiedBootVersion2VerityImageBuilder(
+          prop_dict["partition_name"],
+          partition_size,
+          VerifiedBootVersion2VerityImageBuilder.AVB_HASH_FOOTER,
+          prop_dict["avb_avbtool"],
+          key_path,
+          algorithm,
+          prop_dict.get("avb_salt"),
+          prop_dict["avb_add_hash_footer_args"])
+    else:
+      return VerifiedBootVersion2VerityImageBuilder(
+          prop_dict["partition_name"],
+          partition_size,
+          VerifiedBootVersion2VerityImageBuilder.AVB_HASHTREE_FOOTER,
+          prop_dict["avb_avbtool"],
+          key_path,
+          algorithm,
+          prop_dict.get("avb_salt"),
+          prop_dict["avb_add_hashtree_footer_args"])
 
-  return partition_size
+  return None
 
 
-def AVBAddFooter(image_path, avbtool, footer_type, partition_size,
-                 partition_name, key_path, algorithm, salt,
-                 additional_args):
-  """Adds dm-verity hashtree and AVB metadata to an image.
+class VerityImageBuilder(object):
+  """A builder that generates an image with verity metadata for Verified Boot.
 
-  Args:
-    image_path: Path to image to modify.
-    avbtool: String with path to avbtool.
-    footer_type: 'hash' or 'hashtree' for generating footer.
-    partition_size: The size of the partition in question.
-    partition_name: The name of the partition - will be embedded in metadata.
-    key_path: Path to key to use or None.
-    algorithm: Name of algorithm to use or None.
-    salt: The salt to use (a hexadecimal string) or None.
-    additional_args: Additional arguments to pass to "avbtool add_hash_footer"
-        or "avbtool add_hashtree_footer".
+  A VerityImageBuilder instance handles the work of building an image with
+  verity metadata for Android Verified Boot. This class defines the common
+  interface between Verified Boot 1.0 and Verified Boot 2.0. A matching
+  builder will be returned based on the given build properties.
+
+  More info on the verity image generation can be found at the following link.
+  https://source.android.com/security/verifiedboot/dm-verity#implementation
   """
-  cmd = [avbtool, "add_%s_footer" % footer_type,
-         "--partition_size", partition_size,
-         "--partition_name", partition_name,
-         "--image", image_path]
 
-  if key_path and algorithm:
-    cmd.extend(["--key", key_path, "--algorithm", algorithm])
-  if salt:
-    cmd.extend(["--salt", salt])
+  def CalculateMaxImageSize(self, partition_size):
+    """Calculates the filesystem image size for the given partition size."""
+    raise NotImplementedError
 
-  cmd.extend(shlex.split(additional_args))
+  def CalculateDynamicPartitionSize(self, image_size):
+    """Calculates and sets the partition size for a dynamic partition."""
+    raise NotImplementedError
 
-  common.RunAndCheckOutput(cmd)
+  def PadSparseImage(self, out_file):
+    """Adds padding to the generated sparse image."""
+    raise NotImplementedError
+
+  def Build(self, out_file):
+    """Builds the verity image and writes it to the given file."""
+    raise NotImplementedError
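Callers typically drive this interface in one of two ways, depending on whether the partition size is known up front. A hedged flow sketch follows (the builder comes from CreateVerityImageBuilder() as shown above; file names and sizes are placeholders):

# Hedged flow sketch; "out_file" and the image size are placeholders.
builder = CreateVerityImageBuilder(prop_dict)
out_file = "system_sparse.img"
image_size = 100 * 1024 * 1024   # hypothetical filesystem image size

# (1) Fixed-size partition: ask for the filesystem budget first, build the
#     filesystem into out_file within that budget, then pad and add metadata.
max_image_size = builder.CalculateMaxImageSize()
# ... run the filesystem builder, keeping the image within max_image_size ...
builder.PadSparseImage(out_file)
builder.Build(out_file)

# (2) Dynamic partition: the filesystem image is built first; once its size is
#     known, the partition is sized to fit the image plus the verity metadata.
#     Note this path is currently only implemented for Verified Boot 2.0.
builder.CalculateDynamicPartitionSize(image_size)
builder.PadSparseImage(out_file)
builder.Build(out_file)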
+
+
+class Version1VerityImageBuilder(VerityImageBuilder):
+  """A VerityImageBuilder for Verified Boot 1.0."""
+
+  def __init__(self, partition_size, block_dev, fec_supported, signer_path,
+               signer_key, signer_args, verity_disable):
+    self.version = 1
+    self.partition_size = partition_size
+    self.block_device = block_dev
+    self.fec_supported = fec_supported
+    self.signer_path = signer_path
+    self.signer_key = signer_key
+    self.signer_args = signer_args
+    self.verity_disable = verity_disable
+    self.image_size = None
+    self.verity_size = None
+
+  def CalculateDynamicPartitionSize(self, image_size):
+    # This needs to be implemented. Note that returning the given image size as
+    # the partition size doesn't make sense, as it will fail later.
+    raise NotImplementedError
+
+  def CalculateMaxImageSize(self, partition_size=None):
+    """Calculates the max image size by accounting for the verity metadata.
+
+    Args:
+      partition_size: The partition size, which defaults to self.partition_size
+          if unspecified.
+
+    Returns:
+      The size of the image adjusted for verity metadata.
+    """
+    if partition_size is None:
+      partition_size = self.partition_size
+    assert partition_size > 0, \
+        "Invalid partition size: {}".format(partition_size)
+
+    hi = partition_size
+    if hi % BLOCK_SIZE != 0:
+      hi = (hi // BLOCK_SIZE) * BLOCK_SIZE
+
+    # verity tree and fec sizes depend on the partition size, which
+    # means this estimate is always going to be unnecessarily small
+    verity_size = GetVeritySize(hi, self.fec_supported)
+    lo = partition_size - verity_size
+    result = lo
+
+    # do a binary search for the optimal size
+    while lo < hi:
+      i = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
+      v = GetVeritySize(i, self.fec_supported)
+      if i + v <= partition_size:
+        if result < i:
+          result = i
+          verity_size = v
+        lo = i + BLOCK_SIZE
+      else:
+        hi = i
+
+    self.image_size = result
+    self.verity_size = verity_size
+
+    logger.info(
+        "Calculated image size for verity: partition_size %d, image_size %d, "
+        "verity_size %d", partition_size, result, verity_size)
+    return result
+
+  def Build(self, out_file):
+    """Creates an image that is verifiable using dm-verity.
+
+    Args:
+      out_file: the output image.
+
+    Raises:
+      AssertionError: On invalid partition sizes.
+      BuildVerityImageError: On other errors.
+    """
+    image_size = int(self.image_size)
+    tempdir_name = common.MakeTempDir(suffix="_verity_images")
+
+    # Get partial image paths.
+    verity_image_path = os.path.join(tempdir_name, "verity.img")
+    verity_metadata_path = os.path.join(tempdir_name, "verity_metadata.img")
+
+    # Build the verity tree and get the root hash and salt.
+    root_hash, salt = BuildVerityTree(out_file, verity_image_path)
+
+    # Build the metadata blocks.
+    BuildVerityMetadata(
+        image_size, verity_metadata_path, root_hash, salt, self.block_device,
+        self.signer_path, self.signer_key, self.signer_args,
+        self.verity_disable)
+
+    padding_size = self.partition_size - self.image_size - self.verity_size
+    assert padding_size >= 0
+
+    # Build the full verified image.
+    Append(
+        verity_image_path, verity_metadata_path,
+        "Failed to append verity metadata")
+
+    if self.fec_supported:
+      # Build FEC for the entire partition, including metadata.
+      verity_fec_path = os.path.join(tempdir_name, "verity_fec.img")
+      BuildVerityFEC(
+          out_file, verity_image_path, verity_fec_path, padding_size)
+      Append(verity_image_path, verity_fec_path, "Failed to append FEC")
+
+    Append2Simg(
+        out_file, verity_image_path, "Failed to append verity data")
+
+  def PadSparseImage(self, out_file):
+    sparse_image_size = GetSimgSize(out_file)
+    if sparse_image_size > self.image_size:
+      raise BuildVerityImageError(
+          "Error: image size of {} is larger than partition size of "
+          "{}".format(sparse_image_size, self.image_size))
+    ZeroPadSimg(out_file, self.image_size - sparse_image_size)
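A hedged, self-contained sketch of exercising the Verified Boot 1.0 size calculation directly (the block device path, key and 128 MiB size are illustrative only); it checks that the computed image size plus its verity metadata still fits the partition:

# Hedged sketch -- arguments are illustrative; only the size calculation is
# exercised here, so the signer settings are never used.
v1_builder = Version1VerityImageBuilder(
    partition_size=128 * 1024 * 1024,
    block_dev="/dev/block/by-name/system",
    fec_supported=True,
    signer_path="verity_signer",
    signer_key="verity.pk8",
    signer_args=None,
    verity_disable=False)

image_size = v1_builder.CalculateMaxImageSize()
# By construction of the binary search, the image plus its verity tree/FEC data
# fits back into the partition.
assert image_size + GetVeritySize(image_size, True) <= v1_builder.partition_size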
+
+
+class VerifiedBootVersion2VerityImageBuilder(VerityImageBuilder):
+  """A VerityImageBuilder for Verified Boot 2.0."""
+
+  AVB_HASH_FOOTER = 1
+  AVB_HASHTREE_FOOTER = 2
+
+  def __init__(self, partition_name, partition_size, footer_type, avbtool,
+               key_path, algorithm, salt, signing_args):
+    self.version = 2
+    self.partition_name = partition_name
+    self.partition_size = partition_size
+    self.footer_type = footer_type
+    self.avbtool = avbtool
+    self.algorithm = algorithm
+    self.key_path = key_path
+    self.salt = salt
+    self.signing_args = signing_args
+    self.image_size = None
+
+  def CalculateMinPartitionSize(self, image_size, size_calculator=None):
+    """Calculates min partition size for a given image size.
+
+    This is used when determining the partition size for a dynamic partition,
+    which should cover the given image size (for filesystem files) as well as
+    the verity metadata size.
+
+    Args:
+      image_size: The size of the image in question.
+      size_calculator: The function to calculate max image size
+          for a given partition size.
+
+    Returns:
+      The minimum partition size required to accommodate the image size.
+    """
+    if size_calculator is None:
+      size_calculator = self.CalculateMaxImageSize
+
+    # Use image size as partition size to approximate final partition size.
+    image_ratio = size_calculator(image_size) / float(image_size)
+
+    # Prepare a binary search for the optimal partition size.
+    lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - BLOCK_SIZE
+
+    # Ensure lo is small enough: max_image_size should be <= image_size.
+    delta = BLOCK_SIZE
+    max_image_size = size_calculator(lo)
+    while max_image_size > image_size:
+      image_ratio = max_image_size / float(lo)
+      lo = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE - delta
+      delta *= 2
+      max_image_size = size_calculator(lo)
+
+    hi = lo + BLOCK_SIZE
+
+    # Ensure hi is large enough: max_image_size should be >= image_size.
+    delta = BLOCK_SIZE
+    max_image_size = size_calculator(hi)
+    while max_image_size < image_size:
+      image_ratio = max_image_size / float(hi)
+      hi = int(image_size / image_ratio) // BLOCK_SIZE * BLOCK_SIZE + delta
+      delta *= 2
+      max_image_size = size_calculator(hi)
+
+    partition_size = hi
+
+    # Start to binary search.
+    while lo < hi:
+      mid = ((lo + hi) // (2 * BLOCK_SIZE)) * BLOCK_SIZE
+      max_image_size = size_calculator(mid)
+      if max_image_size >= image_size:  # if mid can accommodate image_size
+        if mid < partition_size:  # if a smaller partition size is found
+          partition_size = mid
+        hi = mid
+      else:
+        lo = mid + BLOCK_SIZE
+
+    logger.info(
+        "CalculateMinPartitionSize(%d): partition_size %d.", image_size,
+        partition_size)
+
+    return partition_size
+
+  def CalculateDynamicPartitionSize(self, image_size):
+    self.partition_size = self.CalculateMinPartitionSize(image_size)
+    return self.partition_size
+
+  def CalculateMaxImageSize(self, partition_size=None):
+    """Calculates max image size for a given partition size.
+
+    Args:
+      partition_size: The partition size, which defaults to self.partition_size
+          if unspecified.
+
+    Returns:
+      The maximum image size.
+
+    Raises:
+      BuildVerityImageError: On error or getting invalid image size.
+    """
+    if partition_size is None:
+      partition_size = self.partition_size
+    assert partition_size > 0, \
+        "Invalid partition size: {}".format(partition_size)
+
+    add_footer = ("add_hash_footer" if self.footer_type == self.AVB_HASH_FOOTER
+                  else "add_hashtree_footer")
+    cmd = [self.avbtool, add_footer, "--partition_size",
+           str(partition_size), "--calc_max_image_size"]
+    cmd.extend(shlex.split(self.signing_args))
+
+    proc = common.Run(cmd)
+    output, _ = proc.communicate()
+    if proc.returncode != 0:
+      raise BuildVerityImageError(
+          "Failed to calculate max image size:\n{}".format(output))
+    image_size = int(output)
+    if image_size <= 0:
+      raise BuildVerityImageError(
+          "Invalid max image size: {}".format(output))
+    self.image_size = image_size
+    return image_size
+
+  def PadSparseImage(self, out_file):
+    # No-op as the padding is taken care of by avbtool.
+    pass
+
+  def Build(self, out_file):
+    """Adds dm-verity hashtree and AVB metadata to an image.
+
+    Args:
+      out_file: Path to image to modify.
+    """
+    add_footer = ("add_hash_footer" if self.footer_type == self.AVB_HASH_FOOTER
+                  else "add_hashtree_footer")
+    cmd = [self.avbtool, add_footer,
+           "--partition_size", str(self.partition_size),
+           "--partition_name", self.partition_name,
+           "--image", out_file]
+    if self.key_path and self.algorithm:
+      cmd.extend(["--key", self.key_path, "--algorithm", self.algorithm])
+    if self.salt:
+      cmd.extend(["--salt", self.salt])
+    cmd.extend(shlex.split(self.signing_args))
+
+    proc = common.Run(cmd)
+    output, _ = proc.communicate()
+    if proc.returncode != 0:
+      raise BuildVerityImageError("Failed to add AVB footer: {}".format(output))
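A hedged sketch of the Verified Boot 2.0 flow for a dynamic partition (the 100 MiB image size and file name are hypothetical; v2_builder would come from CreateVerityImageBuilder()). The partition size returned by CalculateDynamicPartitionSize() must be able to hold the image, and Build() then hands the work to avbtool:

# Hedged sketch -- sizes and file names are illustrative.
image_size = 100 * 1024 * 1024          # size of the built filesystem image
partition_size = v2_builder.CalculateDynamicPartitionSize(image_size)

# The binary search in CalculateMinPartitionSize() guarantees this:
assert v2_builder.CalculateMaxImageSize(partition_size) >= image_size

# Build() appends the footer in place, roughly equivalent to running:
#   avbtool add_hashtree_footer --partition_size <partition_size> \
#       --partition_name system --image system_sparse.img
v2_builder.Build("system_sparse.img")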
 
 
 class HashtreeInfoGenerationError(Exception):
@@ -415,7 +547,7 @@
 
     Arguments:
       partition_size: The whole size in bytes of a partition, including the
-        filesystem size, padding size, and verity size.
+          filesystem size, padding size, and verity size.
       block_size: Expected size in bytes of each block for the sparse image.
       fec_supported: True if the verity section contains fec data.
     """
@@ -429,6 +561,20 @@
     self.hashtree_size = None
     self.metadata_size = None
 
+    prop_dict = {
+        'partition_size': str(partition_size),
+        'verity': 'true',
+        'verity_fec': 'true' if fec_supported else None,
+        # 'verity_block_device' needs to be present to indicate a verity-enabled
+        # partition.
+        'verity_block_device': '',
+        # The following properties are only needed for signing the verity
+        # metadata, which we don't do here.
+        'verity_key': '',
+        'verity_signer_cmd': None,
+    }
+    self.verity_image_builder = CreateVerityImageBuilder(prop_dict)
+
     self.hashtree_info = HashtreeInfo()
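Since "verity" is set and "verity_block_device" is present (even though empty), this prop_dict resolves to a Version1VerityImageBuilder; only its size calculations are used, so the empty signing properties are harmless. A hedged illustration:

# Hedged illustration of the dispatch performed for the prop_dict above.
builder = CreateVerityImageBuilder(prop_dict)
assert isinstance(builder, Version1VerityImageBuilder)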
 
   def DecomposeSparseImage(self, image):
@@ -445,8 +591,7 @@
         "partition size {} doesn't match with the calculated image size." \
         " total_blocks: {}".format(self.partition_size, image.total_blocks)
 
-    adjusted_size, _ = AdjustPartitionSizeForVerity(
-        self.partition_size, self.fec_supported)
+    adjusted_size = self.verity_image_builder.CalculateMaxImageSize()
     assert adjusted_size % self.block_size == 0
 
     verity_tree_size = GetVerityTreeSize(adjusted_size)
@@ -502,7 +647,7 @@
   def ValidateHashtree(self):
     """Checks that we can reconstruct the verity hash tree."""
 
-    # Writes the file system section to a temp file; and calls the executable
+    # Writes the filesystem section to a temp file; and calls the executable
     # build_verity_tree to construct the hash tree.
     adjusted_partition = common.MakeTempFile(prefix="adjusted_partition")
     with open(adjusted_partition, "wb") as fd: