Merge "Snap for 7110675 from a6f77c11e03ce0b44070756621051f919ec62944 to sdk-release" into sdk-release
diff --git a/CleanSpec.mk b/CleanSpec.mk
index 67aca7c..41defb2 100644
--- a/CleanSpec.mk
+++ b/CleanSpec.mk
@@ -751,6 +751,10 @@
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/vendor/default.prop)
$(call add-clean-step, rm -rf $(PRODUCT_OUT)/system/etc/prop.default)
+# Workaround for Soong not being able to rebuild the host binary if its
+# JNI dependencies change: b/170389375
+$(call add-clean-step, rm -rf $(OUT_DIR)/soong/host/*/lib*/libconscrypt_openjdk_jni.so)
+
# ************************************************
# NEWER CLEAN STEPS MUST BE AT THE END OF THE LIST
# ************************************************
diff --git a/OWNERS b/OWNERS
index 05f8b3d..4cac0f5 100644
--- a/OWNERS
+++ b/OWNERS
@@ -1,11 +1 @@
-# Core build team (MTV)
-ccross@android.com
-dwillemsen@google.com
-asmundak@google.com
-jungjw@google.com
-
-# To expedite LON reviews
-hansson@google.com
-
-# For version updates
-per-file version_defaults.mk = aseaton@google.com,elisapascual@google.com,lubomir@google.com,pscovanner@google.com
+include platform/build/soong:/OWNERS
diff --git a/common/math.mk b/common/math.mk
index 83f2218..ec15f88 100644
--- a/common/math.mk
+++ b/common/math.mk
@@ -181,6 +181,22 @@
$(call math-expect,(call numbers_less_than,4,0 2 1 3),0 2 1 3)
$(call math-expect,(call numbers_less_than,3,0 2 1 3 2),0 2 1 2)
+# Returns the words in $2 that are numbers and are greater than or equal to $1
+define numbers_greater_or_equal_to
+$(strip \
+ $(foreach n,$2, \
+ $(if $(call math_is_number,$(n)), \
+ $(if $(call math_gt_or_eq,$(n),$(1)), \
+ $(n)))))
+endef
+
+$(call math-expect,(call numbers_greater_or_equal_to,4,0 1 2 3),)
+$(call math-expect,(call numbers_greater_or_equal_to,3,0 2 1 3),3)
+$(call math-expect,(call numbers_greater_or_equal_to,2,0 2 1 3),2 3)
+$(call math-expect,(call numbers_greater_or_equal_to,1,0 2 1 3),2 1 3)
+$(call math-expect,(call numbers_greater_or_equal_to,0,0 2 1 3),0 2 1 3)
+$(call math-expect,(call numbers_greater_or_equal_to,1,0 2 1 3 2),2 1 3 2)
+
_INT_LIMIT_WORDS := $(foreach a,x x,$(foreach b,x x x x x x x x x x x x x x x x,\
$(foreach c,x x x x x x x x x x x x x x x x,x x x x x x x x x x x x x x x x)))
diff --git a/core/Makefile b/core/Makefile
index 37ff378..95bac40 100644
--- a/core/Makefile
+++ b/core/Makefile
@@ -40,6 +40,10 @@
)
endef
+# Phony target to check that PRODUCT_COPY_FILES copy pairs don't contain ELF files
+.PHONY: check-elf-prebuilt-product-copy-files
+check-elf-prebuilt-product-copy-files:
+
check_elf_prebuilt_product_copy_files := true
ifneq (,$(filter true,$(BUILD_BROKEN_ELF_PREBUILT_PRODUCT_COPY_FILES)))
check_elf_prebuilt_product_copy_files :=
@@ -162,11 +166,19 @@
$(call dist-for-goals,sdk,$(API_FINGERPRINT))
INSTALLED_RECOVERYIMAGE_TARGET :=
+# Build the recovery image only if
+# BUILDING_RECOVERY_IMAGE && !BOARD_USES_RECOVERY_AS_BOOT && !BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT.
+# If BOARD_USES_RECOVERY_AS_BOOT is true, leave INSTALLED_RECOVERYIMAGE_TARGET empty because
+# INSTALLED_BOOTIMAGE_TARGET is built with recovery resources.
+# If BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT is true, leave it empty so that recovery
+# resources are built but not the final recovery image.
ifdef BUILDING_RECOVERY_IMAGE
ifneq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+ifneq ($(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT),true)
INSTALLED_RECOVERYIMAGE_TARGET := $(PRODUCT_OUT)/recovery.img
endif
endif
+endif
include $(BUILD_SYSTEM)/sysprop.mk
@@ -378,10 +390,31 @@
VENDOR_RAMDISK_STRIPPED_MODULE_STAGING_DIR :=
endif
+# Create the "kernel module directory" to "vendor ramdisk fragment" inverse mapping.
+$(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \
+ $(if $(and $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \
+ $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).PREBUILT)), \
+ $(error Must not specify KERNEL_MODULE_DIRS for prebuilt vendor ramdisk fragment "$(vendor_ramdisk_fragment)": $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS))) \
+ $(eval VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).STAGING_DIR := $(call intermediates-dir-for,PACKAGING,vendor_ramdisk_fragment-dlkm-$(vendor_ramdisk_fragment))) \
+ $(eval VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).FILES :=) \
+ $(foreach dir,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).KERNEL_MODULE_DIRS), \
+ $(eval kmd_vrf := KERNEL_MODULE_DIR_VENDOR_RAMDISK_FRAGMENT_$(dir)) \
+    $(if $($(kmd_vrf)),$(error Kernel module directory "$(dir)" belongs to multiple vendor ramdisk fragments: "$($(kmd_vrf))" "$(vendor_ramdisk_fragment)"; each kernel module directory must belong to at most one vendor ramdisk fragment)) \
+ $(eval $(kmd_vrf) := $(vendor_ramdisk_fragment)) \
+ ) \
+)
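+# Illustrative example (hypothetical board values, not part of this change): with
+#   BOARD_VENDOR_RAMDISK_FRAGMENTS := dlkm
+#   BOARD_VENDOR_RAMDISK_FRAGMENT.dlkm.KERNEL_MODULE_DIRS := top
+# the loop above sets KERNEL_MODULE_DIR_VENDOR_RAMDISK_FRAGMENT_top := dlkm and
+# initializes an empty VENDOR_RAMDISK_FRAGMENT.dlkm.FILES list plus its STAGING_DIR.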
+
BOARD_KERNEL_MODULE_DIRS += top
$(foreach dir,$(BOARD_KERNEL_MODULE_DIRS), \
$(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,RECOVERY,$(TARGET_RECOVERY_ROOT_OUT),,modules.load.recovery,,$(dir))) \
- $(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,VENDOR_RAMDISK,$(TARGET_VENDOR_RAMDISK_OUT),,modules.load,$(VENDOR_RAMDISK_STRIPPED_MODULE_STAGING_DIR),$(dir))) \
+ $(eval vendor_ramdisk_fragment := $(KERNEL_MODULE_DIR_VENDOR_RAMDISK_FRAGMENT_$(dir))) \
+ $(if $(vendor_ramdisk_fragment), \
+ $(eval output_dir := $(VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).STAGING_DIR)) \
+ $(eval result_var := VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).FILES) \
+ $(eval ### else ###), \
+ $(eval output_dir := $(TARGET_VENDOR_RAMDISK_OUT)) \
+ $(eval result_var := ALL_DEFAULT_INSTALLED_MODULES)) \
+ $(eval $(result_var) += $(call build-image-kernel-modules-dir,VENDOR_RAMDISK,$(output_dir),,modules.load,$(VENDOR_RAMDISK_STRIPPED_MODULE_STAGING_DIR),$(dir))) \
$(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-ramdisk-recovery-load,$(dir))) \
$(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-image-kernel-modules-dir,VENDOR,$(if $(filter true,$(BOARD_USES_VENDOR_DLKMIMAGE)),$(TARGET_OUT_VENDOR_DLKM),$(TARGET_OUT_VENDOR)),vendor,modules.load,$(VENDOR_STRIPPED_MODULE_STAGING_DIR),$(dir))) \
$(eval ALL_DEFAULT_INSTALLED_MODULES += $(call build-vendor-charger-load,$(dir))) \
@@ -437,7 +470,7 @@
$(if $(PACKAGES.$(p).APKCERTS_FILE),\
$(call _apkcerts_merge,$(PACKAGES.$(p).APKCERTS_FILE), $@),\
$(if $(PACKAGES.$(p).EXTERNAL_KEY),\
- $(call _apkcerts_write_line,$(PACKAGES.$(p).STEM),"EXTERNAL","",$(PACKAGES.$(p).COMPRESSED),$(PACKAGES.$(p).PARTITION),$@),\
+ $(call _apkcerts_write_line,$(PACKAGES.$(p).STEM),EXTERNAL,,$(PACKAGES.$(p).COMPRESSED),$(PACKAGES.$(p).PARTITION),$@),\
$(call _apkcerts_write_line,$(PACKAGES.$(p).STEM),$(PACKAGES.$(p).CERTIFICATE),$(PACKAGES.$(p).PRIVATE_KEY),$(PACKAGES.$(p).COMPRESSED),$(PACKAGES.$(p).PARTITION),$@))))
# In case value of PACKAGES is empty.
$(hide) touch $@
@@ -535,6 +568,12 @@
$(call dist-for-goals,droidcore,$(PGO_PROFILE_MISSING))
+CERTIFICATE_VIOLATION_MODULES_FILENAME := $(PRODUCT_OUT)/certificate_violation_modules.txt
+$(CERTIFICATE_VIOLATION_MODULES_FILENAME):
+ rm -f $@
+ $(foreach m,$(sort $(CERTIFICATE_VIOLATION_MODULES)), echo $(m) >> $@;)
+$(call dist-for-goals,droidcore,$(CERTIFICATE_VIOLATION_MODULES_FILENAME))
+
# -----------------------------------------------------------------
# The dev key is used to sign this package, and as the key required
# for future OTA packages installed by this system. Actual product
@@ -703,6 +742,26 @@
BUILT_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img
endif
+INTERNAL_PREBUILT_BOOTIMAGE :=
+
+my_installed_prebuilt_gki_apex := $(strip $(foreach package,$(PRODUCT_PACKAGES),$(if $(ALL_MODULES.$(package).EXTRACTED_BOOT_IMAGE),$(package))))
+ifdef my_installed_prebuilt_gki_apex
+ ifneq (1,$(words $(my_installed_prebuilt_gki_apex))) # len(my_installed_prebuilt_gki_apex) > 1
+    $(error More than one prebuilt GKI APEX is installed: $(my_installed_prebuilt_gki_apex))
+ endif # len(my_installed_prebuilt_gki_apex) > 1
+
+ ifdef BOARD_PREBUILT_BOOTIMAGE
+ $(error Must not define BOARD_PREBUILT_BOOTIMAGE because a prebuilt GKI APEX is installed: $(my_installed_prebuilt_gki_apex))
+ endif # BOARD_PREBUILT_BOOTIMAGE defined
+
+ my_apex_extracted_boot_image := $(ALL_MODULES.$(my_installed_prebuilt_gki_apex).EXTRACTED_BOOT_IMAGE)
+ INSTALLED_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img
+ $(eval $(call copy-one-file,$(my_apex_extracted_boot_image),$(INSTALLED_BOOTIMAGE_TARGET)))
+
+ INTERNAL_PREBUILT_BOOTIMAGE := $(my_apex_extracted_boot_image)
+
+else # my_installed_prebuilt_gki_apex not defined
+
# $1: boot image target
# returns the kernel used to make the bootimage
define bootimage-to-kernel
@@ -746,22 +805,27 @@
INTERNAL_KERNEL_CMDLINE := $(strip $(INTERNAL_KERNEL_CMDLINE) buildvariant=$(TARGET_BUILD_VARIANT) $(VERITY_KEYID))
-ifndef BUILDING_VENDOR_BOOT_IMAGE
-ifdef BOARD_KERNEL_BASE
- INTERNAL_BOOTIMAGE_ARGS += --base $(BOARD_KERNEL_BASE)
-endif
-ifdef BOARD_KERNEL_PAGESIZE
- INTERNAL_BOOTIMAGE_ARGS += --pagesize $(BOARD_KERNEL_PAGESIZE)
-endif
-ifdef INTERNAL_KERNEL_CMDLINE
- INTERNAL_BOOTIMAGE_ARGS += --cmdline "$(INTERNAL_KERNEL_CMDLINE)"
-endif
-else
-# building vendor boot image, dtb/base/pagesize go there
-ifdef GENERIC_KERNEL_CMDLINE
- INTERNAL_BOOTIMAGE_ARGS += --cmdline "$(GENERIC_KERNEL_CMDLINE)"
-endif
-endif
+# Kernel cmdline/base/pagesize in boot.
+# - If using GKI, use GENERIC_KERNEL_CMDLINE. Remove kernel base and pagesize because they are
+#   device-specific.
+# - If not using GKI:
+#   - If building vendor_boot, INTERNAL_KERNEL_CMDLINE, base, and pagesize go in vendor_boot.
+#   - Otherwise, put them in boot.
+ifeq (true,$(BOARD_USES_GENERIC_KERNEL_IMAGE))
+ ifdef GENERIC_KERNEL_CMDLINE
+ INTERNAL_BOOTIMAGE_ARGS += --cmdline "$(GENERIC_KERNEL_CMDLINE)"
+ endif
+else ifndef BUILDING_VENDOR_BOOT_IMAGE # && BOARD_USES_GENERIC_KERNEL_IMAGE != true
+ ifdef INTERNAL_KERNEL_CMDLINE
+ INTERNAL_BOOTIMAGE_ARGS += --cmdline "$(INTERNAL_KERNEL_CMDLINE)"
+ endif
+ ifdef BOARD_KERNEL_BASE
+ INTERNAL_BOOTIMAGE_ARGS += --base $(BOARD_KERNEL_BASE)
+ endif
+ ifdef BOARD_KERNEL_PAGESIZE
+ INTERNAL_BOOTIMAGE_ARGS += --pagesize $(BOARD_KERNEL_PAGESIZE)
+ endif
+endif # BUILDING_VENDOR_BOOT_IMAGE == "" && BOARD_USES_GENERIC_KERNEL_IMAGE != true
INTERNAL_MKBOOTIMG_VERSION_ARGS := \
--os_version $(PLATFORM_VERSION_LAST_STABLE) \
@@ -773,21 +837,24 @@
ifeq ($(TARGET_BOOTIMAGE_USE_EXT2),true)
$(error TARGET_BOOTIMAGE_USE_EXT2 is not supported anymore)
+endif # TARGET_BOOTIMAGE_USE_EXT2
-else ifeq (true,$(BOARD_AVB_ENABLE)) # TARGET_BOOTIMAGE_USE_EXT2 != true
+$(foreach b,$(INSTALLED_BOOTIMAGE_TARGET), $(eval $(call add-dependency,$(b),$(call bootimage-to-kernel,$(b)))))
+
+ifeq (true,$(BOARD_AVB_ENABLE))
# $1: boot image target
define build_boot_board_avb_enabled
$(MKBOOTIMG) --kernel $(call bootimage-to-kernel,$(1)) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1)
- $(call assert-max-image-size,$(1),$(call get-hash-image-max-size,$(BOARD_BOOTIMAGE_PARTITION_SIZE)))
+ $(call assert-max-image-size,$(1),$(call get-hash-image-max-size,$(call get-bootimage-partition-size,$(1),boot)))
$(AVBTOOL) add_hash_footer \
--image $(1) \
- --partition_size $(BOARD_BOOTIMAGE_PARTITION_SIZE) \
+ --partition_size $(call get-bootimage-partition-size,$(1),boot) \
--partition_name boot $(INTERNAL_AVB_BOOT_SIGNING_ARGS) \
$(BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS)
endef
-$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(AVBTOOL) $(INTERNAL_BOOTIMAGE_FILES) $(BOARD_AVB_BOOT_KEY_PATH) $(call bootimage-to-kernel,$@)
+$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(AVBTOOL) $(INTERNAL_BOOTIMAGE_FILES) $(BOARD_AVB_BOOT_KEY_PATH)
$(call pretty,"Target boot image: $@")
$(call build_boot_board_avb_enabled,$@)
@@ -802,10 +869,10 @@
define build_boot_supports_boot_signer
$(MKBOOTIMG) --kernel $(call bootimage-to-kernel,$(1)) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1)
$(BOOT_SIGNER) /boot $@ $(PRODUCT_VERITY_SIGNING_KEY).pk8 $(PRODUCT_VERITY_SIGNING_KEY).x509.pem $(1)
- $(call assert-max-image-size,$(1),$(BOARD_BOOTIMAGE_PARTITION_SIZE))
+ $(call assert-max-image-size,$(1),$(call get-bootimage-partition-size,$(1),boot))
endef
-$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(BOOT_SIGNER) $(call bootimage-to-kernel,$@)
+$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(BOOT_SIGNER)
$(call pretty,"Target boot image: $@")
$(call build_boot_supports_boot_signer,$@)
@@ -820,10 +887,10 @@
define build_boot_supports_vboot
$(MKBOOTIMG) --kernel $(call bootimage-to-kernel,$(1)) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1).unsigned
$(VBOOT_SIGNER) $(FUTILITY) $(1).unsigned $(PRODUCT_VBOOT_SIGNING_KEY).vbpubk $(PRODUCT_VBOOT_SIGNING_KEY).vbprivk $(PRODUCT_VBOOT_SIGNING_SUBKEY).vbprivk $(1).keyblock $(1)
- $(call assert-max-image-size,$(1),$(BOARD_BOOTIMAGE_PARTITION_SIZE))
+ $(call assert-max-image-size,$(1),$(call get-bootimage-partition-size,$(1),boot))
endef
-$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(VBOOT_SIGNER) $(FUTILITY) $(call bootimage-to-kernel,$@)
+$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(VBOOT_SIGNER) $(FUTILITY)
$(call pretty,"Target boot image: $@")
$(call build_boot_supports_vboot,$@)
@@ -837,10 +904,10 @@
# $1: boot image target
define build_boot_novboot
$(MKBOOTIMG) --kernel $(call bootimage-to-kernel,$(1)) $(INTERNAL_BOOTIMAGE_ARGS) $(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_MKBOOTIMG_ARGS) --output $(1)
- $(call assert-max-image-size,$1,$(BOARD_BOOTIMAGE_PARTITION_SIZE))
+ $(call assert-max-image-size,$1,$(call get-bootimage-partition-size,$(1),boot))
endef
-$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES) $(call bootimage-to-kernel,$@)
+$(INSTALLED_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_BOOTIMAGE_FILES)
$(call pretty,"Target boot image: $@")
$(call build_boot_novboot,$@)
@@ -849,7 +916,7 @@
@echo "make $@: ignoring dependencies"
$(foreach b,$(INSTALLED_BOOTIMAGE_TARGET),$(call build_boot_novboot,$(b)))
-endif # TARGET_BOOTIMAGE_USE_EXT2
+endif # BOARD_AVB_ENABLE
endif # BUILDING_BOOT_IMAGE
else # TARGET_NO_KERNEL == "true"
@@ -858,12 +925,23 @@
# Remove when b/63676296 is resolved.
$(error Prebuilt bootimage is only supported for AB targets)
endif
+INTERNAL_PREBUILT_BOOTIMAGE := $(BOARD_PREBUILT_BOOTIMAGE)
INSTALLED_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/boot.img
-$(eval $(call copy-one-file,$(BOARD_PREBUILT_BOOTIMAGE),$(INSTALLED_BOOTIMAGE_TARGET)))
+$(eval $(call copy-one-file,$(INTERNAL_PREBUILT_BOOTIMAGE),$(INSTALLED_BOOTIMAGE_TARGET)))
else # BOARD_PREBUILT_BOOTIMAGE not defined
INSTALLED_BOOTIMAGE_TARGET :=
endif # BOARD_PREBUILT_BOOTIMAGE
endif # TARGET_NO_KERNEL
+endif # my_installed_prebuilt_gki_apex not defined
+
+my_apex_extracted_boot_image :=
+my_installed_prebuilt_gki_apex :=
+
+# -----------------------------------------------------------------
+# declare recovery ramdisk files
+ifeq ($(BUILDING_RECOVERY_IMAGE),true)
+INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP := $(call intermediates-dir-for,PACKAGING,recovery)/ramdisk_files-timestamp
+endif
# -----------------------------------------------------------------
# vendor boot image
@@ -878,8 +956,25 @@
$(ALL_DEFAULT_INSTALLED_MODULES))
INTERNAL_VENDOR_RAMDISK_TARGET := $(call intermediates-dir-for,PACKAGING,vendor-boot)/vendor-ramdisk.cpio$(RAMDISK_EXT)
+
+ifeq (true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT))
+$(INTERNAL_VENDOR_RAMDISK_TARGET): $(INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP)
+$(INTERNAL_VENDOR_RAMDISK_TARGET): PRIVATE_ADDITIONAL_DIR := $(TARGET_RECOVERY_ROOT_OUT)
+endif
+
$(INTERNAL_VENDOR_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_VENDOR_RAMDISK_FILES) | $(COMPRESSION_COMMAND_DEPS)
- $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_VENDOR_RAMDISK_OUT) | $(COMPRESSION_COMMAND) > $@
+ $(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_VENDOR_RAMDISK_OUT) $(PRIVATE_ADDITIONAL_DIR) | $(COMPRESSION_COMMAND) > $@
+
+INSTALLED_FILES_FILE_VENDOR_RAMDISK := $(PRODUCT_OUT)/installed-files-vendor-ramdisk.txt
+INSTALLED_FILES_JSON_VENDOR_RAMDISK := $(INSTALLED_FILES_FILE_VENDOR_RAMDISK:.txt=.json)
+$(INSTALLED_FILES_FILE_VENDOR_RAMDISK): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_VENDOR_RAMDISK)
+$(INSTALLED_FILES_FILE_VENDOR_RAMDISK): $(INTERNAL_VENDOR_RAMDISK_TARGET)
+$(INSTALLED_FILES_FILE_VENDOR_RAMDISK): $(INTERNAL_VENDOR_RAMDISK_FILES) $(FILESLIST) $(FILESLIST_UTIL)
+ echo Installed file list: $@
+ mkdir -p $(dir $@)
+ rm -f $@
+ $(hide) $(FILESLIST) $(TARGET_VENDOR_RAMDISK_OUT) > $(@:.txt=.json)
+ $(hide) $(FILESLIST_UTIL) -c $(@:.txt=.json) > $@
ifdef BOARD_INCLUDE_DTB_IN_BOOTIMG
INTERNAL_VENDOR_BOOTIMAGE_ARGS += --dtb $(INSTALLED_DTBIMAGE_TARGET)
@@ -894,12 +989,58 @@
INTERNAL_VENDOR_BOOTIMAGE_ARGS += --vendor_cmdline "$(INTERNAL_KERNEL_CMDLINE)"
endif
+# $(1): Build target name
+# $(2): Staging dir to be compressed
+# $(3): Build dependencies
+define build-vendor-ramdisk-fragment-target
+$(1): $(3) $(MKBOOTFS) | $(COMPRESSION_COMMAND_DEPS)
+ $(MKBOOTFS) -d $(TARGET_OUT) $(2) | $(COMPRESSION_COMMAND) > $$@
+endef
+
+# $(1): Ramdisk name
+define build-vendor-ramdisk-fragment
+$(strip \
+ $(eval build_target := $(call intermediates-dir-for,PACKAGING,vendor_ramdisk_fragments)/$(1).cpio$(RAMDISK_EXT)) \
+ $(eval $(call build-vendor-ramdisk-fragment-target,$(build_target),$(VENDOR_RAMDISK_FRAGMENT.$(1).STAGING_DIR),$(VENDOR_RAMDISK_FRAGMENT.$(1).FILES))) \
+ $(build_target) \
+)
+endef
+
+# $(1): Ramdisk name
+# $(2): Prebuilt file path
+define build-prebuilt-vendor-ramdisk-fragment
+$(strip \
+ $(eval build_target := $(call intermediates-dir-for,PACKAGING,prebuilt_vendor_ramdisk_fragments)/$(1)) \
+ $(eval $(call copy-one-file,$(2),$(build_target))) \
+ $(build_target) \
+)
+endef
+
+INTERNAL_VENDOR_RAMDISK_FRAGMENT_TARGETS :=
+INTERNAL_VENDOR_RAMDISK_FRAGMENT_ARGS :=
+
+$(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \
+ $(eval prebuilt_vendor_ramdisk_fragment_file := $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).PREBUILT)) \
+ $(if $(prebuilt_vendor_ramdisk_fragment_file), \
+ $(eval vendor_ramdisk_fragment_target := $(call build-prebuilt-vendor-ramdisk-fragment,$(vendor_ramdisk_fragment),$(prebuilt_vendor_ramdisk_fragment_file))) \
+ $(eval ### else ###), \
+ $(eval vendor_ramdisk_fragment_target := $(call build-vendor-ramdisk-fragment,$(vendor_ramdisk_fragment))) \
+ $(if $(filter --ramdisk_type,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)),, \
+ $(eval BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS += --ramdisk_type DLKM))) \
+ $(if $(filter --ramdisk_name,$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)), \
+ $(error Must not specify --ramdisk_name for vendor ramdisk fragment: $(vendor_ramdisk_fragment))) \
+ $(eval BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS += --ramdisk_name $(vendor_ramdisk_fragment)) \
+ $(eval INTERNAL_VENDOR_RAMDISK_FRAGMENT_TARGETS += $(vendor_ramdisk_fragment_target)) \
+ $(eval INTERNAL_VENDOR_RAMDISK_FRAGMENT_ARGS += $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS) --vendor_ramdisk_fragment $(vendor_ramdisk_fragment_target)) \
+)
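+# Illustrative example (hypothetical fragment "dlkm" built from a staging dir, i.e. no
+# PREBUILT path set): the loop above ends up appending roughly
+#   --ramdisk_type DLKM --ramdisk_name dlkm --vendor_ramdisk_fragment <intermediates>/dlkm.cpio$(RAMDISK_EXT)
+# to INTERNAL_VENDOR_RAMDISK_FRAGMENT_ARGS, which is passed to mkbootimg below.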
+
INSTALLED_VENDOR_BOOTIMAGE_TARGET := $(PRODUCT_OUT)/vendor_boot.img
$(INSTALLED_VENDOR_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INTERNAL_VENDOR_RAMDISK_TARGET) $(INSTALLED_DTBIMAGE_TARGET)
+$(INSTALLED_VENDOR_BOOTIMAGE_TARGET): $(INTERNAL_VENDOR_RAMDISK_FRAGMENT_TARGETS)
ifeq ($(BOARD_AVB_ENABLE),true)
$(INSTALLED_VENDOR_BOOTIMAGE_TARGET): $(AVBTOOL) $(BOARD_AVB_VENDOR_BOOTIMAGE_KEY_PATH)
$(call pretty,"Target vendor_boot image: $@")
- $(MKBOOTIMG) $(INTERNAL_VENDOR_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --vendor_ramdisk $(INTERNAL_VENDOR_RAMDISK_TARGET) --vendor_boot $@
+ $(MKBOOTIMG) $(INTERNAL_VENDOR_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --vendor_ramdisk $(INTERNAL_VENDOR_RAMDISK_TARGET) $(INTERNAL_VENDOR_RAMDISK_FRAGMENT_ARGS) --vendor_boot $@
$(call assert-max-image-size,$@,$(BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE))
$(AVBTOOL) add_hash_footer \
--image $@ \
@@ -909,7 +1050,7 @@
else
$(INSTALLED_VENDOR_BOOTIMAGE_TARGET):
$(call pretty,"Target vendor_boot image: $@")
- $(MKBOOTIMG) $(INTERNAL_VENDOR_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --vendor_ramdisk $(INTERNAL_VENDOR_RAMDISK_TARGET) --vendor_boot $@
+ $(MKBOOTIMG) $(INTERNAL_VENDOR_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --vendor_ramdisk $(INTERNAL_VENDOR_RAMDISK_TARGET) $(INTERNAL_VENDOR_RAMDISK_FRAGMENT_ARGS) --vendor_boot $@
$(call assert-max-image-size,$@,$(BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE))
endif
endif # BUILDING_VENDOR_BOOT_IMAGE
@@ -1244,6 +1385,9 @@
ifneq (true,$(TARGET_USERIMAGES_SPARSE_EXT_DISABLED))
INTERNAL_USERIMAGES_SPARSE_EXT_FLAG := -s
endif
+ifneq (true,$(TARGET_USERIMAGES_SPARSE_EROFS_DISABLED))
+ INTERNAL_USERIMAGES_SPARSE_EROFS_FLAG := -s
+endif
ifneq (true,$(TARGET_USERIMAGES_SPARSE_SQUASHFS_DISABLED))
INTERNAL_USERIMAGES_SPARSE_SQUASHFS_FLAG := -s
endif
@@ -1268,6 +1412,18 @@
$(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE) \
$(BOARD_VENDOR_DLKMIMAGE_FILE_SYSTEM_TYPE) \
$(BOARD_ODM_DLKMIMAGE_FILE_SYSTEM_TYPE) \
+ ,erofs),)
+INTERNAL_USERIMAGES_DEPS += $(MKEROFSUSERIMG)
+endif
+
+ifneq ($(filter \
+ $(BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE) \
+ $(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE) \
+ $(BOARD_ODMIMAGE_FILE_SYSTEM_TYPE) \
+ $(BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE) \
+ $(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE) \
+ $(BOARD_VENDOR_DLKMIMAGE_FILE_SYSTEM_TYPE) \
+ $(BOARD_ODM_DLKMIMAGE_FILE_SYSTEM_TYPE) \
,squashfs),)
INTERNAL_USERIMAGES_DEPS += $(MKSQUASHFSUSERIMG)
endif
@@ -1286,7 +1442,13 @@
# Get a colon-separated list of search paths.
INTERNAL_USERIMAGES_BINARY_PATHS := $(subst $(space),:,$(sort $(dir $(INTERNAL_USERIMAGES_DEPS))))
+# Collects file_contexts files from modules to be installed
+$(call merge-fc-files, \
+ $(sort $(foreach m,$(product_MODULES),$(ALL_MODULES.$(m).FILE_CONTEXTS))),\
+ $(call intermediates-dir-for,ETC,file_contexts.bin)/file_contexts.modules.tmp)
+
SELINUX_FC := $(call intermediates-dir-for,ETC,file_contexts.bin)/file_contexts.bin
+
INTERNAL_USERIMAGES_DEPS += $(SELINUX_FC)
ifeq (true,$(PRODUCT_USE_DYNAMIC_PARTITIONS))
@@ -1305,6 +1467,8 @@
$(if $(BOARD_SYSTEMIMAGE_PARTITION_SIZE),$(hide) echo "system_size=$(BOARD_SYSTEMIMAGE_PARTITION_SIZE)" >> $(1))
$(if $(INTERNAL_SYSTEM_OTHER_PARTITION_SIZE),$(hide) echo "system_other_size=$(INTERNAL_SYSTEM_OTHER_PARTITION_SIZE)" >> $(1))
$(if $(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE),$(hide) echo "system_fs_type=$(BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE)" >> $(1))
+ $(if $(BOARD_SYSTEMIMAGE_FILE_SYSTEM_COMPRESS),$(hide) echo "system_fs_compress=$(BOARD_SYSTEMIMAGE_FILE_SYSTEM_COMPRESS)" >> $(1))
+ $(if $(BOARD_SYSTEMIMAGE_F2FS_SLOAD_COMPRESS_FLAGS),$(hide) echo "system_f2fs_sldc_flags=$(BOARD_SYSTEMIMAGE_F2FS_SLOAD_COMPRESS_FLAGS)" >> $(1))
$(if $(BOARD_SYSTEMIMAGE_EXTFS_INODE_COUNT),$(hide) echo "system_extfs_inode_count=$(BOARD_SYSTEMIMAGE_EXTFS_INODE_COUNT)" >> $(1))
$(if $(BOARD_SYSTEMIMAGE_EXTFS_RSV_PCT),$(hide) echo "system_extfs_rsv_pct=$(BOARD_SYSTEMIMAGE_EXTFS_RSV_PCT)" >> $(1))
$(if $(BOARD_SYSTEMIMAGE_JOURNAL_SIZE),$(hide) echo "system_journal_size=$(BOARD_SYSTEMIMAGE_JOURNAL_SIZE)" >> $(1))
@@ -1326,6 +1490,7 @@
$(if $(BOARD_USERDATAIMAGE_PARTITION_SIZE),$(hide) echo "userdata_size=$(BOARD_USERDATAIMAGE_PARTITION_SIZE)" >> $(1))
$(if $(PRODUCT_FS_CASEFOLD),$(hide) echo "needs_casefold=$(PRODUCT_FS_CASEFOLD)" >> $(1))
$(if $(PRODUCT_QUOTA_PROJID),$(hide) echo "needs_projid=$(PRODUCT_QUOTA_PROJID)" >> $(1))
+ $(if $(PRODUCT_FS_COMPRESSION),$(hide) echo "needs_compress=$(PRODUCT_FS_COMPRESSION)" >> $(1))
$(hide) echo "userdata_selinux_fc=$(SELINUX_FC)" >> $(1)
$(hide) echo "building_userdata_image=$(BUILDING_USERDATA_IMAGE)" >> $(1)
)
@@ -1433,6 +1598,7 @@
$(if $(INTERNAL_USERIMAGES_EXT_VARIANT),$(hide) echo "fs_type=$(INTERNAL_USERIMAGES_EXT_VARIANT)" >> $(1))
$(if $(INTERNAL_USERIMAGES_SPARSE_EXT_FLAG),$(hide) echo "extfs_sparse_flag=$(INTERNAL_USERIMAGES_SPARSE_EXT_FLAG)" >> $(1))
+$(if $(INTERNAL_USERIMAGES_SPARSE_EROFS_FLAG),$(hide) echo "erofs_sparse_flag=$(INTERNAL_USERIMAGES_SPARSE_EROFS_FLAG)" >> $(1))
$(if $(INTERNAL_USERIMAGES_SPARSE_SQUASHFS_FLAG),$(hide) echo "squashfs_sparse_flag=$(INTERNAL_USERIMAGES_SPARSE_SQUASHFS_FLAG)" >> $(1))
$(if $(INTERNAL_USERIMAGES_SPARSE_F2FS_FLAG),$(hide) echo "f2fs_sparse_flag=$(INTERNAL_USERIMAGES_SPARSE_F2FS_FLAG)" >> $(1))
$(if $(BOARD_EXT4_SHARE_DUP_BLOCKS),$(hide) echo "ext4_share_dup_blocks=$(BOARD_EXT4_SHARE_DUP_BLOCKS)" >> $(1))
@@ -1579,16 +1745,15 @@
INSTALLED_FILES_FILE_RECOVERY := $(PRODUCT_OUT)/installed-files-recovery.txt
INSTALLED_FILES_JSON_RECOVERY := $(INSTALLED_FILES_FILE_RECOVERY:.txt=.json)
+ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+INSTALLED_BOOTIMAGE_TARGET := $(BUILT_BOOTIMAGE_TARGET)
+endif
+
# TODO(b/30414428): Can't depend on INTERNAL_RECOVERYIMAGE_FILES alone like other
# INSTALLED_FILES_FILE_* rules. Because currently there're cp/rsync/rm commands in
# build-recoveryimage-target, which would touch the files under TARGET_RECOVERY_OUT and race with
# the call to FILELIST.
-ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
-INSTALLED_BOOTIMAGE_TARGET := $(BUILT_BOOTIMAGE_TARGET)
-$(INSTALLED_FILES_FILE_RECOVERY): $(INSTALLED_BOOTIMAGE_TARGET)
-else
-$(INSTALLED_FILES_FILE_RECOVERY): $(INSTALLED_RECOVERYIMAGE_TARGET)
-endif
+$(INSTALLED_FILES_FILE_RECOVERY): $(INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP)
$(INSTALLED_FILES_FILE_RECOVERY): .KATI_IMPLICIT_OUTPUTS := $(INSTALLED_FILES_JSON_RECOVERY)
$(INSTALLED_FILES_FILE_RECOVERY): $(INTERNAL_RECOVERYIMAGE_FILES) $(FILESLIST) $(FILESLIST_UTIL)
@@ -1787,9 +1952,13 @@
# e) We include the recovery ACPIO image within recovery - not needing the resource file as we
# do bsdiff because boot and recovery will contain different number of entries
# (BOARD_INCLUDE_RECOVERY_ACPIO = true).
+# f) We build a single image that contains both vendor_boot and recovery - no recovery image to
+#    install
+# (BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT = true).
ifeq (,$(filter true, $(BOARD_USES_FULL_RECOVERY_IMAGE) $(BOARD_USES_RECOVERY_AS_BOOT) \
- $(BOARD_BUILD_SYSTEM_ROOT_IMAGE) $(BOARD_INCLUDE_RECOVERY_DTBO) $(BOARD_INCLUDE_RECOVERY_ACPIO)))
+ $(BOARD_BUILD_SYSTEM_ROOT_IMAGE) $(BOARD_INCLUDE_RECOVERY_DTBO) $(BOARD_INCLUDE_RECOVERY_ACPIO) \
+ $(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT)))
# Named '.dat' so we don't attempt to use imgdiff for patching it.
RECOVERY_RESOURCE_ZIP := $(TARGET_OUT_VENDOR)/etc/recovery-resource.dat
ALL_DEFAULT_INSTALLED_MODULES += $(RECOVERY_RESOURCE_ZIP)
@@ -1842,19 +2011,25 @@
$(hide) cat $(INSTALLED_SYSTEM_EXT_BUILD_PROP_TARGET) >> $@
$(call append-recovery-ui-properties,$(PRIVATE_RECOVERY_UI_PROPERTIES),$@)
-ifeq (truetrue,$(strip $(BUILDING_VENDOR_BOOT_IMAGE))$(strip $(AB_OTA_UPDATER)))
- INTERNAL_RECOVERYIMAGE_ARGS := --ramdisk $(recovery_ramdisk)
-ifdef GENERIC_KERNEL_CMDLINE
- INTERNAL_RECOVERYIMAGE_ARGS += --cmdline "$(GENERIC_KERNEL_CMDLINE)"
+# Only install boot/etc/build.prop to the recovery image when recovery is used as boot.
+# On devices with a dedicated recovery partition, the file should come from the boot
+# ramdisk.
+ifeq (true,$(BOARD_USES_RECOVERY_AS_BOOT))
+INSTALLED_RECOVERY_RAMDISK_BUILD_PROP_TARGET := $(TARGET_RECOVERY_ROOT_OUT)/$(RAMDISK_BUILD_PROP_REL_PATH)
+$(INSTALLED_RECOVERY_RAMDISK_BUILD_PROP_TARGET): $(INSTALLED_RAMDISK_BUILD_PROP_TARGET)
+ $(copy-file-to-target)
endif
-else # not (BUILDING_VENDOR_BOOT_IMAGE and AB_OTA_UPDATER)
- INTERNAL_RECOVERYIMAGE_ARGS := \
- $(addprefix --second ,$(INSTALLED_2NDBOOTLOADER_TARGET)) \
- --ramdisk $(recovery_ramdisk)
+
+INTERNAL_RECOVERYIMAGE_ARGS := --ramdisk $(recovery_ramdisk)
+
+ifneq (truetrue,$(strip $(BUILDING_VENDOR_BOOT_IMAGE))$(strip $(BOARD_USES_RECOVERY_AS_BOOT)))
+INTERNAL_RECOVERYIMAGE_ARGS += $(addprefix --second ,$(INSTALLED_2NDBOOTLOADER_TARGET))
# Assumes this has already been stripped
+ifneq (true,$(BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE))
ifdef INTERNAL_KERNEL_CMDLINE
INTERNAL_RECOVERYIMAGE_ARGS += --cmdline "$(INTERNAL_KERNEL_CMDLINE)"
-endif
+endif # INTERNAL_KERNEL_CMDLINE != ""
+endif # BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE != true
ifdef BOARD_KERNEL_BASE
INTERNAL_RECOVERYIMAGE_ARGS += --base $(BOARD_KERNEL_BASE)
endif
@@ -1874,18 +2049,19 @@
ifdef BOARD_INCLUDE_DTB_IN_BOOTIMG
INTERNAL_RECOVERYIMAGE_ARGS += --dtb $(INSTALLED_DTBIMAGE_TARGET)
endif
-endif # INSTALLED_VENDOR_BOOTIMAGE_TARGET not defined
+endif # (BUILDING_VENDOR_BOOT_IMAGE and BOARD_USES_RECOVERY_AS_BOOT)
ifndef BOARD_RECOVERY_MKBOOTIMG_ARGS
BOARD_RECOVERY_MKBOOTIMG_ARGS := $(BOARD_MKBOOTIMG_ARGS)
endif
-$(recovery_ramdisk): $(MKBOOTFS) $(COMPRESSION_COMMAND_DEPS) \
+$(INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP): $(MKBOOTFS) $(COMPRESSION_COMMAND_DEPS) \
$(INTERNAL_ROOT_FILES) \
$(INSTALLED_RAMDISK_TARGET) \
$(INTERNAL_RECOVERYIMAGE_FILES) \
$(recovery_sepolicy) \
$(INSTALLED_2NDBOOTLOADER_TARGET) \
$(INSTALLED_RECOVERY_BUILD_PROP_TARGET) \
+ $(INSTALLED_RECOVERY_RAMDISK_BUILD_PROP_TARGET) \
$(recovery_resource_deps) \
$(recovery_fstab)
# Making recovery image
@@ -1914,16 +2090,19 @@
cp -f $(recovery_wipe) $(TARGET_RECOVERY_ROOT_OUT)/system/etc/recovery.wipe)
ln -sf prop.default $(TARGET_RECOVERY_ROOT_OUT)/default.prop
$(BOARD_RECOVERY_IMAGE_PREPARE)
+ $(hide) touch $@
+
+$(recovery_ramdisk): $(INTERNAL_RECOVERY_RAMDISK_FILES_TIMESTAMP)
$(MKBOOTFS) -d $(TARGET_OUT) $(TARGET_RECOVERY_ROOT_OUT) | $(COMPRESSION_COMMAND) > $(recovery_ramdisk)
# $(1): output file
-# $(2): kernel file
+# $(2): optional kernel file
define build-recoveryimage-target
$(if $(filter true,$(PRODUCT_SUPPORTS_VBOOT)), \
- $(MKBOOTIMG) --kernel $(2) $(INTERNAL_RECOVERYIMAGE_ARGS) \
+ $(MKBOOTIMG) $(if $(strip $(2)),--kernel $(strip $(2))) $(INTERNAL_RECOVERYIMAGE_ARGS) \
$(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_RECOVERY_MKBOOTIMG_ARGS) \
--output $(1).unsigned, \
- $(MKBOOTIMG) --kernel $(2) $(INTERNAL_RECOVERYIMAGE_ARGS) \
+ $(MKBOOTIMG) $(if $(strip $(2)),--kernel $(strip $(2))) $(INTERNAL_RECOVERYIMAGE_ARGS) \
$(INTERNAL_MKBOOTIMG_VERSION_ARGS) $(BOARD_RECOVERY_MKBOOTIMG_ARGS) \
--output $(1))
$(if $(filter true,$(PRODUCT_SUPPORTS_BOOT_SIGNER)),\
@@ -1968,13 +2147,15 @@
endif
ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+$(foreach b,$(INSTALLED_BOOTIMAGE_TARGET), $(eval $(call add-dependency,$(b),$(call bootimage-to-kernel,$(b)))))
$(INSTALLED_BOOTIMAGE_TARGET): $(recoveryimage-deps)
$(call pretty,"Target boot image from recovery: $@")
$(call build-recoveryimage-target, $@, $(PRODUCT_OUT)/$(subst .img,,$(subst boot,kernel,$(notdir $@))))
endif # BOARD_USES_RECOVERY_AS_BOOT
$(INSTALLED_RECOVERYIMAGE_TARGET): $(recoveryimage-deps)
- $(call build-recoveryimage-target, $@, $(recovery_kernel))
+ $(call build-recoveryimage-target, $@, \
+ $(if $(filter true, $(BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE)),, $(recovery_kernel)))
ifdef RECOVERY_RESOURCE_ZIP
$(RECOVERY_RESOURCE_ZIP): $(INSTALLED_RECOVERYIMAGE_TARGET) | $(ZIPTIME)
@@ -1986,7 +2167,8 @@
.PHONY: recoveryimage-nodeps
recoveryimage-nodeps:
@echo "make $@: ignoring dependencies"
- $(call build-recoveryimage-target, $(INSTALLED_RECOVERYIMAGE_TARGET), $(recovery_kernel))
+ $(call build-recoveryimage-target, $(INSTALLED_RECOVERYIMAGE_TARGET), \
+ $(if $(filter true, $(BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE)),, $(recovery_kernel)))
else # BUILDING_RECOVERY_IMAGE
RECOVERY_RESOURCE_ZIP :=
@@ -2084,6 +2266,7 @@
#
# Note: it's intentional to skip signing for boot-debug.img, because it
# can only be used if the device is unlocked with verification error.
+ifneq ($(INSTALLED_BOOTIMAGE_TARGET),)
ifneq ($(strip $(TARGET_NO_KERNEL)),true)
ifneq ($(strip $(BOARD_KERNEL_BINARIES)),)
INSTALLED_DEBUG_BOOTIMAGE_TARGET := $(foreach k,$(subst kernel,boot-debug,$(BOARD_KERNEL_BINARIES)), \
@@ -2138,15 +2321,20 @@
$(foreach b,$(INSTALLED_DEBUG_BOOTIMAGE_TARGET),$(call build-debug-bootimage-target,$b))
endif # TARGET_NO_KERNEL
+endif # INSTALLED_BOOTIMAGE_TARGET
ifeq ($(BUILDING_VENDOR_BOOT_IMAGE),true)
ifeq ($(BUILDING_RAMDISK_IMAGE),true)
# -----------------------------------------------------------------
# vendor debug ramdisk
# Combines vendor ramdisk files and debug ramdisk files to build the vendor debug ramdisk.
-INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET := $(PRODUCT_OUT)/vendor-ramdisk-debug.cpio$(RAMDISK_EXT)
-$(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET): DEBUG_RAMDISK_FILES := $(INTERNAL_DEBUG_RAMDISK_FILES)
-$(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET): VENDOR_RAMDISK_DIR := $(TARGET_VENDOR_RAMDISK_OUT)
+INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET := $(call intermediates-dir-for,PACKAGING,vendor_boot-debug)/vendor-ramdisk-debug.cpio$(RAMDISK_EXT)
+$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): DEBUG_RAMDISK_FILES := $(INTERNAL_DEBUG_RAMDISK_FILES)
+$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): VENDOR_RAMDISK_DIR := $(TARGET_VENDOR_RAMDISK_OUT)
+
+ifeq (true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT))
+$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): PRIVATE_ADDITIONAL_DIR := $(TARGET_RECOVERY_ROOT_OUT)
+endif
INTERNAL_VENDOR_DEBUG_RAMDISK_FILES := $(filter $(TARGET_VENDOR_DEBUG_RAMDISK_OUT)/%, \
$(ALL_GENERATED_SOURCES) \
@@ -2156,16 +2344,16 @@
# if BOARD_USES_RECOVERY_AS_BOOT is true. Otherwise, it will be $(PRODUCT_OUT)/vendor_debug_ramdisk.
# But the path of $(VENDOR_DEBUG_RAMDISK_DIR) to build the vendor debug ramdisk, is always
# $(PRODUCT_OUT)/vendor_debug_ramdisk.
-$(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET): VENDOR_DEBUG_RAMDISK_DIR := $(PRODUCT_OUT)/vendor_debug_ramdisk
-$(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET): $(INTERNAL_VENDOR_RAMDISK_TARGET) $(INSTALLED_DEBUG_RAMDISK_TARGET)
-$(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_VENDOR_DEBUG_RAMDISK_FILES) | $(COMPRESSION_COMMAND_DEPS)
+$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): DEBUG_RAMDISK_DIR := $(PRODUCT_OUT)/debug_ramdisk
+$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): VENDOR_DEBUG_RAMDISK_DIR := $(PRODUCT_OUT)/vendor_debug_ramdisk
+$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): $(INTERNAL_VENDOR_RAMDISK_TARGET) $(INSTALLED_DEBUG_RAMDISK_TARGET)
+$(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET): $(MKBOOTFS) $(INTERNAL_VENDOR_DEBUG_RAMDISK_FILES) | $(COMPRESSION_COMMAND_DEPS)
$(call pretty,"Target vendor debug ram disk: $@")
mkdir -p $(TARGET_VENDOR_DEBUG_RAMDISK_OUT)
touch $(TARGET_VENDOR_DEBUG_RAMDISK_OUT)/force_debuggable
$(foreach debug_file,$(DEBUG_RAMDISK_FILES), \
- cp -f $(debug_file) $(subst $(PRODUCT_OUT)/debug_ramdisk,$(PRODUCT_OUT)/vendor_debug_ramdisk,$(debug_file)) &&) true
- rsync -a $(VENDOR_RAMDISK_DIR)/ $(VENDOR_DEBUG_RAMDISK_DIR)
- $(MKBOOTFS) -d $(TARGET_OUT) $(VENDOR_DEBUG_RAMDISK_DIR) | $(COMPRESSION_COMMAND) > $@
+ cp -f $(debug_file) $(patsubst $(DEBUG_RAMDISK_DIR)/%,$(VENDOR_DEBUG_RAMDISK_DIR)/%,$(debug_file)) &&) true
+ $(MKBOOTFS) -d $(TARGET_OUT) $(VENDOR_RAMDISK_DIR) $(VENDOR_DEBUG_RAMDISK_DIR) $(PRIVATE_ADDITIONAL_DIR) | $(COMPRESSION_COMMAND) > $@
INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK := $(PRODUCT_OUT)/installed-files-vendor-ramdisk-debug.txt
INSTALLED_FILES_JSON_VENDOR_DEBUG_RAMDISK := $(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK:.txt=.json)
@@ -2174,7 +2362,7 @@
# The vendor debug ramdisk will rsync from $(TARGET_VENDOR_RAMDISK_OUT) and $(INTERNAL_DEBUG_RAMDISK_FILES),
# so we have to wait for the vendor debug ramdisk to be built before generating the installed file list.
-$(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK): $(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET)
+$(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK): $(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET)
$(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK): $(INTERNAL_VENDOR_DEBUG_RAMDISK_FILES) $(FILESLIST) $(FILESLIST_UTIL)
echo Installed file list: $@
mkdir -p $(dir $@)
@@ -2205,9 +2393,10 @@
endif
# Depends on vendor_boot.img and vendor-ramdisk-debug.cpio.gz to build the new vendor_boot-debug.img
-$(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INSTALLED_VENDOR_BOOTIMAGE_TARGET) $(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET)
+$(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET): $(MKBOOTIMG) $(INSTALLED_VENDOR_BOOTIMAGE_TARGET) $(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET)
+$(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET): $(INTERNAL_VENDOR_RAMDISK_FRAGMENT_TARGETS)
$(call pretty,"Target vendor_boot debug image: $@")
- $(MKBOOTIMG) $(INTERNAL_VENDOR_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --vendor_ramdisk $(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET) --vendor_boot $@
+ $(MKBOOTIMG) $(INTERNAL_VENDOR_BOOTIMAGE_ARGS) $(BOARD_MKBOOTIMG_ARGS) --vendor_ramdisk $(INTERNAL_VENDOR_DEBUG_RAMDISK_TARGET) $(INTERNAL_VENDOR_RAMDISK_FRAGMENT_ARGS) --vendor_boot $@
$(call assert-max-image-size,$@,$(BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE))
$(if $(BOARD_AVB_VENDOR_BOOT_KEY_PATH),$(call test-key-sign-vendor-bootimage,$@))
@@ -2318,6 +2507,7 @@
exit 1; \
fi
ln -sfn $2 $1
+$1: .KATI_SYMLINK_OUTPUTS := $1
)
$1
endef
@@ -2360,6 +2550,15 @@
# -----------------------------------------------------------------
ifdef BUILDING_SYSTEM_IMAGE
+# Install the system linker configuration.
+# Collect all available stub libraries installed in system and install them with the predefined linker configuration.
+SYSTEM_LINKER_CONFIG := $(TARGET_OUT)/etc/linker.config.pb
+$(SYSTEM_LINKER_CONFIG) : $(INTERNAL_SYSTEMIMAGE_FILES) $(LINKER_CONFIG_PATH_system_linker_config) | conv_linker_config
+ $(HOST_OUT_EXECUTABLES)/conv_linker_config systemprovide --source $(LINKER_CONFIG_PATH_system_linker_config)\
+ --output $@ --value "$(STUB_LIBRARIES)" --system "$(TARGET_OUT)"
+
+FULL_SYSTEMIMAGE_DEPS += $(SYSTEM_LINKER_CONFIG)
+
# installed file list
# Depending on anything that $(BUILT_SYSTEMIMAGE) depends on.
# We put installed-files.txt ahead of image itself in the dependency graph
@@ -3034,6 +3233,26 @@
endif # BOARD_PREBUILT_DTBOIMAGE
+# -----------------------------------------------------------------
+# Protected VM firmware image
+ifdef BOARD_PREBUILT_PVMFWIMAGE
+INSTALLED_PVMFWIMAGE_TARGET := $(PRODUCT_OUT)/pvmfw.img
+
+ifeq ($(BOARD_AVB_ENABLE),true)
+$(INSTALLED_PVMFWIMAGE_TARGET): $(BOARD_PREBUILT_PVMFWIMAGE) $(AVBTOOL) $(BOARD_AVB_PVMFW_KEY_PATH)
+ cp $(BOARD_PREBUILT_PVMFWIMAGE) $@
+ $(AVBTOOL) add_hash_footer \
+ --image $@ \
+ --partition_size $(BOARD_PVMFWIMG_PARTITION_SIZE) \
+ --partition_name pvmfw $(INTERNAL_AVB_PVMFW_SIGNING_ARGS) \
+ $(BOARD_AVB_PVMFW_ADD_HASH_FOOTER_ARGS)
+else
+$(INSTALLED_PVMFWIMAGE_TARGET): $(BOARD_PREBUILT_PVMFWIMAGE)
+ cp $(BOARD_PREBUILT_PVMFWIMAGE) $@
+endif
+
+endif # BOARD_PREBUILT_PVMFWIMAGE
+
# Returns a list of image targets corresponding to the given list of partitions. For example, it
# returns "$(INSTALLED_PRODUCTIMAGE_TARGET)" for "product", or "$(INSTALLED_SYSTEMIMAGE_TARGET)
# $(INSTALLED_VENDORIMAGE_TARGET)" for "system vendor".
@@ -3181,6 +3400,9 @@
BOARD_AVB_DTBO_ADD_HASH_FOOTER_ARGS += \
--prop com.android.build.dtbo.fingerprint:$(BUILD_FINGERPRINT_FROM_FILE)
+BOARD_AVB_PVMFW_ADD_HASH_FOOTER_ARGS += \
+ --prop com.android.build.pvmfw.fingerprint:$(BUILD_FINGERPRINT_FROM_FILE)
+
# The following vendor- and odm-specific images needs explicit SPL set per board.
ifdef BOOT_SECURITY_PATCH
BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS += \
@@ -3207,9 +3429,15 @@
--prop com.android.build.odm_dlkm.security_patch:$(ODM_DLKM_SECURITY_PATCH)
endif
+ifdef PVMFW_SECURITY_PATCH
+BOARD_AVB_PVMFW_ADD_HASH_FOOTER_ARGS += \
+ --prop com.android.build.pvmfw.security_patch:$(PVMFW_SECURITY_PATCH)
+endif
+
BOOT_FOOTER_ARGS := BOARD_AVB_BOOT_ADD_HASH_FOOTER_ARGS
VENDOR_BOOT_FOOTER_ARGS := BOARD_AVB_VENDOR_BOOT_ADD_HASH_FOOTER_ARGS
DTBO_FOOTER_ARGS := BOARD_AVB_DTBO_ADD_HASH_FOOTER_ARGS
+PVMFW_FOOTER_ARGS := BOARD_AVB_PVMFW_ADD_HASH_FOOTER_ARGS
SYSTEM_FOOTER_ARGS := BOARD_AVB_SYSTEM_ADD_HASHTREE_FOOTER_ARGS
VENDOR_FOOTER_ARGS := BOARD_AVB_VENDOR_ADD_HASHTREE_FOOTER_ARGS
RECOVERY_FOOTER_ARGS := BOARD_AVB_RECOVERY_ADD_HASH_FOOTER_ARGS
@@ -3324,6 +3552,10 @@
$(eval $(call check-and-set-avb-args,dtbo))
endif
+ifdef INSTALLED_PVMFWIMAGE_TARGET
+$(eval $(call check-and-set-avb-args,pvmfw))
+endif
+
ifdef INSTALLED_RECOVERYIMAGE_TARGET
$(eval $(call check-and-set-avb-args,recovery))
endif
@@ -3409,6 +3641,9 @@
$(if $(BOARD_AVB_DTBO_KEY_PATH),\
$(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_DTBO_KEY_PATH) \
--output $(1)/dtbo.avbpubkey)
+ $(if $(BOARD_AVB_PVMFW_KEY_PATH),\
+ $(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_PVMFW_KEY_PATH) \
+ --output $(1)/pvmfw.avbpubkey)
$(if $(BOARD_AVB_RECOVERY_KEY_PATH),\
$(hide) $(AVBTOOL) extract_public_key --key $(BOARD_AVB_RECOVERY_KEY_PATH) \
--output $(1)/recovery.avbpubkey)
@@ -3491,6 +3726,7 @@
$(INSTALLED_VENDOR_DLKMIMAGE_TARGET) \
$(INSTALLED_ODM_DLKMIMAGE_TARGET) \
$(INSTALLED_DTBOIMAGE_TARGET) \
+ $(INSTALLED_PVMFWIMAGE_TARGET) \
$(INSTALLED_CUSTOMIMAGES_TARGET) \
$(INSTALLED_RECOVERYIMAGE_TARGET) \
$(INSTALLED_VBMETA_SYSTEMIMAGE_TARGET) \
@@ -3509,6 +3745,18 @@
endif # BOARD_AVB_ENABLE
+# List of files from all images
+INTERNAL_ALLIMAGES_FILES := \
+ $(FULL_SYSTEMIMAGE_DEPS) \
+ $(INTERNAL_RAMDISK_FILES) \
+ $(INTERNAL_USERDATAIMAGE_FILES) \
+ $(INTERNAL_VENDORIMAGE_FILES) \
+ $(INTERNAL_PRODUCTIMAGE_FILES) \
+ $(INTERNAL_SYSTEM_EXTIMAGE_FILES) \
+ $(INTERNAL_ODMIMAGE_FILES) \
+ $(INTERNAL_VENDOR_DLKMIMAGE_FILES) \
+ $(INTERNAL_ODM_DLKMIMAGE_FILES) \
+
# -----------------------------------------------------------------
# Check VINTF of build
@@ -3527,13 +3775,7 @@
$(TARGET_OUT_PRODUCT)/etc/vintf/% \
$(TARGET_OUT_SYSTEM_EXT)/etc/vintf/% \
-check_vintf_common_srcs := $(sort $(filter $(check_vintf_common_srcs_patterns), \
- $(INTERNAL_SYSTEMIMAGE_FILES) \
- $(INTERNAL_VENDORIMAGE_FILES) \
- $(INTERNAL_ODMIMAGE_FILES) \
- $(INTERNAL_PRODUCTIMAGE_FILES) \
- $(INTERNAL_SYSTEM_EXTIMAGE_FILES) \
-))
+check_vintf_common_srcs := $(sort $(filter $(check_vintf_common_srcs_patterns),$(INTERNAL_ALLIMAGES_FILES)))
check_vintf_common_srcs_patterns :=
check_vintf_has_system :=
@@ -3550,11 +3792,19 @@
check_vintf_system_deps := $(filter $(TARGET_OUT)/etc/vintf/%, $(check_vintf_common_srcs))
ifneq ($(check_vintf_system_deps),)
check_vintf_has_system := true
+
check_vintf_system_log := $(intermediates)/check_vintf_system_log
check_vintf_all_deps += $(check_vintf_system_log)
$(check_vintf_system_log): $(HOST_OUT_EXECUTABLES)/checkvintf $(check_vintf_system_deps)
@( $< --check-one --dirmap /system:$(TARGET_OUT) > $@ 2>&1 ) || ( cat $@ && exit 1 )
check_vintf_system_log :=
+
+vintffm_log := $(intermediates)/vintffm_log
+check_vintf_all_deps += $(vintffm_log)
+$(vintffm_log): $(HOST_OUT_EXECUTABLES)/vintffm $(check_vintf_system_deps)
+ @( $< --check --dirmap /system:$(TARGET_OUT) \
+ $(VINTF_FRAMEWORK_MANIFEST_FROZEN_DIR) > $@ 2>&1 ) || ( cat $@ && exit 1 )
+
endif # check_vintf_system_deps
check_vintf_system_deps :=
@@ -3642,7 +3892,7 @@
$(BUILT_KERNEL_CONFIGS_FILE): $(EXTRACT_KERNEL) $(firstword $(INSTALLED_KERNEL_TARGET))
$< --tools $(PRIVATE_DECOMPRESS_TOOLS) --input $(firstword $(INSTALLED_KERNEL_TARGET)) \
--output-configs $@ \
- --output-version $(BUILT_KERNEL_VERSION_FILE)
+ --output-release $(BUILT_KERNEL_VERSION_FILE)
my_decompress_tools :=
@@ -3651,7 +3901,7 @@
endif # INSTALLED_KERNEL_TARGET
-check_vintf_compatible_args += --kernel $$(cat $(BUILT_KERNEL_VERSION_FILE)):$(BUILT_KERNEL_CONFIGS_FILE)
+check_vintf_compatible_args += --kernel $(BUILT_KERNEL_VERSION_FILE):$(BUILT_KERNEL_CONFIGS_FILE)
check_vintf_compatible_deps += $(BUILT_KERNEL_CONFIGS_FILE) $(BUILT_KERNEL_VERSION_FILE)
endif # PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS
@@ -3836,6 +4086,7 @@
fec \
fs_config \
generate_verity_key \
+ host_init_verifier \
img2simg \
img_from_target_files \
imgdiff \
@@ -3852,6 +4103,8 @@
mkbootimg \
mke2fs \
mke2fs.conf \
+ mkfs.erofs \
+ mkerofsimage.sh \
mkf2fsuserimg.sh \
mksquashfs \
mksquashfsimage.sh \
@@ -3865,6 +4118,7 @@
signapk \
simg2img \
sload_f2fs \
+ toybox \
tune2fs \
unpack_bootimg \
update_host_simulator \
@@ -3876,6 +4130,7 @@
# Additional tools to unpack and repack the apex file.
INTERNAL_OTATOOLS_MODULES += \
apexer \
+ apex_compression_tool \
deapexer \
debugfs_static \
merge_zips \
@@ -4033,6 +4288,9 @@
$(hide) echo "board_uses_vendorimage=true" >> $@
endif
ifeq ($(BOARD_AVB_ENABLE),true)
+ifeq ($(BUILDING_VBMETA_IMAGE),true)
+ $(hide) echo "avb_building_vbmeta_image=true" >> $@
+endif # BUILDING_VBMETA_IMAGE
$(hide) echo "avb_enable=true" >> $@
$(hide) echo "avb_vbmeta_key_path=$(BOARD_AVB_KEY_PATH)" >> $@
$(hide) echo "avb_vbmeta_algorithm=$(BOARD_AVB_ALGORITHM)" >> $@
@@ -4109,6 +4367,18 @@
endif # BOARD_AVB_DTBO_KEY_PATH
endif # BOARD_AVB_ENABLE
endif # BOARD_PREBUILT_DTBOIMAGE
+ifdef BOARD_PREBUILT_PVMFWIMAGE
+ $(hide) echo "has_pvmfw=true" >> $@
+ifeq ($(BOARD_AVB_ENABLE),true)
+ $(hide) echo "pvmfw_size=$(BOARD_PVMFWIMG_PARTITION_SIZE)" >> $@
+ $(hide) echo "avb_pvmfw_add_hash_footer_args=$(BOARD_AVB_PVMFW_ADD_HASH_FOOTER_ARGS)" >> $@
+ifdef BOARD_AVB_PVMFW_KEY_PATH
+ $(hide) echo "avb_pvmfw_key_path=$(BOARD_AVB_PVMFW_KEY_PATH)" >> $@
+ $(hide) echo "avb_pvmfw_algorithm=$(BOARD_AVB_PVMFW_ALGORITHM)" >> $@
+ $(hide) echo "avb_pvmfw_rollback_index_location=$(BOARD_AVB_PVMFW_ROLLBACK_INDEX_LOCATION)" >> $@
+endif # BOARD_AVB_PVMFW_KEY_PATH
+endif # BOARD_AVB_ENABLE
+endif # BOARD_PREBUILT_PVMFWIMAGE
$(call dump-dynamic-partitions-info,$@)
@# VINTF checks
ifeq ($(PRODUCT_ENFORCE_VINTF_MANIFEST),true)
@@ -4129,6 +4399,12 @@
ifeq ($(BOARD_BOOTLOADER_IN_UPDATE_PACKAGE),true)
$(hide) echo "bootloader_in_update_package=true" >> $@
endif
+ifeq ($(BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE),true)
+ $(hide) echo "exclude_kernel_from_recovery_image=true" >> $@
+endif
+ifneq ($(BOARD_PARTIAL_OTA_UPDATE_PARTITIONS_LIST),)
+ $(hide) echo "partial_ota_update_partitions_list=$(BOARD_PARTIAL_OTA_UPDATE_PARTITIONS_LIST)" >> $@
+endif
.PHONY: misc_info
misc_info: $(INSTALLED_MISC_INFO_TARGET)
@@ -4187,8 +4463,11 @@
$(BUILT_TARGET_FILES_PACKAGE): $(updater_dep)
# If we are using recovery as boot, output recovery files to BOOT/.
+# If we are moving recovery resources to vendor_boot, output recovery files to VENDOR_BOOT/.
ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_RECOVERY_OUT := BOOT
+else ifeq ($(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT),true)
+$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_RECOVERY_OUT := VENDOR_BOOT
else
$(BUILT_TARGET_FILES_PACKAGE): PRIVATE_RECOVERY_OUT := RECOVERY
endif
@@ -4269,6 +4548,8 @@
echo "super_partition_error_limit=$(BOARD_SUPER_PARTITION_ERROR_LIMIT)" >> $(1))
$(if $(filter true,$(PRODUCT_VIRTUAL_AB_OTA)), \
echo "virtual_ab=true" >> $(1))
+ $(if $(filter true,$(PRODUCT_VIRTUAL_AB_COMPRESSION)), \
+ echo "virtual_ab_compression=true" >> $(1))
$(if $(filter true,$(PRODUCT_VIRTUAL_AB_OTA_RETROFIT)), \
echo "virtual_ab_retrofit=true" >> $(1))
endef
@@ -4290,6 +4571,7 @@
ifdef BUILDING_VENDOR_BOOT_IMAGE
$(BUILT_TARGET_FILES_PACKAGE): $(INTERNAL_VENDOR_RAMDISK_FILES)
+ $(BUILT_TARGET_FILES_PACKAGE): $(INTERNAL_VENDOR_RAMDISK_FRAGMENT_TARGETS)
endif
ifdef BUILDING_RECOVERY_IMAGE
@@ -4329,7 +4611,8 @@
ifdef BUILDING_BOOT_IMAGE
$(BUILT_TARGET_FILES_PACKAGE): $(INTERNAL_RAMDISK_FILES)
-else ifdef BOARD_PREBUILT_BOOTIMAGE
+endif
+ifneq (,$(INTERNAL_PREBUILT_BOOTIMAGE) $(filter true,$(BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES)))
$(BUILT_TARGET_FILES_PACKAGE): $(INSTALLED_BOOTIMAGE_TARGET)
endif
@@ -4368,9 +4651,11 @@
$(INSTALLED_RECOVERYIMAGE_TARGET) \
$(INSTALLED_CACHEIMAGE_TARGET) \
$(INSTALLED_DTBOIMAGE_TARGET) \
+ $(INSTALLED_PVMFWIMAGE_TARGET) \
$(INSTALLED_CUSTOMIMAGES_TARGET) \
$(INSTALLED_ANDROID_INFO_TXT_TARGET) \
$(INSTALLED_KERNEL_TARGET) \
+ $(INSTALLED_RAMDISK_TARGET) \
$(INSTALLED_DTBIMAGE_TARGET) \
$(INSTALLED_2NDBOOTLOADER_TARGET) \
$(BOARD_PREBUILT_DTBOIMAGE) \
@@ -4398,7 +4683,7 @@
@echo "Package target files: $@"
$(hide) rm -rf $@ $@.list $(zip_root)
$(hide) mkdir -p $(dir $@) $(zip_root)
-ifneq (,$(INSTALLED_RECOVERYIMAGE_TARGET)$(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)))
+ifneq (,$(INSTALLED_RECOVERYIMAGE_TARGET)$(filter true,$(BOARD_USES_RECOVERY_AS_BOOT))$(filter true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT)))
@# Components of the recovery image
$(hide) mkdir -p $(zip_root)/$(PRIVATE_RECOVERY_OUT)
$(hide) $(call package_files-copy-root, \
@@ -4406,13 +4691,11 @@
ifdef INSTALLED_KERNEL_TARGET
ifneq (,$(filter true,$(BOARD_USES_RECOVERY_AS_BOOT)))
cp $(INSTALLED_KERNEL_TARGET) $(zip_root)/$(PRIVATE_RECOVERY_OUT)/
-else # BOARD_USES_RECOVERY_AS_BOOT not true
+else ifneq (true,$(BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE))
cp $(firstword $(INSTALLED_KERNEL_TARGET)) $(zip_root)/$(PRIVATE_RECOVERY_OUT)/kernel
endif
endif
-ifeq (truetrue,$(strip $(BUILDING_VENDOR_BOOT_IMAGE))$(strip $(AB_OTA_UPDATER)))
- echo "$(GENERIC_KERNEL_CMDLINE)" > $(zip_root)/$(PRIVATE_RECOVERY_OUT)/cmdline
-else # not (BUILDING_VENDOR_BOOT_IMAGE and AB_OTA_UPDATER)
+ifneq (truetrue,$(strip $(BUILDING_VENDOR_BOOT_IMAGE))$(strip $(BOARD_USES_RECOVERY_AS_BOOT)))
ifdef INSTALLED_2NDBOOTLOADER_TARGET
cp $(INSTALLED_2NDBOOTLOADER_TARGET) $(zip_root)/$(PRIVATE_RECOVERY_OUT)/second
endif
@@ -4429,16 +4712,18 @@
ifdef INSTALLED_DTBIMAGE_TARGET
cp $(INSTALLED_DTBIMAGE_TARGET) $(zip_root)/$(PRIVATE_RECOVERY_OUT)/dtb
endif
+ifneq (true,$(BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE))
ifdef INTERNAL_KERNEL_CMDLINE
echo "$(INTERNAL_KERNEL_CMDLINE)" > $(zip_root)/$(PRIVATE_RECOVERY_OUT)/cmdline
-endif
+endif # INTERNAL_KERNEL_CMDLINE != ""
+endif # BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE != true
ifdef BOARD_KERNEL_BASE
echo "$(BOARD_KERNEL_BASE)" > $(zip_root)/$(PRIVATE_RECOVERY_OUT)/base
endif
ifdef BOARD_KERNEL_PAGESIZE
echo "$(BOARD_KERNEL_PAGESIZE)" > $(zip_root)/$(PRIVATE_RECOVERY_OUT)/pagesize
endif
-endif # not (BUILDING_VENDOR_BOOT_IMAGE and AB_OTA_UPDATER)
+endif # not (BUILDING_VENDOR_BOOT_IMAGE and BOARD_USES_RECOVERY_AS_BOOT)
endif # INSTALLED_RECOVERYIMAGE_TARGET defined or BOARD_USES_RECOVERY_AS_BOOT is true
@# Components of the boot image
$(hide) mkdir -p $(zip_root)/BOOT
@@ -4454,23 +4739,23 @@
ifdef INSTALLED_KERNEL_TARGET
$(hide) cp $(INSTALLED_KERNEL_TARGET) $(zip_root)/BOOT/
endif
-ifndef INSTALLED_VENDOR_BOOTIMAGE_TARGET
+ifeq (true,$(BOARD_USES_GENERIC_KERNEL_IMAGE))
+ echo "$(GENERIC_KERNEL_CMDLINE)" > $(zip_root)/BOOT/cmdline
+else ifndef INSTALLED_VENDOR_BOOTIMAGE_TARGET # && BOARD_USES_GENERIC_KERNEL_IMAGE != true
+ echo "$(INTERNAL_KERNEL_CMDLINE)" > $(zip_root)/BOOT/cmdline
ifdef INSTALLED_2NDBOOTLOADER_TARGET
cp $(INSTALLED_2NDBOOTLOADER_TARGET) $(zip_root)/BOOT/second
endif
ifdef INSTALLED_DTBIMAGE_TARGET
cp $(INSTALLED_DTBIMAGE_TARGET) $(zip_root)/BOOT/dtb
endif
- echo "$(INTERNAL_KERNEL_CMDLINE)" > $(zip_root)/BOOT/cmdline
ifdef BOARD_KERNEL_BASE
echo "$(BOARD_KERNEL_BASE)" > $(zip_root)/BOOT/base
endif
ifdef BOARD_KERNEL_PAGESIZE
echo "$(BOARD_KERNEL_PAGESIZE)" > $(zip_root)/BOOT/pagesize
endif
-else # INSTALLED_VENDOR_BOOTIMAGE_TARGET defined
- echo "$(GENERIC_KERNEL_CMDLINE)" > $(zip_root)/BOOT/cmdline
-endif # INSTALLED_VENDOR_BOOTIMAGE_TARGET defined
+endif # INSTALLED_VENDOR_BOOTIMAGE_TARGET == "" && BOARD_USES_GENERIC_KERNEL_IMAGE != true
endif # BOARD_USES_RECOVERY_AS_BOOT not true
$(hide) $(foreach t,$(INSTALLED_RADIOIMAGE_TARGET),\
mkdir -p $(zip_root)/RADIO; \
@@ -4489,6 +4774,19 @@
echo "$(BOARD_KERNEL_PAGESIZE)" > $(zip_root)/VENDOR_BOOT/pagesize
endif
echo "$(INTERNAL_KERNEL_CMDLINE)" > $(zip_root)/VENDOR_BOOT/vendor_cmdline
+ifdef BOARD_VENDOR_RAMDISK_FRAGMENTS
+ echo "$(BOARD_VENDOR_RAMDISK_FRAGMENTS)" > "$(zip_root)/VENDOR_BOOT/vendor_ramdisk_fragments"
+ $(foreach vendor_ramdisk_fragment,$(BOARD_VENDOR_RAMDISK_FRAGMENTS), \
+ mkdir -p $(zip_root)/VENDOR_BOOT/RAMDISK_FRAGMENTS/$(vendor_ramdisk_fragment); \
+ echo "$(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).MKBOOTIMG_ARGS)" > "$(zip_root)/VENDOR_BOOT/RAMDISK_FRAGMENTS/$(vendor_ramdisk_fragment)/mkbootimg_args"; \
+ $(eval prebuilt_ramdisk := $(BOARD_VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).PREBUILT)) \
+ $(if $(prebuilt_ramdisk), \
+ cp "$(prebuilt_ramdisk)" "$(zip_root)/VENDOR_BOOT/RAMDISK_FRAGMENTS/$(vendor_ramdisk_fragment)/prebuilt_ramdisk";, \
+ $(call package_files-copy-root, \
+ $(VENDOR_RAMDISK_FRAGMENT.$(vendor_ramdisk_fragment).STAGING_DIR), \
+ $(zip_root)/VENDOR_BOOT/RAMDISK_FRAGMENTS/$(vendor_ramdisk_fragment)/RAMDISK); \
+ ))
+endif # BOARD_VENDOR_RAMDISK_FRAGMENTS != ""
endif # INSTALLED_VENDOR_BOOTIMAGE_TARGET
ifdef BUILDING_SYSTEM_IMAGE
@# Contents of the system image
@@ -4538,6 +4836,13 @@
@# Extra contents of the OTA package
$(hide) mkdir -p $(zip_root)/OTA
$(hide) cp $(INSTALLED_ANDROID_INFO_TXT_TARGET) $(zip_root)/OTA/
+ifdef BUILDING_RAMDISK_IMAGE
+ifeq (true,$(BOARD_IMG_USE_RAMDISK))
+ @# Contents of the ramdisk image
+ $(hide) mkdir -p $(zip_root)/IMAGES
+ $(hide) cp $(INSTALLED_RAMDISK_TARGET) $(zip_root)/IMAGES/
+endif
+endif
ifeq ($(TARGET_OTA_ALLOW_NON_AB),true)
ifneq ($(built_ota_tools),)
$(hide) mkdir -p $(zip_root)/OTA/bin
@@ -4620,10 +4925,12 @@
$(hide) mkdir -p $(zip_root)/IMAGES
$(hide) cp $(INSTALLED_SYSTEM_EXTIMAGE_TARGET) $(zip_root)/IMAGES/
endif
-ifdef BOARD_PREBUILT_BOOTIMAGE
+ifneq (,$(INTERNAL_PREBUILT_BOOTIMAGE) $(filter true,$(BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES)))
+ifdef INSTALLED_BOOTIMAGE_TARGET
$(hide) mkdir -p $(zip_root)/IMAGES
$(hide) cp $(INSTALLED_BOOTIMAGE_TARGET) $(zip_root)/IMAGES/
-endif
+endif # INSTALLED_BOOTIMAGE_TARGET
+endif # INTERNAL_PREBUILT_BOOTIMAGE != "" || BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES == true
ifdef BOARD_PREBUILT_ODMIMAGE
$(hide) mkdir -p $(zip_root)/IMAGES
$(hide) cp $(INSTALLED_ODMIMAGE_TARGET) $(zip_root)/IMAGES/
@@ -4640,6 +4947,10 @@
$(hide) mkdir -p $(zip_root)/PREBUILT_IMAGES
$(hide) cp $(INSTALLED_DTBOIMAGE_TARGET) $(zip_root)/PREBUILT_IMAGES/
endif # BOARD_PREBUILT_DTBOIMAGE
+ifdef BOARD_PREBUILT_PVMFWIMAGE
+ $(hide) mkdir -p $(zip_root)/PREBUILT_IMAGES
+ $(hide) cp $(INSTALLED_PVMFWIMAGE_TARGET) $(zip_root)/PREBUILT_IMAGES/
+endif # BOARD_PREBUILT_PVMFWIMAGE
ifdef BOARD_PREBUILT_BOOTLOADER
$(hide) mkdir -p $(zip_root)/IMAGES
$(hide) cp $(INSTALLED_BOOTLOADER_MODULE) $(zip_root)/IMAGES/
@@ -4740,7 +5051,7 @@
# $(1): output file
# $(2): additional args
define build-ota-package-target
-PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$$PATH \
+PATH=$(INTERNAL_USERIMAGES_BINARY_PATHS):$(dir $(ZIP2ZIP)):$$PATH \
$(OTA_FROM_TARGET_FILES) \
--verbose \
--extracted_input_target_files $(patsubst %.zip,%,$(BUILT_TARGET_FILES_PACKAGE)) \
@@ -4750,18 +5061,18 @@
$(BUILT_TARGET_FILES_PACKAGE) $(1)
endef
-name := $(TARGET_PRODUCT)
+product_name := $(TARGET_PRODUCT)
ifeq ($(TARGET_BUILD_TYPE),debug)
- name := $(name)_debug
+ product_name := $(product_name)_debug
endif
-name := $(name)-ota-$(FILE_NAME_TAG)
+name := $(product_name)-ota-$(FILE_NAME_TAG)
INTERNAL_OTA_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
INTERNAL_OTA_METADATA := $(PRODUCT_OUT)/ota_metadata
$(INTERNAL_OTA_PACKAGE_TARGET): KEY_CERT_PAIR := $(DEFAULT_KEY_CERT_PAIR)
$(INTERNAL_OTA_PACKAGE_TARGET): .KATI_IMPLICIT_OUTPUTS := $(INTERNAL_OTA_METADATA)
-$(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(OTA_FROM_TARGET_FILES)
+$(INTERNAL_OTA_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(OTA_FROM_TARGET_FILES) $(INTERNAL_OTATOOLS_FILES)
@echo "Package OTA: $@"
$(call build-ota-package-target,$@,-k $(KEY_CERT_PAIR) --output_metadata_path $(INTERNAL_OTA_METADATA))
@@ -4769,17 +5080,14 @@
otapackage: $(INTERNAL_OTA_PACKAGE_TARGET)
ifeq ($(BOARD_BUILD_RETROFIT_DYNAMIC_PARTITIONS_OTA_PACKAGE),true)
-name := $(TARGET_PRODUCT)
-ifeq ($(TARGET_BUILD_TYPE),debug)
- name := $(name)_debug
-endif
-name := $(name)-ota-retrofit-$(FILE_NAME_TAG)
+name := $(product_name)-ota-retrofit-$(FILE_NAME_TAG)
INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET): KEY_CERT_PAIR := $(DEFAULT_KEY_CERT_PAIR)
$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET): \
$(BUILT_TARGET_FILES_PACKAGE) \
- $(OTA_FROM_TARGET_FILES)
+ $(OTA_FROM_TARGET_FILES) \
+ $(INTERNAL_OTATOOLS_FILES)
@echo "Package OTA (retrofit dynamic partitions): $@"
$(call build-ota-package-target,$@,-k $(KEY_CERT_PAIR) --retrofit_dynamic_partitions)
@@ -4789,6 +5097,20 @@
endif # BOARD_BUILD_RETROFIT_DYNAMIC_PARTITIONS_OTA_PACKAGE
+ifneq ($(BOARD_PARTIAL_OTA_UPDATE_PARTITIONS_LIST),)
+name := $(product_name)-partial-ota-$(FILE_NAME_TAG)
+
+INTERNAL_OTA_PARTIAL_PACKAGE_TARGET := $(PRODUCT_OUT)/$(name).zip
+$(INTERNAL_OTA_PARTIAL_PACKAGE_TARGET): KEY_CERT_PAIR := $(DEFAULT_KEY_CERT_PAIR)
+$(INTERNAL_OTA_PARTIAL_PACKAGE_TARGET): $(BUILT_TARGET_FILES_PACKAGE) $(OTA_FROM_TARGET_FILES) $(INTERNAL_OTATOOLS_FILES)
+ @echo "Package partial OTA: $@"
+ $(call build-ota-package-target,$@,-k $(KEY_CERT_PAIR) --partial "$(BOARD_PARTIAL_OTA_UPDATE_PARTITIONS_LIST)")
+
+.PHONY: partialotapackage
+partialotapackage: $(INTERNAL_OTA_PARTIAL_PACKAGE_TARGET)
+
+endif # BOARD_PARTIAL_OTA_UPDATE_PARTITIONS_LIST
+
endif # build_ota_package
# -----------------------------------------------------------------
@@ -4824,16 +5146,7 @@
SYMBOLS_ZIP := $(PRODUCT_OUT)/$(name).zip
# For apps_only build we'll establish the dependency later in build/make/core/main.mk.
ifeq (,$(TARGET_BUILD_UNBUNDLED))
-$(SYMBOLS_ZIP): $(FULL_SYSTEMIMAGE_DEPS) \
- $(INTERNAL_RAMDISK_FILES) \
- $(INTERNAL_USERDATAIMAGE_FILES) \
- $(INTERNAL_VENDORIMAGE_FILES) \
- $(INTERNAL_PRODUCTIMAGE_FILES) \
- $(INTERNAL_SYSTEM_EXTIMAGE_FILES) \
- $(INTERNAL_ODMIMAGE_FILES) \
- $(INTERNAL_VENDOR_DLKMIMAGE_FILES) \
- $(INTERNAL_ODM_DLKMIMAGE_FILES) \
- $(updater_dep)
+$(SYMBOLS_ZIP): $(INTERNAL_ALLIMAGES_FILES) $(updater_dep)
endif
$(SYMBOLS_ZIP): PRIVATE_LIST_FILE := $(call intermediates-dir-for,PACKAGING,symbols)/filelist
$(SYMBOLS_ZIP): $(SOONG_ZIP)
@@ -4851,15 +5164,7 @@
endif
COVERAGE_ZIP := $(PRODUCT_OUT)/$(name).zip
ifeq (,$(TARGET_BUILD_UNBUNDLED))
-$(COVERAGE_ZIP): $(FULL_SYSTEMIMAGE_DEPS) \
- $(INTERNAL_RAMDISK_FILES) \
- $(INTERNAL_USERDATAIMAGE_FILES) \
- $(INTERNAL_VENDORIMAGE_FILES) \
- $(INTERNAL_PRODUCTIMAGE_FILES) \
- $(INTERNAL_SYSTEM_EXTIMAGE_FILES) \
- $(INTERNAL_ODMIMAGE_FILES) \
- $(INTERNAL_VENDOR_DLKMIMAGE_FILES) \
- $(INTERNAL_ODM_DLKMIMAGE_FILES)
+$(COVERAGE_ZIP): $(INTERNAL_ALLIMAGES_FILES)
endif
$(COVERAGE_ZIP): PRIVATE_LIST_FILE := $(call intermediates-dir-for,PACKAGING,coverage)/filelist
$(COVERAGE_ZIP): $(SOONG_ZIP)
@@ -4879,7 +5184,7 @@
$(PROFDATA_ZIP): $(SOONG_ZIP)
$(hide) $(SOONG_ZIP) -d -o $@ -C $(LLVM_PREBUILTS_BASE)/linux-x86/$(LLVM_PREBUILTS_VERSION) -f $(LLVM_PROFDATA) -f $(LIBCXX)
- $(call dist-for-goals,droidcore apps_only,$(PROFDATA_ZIP))
+ $(call dist-for-goals,droidcore,$(PROFDATA_ZIP))
endif
# -----------------------------------------------------------------
@@ -4915,9 +5220,12 @@
JACOCO_REPORT_CLASSES_ALL := $(PRODUCT_OUT)/jacoco-report-classes-all.jar
$(JACOCO_REPORT_CLASSES_ALL) :
@echo "Collecting uninstrumented classes"
- find $(TARGET_COMMON_OUT_ROOT) $(HOST_COMMON_OUT_ROOT) -name "jacoco-report-classes.jar" 2>/dev/null | sort > $@.list
+ find $(TARGET_COMMON_OUT_ROOT) $(HOST_COMMON_OUT_ROOT) -name "jacoco-report-classes.jar" -o -name "proguard_usage.zip" 2>/dev/null | sort > $@.list
$(SOONG_ZIP) -o $@ -L 0 -C $(OUT_DIR) -P out -l $@.list
+ifeq (,$(TARGET_BUILD_UNBUNDLED))
+ $(JACOCO_REPORT_CLASSES_ALL): $(INTERNAL_ALLIMAGES_FILES)
+endif
endif # EMMA_INSTRUMENT=true
@@ -4927,17 +5235,7 @@
PROGUARD_DICT_ZIP := $(PRODUCT_OUT)/$(TARGET_PRODUCT)-proguard-dict-$(FILE_NAME_TAG).zip
# For apps_only build we'll establish the dependency later in build/make/core/main.mk.
ifeq (,$(TARGET_BUILD_UNBUNDLED))
-$(PROGUARD_DICT_ZIP): \
- $(FULL_SYSTEMIMAGE_DEPS) \
- $(INTERNAL_RAMDISK_FILES) \
- $(INTERNAL_USERDATAIMAGE_FILES) \
- $(INTERNAL_VENDORIMAGE_FILES) \
- $(INTERNAL_PRODUCTIMAGE_FILES) \
- $(INTERNAL_SYSTEM_EXTIMAGE_FILES) \
- $(INTERNAL_ODMIMAGE_FILES) \
- $(INTERNAL_VENDOR_DLKMIMAGE_FILES) \
- $(INTERNAL_ODM_DLKMIMAGE_FILES) \
- $(updater_dep)
+$(PROGUARD_DICT_ZIP): $(INTERNAL_ALLIMAGES_FILES) $(updater_dep)
endif
$(PROGUARD_DICT_ZIP): PRIVATE_LIST_FILE := $(call intermediates-dir-for,PACKAGING,proguard)/filelist
$(PROGUARD_DICT_ZIP): $(SOONG_ZIP)
diff --git a/core/OWNERS b/core/OWNERS
index 459683e..5456d4f 100644
--- a/core/OWNERS
+++ b/core/OWNERS
@@ -1,2 +1,6 @@
per-file dex_preopt*.mk = ngeoffray@google.com,calin@google.com,mathewi@google.com,dbrazdil@google.com
per-file verify_uses_libraries.sh = ngeoffray@google.com,calin@google.com,mathieuc@google.com
+
+# For version updates
+per-file version_defaults.mk = aseaton@google.com,elisapascual@google.com,lubomir@google.com,pscovanner@google.com
+
diff --git a/core/android_soong_config_vars.mk b/core/android_soong_config_vars.mk
index ee12c8c..3a0c0f1 100644
--- a/core/android_soong_config_vars.mk
+++ b/core/android_soong_config_vars.mk
@@ -27,3 +27,15 @@
# Add variables to the namespace below:
$(call add_soong_config_var,ANDROID,TARGET_ENABLE_MEDIADRM_64)
+
+# TODO(b/172480615): Remove when platform uses ART Module prebuilts by default.
+ifeq (,$(filter art_module,$(SOONG_CONFIG_NAMESPACES)))
+ $(call add_soong_config_namespace,art_module)
+ SOONG_CONFIG_art_module += source_build
+endif
+SOONG_CONFIG_art_module_source_build ?= true
+
+# Apex build mode variables
+ifdef APEX_BUILD_FOR_PRE_S_DEVICES
+$(call add_soong_config_var_value,ANDROID,library_linking_strategy,prefer_static)
+endif
diff --git a/core/artifact_path_requirements.mk b/core/artifact_path_requirements.mk
new file mode 100644
index 0000000..ceaefa2
--- /dev/null
+++ b/core/artifact_path_requirements.mk
@@ -0,0 +1,60 @@
+# This file contains logic to enforce artifact path requirements
+# defined in product makefiles.
+
+# Fakes don't get installed, and NDK stubs aren't installed to device.
+static_allowed_patterns := $(TARGET_OUT_FAKE)/% $(SOONG_OUT_DIR)/ndk/%
+# RROs become REQUIRED by the source module, but are always placed on the vendor partition.
+static_allowed_patterns += %__auto_generated_rro_product.apk
+static_allowed_patterns += %__auto_generated_rro_vendor.apk
+# Auto-included targets are not considered
+static_allowed_patterns += $(call product-installed-files,)
+# $(PRODUCT_OUT)/apex is where shared libraries in APEXes get installed.
+# The path can be considered as a fake path, as the shared libraries
+# are installed there just to have symbols files for them under
+# $(PRODUCT_OUT)/symbols/apex for debugging purpose. The /apex directory
+# is never compiled into a filesystem image.
+static_allowed_patterns += $(PRODUCT_OUT)/apex/%
+ifeq (true,$(BOARD_USES_SYSTEM_OTHER_ODEX))
+ # Allow system_other odex space optimization.
+ static_allowed_patterns += \
+ $(TARGET_OUT_SYSTEM_OTHER)/%.odex \
+ $(TARGET_OUT_SYSTEM_OTHER)/%.vdex \
+ $(TARGET_OUT_SYSTEM_OTHER)/%.art
+endif
+
+all_offending_files :=
+$(foreach makefile,$(ARTIFACT_PATH_REQUIREMENT_PRODUCTS),\
+ $(eval requirements := $(PRODUCTS.$(makefile).ARTIFACT_PATH_REQUIREMENTS)) \
+ $(eval ### Verify that the product only produces files inside its path requirements.) \
+ $(eval allowed := $(PRODUCTS.$(makefile).ARTIFACT_PATH_ALLOWED_LIST)) \
+ $(eval path_patterns := $(call resolve-product-relative-paths,$(requirements),%)) \
+ $(eval allowed_patterns := $(call resolve-product-relative-paths,$(allowed))) \
+ $(eval files := $(call product-installed-files, $(makefile))) \
+ $(eval offending_files := $(filter-out $(path_patterns) $(allowed_patterns) $(static_allowed_patterns),$(files))) \
+ $(call maybe-print-list-and-error,$(offending_files),\
+ $(makefile) produces files outside its artifact path requirement. \
+ Allowed paths are $(subst $(space),$(comma)$(space),$(addsuffix *,$(requirements)))) \
+ $(eval unused_allowed := $(filter-out $(files),$(allowed_patterns))) \
+ $(if $(PRODUCTS.$(makefile).ARTIFACT_PATH_REQUIREMENT_IS_RELAXED),, \
+ $(call maybe-print-list-and-error,$(unused_allowed),$(makefile) includes redundant allowed entries in its artifact path requirement.) \
+ ) \
+ $(eval ### Optionally verify that nothing else produces files inside this artifact path requirement.) \
+ $(eval extra_files := $(filter-out $(files) $(HOST_OUT)/%,$(product_target_FILES))) \
+ $(eval files_in_requirement := $(filter $(path_patterns),$(extra_files))) \
+ $(eval all_offending_files += $(files_in_requirement)) \
+ $(eval allowed := $(PRODUCT_ARTIFACT_PATH_REQUIREMENT_ALLOWED_LIST)) \
+ $(eval allowed_patterns := $(call resolve-product-relative-paths,$(allowed))) \
+ $(eval offending_files := $(filter-out $(allowed_patterns),$(files_in_requirement))) \
+ $(eval enforcement := $(PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS)) \
+ $(if $(enforcement),\
+ $(call maybe-print-list-and-error,$(offending_files),\
+ $(INTERNAL_PRODUCT) produces files inside $(makefile)s artifact path requirement. \
+ $(PRODUCT_ARTIFACT_PATH_REQUIREMENT_HINT)) \
+ $(eval unused_allowed := $(if $(filter true strict,$(enforcement)),\
+ $(foreach p,$(allowed_patterns),$(if $(filter $(p),$(extra_files)),,$(p))))) \
+ $(call maybe-print-list-and-error,$(unused_allowed),$(INTERNAL_PRODUCT) includes redundant artifact path requirement allowed list entries.) \
+ ) \
+)
+$(PRODUCT_OUT)/offending_artifacts.txt:
+ rm -f $@
+ $(foreach f,$(sort $(all_offending_files)),echo $(f) >> $@;)
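As a minimal sketch of how this new file gets exercised (the call below relies on the pre-existing require-artifacts-in-path helper; the allowed entry is hypothetical), a product declares its requirement and this file later verifies every installed artifact against it:

  # Hypothetical product makefile: all of this product's installed files must
  # live under the system partition, with one explicitly allowed exception.
  $(call require-artifacts-in-path, $(TARGET_COPY_OUT_SYSTEM), $(TARGET_COPY_OUT_SYSTEM)/etc/example_allowed.conf)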
diff --git a/core/base_rules.mk b/core/base_rules.mk
index 58be7a2..4fd8baa 100644
--- a/core/base_rules.mk
+++ b/core/base_rules.mk
@@ -274,6 +274,12 @@
endif
endif
+ifeq ($(LOCAL_IS_UNIT_TEST),true)
+ ifeq ($(LOCAL_IS_HOST_MODULE),true)
+ LOCAL_COMPATIBILITY_SUITE += host-unit-tests
+ endif
+endif
+
ifeq ($(my_module_path),)
install_path_var := $(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT$(partition_tag)_$(LOCAL_MODULE_CLASS)
ifeq (true,$(LOCAL_PRIVILEGED_MODULE))
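A hedged illustration of the new flag (module name and source layout are hypothetical): a host-side test only needs to set LOCAL_IS_UNIT_TEST for base_rules.mk to append host-unit-tests to its compatibility suites.

  include $(CLEAR_VARS)
  LOCAL_MODULE := example_host_unit_test
  LOCAL_SRC_FILES := $(call all-java-files-under, src)
  LOCAL_IS_HOST_MODULE := true
  # With LOCAL_IS_UNIT_TEST, host-unit-tests is added to LOCAL_COMPATIBILITY_SUITE.
  LOCAL_IS_UNIT_TEST := true
  include $(BUILD_HOST_JAVA_LIBRARY)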
@@ -987,6 +993,10 @@
ALL_MODULES.$(my_register_name).TEST_CONFIG := $(test_config)
ALL_MODULES.$(my_register_name).EXTRA_TEST_CONFIGS := $(LOCAL_EXTRA_FULL_TEST_CONFIGS)
ALL_MODULES.$(my_register_name).TEST_MAINLINE_MODULES := $(LOCAL_TEST_MAINLINE_MODULES)
+ifndef LOCAL_IS_HOST_MODULE
+ALL_MODULES.$(my_register_name).FILE_CONTEXTS := $(LOCAL_FILE_CONTEXTS)
+endif
+ALL_MODULES.$(my_register_name).IS_UNIT_TEST := $(LOCAL_IS_UNIT_TEST)
test_config :=
INSTALLABLE_FILES.$(LOCAL_INSTALLED_MODULE).MODULE := $(my_register_name)
diff --git a/core/binary.mk b/core/binary.mk
index be008e6..fa36d64 100644
--- a/core/binary.mk
+++ b/core/binary.mk
@@ -1333,11 +1333,6 @@
my_c_includes += $(TOPDIR)$(LOCAL_PATH) $(intermediates) $(generated_sources_dir)
-# The platform JNI header is for platform modules only.
-ifeq ($(LOCAL_SDK_VERSION)$(LOCAL_USE_VNDK),)
- my_c_includes += $(JNI_H_INCLUDE)
-endif
-
my_c_includes := $(foreach inc,$(my_c_includes),$(call clean-path,$(inc)))
my_outside_includes := $(filter-out $(OUT_DIR)/%,$(filter /%,$(my_c_includes)) $(filter ../%,$(my_c_includes)))
diff --git a/core/board_config.mk b/core/board_config.mk
index 95d8af8..725c0a5 100644
--- a/core/board_config.mk
+++ b/core/board_config.mk
@@ -19,88 +19,111 @@
# and checks the variable defined therein.
# ###############################################################
-_board_strip_readonly_list := \
- BOARD_BOOTLOADER_IN_UPDATE_PACKAGE \
- BOARD_EGL_CFG \
- BOARD_HAVE_BLUETOOTH \
- BOARD_INSTALLER_CMDLINE \
- BOARD_KERNEL_CMDLINE \
- BOARD_KERNEL_BASE \
- BOARD_USES_GENERIC_AUDIO \
- BOARD_USES_RECOVERY_AS_BOOT \
- BOARD_VENDOR_USE_AKMD \
- BOARD_WPA_SUPPLICANT_DRIVER \
- BOARD_WLAN_DEVICE \
- TARGET_ARCH \
- TARGET_ARCH_VARIANT \
- TARGET_CPU_ABI \
- TARGET_CPU_ABI2 \
- TARGET_CPU_VARIANT \
- TARGET_CPU_VARIANT_RUNTIME \
- TARGET_2ND_ARCH \
- TARGET_2ND_ARCH_VARIANT \
- TARGET_2ND_CPU_ABI \
- TARGET_2ND_CPU_ABI2 \
- TARGET_2ND_CPU_VARIANT \
- TARGET_2ND_CPU_VARIANT_RUNTIME \
- TARGET_BOARD_PLATFORM \
- TARGET_BOARD_PLATFORM_GPU \
- TARGET_BOOTLOADER_BOARD_NAME \
- TARGET_FS_CONFIG_GEN \
- TARGET_NO_BOOTLOADER \
- TARGET_NO_KERNEL \
- TARGET_NO_RECOVERY \
- TARGET_NO_RADIOIMAGE \
- TARGET_HARDWARE_3D \
- WITH_DEXPREOPT \
+_board_strip_readonly_list :=
+_board_strip_readonly_list += BOARD_BOOTLOADER_IN_UPDATE_PACKAGE
+_board_strip_readonly_list += BOARD_EGL_CFG
+_board_strip_readonly_list += BOARD_HAVE_BLUETOOTH
+_board_strip_readonly_list += BOARD_INSTALLER_CMDLINE
+_board_strip_readonly_list += BOARD_KERNEL_CMDLINE
+_board_strip_readonly_list += BOARD_KERNEL_BASE
+_board_strip_readonly_list += BOARD_USES_GENERIC_AUDIO
+_board_strip_readonly_list += BOARD_USES_RECOVERY_AS_BOOT
+_board_strip_readonly_list += BOARD_VENDOR_USE_AKMD
+_board_strip_readonly_list += BOARD_WPA_SUPPLICANT_DRIVER
+_board_strip_readonly_list += BOARD_WLAN_DEVICE
+_board_strip_readonly_list += TARGET_BOARD_PLATFORM
+_board_strip_readonly_list += TARGET_BOARD_PLATFORM_GPU
+_board_strip_readonly_list += TARGET_BOOTLOADER_BOARD_NAME
+_board_strip_readonly_list += TARGET_FS_CONFIG_GEN
+_board_strip_readonly_list += TARGET_NO_BOOTLOADER
+_board_strip_readonly_list += TARGET_NO_KERNEL
+_board_strip_readonly_list += TARGET_NO_RECOVERY
+_board_strip_readonly_list += TARGET_NO_RADIOIMAGE
+_board_strip_readonly_list += TARGET_HARDWARE_3D
+_board_strip_readonly_list += WITH_DEXPREOPT
+
+# Arch variables
+_board_strip_readonly_list += TARGET_ARCH
+_board_strip_readonly_list += TARGET_ARCH_VARIANT
+_board_strip_readonly_list += TARGET_CPU_ABI
+_board_strip_readonly_list += TARGET_CPU_ABI2
+_board_strip_readonly_list += TARGET_CPU_VARIANT
+_board_strip_readonly_list += TARGET_CPU_VARIANT_RUNTIME
+_board_strip_readonly_list += TARGET_2ND_ARCH
+_board_strip_readonly_list += TARGET_2ND_ARCH_VARIANT
+_board_strip_readonly_list += TARGET_2ND_CPU_ABI
+_board_strip_readonly_list += TARGET_2ND_CPU_ABI2
+_board_strip_readonly_list += TARGET_2ND_CPU_VARIANT
+_board_strip_readonly_list += TARGET_2ND_CPU_VARIANT_RUNTIME
+# TARGET_ARCH_SUITE is an alternative arch configuration to TARGET_ARCH (and related variables)
+# that can be used for soong-only builds to build for several architectures at once.
+# Allowed values currently are "ndk" and "mainline_sdk".
+_board_strip_readonly_list += TARGET_ARCH_SUITE
# File system variables
-_board_strip_readonly_list += \
- BOARD_FLASH_BLOCK_SIZE \
- BOARD_BOOTIMAGE_PARTITION_SIZE \
- BOARD_RECOVERYIMAGE_PARTITION_SIZE \
- BOARD_SYSTEMIMAGE_PARTITION_SIZE \
- BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE \
- BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE \
- BOARD_USERDATAIMAGE_PARTITION_SIZE \
- BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE \
- BOARD_CACHEIMAGE_PARTITION_SIZE \
- BOARD_VENDORIMAGE_PARTITION_SIZE \
- BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE \
- BOARD_PRODUCTIMAGE_PARTITION_SIZE \
- BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE \
- BOARD_SYSTEM_EXTIMAGE_PARTITION_SIZE \
- BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE \
- BOARD_ODMIMAGE_PARTITION_SIZE \
- BOARD_ODMIMAGE_FILE_SYSTEM_TYPE \
- BOARD_VENDOR_DLKMIMAGE_PARTITION_SIZE \
- BOARD_VENDOR_DLKMIMAGE_FILE_SYSTEM_TYPE \
- BOARD_ODM_DLKMIMAGE_PARTITION_SIZE \
- BOARD_ODM_DLKMIMAGE_FILE_SYSTEM_TYPE \
+_board_strip_readonly_list += BOARD_FLASH_BLOCK_SIZE
+_board_strip_readonly_list += BOARD_BOOTIMAGE_PARTITION_SIZE
+_board_strip_readonly_list += BOARD_RECOVERYIMAGE_PARTITION_SIZE
+_board_strip_readonly_list += BOARD_SYSTEMIMAGE_PARTITION_SIZE
+_board_strip_readonly_list += BOARD_SYSTEMIMAGE_FILE_SYSTEM_TYPE
+_board_strip_readonly_list += BOARD_USERDATAIMAGE_FILE_SYSTEM_TYPE
+_board_strip_readonly_list += BOARD_USERDATAIMAGE_PARTITION_SIZE
+_board_strip_readonly_list += BOARD_CACHEIMAGE_FILE_SYSTEM_TYPE
+_board_strip_readonly_list += BOARD_CACHEIMAGE_PARTITION_SIZE
+_board_strip_readonly_list += BOARD_VENDORIMAGE_PARTITION_SIZE
+_board_strip_readonly_list += BOARD_VENDORIMAGE_FILE_SYSTEM_TYPE
+_board_strip_readonly_list += BOARD_PRODUCTIMAGE_PARTITION_SIZE
+_board_strip_readonly_list += BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE
+_board_strip_readonly_list += BOARD_SYSTEM_EXTIMAGE_PARTITION_SIZE
+_board_strip_readonly_list += BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE
+_board_strip_readonly_list += BOARD_ODMIMAGE_PARTITION_SIZE
+_board_strip_readonly_list += BOARD_ODMIMAGE_FILE_SYSTEM_TYPE
+_board_strip_readonly_list += BOARD_VENDOR_DLKMIMAGE_PARTITION_SIZE
+_board_strip_readonly_list += BOARD_VENDOR_DLKMIMAGE_FILE_SYSTEM_TYPE
+_board_strip_readonly_list += BOARD_ODM_DLKMIMAGE_PARTITION_SIZE
+_board_strip_readonly_list += BOARD_ODM_DLKMIMAGE_FILE_SYSTEM_TYPE
# Logical partitions related variables.
-_dynamic_partitions_var_list += \
- BOARD_SYSTEMIMAGE_PARTITION_RESERVED_SIZE \
- BOARD_VENDORIMAGE_PARTITION_RESERVED_SIZE \
- BOARD_ODMIMAGE_PARTITION_RESERVED_SIZE \
- BOARD_VENDOR_DLKMIMAGE_PARTITION_RESERVED_SIZE \
- BOARD_ODM_DLKMIMAGE_PARTITION_RESERVED_SIZE \
- BOARD_PRODUCTIMAGE_PARTITION_RESERVED_SIZE \
- BOARD_SYSTEM_EXTIMAGE_PARTITION_RESERVED_SIZE \
- BOARD_SUPER_PARTITION_SIZE \
- BOARD_SUPER_PARTITION_GROUPS \
-
-_board_strip_readonly_list += $(_dynamic_partitions_var_list)
+_board_strip_readonly_list += BOARD_SYSTEMIMAGE_PARTITION_RESERVED_SIZE
+_board_strip_readonly_list += BOARD_VENDORIMAGE_PARTITION_RESERVED_SIZE
+_board_strip_readonly_list += BOARD_ODMIMAGE_PARTITION_RESERVED_SIZE
+_board_strip_readonly_list += BOARD_VENDOR_DLKMIMAGE_PARTITION_RESERVED_SIZE
+_board_strip_readonly_list += BOARD_ODM_DLKMIMAGE_PARTITION_RESERVED_SIZE
+_board_strip_readonly_list += BOARD_PRODUCTIMAGE_PARTITION_RESERVED_SIZE
+_board_strip_readonly_list += BOARD_SYSTEM_EXTIMAGE_PARTITION_RESERVED_SIZE
+_board_strip_readonly_list += BOARD_SUPER_PARTITION_SIZE
+_board_strip_readonly_list += BOARD_SUPER_PARTITION_GROUPS
# Kernel related variables
-_board_strip_readonly_list += \
- BOARD_KERNEL_BINARIES \
- BOARD_KERNEL_MODULE_INTERFACE_VERSIONS \
+_board_strip_readonly_list += BOARD_KERNEL_BINARIES
+_board_strip_readonly_list += BOARD_KERNEL_MODULE_INTERFACE_VERSIONS
+
+# Variables related to generic kernel image (GKI) and generic boot image
+# - BOARD_USES_GENERIC_KERNEL_IMAGE is the global variable that defines whether the
+#   board uses GKI and a generic boot image.
+#   The update mechanism of the boot image is not enforced by this variable.
+# - BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE controls whether the recovery image
+# contains a kernel or not.
+# - BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT controls whether ramdisk
+# recovery resources are built to vendor_boot.
+# - BOARD_MOVE_GSI_AVB_KEYS_TO_VENDOR_BOOT controls whether GSI AVB keys are
+# built to vendor_boot.
+# - BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES controls whether boot images in $OUT are added
+# to target files package directly.
+_board_strip_readonly_list += BOARD_USES_GENERIC_KERNEL_IMAGE
+_board_strip_readonly_list += BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE
+_board_strip_readonly_list += BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT
+_board_strip_readonly_list += BOARD_MOVE_GSI_AVB_KEYS_TO_VENDOR_BOOT
+_board_strip_readonly_list += BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES
+
+# Defines the list of logical vendor ramdisk names to build or include in vendor_boot.
+_board_strip_readonly_list += BOARD_VENDOR_RAMDISK_FRAGMENTS
_build_broken_var_list := \
BUILD_BROKEN_DUP_RULES \
BUILD_BROKEN_DUP_SYSPROP \
BUILD_BROKEN_ELF_PREBUILT_PRODUCT_COPY_FILES \
+ BUILD_BROKEN_ENFORCE_SYSPROP_OWNER \
BUILD_BROKEN_MISSING_REQUIRED_MODULES \
BUILD_BROKEN_OUTSIDE_INCLUDE_DIRS \
BUILD_BROKEN_PREBUILT_ELF_FILES \
@@ -154,10 +177,19 @@
TARGET_DEVICE_DIR := $(patsubst %/,%,$(dir $(board_config_mk)))
.KATI_READONLY := TARGET_DEVICE_DIR
endif
+
include $(board_config_mk)
-ifeq ($(TARGET_ARCH),)
- $(error TARGET_ARCH not defined by board config: $(board_config_mk))
+
+ifneq (,$(and $(TARGET_ARCH),$(TARGET_ARCH_SUITE)))
+ $(error $(board_config_mk) erroneously sets both TARGET_ARCH and TARGET_ARCH_SUITE)
endif
+ifeq ($(TARGET_ARCH)$(TARGET_ARCH_SUITE),)
+ $(error Target architectures not defined by board config: $(board_config_mk))
+endif
+ifeq ($(TARGET_CPU_ABI)$(TARGET_ARCH_SUITE),)
+ $(error TARGET_CPU_ABI not defined by board config: $(board_config_mk))
+endif
+
ifneq ($(MALLOC_IMPL),)
$(warning *** Unsupported option MALLOC_IMPL defined by board config: $(board_config_mk).)
$(error Use `MALLOC_SVELTE := true` to configure jemalloc for low-memory)
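A sketch of the new soong-only configuration, using the allowed values documented above (the surrounding board settings are assumptions): such a board sets the suite instead of the usual arch variables.

  # Hypothetical BoardConfig.mk for a soong-only, multi-architecture build.
  TARGET_ARCH_SUITE := mainline_sdk
  # TARGET_ARCH stays unset: defining both it and TARGET_ARCH_SUITE trips the
  # error above, and TARGET_CPU_ABI is no longer required once a suite is set.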
@@ -174,10 +206,12 @@
TARGET_CPU_VARIANT_RUNTIME := $(or $(TARGET_CPU_VARIANT_RUNTIME),$(TARGET_CPU_VARIANT))
TARGET_2ND_CPU_VARIANT_RUNTIME := $(or $(TARGET_2ND_CPU_VARIANT_RUNTIME),$(TARGET_2ND_CPU_VARIANT))
-# The combo makefiles check and set defaults for various CPU configuration
-combo_target := TARGET_
-combo_2nd_arch_prefix :=
-include $(BUILD_SYSTEM)/combo/select.mk
+ifdef TARGET_ARCH
+ # The combo makefiles check and set defaults for various CPU configuration
+ combo_target := TARGET_
+ combo_2nd_arch_prefix :=
+ include $(BUILD_SYSTEM)/combo/select.mk
+endif
ifdef TARGET_2ND_ARCH
combo_2nd_arch_prefix := $(TARGET_2ND_ARCH_VAR_PREFIX)
@@ -187,9 +221,7 @@
.KATI_READONLY := $(_board_strip_readonly_list)
INTERNAL_KERNEL_CMDLINE := $(BOARD_KERNEL_CMDLINE)
-ifeq ($(TARGET_CPU_ABI),)
- $(error No TARGET_CPU_ABI defined by board config: $(board_config_mk))
-endif
+
ifneq ($(filter %64,$(TARGET_ARCH)),)
TARGET_IS_64_BIT := true
endif
@@ -290,7 +322,8 @@
###########################################
# Now we can substitute with the real value of TARGET_COPY_OUT_DEBUG_RAMDISK
-ifneq (,$(filter true,$(BOARD_USES_RECOVERY_AS_BOOT) $(BOARD_GKI_NONAB_COMPAT)))
+ifneq (,$(filter true,$(BOARD_USES_RECOVERY_AS_BOOT) \
+ $(BOARD_GKI_NONAB_COMPAT) $(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT)))
TARGET_COPY_OUT_DEBUG_RAMDISK := debug_ramdisk/first_stage_ramdisk
TARGET_COPY_OUT_VENDOR_DEBUG_RAMDISK := vendor_debug_ramdisk/first_stage_ramdisk
TARGET_COPY_OUT_TEST_HARNESS_RAMDISK := test_harness_ramdisk/first_stage_ramdisk
@@ -342,23 +375,34 @@
# Are we building a boot image
BUILDING_BOOT_IMAGE :=
-ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
- BUILDING_BOOT_IMAGE :=
-else ifeq ($(PRODUCT_BUILD_BOOT_IMAGE),)
- ifdef BOARD_BOOTIMAGE_PARTITION_SIZE
+ifeq ($(PRODUCT_BUILD_BOOT_IMAGE),)
+ ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+ BUILDING_BOOT_IMAGE :=
+ else ifdef BOARD_BOOTIMAGE_PARTITION_SIZE
+ BUILDING_BOOT_IMAGE := true
+ else ifneq (,$(foreach kernel,$(BOARD_KERNEL_BINARIES),$(BOARD_$(call to-upper,$(kernel))_BOOTIMAGE_PARTITION_SIZE)))
BUILDING_BOOT_IMAGE := true
endif
else ifeq ($(PRODUCT_BUILD_BOOT_IMAGE),true)
- BUILDING_BOOT_IMAGE := true
+ ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+ $(warning *** PRODUCT_BUILD_BOOT_IMAGE is true, but so is BOARD_USES_RECOVERY_AS_BOOT.)
+ $(warning *** Skipping building boot image.)
+ BUILDING_BOOT_IMAGE :=
+ else
+ BUILDING_BOOT_IMAGE := true
+ endif
endif
.KATI_READONLY := BUILDING_BOOT_IMAGE
# Are we building a recovery image
BUILDING_RECOVERY_IMAGE :=
-ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
- BUILDING_RECOVERY_IMAGE := true
-else ifeq ($(PRODUCT_BUILD_RECOVERY_IMAGE),)
- ifdef BOARD_RECOVERYIMAGE_PARTITION_SIZE
+ifeq ($(PRODUCT_BUILD_RECOVERY_IMAGE),)
+ ifeq ($(BOARD_USES_RECOVERY_AS_BOOT),true)
+ BUILDING_RECOVERY_IMAGE := true
+ else ifeq ($(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT),true)
+ # Set to true to build recovery resources for vendor_boot
+ BUILDING_RECOVERY_IMAGE := true
+ else ifdef BOARD_RECOVERYIMAGE_PARTITION_SIZE
ifeq (,$(filter true, $(TARGET_NO_KERNEL) $(TARGET_NO_RECOVERY)))
BUILDING_RECOVERY_IMAGE := true
endif
@@ -372,7 +416,9 @@
BUILDING_VENDOR_BOOT_IMAGE :=
ifdef BOARD_BOOT_HEADER_VERSION
ifneq ($(call math_gt_or_eq,$(BOARD_BOOT_HEADER_VERSION),3),)
- ifneq ($(TARGET_NO_VENDOR_BOOT),true)
+ ifeq ($(PRODUCT_BUILD_VENDOR_BOOT_IMAGE),)
+ BUILDING_VENDOR_BOOT_IMAGE := true
+ else ifeq ($(PRODUCT_BUILD_VENDOR_BOOT_IMAGE),true)
BUILDING_VENDOR_BOOT_IMAGE := true
endif
endif
@@ -687,6 +733,16 @@
TARGET_VENDOR_TEST_SUFFIX :=
endif
+# If PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY is set,
+# BOARD_VNDK_VERSION must be set because PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY
+# is an enforcement of inter-partition dependencies, and it doesn't have any meaning
+# when BOARD_VNDK_VERSION isn't set.
+ifeq ($(PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY),true)
+ ifeq ($(BOARD_VNDK_VERSION),)
+ $(error BOARD_VNDK_VERSION must be set when PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY is true)
+ endif
+endif
+
###########################################
# APEXes are by default flattened, i.e. non-updatable.
# It can be unflattened (and updatable) by inheriting from
@@ -732,3 +788,39 @@
$(if $(filter true,$(BUILD_BROKEN_USES_$(m))),\
$(KATI_deprecated_var $(m),Please convert to Soong),\
$(KATI_obsolete_var $(m),Please convert to Soong)))
+
+ifndef BUILDING_RECOVERY_IMAGE
+ ifeq (true,$(BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE))
+ $(error Should not set BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE if not building recovery image)
+ endif
+endif
+
+ifndef BUILDING_VENDOR_BOOT_IMAGE
+ ifeq (true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT))
+ $(error Should not set BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT if not building vendor_boot image)
+ endif
+ ifdef BOARD_VENDOR_RAMDISK_FRAGMENTS
+ $(error Should not set BOARD_VENDOR_RAMDISK_FRAGMENTS if not building vendor_boot image)
+ endif
+endif
+
+ifneq ($(words $(BOARD_VENDOR_RAMDISK_FRAGMENTS)),$(words $(sort $(BOARD_VENDOR_RAMDISK_FRAGMENTS))))
+ $(error BOARD_VENDOR_RAMDISK_FRAGMENTS has duplicate entries: $(BOARD_VENDOR_RAMDISK_FRAGMENTS))
+endif
+
+# If BOARD_USES_GENERIC_KERNEL_IMAGE is set, BOARD_USES_RECOVERY_AS_BOOT must not be set.
+# Devices without a dedicated recovery partition use BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT to
+# build recovery into vendor_boot.
+ifeq (true,$(BOARD_USES_GENERIC_KERNEL_IMAGE))
+ ifeq (true,$(BOARD_USES_RECOVERY_AS_BOOT))
+ $(error BOARD_USES_RECOVERY_AS_BOOT cannot be true if BOARD_USES_GENERIC_KERNEL_IMAGE is true. \
+ Use BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT instead)
+ endif
+endif
+
+ifeq (true,$(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT))
+ ifeq (true,$(BOARD_USES_RECOVERY_AS_BOOT))
+ $(error BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT and BOARD_USES_RECOVERY_AS_BOOT cannot be \
+ both true. Recovery resources should be installed to either boot or vendor_boot, but not both)
+ endif
+endif
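To make the constraints above concrete, a hedged BoardConfig.mk combination that they permit (the header version and values are illustrative):

  # GKI device without a dedicated recovery partition: recovery resources are
  # built into vendor_boot, and recovery-as-boot stays disabled.
  BOARD_BOOT_HEADER_VERSION := 3
  BOARD_USES_GENERIC_KERNEL_IMAGE := true
  BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT := true
  # BOARD_USES_RECOVERY_AS_BOOT must not be set to true in this setup.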
diff --git a/core/clear_vars.mk b/core/clear_vars.mk
index d515db3..5effac7 100644
--- a/core/clear_vars.mk
+++ b/core/clear_vars.mk
@@ -74,7 +74,6 @@
LOCAL_DROIDDOC_CUSTOM_ASSET_DIR:=
LOCAL_DROIDDOC_CUSTOM_TEMPLATE_DIR:=
LOCAL_DROIDDOC_DOC_ZIP :=
-LOCAL_DROIDDOC_JDIFF_DOC_ZIP :=
LOCAL_DROIDDOC_HTML_DIR:=
LOCAL_DROIDDOC_METADATA_ZIP:=
LOCAL_DROIDDOC_OPTIONS:=
@@ -102,6 +101,7 @@
LOCAL_EXTRACT_APK:=
LOCAL_EXTRACT_DPI_APK:=
LOCAL_FDO_SUPPORT:=
+LOCAL_FILE_CONTEXTS:=
LOCAL_FINDBUGS_FLAGS:=
LOCAL_FORCE_STATIC_EXECUTABLE:=
LOCAL_FULL_CLASSES_JACOCO_JAR:=
@@ -130,6 +130,7 @@
LOCAL_IS_FUZZ_TARGET:=
LOCAL_IS_HOST_MODULE:=
LOCAL_IS_RUNTIME_RESOURCE_OVERLAY:=
+LOCAL_IS_UNIT_TEST:=
LOCAL_JACK_CLASSPATH:=
LOCAL_JACK_COVERAGE_EXCLUDE_FILTER:=
LOCAL_JACK_COVERAGE_INCLUDE_FILTER:=
@@ -158,6 +159,10 @@
LOCAL_CERTIFICATE_LINEAGE:=
LOCAL_LDFLAGS:=
LOCAL_LDLIBS:=
+LOCAL_LICENSE_CONDITIONS:=
+LOCAL_LICENSE_KINDS:=
+LOCAL_LICENSE_INSTALL_MAP:=
+LOCAL_LICENSE_PACKAGE_NAME:=
LOCAL_LOGTAGS_FILES:=
LOCAL_MANIFEST_FILE:=
LOCAL_MANIFEST_INSTRUMENTATION_FOR:=
@@ -169,6 +174,7 @@
LOCAL_MODULE_HOST_ARCH_WARN:=
LOCAL_MODULE_HOST_CROSS_ARCH:=
LOCAL_MODULE_HOST_OS:=
+LOCAL_MODULE_IS_CONTAINER:=
LOCAL_MODULE_OWNER:=
LOCAL_MODULE_PATH:=
LOCAL_MODULE_RELATIVE_PATH :=
@@ -237,6 +243,7 @@
# lite(default),micro,nano,stream,full,nanopb-c,nanopb-c-enable_malloc,nanopb-c-16bit,nanopb-c-enable_malloc-16bit,nanopb-c-32bit,nanopb-c-enable_malloc-32bit
LOCAL_PROTOC_OPTIMIZE_TYPE:=
LOCAL_PROTO_JAVA_OUTPUT_PARAMS:=
+LOCAL_PROVIDES_USES_LIBRARY:=
LOCAL_R8_FLAG_FILES:=
LOCAL_RECORDED_MODULE_TYPE:=
LOCAL_RENDERSCRIPT_CC:=
diff --git a/target/product/virtual_ab_ota_retrofit.mk b/core/combo/arch/arm64/armv8-a-branchprot.mk
similarity index 70%
copy from target/product/virtual_ab_ota_retrofit.mk
copy to core/combo/arch/arm64/armv8-a-branchprot.mk
index 3416a4f..77f3535 100644
--- a/target/product/virtual_ab_ota_retrofit.mk
+++ b/core/combo/arch/arm64/armv8-a-branchprot.mk
@@ -1,5 +1,5 @@
#
-# Copyright (C) 2019 The Android Open-Source Project
+# Copyright (C) 2021 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,8 +14,6 @@
# limitations under the License.
#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota.mk)
-
-PRODUCT_VIRTUAL_AB_OTA_RETROFIT := true
-
-PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.retrofit=true
+# .mk file required to support build for the new armv8-a-branchprot Arm64 arch
+# variant. The file just needs to be present but is not required to contain
+# anything.
diff --git a/core/config.mk b/core/config.mk
index be0b55c..ed6429a 100644
--- a/core/config.mk
+++ b/core/config.mk
@@ -155,6 +155,9 @@
$(KATI_obsolete_var COVERAGE_EXCLUDE_PATHS,Use NATIVE_COVERAGE_EXCLUDE_PATHS instead)
$(KATI_obsolete_var BOARD_VNDK_RUNTIME_DISABLE,VNDK-Lite is no longer supported.)
$(KATI_obsolete_var LOCAL_SANITIZE_BLACKLIST,Use LOCAL_SANITIZE_BLOCKLIST instead.)
+$(KATI_deprecated_var BOARD_PLAT_PUBLIC_SEPOLICY_DIR,Use SYSTEM_EXT_PUBLIC_SEPOLICY_DIRS instead.)
+$(KATI_deprecated_var BOARD_PLAT_PRIVATE_SEPOLICY_DIR,Use SYSTEM_EXT_PRIVATE_SEPOLICY_DIRS instead.)
+$(KATI_obsolete_var TARGET_NO_VENDOR_BOOT,Use PRODUCT_BUILD_VENDOR_BOOT_IMAGE instead)
# Used to force goals to build. Only use for conditionally defined goals.
.PHONY: FORCE
@@ -552,6 +555,7 @@
FS_GET_STATS := $(HOST_OUT_EXECUTABLES)/fs_get_stats$(HOST_EXECUTABLE_SUFFIX)
MKEXTUSERIMG := $(HOST_OUT_EXECUTABLES)/mkuserimg_mke2fs
MKE2FS_CONF := system/extras/ext4_utils/mke2fs.conf
+MKEROFSUSERIMG := $(HOST_OUT_EXECUTABLES)/mkerofsimage.sh
MKSQUASHFSUSERIMG := $(HOST_OUT_EXECUTABLES)/mksquashfsimage.sh
MKF2FSUSERIMG := $(HOST_OUT_EXECUTABLES)/mkf2fsuserimg.sh
SIMG2IMG := $(HOST_OUT_EXECUTABLES)/simg2img$(HOST_EXECUTABLE_SUFFIX)
@@ -601,13 +605,9 @@
APICHECK_COMMAND := $(JAVA) -Xmx4g -jar $(APICHECK) --no-banner --compatible-output=no
# Boolean variable determining if the allow list for compatible properties is enabled
-PRODUCT_COMPATIBLE_PROPERTY := false
-ifneq ($(PRODUCT_COMPATIBLE_PROPERTY_OVERRIDE),)
- PRODUCT_COMPATIBLE_PROPERTY := $(PRODUCT_COMPATIBLE_PROPERTY_OVERRIDE)
-else ifeq ($(PRODUCT_SHIPPING_API_LEVEL),)
- #$(warning no product shipping level defined)
-else ifneq ($(call math_lt,27,$(PRODUCT_SHIPPING_API_LEVEL)),)
- PRODUCT_COMPATIBLE_PROPERTY := true
+PRODUCT_COMPATIBLE_PROPERTY := true
+ifeq ($(PRODUCT_COMPATIBLE_PROPERTY_OVERRIDE),false)
+ $(error PRODUCT_COMPATIBLE_PROPERTY_OVERRIDE is obsolete)
endif
.KATI_READONLY := \
@@ -990,6 +990,13 @@
endif # PRODUCT_USE_DYNAMIC_PARTITIONS
+# By default, we build the hidden API csv files from source. You can use
+# prebuilt hiddenapi files by setting BOARD_PREBUILT_HIDDENAPI_DIR to the name
+# of a directory containing both prebuilt hiddenapi-flags.csv and
+# hiddenapi-index.csv.
+BOARD_PREBUILT_HIDDENAPI_DIR ?=
+.KATI_READONLY := BOARD_PREBUILT_HIDDENAPI_DIR
+
# ###############################################################
# Set up final options.
# ###############################################################
diff --git a/core/config_sanitizers.mk b/core/config_sanitizers.mk
index eaab1b5..f39b84a 100644
--- a/core/config_sanitizers.mk
+++ b/core/config_sanitizers.mk
@@ -53,6 +53,18 @@
endif
endif
+# Disable global memtag_heap in excluded paths
+ifneq ($(filter memtag_heap, $(my_global_sanitize)),)
+ combined_exclude_paths := $(MEMTAG_HEAP_EXCLUDE_PATHS) \
+ $(PRODUCT_MEMTAG_HEAP_EXCLUDE_PATHS)
+
+ ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_exclude_paths)),\
+ $(filter $(dir)%,$(LOCAL_PATH)))),)
+ my_global_sanitize := $(filter-out memtag_heap,$(my_global_sanitize))
+ my_global_sanitize_diag := $(filter-out memtag_heap,$(my_global_sanitize_diag))
+ endif
+endif
+
ifneq ($(my_global_sanitize),)
my_sanitize := $(my_global_sanitize) $(my_sanitize)
endif
@@ -116,6 +128,25 @@
endif
endif
+# Enable memtag_heap in included paths (for Arm64 only).
+ifeq ($(filter memtag_heap, $(my_sanitize)),)
+ ifneq ($(filter arm64,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)),)
+ combined_sync_include_paths := $(MEMTAG_HEAP_SYNC_INCLUDE_PATHS) \
+ $(PRODUCT_MEMTAG_HEAP_SYNC_INCLUDE_PATHS)
+ combined_async_include_paths := $(MEMTAG_HEAP_ASYNC_INCLUDE_PATHS) \
+ $(PRODUCT_MEMTAG_HEAP_ASYNC_INCLUDE_PATHS)
+
+ ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_sync_include_paths)),\
+ $(filter $(dir)%,$(LOCAL_PATH)))),)
+ my_sanitize := memtag_heap $(my_sanitize)
+      my_sanitize_diag := memtag_heap $(my_sanitize_diag)
+ else ifneq ($(strip $(foreach dir,$(subst $(comma),$(space),$(combined_async_include_paths)),\
+ $(filter $(dir)%,$(LOCAL_PATH)))),)
+ my_sanitize := memtag_heap $(my_sanitize)
+ endif
+ endif
+endif
+
# If CFI is disabled globally, remove it from my_sanitize.
ifeq ($(strip $(ENABLE_CFI)),false)
my_sanitize := $(filter-out cfi,$(my_sanitize))
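A minimal sketch of how a product opts directories into MTE heap tagging through the new variables (all paths are hypothetical):

  # Hypothetical product makefile fragment; only affects arm64 targets.
  PRODUCT_MEMTAG_HEAP_SYNC_INCLUDE_PATHS := system/example/critical_daemon
  PRODUCT_MEMTAG_HEAP_ASYNC_INCLUDE_PATHS := vendor/example/media
  # Paths here are carved out again when memtag_heap is enabled globally.
  PRODUCT_MEMTAG_HEAP_EXCLUDE_PATHS := external/example_legacy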
@@ -164,6 +195,7 @@
ifneq ($(filter arm x86 x86_64,$(TARGET_$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH)),)
my_sanitize := $(filter-out hwaddress,$(my_sanitize))
+ my_sanitize := $(filter-out memtag_heap,$(my_sanitize))
endif
ifneq ($(filter hwaddress,$(my_sanitize)),)
@@ -183,6 +215,20 @@
endif
endif
+ifneq ($(filter memtag_heap,$(my_sanitize)),)
+ # Add memtag ELF note.
+ ifneq ($(filter memtag_heap,$(my_sanitize_diag)),)
+ my_whole_static_libraries += note_memtag_heap_sync
+ else
+ my_whole_static_libraries += note_memtag_heap_async
+ endif
+ # This is all that memtag_heap does - it is not an actual -fsanitize argument.
+ # Remove it from the list.
+ my_sanitize := $(filter-out memtag_heap,$(my_sanitize))
+endif
+
+my_sanitize_diag := $(filter-out memtag_heap,$(my_sanitize_diag))
+
# TSAN is not supported on 32-bit architectures. For non-multilib cases, make
# its use an error. For multilib cases, don't use it for the 32-bit case.
ifneq ($(filter thread,$(my_sanitize)),)
@@ -441,3 +487,13 @@
endif
endif
endif
+
+# http://b/177566116, libc++ may crash with this sanitizer.
+# Disable this check unless it has been explicitly specified.
+ifneq ($(findstring fsanitize,$(my_cflags)),)
+ ifneq ($(findstring integer,$(my_cflags)),)
+ ifeq ($(findstring sanitize=unsigned-shift-base,$(my_cflags)),)
+ my_cflags += -fno-sanitize=unsigned-shift-base
+ endif
+ endif
+endif
diff --git a/core/cxx_stl_setup.mk b/core/cxx_stl_setup.mk
index a2abb1a..f71ef72 100644
--- a/core/cxx_stl_setup.mk
+++ b/core/cxx_stl_setup.mk
@@ -78,18 +78,12 @@
my_static_libraries += libc++demangle
ifeq ($(my_link_type),static)
- my_static_libraries += libm libc
- ifeq (arm,$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH))
- my_static_libraries += libunwind_llvm
- my_ldflags += -Wl,--exclude-libs,libunwind_llvm.a
- else
- my_static_libraries += libgcc_stripped
- my_ldflags += -Wl,--exclude-libs,libgcc_stripped.a
- endif
+ my_static_libraries += libm libc libunwind
endif
endif
else ifeq ($(my_cxx_stl),ndk)
# Using an NDK STL. Handled in binary.mk, except for the unwinder.
+ # TODO: Switch the NDK over to the LLVM unwinder for non-arm32 architectures.
ifeq (arm,$($(my_prefix)$(LOCAL_2ND_ARCH_VAR_PREFIX)ARCH))
my_static_libraries += libunwind_llvm
my_ldflags += -Wl,--exclude-libs,libunwind_llvm.a
diff --git a/core/definitions.mk b/core/definitions.mk
index bfbeee3..4300efe 100644
--- a/core/definitions.mk
+++ b/core/definitions.mk
@@ -107,7 +107,7 @@
# All tests that should be skipped in presubmit check.
ALL_DISABLED_PRESUBMIT_TESTS :=
-# All compatibility suites mentioned in LOCAL_COMPATIBILITY_SUITES
+# All compatibility suites mentioned in LOCAL_COMPATIBILITY_SUITE
ALL_COMPATIBILITY_SUITES :=
# All compatibility suite files to dist.
@@ -525,6 +525,96 @@
endef
###########################################################
+## Sometimes a notice dependency will reference an unadorned
+## module name that only appears in ALL_MODULES adorned with
+## an ARCH suffix or a `host_cross_` prefix.
+##
+## After all of the modules are processed in base_rules.mk,
+## replace all such dependencies with every matching adorned
+## module name.
+###########################################################
+
+define fix-notice-deps
+$(strip \
+ $(eval _all_module_refs := \
+ $(sort \
+ $(foreach m,$(sort $(ALL_MODULES)), \
+ $(ALL_MODULES.$(m).NOTICE_DEPS) \
+ ) \
+ ) \
+ ) \
+ $(foreach m, $(_all_module_refs), \
+ $(eval _lookup.$(m) := \
+ $(sort \
+ $(if $(strip $(ALL_MODULES.$(m).PATH)), \
+ $(m), \
+ $(filter $(m)_32 $(m)_64 host_cross_$(m) host_cross_$(m)_32 host_cross_$(m)_64, $(ALL_MODULES)) \
+ ) \
+ ) \
+ ) \
+ ) \
+ $(foreach m, $(ALL_MODULES), \
+ $(eval ALL_MODULES.$(m).NOTICE_DEPS := \
+ $(sort \
+ $(foreach d,$(ALL_MODULES.$(m).NOTICE_DEPS), \
+ $(_lookup.$(d)) \
+ ) \
+ ) \
+ ) \
+ ) \
+)
+endef
+
+###########################################################
+## Target directory for license metadata files.
+###########################################################
+define license-metadata-dir
+$(call generated-sources-dir-for,META,lic,)
+endef
+
+###########################################################
+## License metadata build rule for my_register_name $1
+###########################################################
+define license-metadata-rule
+$(strip $(eval _dir := $(call license-metadata-dir)))
+$(strip $(eval _deps := $(sort $(filter-out $(_dir)/$(1).meta_lic,$(foreach d,$(ALL_MODULES.$(1).NOTICE_DEPS), $(_dir)/$(d).meta_lic)))))
+$(foreach b,$(sort $(ALL_MODULES.$(1).BUILT) $(ALL_MODULES.$(1).INSTALLED)),
+$(_dir)/$(b).meta_module ::
+ mkdir -p $$(dir $$@)
+ echo $(_dir)/$(1).meta_lic >> $$@
+ sort -u $$@ -o $$@
+
+)
+$(_dir)/$(1).meta_lic: PRIVATE_KINDS := $(sort $(ALL_MODULES.$(1).LICENSE_KINDS))
+$(_dir)/$(1).meta_lic: PRIVATE_CONDITIONS := $(sort $(ALL_MODULES.$(1).LICENSE_CONDITIONS))
+$(_dir)/$(1).meta_lic: PRIVATE_NOTICES := $(sort $(ALL_MODULES.$(1).NOTICES))
+$(_dir)/$(1).meta_lic: PRIVATE_NOTICE_DEPS := $(_deps)
+$(_dir)/$(1).meta_lic: PRIVATE_TARGETS := $(sort $(ALL_MODULES.$(1).BUILT) $(ALL_MODULES.$(1).INSTALLED))
+$(_dir)/$(1).meta_lic: PRIVATE_IS_CONTAINER := $(sort $(ALL_MODULES.$(1).IS_CONTAINER))
+$(_dir)/$(1).meta_lic: PRIVATE_PACKAGE_NAME := $(ALL_MODULES.$(1).LICENSE_PACKAGE_NAME)
+$(_dir)/$(1).meta_lic: PRIVATE_INSTALL_MAP := $(sort $(ALL_MODULES.$(1).LICENSE_INSTALL_MAP))
+$(_dir)/$(1).meta_lic : $(_deps) $(ALL_MODULES.$(1).NOTICES) $(foreach b,$(sort $(ALL_MODULES.$(1).BUILT) $(ALL_MODULES.$(1).INSTALLED)), $(_dir)/$(b).meta_module) build/make/tools/build-license-metadata.sh
+ rm -f $$@
+ mkdir -p $$(dir $$@)
+ build/make/tools/build-license-metadata.sh -k $$(PRIVATE_KINDS) -c $$(PRIVATE_CONDITIONS) -n $$(PRIVATE_NOTICES) -d $$(PRIVATE_NOTICE_DEPS) -m $$(PRIVATE_INSTALL_MAP) -t $$(PRIVATE_TARGETS) $$(if $$(filter-out false,$$(PRIVATE_IS_CONTAINER)),-is_container) -p $$(PRIVATE_PACKAGE_NAME) -o $$@
+
+$(1) : $(_dir)/$(1).meta_lic
+
+$(if $(ALL_MODULES.$(1).INSTALLED_NOTICE_FILE),$(ALL_MODULES.$(1).INSTALLED_NOTICE_FILE) : $(_dir)/$(1).meta_lic)
+
+.PHONY: $(1).meta_lic
+$(1).meta_lic : $(_dir)/$(1).meta_lic
+
+endef
+
+###########################################################
+## Declares a license metadata build rule for ALL_MODULES
+###########################################################
+define build-license-metadata
+$(foreach m,$(ALL_MODULES),$(eval $(call license-metadata-rule,$(m))))
+endef
+
+###########################################################
## Returns correct _idfPrefix from the list:
## { HOST, HOST_CROSS, TARGET }
###########################################################
@@ -2404,6 +2494,7 @@
$(1) \
$(HOST_INIT_VERIFIER) \
$(call intermediates-dir-for,ETC,passwd_system)/passwd_system \
+ $(call intermediates-dir-for,ETC,passwd_system_ext)/passwd_system_ext \
$(call intermediates-dir-for,ETC,passwd_vendor)/passwd_vendor \
$(call intermediates-dir-for,ETC,passwd_odm)/passwd_odm \
$(call intermediates-dir-for,ETC,passwd_product)/passwd_product \
@@ -2414,6 +2505,7 @@
$(call intermediates-dir-for,ETC,odm_property_contexts)/odm_property_contexts
$(hide) $(HOST_INIT_VERIFIER) \
-p $(call intermediates-dir-for,ETC,passwd_system)/passwd_system \
+ -p $(call intermediates-dir-for,ETC,passwd_system_ext)/passwd_system_ext \
-p $(call intermediates-dir-for,ETC,passwd_vendor)/passwd_vendor \
-p $(call intermediates-dir-for,ETC,passwd_odm)/passwd_odm \
-p $(call intermediates-dir-for,ETC,passwd_product)/passwd_product \
@@ -2491,15 +2583,25 @@
# $(2): destination file
# $(3): message to print on error
define copy-non-elf-file-checked
-$(2): $(1) $(LLVM_READOBJ)
- @echo "Copy non-ELF: $$@"
+$(eval check_non_elf_file_timestamp := \
+ $(call intermediates-dir-for,FAKE,check-non-elf-file-timestamps)/$(2).timestamp)
+$(check_non_elf_file_timestamp): $(1) $(LLVM_READOBJ)
+ @echo "Check non-ELF: $$<"
+ $(hide) mkdir -p "$$(dir $$@)"
+ $(hide) rm -f "$$@"
$(hide) \
- if $(LLVM_READOBJ) -h $$< >/dev/null 2>&1; then \
- $(call echo-error,$$@,$(3)); \
- $(call echo-error,$$@,found ELF file: $$<); \
+ if $(LLVM_READOBJ) -h "$$<" >/dev/null 2>&1; then \
+ $(call echo-error,$(2),$(3)); \
+ $(call echo-error,$(2),found ELF file: $$<); \
false; \
fi
+ $(hide) touch "$$@"
+
+$(2): $(1) $(check_non_elf_file_timestamp)
+ @echo "Copy non-ELF: $$@"
$$(copy-file-to-target)
+
+check-elf-prebuilt-product-copy-files: $(check_non_elf_file_timestamp)
endef
# The -t option to acp and the -p option to cp is
@@ -2610,6 +2712,7 @@
@mkdir -p $(dir $$@)
@rm -rf $$@
$(hide) ln -sf $(2) $$@
+$(3): .KATI_SYMLINK_OUTPUTS := $(3)
endef
# Copy an apk to a target location while removing classes*.dex
diff --git a/core/dex_preopt_config.mk b/core/dex_preopt_config.mk
index 41a2be9..06e2fb7 100644
--- a/core/dex_preopt_config.mk
+++ b/core/dex_preopt_config.mk
@@ -1,16 +1,24 @@
DEX_PREOPT_CONFIG := $(SOONG_OUT_DIR)/dexpreopt.config
ENABLE_PREOPT := true
+ENABLE_PREOPT_BOOT_IMAGES := true
ifneq (true,$(filter true,$(WITH_DEXPREOPT)))
+ # Disable dexpreopt for libraries/apps and for boot images.
ENABLE_PREOPT :=
+ ENABLE_PREOPT_BOOT_IMAGES :=
else ifneq (true,$(filter true,$(PRODUCT_USES_DEFAULT_ART_CONFIG)))
+ # Disable dexpreopt for libraries/apps and for boot images: not having default
+ # ART config means that some important system properties are not set, which
+ # would result in passing bad arguments to dex2oat and failing the build.
ENABLE_PREOPT :=
+ ENABLE_PREOPT_BOOT_IMAGES :=
else ifeq (true,$(DISABLE_PREOPT))
+ # Disable dexpreopt for libraries/apps, but do compile boot images.
ENABLE_PREOPT :=
endif
# The default value for LOCAL_DEX_PREOPT
-DEX_PREOPT_DEFAULT ?= true
+DEX_PREOPT_DEFAULT ?= $(ENABLE_PREOPT)
# The default filter for which files go into the system_other image (if it is
# being used). Note that each pattern p here matches both '/<p>' and /system/<p>'.
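The net effect of the flags above, summarized as a sketch derived from the conditionals rather than an exhaustive matrix:

  #   WITH_DEXPREOPT != true                  -> no app dexpreopt, no boot images
  #   PRODUCT_USES_DEFAULT_ART_CONFIG != true -> no app dexpreopt, no boot images
  #   DISABLE_PREOPT = true                   -> no app dexpreopt, boot images still built
  #   otherwise                               -> both enabled; DEX_PREOPT_DEFAULT now
  #                                              follows ENABLE_PREOPT instead of true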
@@ -46,14 +54,6 @@
endif
endif
-# Use the first preloaded-classes file in PRODUCT_COPY_FILES.
-PRELOADED_CLASSES := $(call word-colon,1,$(firstword \
- $(filter %system/etc/preloaded-classes,$(PRODUCT_COPY_FILES))))
-
-# Use the first dirty-image-objects file in PRODUCT_COPY_FILES.
-DIRTY_IMAGE_OBJECTS := $(call word-colon,1,$(firstword \
- $(filter %system/etc/dirty-image-objects,$(PRODUCT_COPY_FILES))))
-
# Get value of a property. It is first searched from PRODUCT_VENDOR_PROPERTIES
# and then falls back to PRODUCT_SYSTEM_PROPERTIES
# $1: name of the property
@@ -73,6 +73,7 @@
$(call json_start)
$(call add_json_bool, DisablePreopt, $(call invert_bool,$(ENABLE_PREOPT)))
+ $(call add_json_bool, DisablePreoptBootImages, $(call invert_bool,$(ENABLE_PREOPT_BOOT_IMAGES)))
$(call add_json_list, DisablePreoptModules, $(DEXPREOPT_DISABLED_MODULES))
$(call add_json_bool, OnlyPreoptBootImageAndSystemServer, $(filter true,$(WITH_DEXPREOPT_BOOT_IMG_AND_SYSTEM_SERVER_ONLY)))
$(call add_json_bool, UseArtImage, $(filter true,$(DEXPREOPT_USE_ART_IMAGE)))
@@ -108,6 +109,7 @@
$(call add_json_str, Dex2oatXms, $(DEX2OAT_XMS))
$(call add_json_str, EmptyDirectory, $(OUT_DIR)/empty)
+ifdef TARGET_ARCH
$(call add_json_map, CpuVariant)
$(call add_json_str, $(TARGET_ARCH), $(DEX2OAT_TARGET_CPU_VARIANT))
ifdef TARGET_2ND_ARCH
@@ -121,8 +123,8 @@
$(call add_json_str, $(TARGET_2ND_ARCH), $($(TARGET_2ND_ARCH_VAR_PREFIX)DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES))
endif
$(call end_json_map)
+endif
- $(call add_json_str, DirtyImageObjects, $(DIRTY_IMAGE_OBJECTS))
$(call add_json_list, BootImageProfiles, $(PRODUCT_DEX_PREOPT_BOOT_IMAGE_PROFILE_LOCATION))
$(call add_json_str, BootFlags, $(PRODUCT_DEX_PREOPT_BOOT_FLAGS))
$(call add_json_str, Dex2oatImageXmx, $(DEX2OAT_IMAGE_XMX))
diff --git a/core/dex_preopt_odex_install.mk b/core/dex_preopt_odex_install.mk
index 799b623..b74e047 100644
--- a/core/dex_preopt_odex_install.mk
+++ b/core/dex_preopt_odex_install.mk
@@ -189,20 +189,43 @@
my_filtered_optional_uses_libraries := $(filter-out $(INTERNAL_PLATFORM_MISSING_USES_LIBRARIES), \
$(LOCAL_OPTIONAL_USES_LIBRARIES))
- # dexpreopt needs the paths to the dex jars of these libraries in order to
- # construct class loader context for dex2oat.
- my_extra_dexpreopt_libs := \
- org.apache.http.legacy \
+  # Compatibility libraries are added to the class loader context of an app only if
+  # targetSdkVersion in the app's manifest is lower than the given SDK version.
+
+ my_dexpreopt_libs_compat_28 := \
+ org.apache.http.legacy
+
+ my_dexpreopt_libs_compat_29 := \
android.hidl.base-V1.0-java \
- android.hidl.manager-V1.0-java \
+ android.hidl.manager-V1.0-java
+
+ my_dexpreopt_libs_compat_30 := \
android.test.base \
+ android.test.mock
+
+ my_dexpreopt_libs_compat := \
+ $(my_dexpreopt_libs_compat_28) \
+ $(my_dexpreopt_libs_compat_29) \
+ $(my_dexpreopt_libs_compat_30)
my_dexpreopt_libs := $(sort \
$(LOCAL_USES_LIBRARIES) \
$(my_filtered_optional_uses_libraries) \
- $(my_extra_dexpreopt_libs) \
)
+ # 1: SDK version
+ # 2: list of libraries
+ add_json_class_loader_context = \
+ $(call add_json_map, $(1)) \
+ $(foreach lib, $(2),\
+ $(call add_json_map, $(lib)) \
+ $(eval file := $(filter %/$(lib).jar, $(call module-installed-files,$(lib)))) \
+ $(call add_json_str, Host, $(call intermediates-dir-for,JAVA_LIBRARIES,$(lib),,COMMON)/javalib.jar) \
+ $(call add_json_str, Device, $(call install-path-to-on-device-path,$(file))) \
+ $(call add_json_map, Subcontexts, ${$}) $(call end_json_map) \
+ $(call end_json_map)) \
+ $(call end_json_map)
+
# Record dex-preopt config.
DEXPREOPT.$(LOCAL_MODULE).DEX_PREOPT := $(LOCAL_DEX_PREOPT)
DEXPREOPT.$(LOCAL_MODULE).MULTILIB := $(LOCAL_MULTILIB)
@@ -230,15 +253,12 @@
$(call add_json_str, ProfileClassListing, $(if $(my_process_profile),$(LOCAL_DEX_PREOPT_PROFILE)))
$(call add_json_bool, ProfileIsTextListing, $(my_profile_is_text_listing))
$(call add_json_bool, EnforceUsesLibraries, $(LOCAL_ENFORCE_USES_LIBRARIES))
- $(call add_json_list, OptionalUsesLibraries, $(my_filtered_optional_uses_libraries))
- $(call add_json_list, UsesLibraries, $(LOCAL_USES_LIBRARIES))
- $(call add_json_map, LibraryPaths)
- $(foreach lib,$(my_dexpreopt_libs),\
- $(call add_json_map, $(lib)) \
- $(eval file := $(filter %/$(lib).jar, $(call module-installed-files,$(lib)))) \
- $(call add_json_str, Host, $(call intermediates-dir-for,JAVA_LIBRARIES,$(lib),,COMMON)/javalib.jar) \
- $(call add_json_str, Device, $(call install-path-to-on-device-path,$(file))) \
- $(call end_json_map))
+ $(call add_json_str, ProvidesUsesLibrary, $(firstword $(LOCAL_PROVIDES_USES_LIBRARY) $(LOCAL_MODULE)))
+ $(call add_json_map, ClassLoaderContexts)
+ $(call add_json_class_loader_context, any, $(my_dexpreopt_libs))
+ $(call add_json_class_loader_context, 28, $(my_dexpreopt_libs_compat_28))
+ $(call add_json_class_loader_context, 29, $(my_dexpreopt_libs_compat_29))
+ $(call add_json_class_loader_context, 30, $(my_dexpreopt_libs_compat_30))
$(call end_json_map)
$(call add_json_list, Archs, $(my_dexpreopt_archs))
$(call add_json_list, DexPreoptImages, $(my_dexpreopt_images))
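For reference, a hedged sketch of the ClassLoaderContexts entry this writes into the module's dexpreopt.config; the app library name is hypothetical and paths are abbreviated:

  # "ClassLoaderContexts": {
  #   "any": { "org.example.lib": { "Host": ".../javalib.jar", "Device": "/system/framework/org.example.lib.jar", "Subcontexts": ... } },
  #   "28":  { "org.apache.http.legacy": { ... } },
  #   "29":  { "android.hidl.base-V1.0-java": { ... }, "android.hidl.manager-V1.0-java": { ... } },
  #   "30":  { "android.test.base": { ... }, "android.test.mock": { ... } }
  # }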
@@ -280,7 +300,7 @@
my_dexpreopt_deps := $(my_dex_jar)
my_dexpreopt_deps += $(if $(my_process_profile),$(LOCAL_DEX_PREOPT_PROFILE))
my_dexpreopt_deps += \
- $(foreach lib, $(my_dexpreopt_libs), \
+ $(foreach lib, $(my_dexpreopt_libs) $(my_dexpreopt_libs_compat), \
$(call intermediates-dir-for,JAVA_LIBRARIES,$(lib),,COMMON)/javalib.jar)
my_dexpreopt_deps += $(my_dexpreopt_images_deps)
my_dexpreopt_deps += $(DEXPREOPT_BOOTCLASSPATH_DEX_FILES)
diff --git a/core/dumpvar.mk b/core/dumpvar.mk
index b2ee8fd..6b5c030 100644
--- a/core/dumpvar.mk
+++ b/core/dumpvar.mk
@@ -24,9 +24,14 @@
# Input variables:
# DUMP_MANY_VARS: the list of variable names.
# DUMP_VAR_PREFIX: an optional prefix of the variable name added to the output.
+# The value is printed in parts because large variables like PRODUCT_PACKAGES
+# can exceed the maximum Linux command-line length.
.PHONY: dump-many-vars
dump-many-vars :
@$(foreach v, $(DUMP_MANY_VARS),\
- printf "%s='%s'\n" '$(DUMP_VAR_PREFIX)$(v)' '$($(v))';)
+ printf "%s='%s" '$(DUMP_VAR_PREFIX)$(v)' '$(firstword $($(v)))'; \
+ $(foreach part, $(wordlist 2, $(words $($(v))), $($(v))),\
+ printf " %s" '$(part)'$(newline))\
+ printf "'\n";)
endif # CALLED_FROM_SETUP
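The observable output is unchanged; as an illustration (variable choice and values are examples), a dumpvar query still prints one quoted assignment even though printf now emits the value word by word:

  # DUMP_MANY_VARS := PRODUCT_PACKAGES   (set by the caller)
  # Printed result, same as before:
  #   PRODUCT_PACKAGES='adbd init shell ...'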
diff --git a/core/java_host_unit_test_config_template.xml b/core/java_host_unit_test_config_template.xml
new file mode 100644
index 0000000..ff300da
--- /dev/null
+++ b/core/java_host_unit_test_config_template.xml
@@ -0,0 +1,26 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2020 The Android Open Source Project
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<!-- This test config file is auto-generated. -->
+<configuration description="Runs {MODULE}">
+ <option name="test-suite-tag" value="apct" />
+ <option name="test-suite-tag" value="apct-unit-tests" />
+
+ {EXTRA_CONFIGS}
+
+ <test class="com.android.tradefed.testtype.IsolatedHostTest" >
+ <option name="jar" value="{MODULE}.jar" />
+ </test>
+</configuration>
diff --git a/core/main.mk b/core/main.mk
index 8c29045..5ea95c8 100644
--- a/core/main.mk
+++ b/core/main.mk
@@ -115,6 +115,11 @@
endif
endif
+ifdef TARGET_ARCH_SUITE
+ # TODO(b/175577370): Enable this error.
+ # $(error TARGET_ARCH_SUITE is not supported in kati/make builds)
+endif
+
# ADDITIONAL_<partition>_PROPERTIES are properties that are determined by the
# build system itself. Don't let it be defined from outside of the core build
# system like Android.mk or <product>.mk files.
@@ -193,11 +198,7 @@
# Sets ro.actionable_compatible_property.enabled to know on runtime whether the
# allowed list of actionable compatible properties is enabled or not.
-ifeq ($(PRODUCT_ACTIONABLE_COMPATIBLE_PROPERTY_DISABLE),true)
-ADDITIONAL_SYSTEM_PROPERTIES += ro.actionable_compatible_property.enabled=false
-else
-ADDITIONAL_SYSTEM_PROPERTIES += ro.actionable_compatible_property.enabled=${PRODUCT_COMPATIBLE_PROPERTY}
-endif
+ADDITIONAL_SYSTEM_PROPERTIES += ro.actionable_compatible_property.enabled=true
# Add the system server compiler filter if they are specified for the product.
ifneq (,$(PRODUCT_SYSTEM_SERVER_COMPILER_FILTER))
@@ -284,9 +285,6 @@
ADDITIONAL_VENDOR_PROPERTIES += \
ro.vendor.build.security_patch=$(VENDOR_SECURITY_PATCH) \
- ro.vendor.product.cpu.abilist=$(TARGET_CPU_ABI_LIST) \
- ro.vendor.product.cpu.abilist32=$(TARGET_CPU_ABI_LIST_32_BIT) \
- ro.vendor.product.cpu.abilist64=$(TARGET_CPU_ABI_LIST_64_BIT) \
ro.product.board=$(TARGET_BOOTLOADER_BOARD_NAME) \
ro.board.platform=$(TARGET_BOARD_PLATFORM) \
ro.hwui.use_vulkan=$(TARGET_USES_VULKAN)
@@ -301,11 +299,6 @@
ro.build.ab_update=$(AB_OTA_UPDATER)
endif
-ADDITIONAL_ODM_PROPERTIES += \
- ro.odm.product.cpu.abilist=$(TARGET_CPU_ABI_LIST) \
- ro.odm.product.cpu.abilist32=$(TARGET_CPU_ABI_LIST_32_BIT) \
- ro.odm.product.cpu.abilist64=$(TARGET_CPU_ABI_LIST_64_BIT)
-
# Set ro.product.vndk.version to know the VNDK version required by product
# modules. It uses the version in PRODUCT_PRODUCT_VNDK_VERSION. If the value
# is "current", use PLATFORM_VNDK_VERSION.
@@ -748,7 +741,7 @@
$(eval r := $(call module-installed-files,$(r))) \
$(eval h_m := $(filter $(HOST_OUT)/%, $(ALL_MODULES.$(m).INSTALLED))) \
$(eval h_r := $(filter $(HOST_OUT)/%, $(r))) \
- $(eval h_m := $(filter-out $(h_r), $(h_m))) \
+ $(eval h_r := $(filter-out $(h_m), $(h_r))) \
$(if $(h_m), $(eval $(call add-required-deps, $(h_m),$(h_r)))) \
) \
)
@@ -764,7 +757,7 @@
$(eval r := $(call module-installed-files,$(r))) \
$(eval hc_m := $(filter $(HOST_CROSS_OUT)/%, $(ALL_MODULES.$(m).INSTALLED))) \
$(eval hc_r := $(filter $(HOST_CROSS_OUT)/%, $(r))) \
- $(eval hc_m := $(filter-out $(hc_r), $(hc_m))) \
+ $(eval hc_r := $(filter-out $(hc_m), $(hc_r))) \
$(if $(hc_m), $(eval $(call add-required-deps, $(hc_m),$(hc_r)))) \
) \
)
@@ -780,7 +773,7 @@
$(eval r := $(call module-installed-files,$(r))) \
$(eval t_m := $(filter $(TARGET_OUT_ROOT)/%, $(ALL_MODULES.$(m).INSTALLED))) \
$(eval t_r := $(filter $(TARGET_OUT_ROOT)/%, $(r))) \
- $(eval t_m := $(filter-out $(t_r), $(t_m))) \
+ $(eval t_r := $(filter-out $(t_m), $(t_r))) \
$(if $(t_m), $(eval $(call add-required-deps, $(t_m),$(t_r)))) \
) \
)
@@ -807,7 +800,6 @@
)\
$(eval req_files := $(strip $(req_files)))\
$(eval mod_files := $(filter $(HOST_OUT)/%, $(call module-installed-files,$(m)))) \
- $(eval mod_files := $(filter-out $(req_files),$(mod_files)))\
$(if $(mod_files),\
$(eval $(call add-required-deps, $(mod_files),$(req_files))) \
)\
@@ -836,7 +828,6 @@
)\
$(eval req_files := $(strip $(req_files)))\
$(eval mod_files := $(filter $(TARGET_OUT_ROOT)/%, $(call module-installed-files,$(m))))\
- $(eval mod_files := $(filter-out $(req_files),$(mod_files)))\
$(if $(mod_files),\
$(eval $(call add-required-deps, $(mod_files),$(req_files))) \
)\
@@ -895,7 +886,7 @@
# Scan all modules in general-tests, device-tests and other selected suites and
# flatten the shared library dependencies.
define update-host-shared-libs-deps-for-suites
-$(foreach suite,general-tests device-tests vts,\
+$(foreach suite,general-tests device-tests vts art-host-tests host-unit-tests,\
$(foreach m,$(COMPATIBILITY.$(suite).MODULES),\
$(eval my_deps := $(call get-all-shared-libs-deps,$(m)))\
$(foreach dep,$(my_deps),\
@@ -1121,7 +1112,11 @@
# Expand a list of modules to the modules that they override (if any)
# $(1): The list of modules.
define module-overrides
-$(foreach m,$(1),$(PACKAGES.$(m).OVERRIDES) $(EXECUTABLES.$(m).OVERRIDES) $(SHARED_LIBRARIES.$(m).OVERRIDES) $(ETC.$(m).OVERRIDES))
+$(foreach m,$(1),\
+ $(eval _mo_overrides := $(PACKAGES.$(m).OVERRIDES) $(EXECUTABLES.$(m).OVERRIDES) $(SHARED_LIBRARIES.$(m).OVERRIDES) $(ETC.$(m).OVERRIDES))\
+ $(if $(filter $(m),$(_mo_overrides)),\
+ $(error Module $(m) cannot override itself),\
+ $(_mo_overrides)))
endef
###########################################################
@@ -1305,67 +1300,7 @@
# Verify the artifact path requirements made by included products.
is_asan := $(if $(filter address,$(SANITIZE_TARGET)),true)
ifneq (true,$(or $(is_asan),$(DISABLE_ARTIFACT_PATH_REQUIREMENTS)))
- # Fakes don't get installed, and NDK stubs aren't installed to device.
- static_allowed_patterns := $(TARGET_OUT_FAKE)/% $(SOONG_OUT_DIR)/ndk/%
- # RROs become REQUIRED by the source module, but are always placed on the vendor partition.
- static_allowed_patterns += %__auto_generated_rro_product.apk
- static_allowed_patterns += %__auto_generated_rro_vendor.apk
- # Auto-included targets are not considered
- static_allowed_patterns += $(call product-installed-files,)
- # $(PRODUCT_OUT)/apex is where shared libraries in APEXes get installed.
- # The path can be considered as a fake path, as the shared libraries
- # are installed there just to have symbols files for them under
- # $(PRODUCT_OUT)/symbols/apex for debugging purpose. The /apex directory
- # is never compiled into a filesystem image.
- static_allowed_patterns += $(PRODUCT_OUT)/apex/%
- ifeq (true,$(BOARD_USES_SYSTEM_OTHER_ODEX))
- # Allow system_other odex space optimization.
- static_allowed_patterns += \
- $(TARGET_OUT_SYSTEM_OTHER)/%.odex \
- $(TARGET_OUT_SYSTEM_OTHER)/%.vdex \
- $(TARGET_OUT_SYSTEM_OTHER)/%.art
- endif
-
-CERTIFICATE_VIOLATION_MODULES_FILENAME := $(PRODUCT_OUT)/certificate_violation_modules.txt
-$(CERTIFICATE_VIOLATION_MODULES_FILENAME):
- rm -f $@
- $(foreach m,$(sort $(CERTIFICATE_VIOLATION_MODULES)), echo $(m) >> $@;)
-$(call dist-for-goals,droidcore,$(CERTIFICATE_VIOLATION_MODULES_FILENAME))
-
- all_offending_files :=
- $(foreach makefile,$(ARTIFACT_PATH_REQUIREMENT_PRODUCTS),\
- $(eval requirements := $(PRODUCTS.$(makefile).ARTIFACT_PATH_REQUIREMENTS)) \
- $(eval ### Verify that the product only produces files inside its path requirements.) \
- $(eval allowed := $(PRODUCTS.$(makefile).ARTIFACT_PATH_ALLOWED_LIST)) \
- $(eval path_patterns := $(call resolve-product-relative-paths,$(requirements),%)) \
- $(eval allowed_patterns := $(call resolve-product-relative-paths,$(allowed))) \
- $(eval files := $(call product-installed-files, $(makefile))) \
- $(eval offending_files := $(filter-out $(path_patterns) $(allowed_patterns) $(static_allowed_patterns),$(files))) \
- $(call maybe-print-list-and-error,$(offending_files),\
- $(makefile) produces files outside its artifact path requirement. \
- Allowed paths are $(subst $(space),$(comma)$(space),$(addsuffix *,$(requirements)))) \
- $(eval unused_allowed := $(filter-out $(files),$(allowed_patterns))) \
- $(call maybe-print-list-and-error,$(unused_allowed),$(makefile) includes redundant allowed entries in its artifact path requirement.) \
- $(eval ### Optionally verify that nothing else produces files inside this artifact path requirement.) \
- $(eval extra_files := $(filter-out $(files) $(HOST_OUT)/%,$(product_target_FILES))) \
- $(eval files_in_requirement := $(filter $(path_patterns),$(extra_files))) \
- $(eval all_offending_files += $(files_in_requirement)) \
- $(eval allowed := $(PRODUCT_ARTIFACT_PATH_REQUIREMENT_ALLOWED_LIST)) \
- $(eval allowed_patterns := $(call resolve-product-relative-paths,$(allowed))) \
- $(eval offending_files := $(filter-out $(allowed_patterns),$(files_in_requirement))) \
- $(eval enforcement := $(PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS)) \
- $(if $(enforcement),\
- $(call maybe-print-list-and-error,$(offending_files),\
- $(INTERNAL_PRODUCT) produces files inside $(makefile)s artifact path requirement. \
- $(PRODUCT_ARTIFACT_PATH_REQUIREMENT_HINT)) \
- $(eval unused_allowed := $(if $(filter true strict,$(enforcement)),\
- $(foreach p,$(allowed_patterns),$(if $(filter $(p),$(extra_files)),,$(p))))) \
- $(call maybe-print-list-and-error,$(unused_allowed),$(INTERNAL_PRODUCT) includes redundant artifact path requirement allowed list entries.) \
- ) \
- )
-$(PRODUCT_OUT)/offending_artifacts.txt:
- rm -f $@
- $(foreach f,$(sort $(all_offending_files)),echo $(f) >> $@;)
+ include $(BUILD_SYSTEM)/artifact_path_requirements.mk
endif
else
# We're not doing a full build, and are probably only including
@@ -1465,6 +1400,17 @@
ALL_DEFAULT_INSTALLED_MODULES :=
+# Some notice deps refer to module names without a prefix or arch suffix even
+# though only the variants with them get built.
+# fix-notice-deps replaces those unadorned module names with every built variant.
+$(call fix-notice-deps)
+
+# Create a license metadata rule per module. This could happen in base_rules.mk
+# or notice_files.mk, but it has to happen after fix-notice-deps to avoid
+# missing dependency errors.
+$(call build-license-metadata)
+
+
# These are additional goals that we build, in order to make sure that there
# is as little code as possible in the tree that doesn't build.
modules_to_check := $(foreach m,$(ALL_MODULES),$(ALL_MODULES.$(m).CHECKED))
@@ -1517,9 +1463,6 @@
.PHONY: ramdisk_test_harness
ramdisk_test_harness: $(INSTALLED_TEST_HARNESS_RAMDISK_TARGET)
-.PHONY: vendor_ramdisk_debug
-vendor_ramdisk_debug: $(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET)
-
.PHONY: userdataimage
userdataimage: $(INSTALLED_USERDATAIMAGE_TARGET)
@@ -1599,7 +1542,6 @@
$(INSTALLED_BPTIMAGE_TARGET) \
$(INSTALLED_VENDORIMAGE_TARGET) \
$(INSTALLED_VENDOR_BOOTIMAGE_TARGET) \
- $(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET) \
$(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET) \
$(INSTALLED_ODMIMAGE_TARGET) \
$(INSTALLED_VENDOR_DLKMIMAGE_TARGET) \
@@ -1627,6 +1569,8 @@
$(INSTALLED_FILES_JSON_RAMDISK) \
$(INSTALLED_FILES_FILE_DEBUG_RAMDISK) \
$(INSTALLED_FILES_JSON_DEBUG_RAMDISK) \
+ $(INSTALLED_FILES_FILE_VENDOR_RAMDISK) \
+ $(INSTALLED_FILES_JSON_VENDOR_RAMDISK) \
$(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK) \
$(INSTALLED_FILES_JSON_VENDOR_DEBUG_RAMDISK) \
$(INSTALLED_FILES_FILE_ROOT) \
@@ -1641,6 +1585,7 @@
ifeq ($(SOONG_COLLECT_JAVA_DEPS), true)
$(call dist-for-goals, dist_files, $(SOONG_OUT_DIR)/module_bp_java_deps.json)
+ $(call dist-for-goals, dist_files, $(PRODUCT_OUT)/module-info.json)
endif
.PHONY: apps_only
@@ -1715,6 +1660,7 @@
$(INTERNAL_UPDATE_PACKAGE_TARGET) \
$(INTERNAL_OTA_PACKAGE_TARGET) \
$(INTERNAL_OTA_METADATA) \
+ $(INTERNAL_OTA_PARTIAL_PACKAGE_TARGET) \
$(INTERNAL_OTA_RETROFIT_DYNAMIC_PARTITIONS_PACKAGE_TARGET) \
$(BUILT_OTATOOLS_PACKAGE) \
$(SYMBOLS_ZIP) \
@@ -1745,6 +1691,7 @@
$(INSTALLED_PRODUCT_BUILD_PROP_TARGET):build.prop-product \
$(INSTALLED_ODM_BUILD_PROP_TARGET):build.prop-odm \
$(INSTALLED_SYSTEM_EXT_BUILD_PROP_TARGET):build.prop-system_ext \
+ $(INSTALLED_RAMDISK_BUILD_PROP_TARGET):build.prop-ramdisk \
$(BUILT_TARGET_FILES_PACKAGE) \
$(INSTALLED_ANDROID_INFO_TXT_TARGET) \
$(INSTALLED_MISC_INFO_TARGET) \
@@ -1773,11 +1720,12 @@
$(INSTALLED_FILES_JSON_RAMDISK) \
$(INSTALLED_FILES_FILE_DEBUG_RAMDISK) \
$(INSTALLED_FILES_JSON_DEBUG_RAMDISK) \
+ $(INSTALLED_FILES_FILE_VENDOR_RAMDISK) \
+ $(INSTALLED_FILES_JSON_VENDOR_RAMDISK) \
$(INSTALLED_FILES_FILE_VENDOR_DEBUG_RAMDISK) \
$(INSTALLED_FILES_JSON_VENDOR_DEBUG_RAMDISK) \
$(INSTALLED_DEBUG_RAMDISK_TARGET) \
$(INSTALLED_DEBUG_BOOTIMAGE_TARGET) \
- $(INSTALLED_VENDOR_DEBUG_RAMDISK_TARGET) \
$(INSTALLED_VENDOR_DEBUG_BOOTIMAGE_TARGET) \
)
$(call dist-for-goals, bootimage_test_harness, \
@@ -1793,7 +1741,6 @@
endif
ifeq ($(EMMA_INSTRUMENT),true)
- $(JACOCO_REPORT_CLASSES_ALL) : $(modules_to_install)
$(call dist-for-goals, dist_files, $(JACOCO_REPORT_CLASSES_ALL))
endif
@@ -1897,6 +1844,11 @@
ndk: $(SOONG_OUT_DIR)/ndk.timestamp
.PHONY: ndk
+# Checks that build/soong/apex/allowed_deps.txt remains up to date
+ifneq ($(UNSAFE_DISABLE_APEX_ALLOWED_DEPS_CHECK),true)
+ droidcore: ${APEX_ALLOWED_DEPS_CHECK}
+endif
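
If the allowed_deps check needs to be bypassed locally, e.g. while prototyping a new APEX dependency, the guard above can be switched off from the environment; a hypothetical invocation would be `UNSAFE_DISABLE_APEX_ALLOWED_DEPS_CHECK=true m droidcore`.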
+
$(call dist-write-file,$(KATI_PACKAGE_MK_DIR)/dist.mk)
$(info [$(call inc_and_print,subdir_makefiles_inc)/$(subdir_makefiles_total)] writing build rules ...)
diff --git a/core/ninja_config.mk b/core/ninja_config.mk
index 6fccacd..2e1bd69 100644
--- a/core/ninja_config.mk
+++ b/core/ninja_config.mk
@@ -39,7 +39,6 @@
user \
userdataimage \
userdebug \
- vts10 \
win_sdk \
winsdk-tools
diff --git a/core/notice_files.mk b/core/notice_files.mk
index 721a034..89f822b 100644
--- a/core/notice_files.mk
+++ b/core/notice_files.mk
@@ -9,6 +9,32 @@
notice_file:=$(strip $(wildcard $(LOCAL_PATH)/LICENSE $(LOCAL_PATH)/LICENCE $(LOCAL_PATH)/NOTICE))
endif
+ifneq (,$(strip $(LOCAL_LICENSE_PACKAGE_NAME)))
+license_package_name:=$(strip $(LOCAL_LICENSE_PACKAGE_NAME))
+else ifdef my_register_name
+license_package_name:=$(my_register_name)
+else
+license_package_name:=$(strip $(LOCAL_MODULE))
+endif
+
+ifneq (,$(strip $(LOCAL_LICENSE_INSTALL_MAP)))
+install_map:=$(strip $(LOCAL_LICENSE_INSTALL_MAP))
+else
+install_map:=
+endif
+
+ifneq (,$(strip $(LOCAL_LICENSE_KINDS)))
+license_kinds:=$(strip $(LOCAL_LICENSE_KINDS))
+else
+license_kinds:=legacy_by_exception_only
+endif
+
+ifneq (,$(strip $(LOCAL_LICENSE_CONDITIONS)))
+license_conditions:=$(strip $(LOCAL_LICENSE_CONDITIONS))
+else
+license_conditions:=by_exception_only
+endif
+
ifeq ($(LOCAL_MODULE_CLASS),GYP)
# We ignore NOTICE files for modules of type GYP.
notice_file :=
@@ -40,10 +66,64 @@
installed_notice_file :=
+is_container:=$(strip $(LOCAL_MODULE_IS_CONTAINER))
+ifeq (,$(is_container))
+ifneq (,$(strip $(filter %.zip %.tar %.tgz %.tar.gz %.apk %.img %.srcszip %.apex, $(LOCAL_BUILT_MODULE))))
+is_container:=true
+else
+is_container:=false
+endif
+else ifneq (,$(strip $(filter-out true false,$(is_container))))
+$(error Unrecognized value '$(is_container)' for LOCAL_MODULE_IS_CONTAINER)
+endif
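
As a stand-alone illustration of the suffix-based classification above (the built-module path is made up):

LOCAL_BUILT_MODULE := out/target/product/generic/obj/APPS/Example_intermediates/package.apk
is_container := $(if $(filter %.zip %.tar %.tgz %.tar.gz %.apk %.img %.srcszip %.apex,$(LOCAL_BUILT_MODULE)),true,false)
# Prints "is_container=true"; a .so or bare executable path would print "false".
$(info is_container=$(is_container))
all: ;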
+
+ifeq (true,$(is_container))
+# Include shared libraries' notices for "container" types, but not for binaries etc.
+notice_deps := \
+ $(sort \
+ $(LOCAL_REQUIRED_MODULES) \
+ $(LOCAL_STATIC_LIBRARIES) \
+ $(LOCAL_WHOLE_STATIC_LIBRARIES) \
+ $(LOCAL_SHARED_LIBRARIES) \
+ $(LOCAL_DYLIB_LIBRARIES) \
+ $(LOCAL_RLIB_LIBRARIES) \
+ $(LOCAL_PROC_MACRO_LIBRARIES) \
+ $(LOCAL_HEADER_LIBRARIES) \
+ $(LOCAL_STATIC_JAVA_LIBRARIES) \
+ $(LOCAL_JAVA_LIBRARIES) \
+ $(LOCAL_JNI_SHARED_LIBRARIES) \
+ )
+else
+notice_deps := \
+ $(sort \
+ $(LOCAL_REQUIRED_MODULES) \
+ $(LOCAL_STATIC_LIBRARIES) \
+ $(LOCAL_WHOLE_STATIC_LIBRARIES) \
+ $(LOCAL_RLIB_LIBRARIES) \
+ $(LOCAL_PROC_MACRO_LIBRARIES) \
+ $(LOCAL_HEADER_LIBRARIES) \
+ $(LOCAL_STATIC_JAVA_LIBRARIES) \
+ )
+endif
+ifeq ($(LOCAL_IS_HOST_MODULE),true)
+notice_deps := $(sort $(notice_deps) $(LOCAL_HOST_REQUIRED_MODULES))
+else
+notice_deps := $(sort $(notice_deps) $(LOCAL_TARGET_REQUIRED_MODULES))
+endif
+
+ifdef my_register_name
+ALL_MODULES.$(my_register_name).LICENSE_PACKAGE_NAME := $(strip $(license_package_name))
+ALL_MODULES.$(my_register_name).LICENSE_KINDS := $(sort $(ALL_MODULES.$(my_register_name).LICENSE_KINDS) $(license_kinds))
+ALL_MODULES.$(my_register_name).LICENSE_CONDITIONS := $(sort $(ALL_MODULES.$(my_register_name).LICENSE_CONDITIONS) $(license_conditions))
+ALL_MODULES.$(my_register_name).LICENSE_INSTALL_MAP := $(sort $(ALL_MODULES.$(my_register_name).LICENSE_INSTALL_MAP) $(install_map))
+ALL_MODULES.$(my_register_name).NOTICE_DEPS := $(sort $(ALL_MODULES.$(my_register_name).NOTICE_DEPS) $(notice_deps))
+ALL_MODULES.$(my_register_name).IS_CONTAINER := $(sort $(ALL_MODULES.$(my_register_name).IS_CONTAINER) $(is_container))
+endif
+
ifdef notice_file
ifdef my_register_name
-ALL_MODULES.$(my_register_name).NOTICES := $(ALL_MODULES.$(my_register_name).NOTICES) $(notice_file)
+ALL_MODULES.$(my_register_name).NOTICES := $(sort $(ALL_MODULES.$(my_register_name).NOTICES) $(notice_file))
endif
# This relies on the name of the directory in PRODUCT_OUT matching where
@@ -79,15 +159,14 @@
endif
module_installed_filename := \
$(patsubst $(PRODUCT_OUT)/%,%,$($(my_prefix)OUT_JAVA_LIBRARIES))/$(module_leaf)
- else ifeq ($(LOCAL_MODULE_CLASS),ETC)
- # ETC modules may be uninstallable, yet still have a NOTICE file. e.g. apex components
+ else ifneq ($(filter ETC DATA,$(LOCAL_MODULE_CLASS)),)
+ # ETC and DATA modules may be uninstallable, yet still have a NOTICE file.
+ # e.g. apex components
module_installed_filename :=
else ifneq (,$(and $(filter %.sdk,$(LOCAL_MODULE)),$(filter $(patsubst %.sdk,%,$(LOCAL_MODULE)),$(SOONG_SDK_VARIANT_MODULES))))
# Soong produces uninstallable *.sdk shared libraries for embedding in APKs.
module_installed_filename := \
$(patsubst $(PRODUCT_OUT)/%,%,$($(LOCAL_2ND_ARCH_VAR_PREFIX)$(my_prefix)OUT_SHARED_LIBRARIES))/$(notdir $(LOCAL_BUILT_MODULE))
- else
- $(error Cannot determine where to install NOTICE file for $(LOCAL_MODULE))
endif # JAVA_LIBRARIES
endif # STATIC_LIBRARIES
endif
@@ -100,12 +179,17 @@
installed_notice_file := $($(my_prefix)OUT_NOTICE_FILES)/src/$(module_installed_filename).txt
+ifdef my_register_name
+ALL_MODULES.$(my_register_name).INSTALLED_NOTICE_FILE := $(installed_notice_file)
+endif
+
$(installed_notice_file): PRIVATE_INSTALLED_MODULE := $(module_installed_filename)
+$(installed_notice_file) : PRIVATE_NOTICES := $(notice_file)
$(installed_notice_file): $(notice_file)
@echo Notice file: $< -- $@
$(hide) mkdir -p $(dir $@)
- $(hide) awk 'FNR==1 && NR > 1 {print "\n"} {print}' $^ > $@
+ $(hide) awk 'FNR==1 && NR > 1 {print "\n"} {print}' $(PRIVATE_NOTICES) > $@
ifdef LOCAL_INSTALLED_MODULE
# Make LOCAL_INSTALLED_MODULE depend on NOTICE files if they exist
diff --git a/core/package_internal.mk b/core/package_internal.mk
index a97e401..1b40624 100644
--- a/core/package_internal.mk
+++ b/core/package_internal.mk
@@ -552,6 +552,10 @@
ifeq (true, $(LOCAL_UNCOMPRESS_DEX))
$(LOCAL_BUILT_MODULE) : $(ZIP2ZIP)
endif
+ifeq ($(full_classes_jar),)
+ # We don't build a jar, so the Java resources need to be added here.
+ $(LOCAL_BUILT_MODULE): $(java_resource_sources)
+endif
$(LOCAL_BUILT_MODULE): PRIVATE_USE_EMBEDDED_NATIVE_LIBS := $(LOCAL_USE_EMBEDDED_NATIVE_LIBS)
$(LOCAL_BUILT_MODULE):
@echo "target Package: $(PRIVATE_MODULE) ($@)"
@@ -603,6 +607,8 @@
else
$(my_bundle_module): PRIVATE_DEX_FILE :=
$(my_bundle_module): PRIVATE_SOURCE_ARCHIVE :=
+ # We don't build a jar, so the Java resources need to be added here.
+ $(my_bundle_module): $(java_resource_sources)
endif # full_classes_jar
$(my_bundle_module): $(MERGE_ZIPS) $(SOONG_ZIP) $(ZIP2ZIP)
diff --git a/core/product.mk b/core/product.mk
index 624501e..2ab4b06 100644
--- a/core/product.mk
+++ b/core/product.mk
@@ -120,7 +120,7 @@
# The resource configuration options to use for this product.
_product_list_vars += PRODUCT_LOCALES
_product_list_vars += PRODUCT_AAPT_CONFIG
-_product_list_vars += PRODUCT_AAPT_PREF_CONFIG
+_product_single_value_vars += PRODUCT_AAPT_PREF_CONFIG
_product_list_vars += PRODUCT_AAPT_PREBUILT_DPI
_product_list_vars += PRODUCT_HOST_PACKAGES
_product_list_vars += PRODUCT_PACKAGES
@@ -209,7 +209,7 @@
_product_list_vars += PRODUCT_SOONG_NAMESPACES
_product_list_vars += PRODUCT_DEFAULT_WIFI_CHANNELS
-_product_list_vars += PRODUCT_DEFAULT_DEV_CERTIFICATE
+_product_single_value_vars += PRODUCT_DEFAULT_DEV_CERTIFICATE
_product_list_vars += PRODUCT_MAINLINE_SEPOLICY_DEV_CERTIFICATES
_product_list_vars += PRODUCT_RESTRICT_VENDOR_FILES
@@ -233,7 +233,7 @@
# List of system_server jars delivered via apex. Format = <apex name>:<jar name>.
_product_list_vars += PRODUCT_UPDATABLE_SYSTEM_SERVER_JARS
# If true, then suboptimal order of system server jars does not cause an error.
-_product_list_vars += PRODUCT_BROKEN_SUBOPTIMAL_ORDER_OF_SYSTEM_SERVER_JARS
+_product_single_value_vars += PRODUCT_BROKEN_SUBOPTIMAL_ORDER_OF_SYSTEM_SERVER_JARS
# Additional system server jars to be appended at the end of the common list.
# This is necessary to avoid jars reordering due to makefile inheritance order.
@@ -258,13 +258,13 @@
# Per-module dex-preopt configs.
_product_list_vars += PRODUCT_DEX_PREOPT_MODULE_CONFIGS
-_product_list_vars += PRODUCT_DEX_PREOPT_DEFAULT_COMPILER_FILTER
+_product_single_value_vars += PRODUCT_DEX_PREOPT_DEFAULT_COMPILER_FILTER
_product_list_vars += PRODUCT_DEX_PREOPT_DEFAULT_FLAGS
-_product_list_vars += PRODUCT_DEX_PREOPT_BOOT_FLAGS
-_product_list_vars += PRODUCT_DEX_PREOPT_PROFILE_DIR
-_product_list_vars += PRODUCT_DEX_PREOPT_GENERATE_DM_FILES
-_product_list_vars += PRODUCT_DEX_PREOPT_NEVER_ALLOW_STRIPPING
-_product_list_vars += PRODUCT_DEX_PREOPT_RESOLVE_STARTUP_STRINGS
+_product_single_value_vars += PRODUCT_DEX_PREOPT_BOOT_FLAGS
+_product_single_value_vars += PRODUCT_DEX_PREOPT_PROFILE_DIR
+_product_single_value_vars += PRODUCT_DEX_PREOPT_GENERATE_DM_FILES
+_product_single_value_vars += PRODUCT_DEX_PREOPT_NEVER_ALLOW_STRIPPING
+_product_single_value_vars += PRODUCT_DEX_PREOPT_RESOLVE_STARTUP_STRINGS
# Boot image options.
_product_single_value_vars += \
@@ -272,7 +272,7 @@
PRODUCT_DEX_PREOPT_BOOT_IMAGE_PROFILE_LOCATION \
PRODUCT_USES_DEFAULT_ART_CONFIG \
-_product_list_vars += PRODUCT_SYSTEM_SERVER_COMPILER_FILTER
+_product_single_value_vars += PRODUCT_SYSTEM_SERVER_COMPILER_FILTER
# Per-module sanitizer configs
_product_list_vars += PRODUCT_SANITIZER_MODULE_CONFIGS
_product_single_value_vars += PRODUCT_SYSTEM_BASE_FS_PATH
@@ -315,19 +315,16 @@
# Whether the Scudo hardened allocator is disabled platform-wide
_product_single_value_vars += PRODUCT_DISABLE_SCUDO
-# A flag to override PRODUCT_COMPATIBLE_PROPERTY
-_product_single_value_vars += PRODUCT_COMPATIBLE_PROPERTY_OVERRIDE
-
# List of extra VNDK versions to be included
_product_list_vars += PRODUCT_EXTRA_VNDK_VERSIONS
+# Whether APEX should be compressed or not
+_product_single_value_vars += PRODUCT_COMPRESSED_APEX
+
# VNDK version of product partition. It can be 'current' if the product
# partitions uses PLATFORM_VNDK_VERSION.
_product_single_value_vars += PRODUCT_PRODUCT_VNDK_VERSION
-# Whether the list of allowed of actionable compatible properties should be disabled or not
-_product_single_value_vars += PRODUCT_ACTIONABLE_COMPATIBLE_PROPERTY_DISABLE
-
_product_single_value_vars += PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS
_product_single_value_vars += PRODUCT_ENFORCE_ARTIFACT_SYSTEM_CERTIFICATE_REQUIREMENT
_product_list_vars += PRODUCT_ARTIFACT_SYSTEM_CERTIFICATE_REQUIREMENT_ALLOW_LIST
@@ -380,6 +377,7 @@
_product_single_value_vars += PRODUCT_BUILD_USERDATA_IMAGE
_product_single_value_vars += PRODUCT_BUILD_RECOVERY_IMAGE
_product_single_value_vars += PRODUCT_BUILD_BOOT_IMAGE
+_product_single_value_vars += PRODUCT_BUILD_VENDOR_BOOT_IMAGE
_product_single_value_vars += PRODUCT_BUILD_VBMETA_IMAGE
# List of boot jars delivered via apex
@@ -391,6 +389,9 @@
# If set, device uses virtual A/B.
_product_single_value_vars += PRODUCT_VIRTUAL_AB_OTA
+# If set, device uses virtual A/B Compression.
+_product_single_value_vars += PRODUCT_VIRTUAL_AB_COMPRESSION
+
# If set, device retrofits virtual A/B.
_product_single_value_vars += PRODUCT_VIRTUAL_AB_OTA_RETROFIT
@@ -404,6 +405,20 @@
# If set, Java module in product partition cannot use hidden APIs.
_product_single_value_vars += PRODUCT_ENFORCE_PRODUCT_PARTITION_INTERFACE
+# If set, only java_sdk_library modules can be used for inter-partition dependencies.
+# Note: The build fails if BOARD_VNDK_VERSION is not set while
+# PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY is true, because
+# PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY has no meaning unless
+# BOARD_VNDK_VERSION is set.
+# Note: When PRODUCT_ENFORCE_PRODUCT_PARTITION_INTERFACE is not set, there are
+# no restrictions on dependencies between the system and product partitions.
+_product_single_value_vars += PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY
+
+# Allowlist for PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY option.
+# Listed modules are allowed as inter-partition dependencies even if they are
+# not java_sdk_library modules.
+_product_list_vars += PRODUCT_INTER_PARTITION_JAVA_LIBRARY_ALLOWLIST
+
_product_single_value_vars += PRODUCT_INSTALL_EXTRA_FLATTENED_APEXES
.KATI_READONLY := _product_single_value_vars _product_list_vars
@@ -459,6 +474,13 @@
$(sort $(ARTIFACT_PATH_REQUIREMENT_PRODUCTS) $(current_mk)))
endef
+# Like require-artifacts-in-path, but does not require all allow-list entries to
+# have an effect.
+define require-artifacts-in-path-relaxed
+ $(require-artifacts-in-path) \
+ $(eval PRODUCTS.$(current_mk).ARTIFACT_PATH_REQUIREMENT_IS_RELAXED := true)
+endef
+
# Makes including non-existent modules in PRODUCT_PACKAGES an error.
# $(1): list of non-existent modules to allow.
define enforce-product-packages-exist
diff --git a/core/product_config.mk b/core/product_config.mk
index 6170b5b..7b72b5e 100644
--- a/core/product_config.mk
+++ b/core/product_config.mk
@@ -160,15 +160,16 @@
$(call import-products, $(current_product_makefile))
endif # Import all or just the current product makefile
+# Quick check
+$(check-all-products)
+
# Import all the products that have made artifact path requirements, so that we can verify
# the artifacts they produce.
+# These are imported after check-all-products because some of them might not be real products.
$(foreach makefile,$(ARTIFACT_PATH_REQUIREMENT_PRODUCTS),\
$(if $(filter-out $(makefile),$(PRODUCTS)),$(eval $(call import-products,$(makefile))))\
)
-# Quick check
-$(check-all-products)
-
ifneq ($(filter dump-products, $(MAKECMDGOALS)),)
$(dump-products)
endif
@@ -184,18 +185,6 @@
all_product_makefiles :=
all_product_configs :=
-# Jacoco agent JARS to be built and installed, if any.
-ifeq ($(EMMA_INSTRUMENT),true)
- ifneq ($(EMMA_INSTRUMENT_STATIC),true)
- # For instrumented build, if Jacoco is not being included statically
- # in instrumented packages then include Jacoco classes into the
- # bootclasspath.
- $(foreach product,$(PRODUCTS),\
- $(eval PRODUCTS.$(product).PRODUCT_PACKAGES += jacocoagent)\
- $(eval PRODUCTS.$(product).PRODUCT_BOOT_JARS += jacocoagent))
- endif # EMMA_INSTRUMENT_STATIC
-endif # EMMA_INSTRUMENT
-
############################################################################
# Strip and assign the PRODUCT_ variables.
$(call strip-product-vars)
@@ -355,6 +344,16 @@
PRODUCT_EXTRA_VNDK_VERSIONS := $(OVERRIDE_PRODUCT_EXTRA_VNDK_VERSIONS)
endif
+###########################################
+# APEXes are by default not compressed
+#
+# APEX compression can be forcibly enabled (or disabled) by setting
+# OVERRIDE_PRODUCT_COMPRESSED_APEX to true (or false), e.g. via the
+# OVERRIDE_PRODUCT_COMPRESSED_APEX environment variable.
+ifdef OVERRIDE_PRODUCT_COMPRESSED_APEX
+ PRODUCT_COMPRESSED_APEX := $(OVERRIDE_PRODUCT_COMPRESSED_APEX)
+endif
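
For example, a one-off build that forces APEX compression could be started with a hypothetical command such as `OVERRIDE_PRODUCT_COMPRESSED_APEX=true m`, without editing any product makefile.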
+
$(KATI_obsolete_var OVERRIDE_PRODUCT_EXTRA_VNDK_VERSIONS \
,Use PRODUCT_EXTRA_VNDK_VERSIONS instead)
diff --git a/core/robolectric_test_config_template.xml b/core/robolectric_test_config_template.xml
index e79abd5..e62175f 100644
--- a/core/robolectric_test_config_template.xml
+++ b/core/robolectric_test_config_template.xml
@@ -15,8 +15,8 @@
-->
<!-- This test config file is auto-generated. -->
<configuration description="Runs {MODULE}">
- <option name="test-suite-tag" value="apct" />
- <option name="test-suite-tag" value="apct-junit" />
+ <option name="test-suite-tag" value="robolectric" />
+ <option name="test-suite-tag" value="robolectric-tests" />
<option name="java-folder" value="prebuilts/jdk/jdk9/linux-x86/" />
<option name="exclude-paths" value="java" />
diff --git a/core/soong_cc_prebuilt.mk b/core/soong_cc_prebuilt.mk
index 770408c..a12ef66 100644
--- a/core/soong_cc_prebuilt.mk
+++ b/core/soong_cc_prebuilt.mk
@@ -151,7 +151,7 @@
ifeq ($(LOCAL_IS_HOST_MODULE) $(if $(filter EXECUTABLES SHARED_LIBRARIES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)),true,),true true)
$(copy-or-link-prebuilt-to-target)
ifneq ($(filter EXECUTABLES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)),)
- [ -x $@ ] || $(call echo-error,$@,Target of symlink is not executable)
+ [ -x $@ ] || ( $(call echo-error,$@,Target of symlink is not executable); false )
endif
else
$(transform-prebuilt-to-target)
diff --git a/core/soong_config.mk b/core/soong_config.mk
index ad2e816..fde5832 100644
--- a/core/soong_config.mk
+++ b/core/soong_config.mk
@@ -1,7 +1,3 @@
-SOONG := $(SOONG_OUT_DIR)/soong
-SOONG_BOOTSTRAP := $(SOONG_OUT_DIR)/.soong.bootstrap
-SOONG_BUILD_NINJA := $(SOONG_OUT_DIR)/build.ninja
-SOONG_IN_MAKE := $(SOONG_OUT_DIR)/.soong.in_make
SOONG_MAKEVARS_MK := $(SOONG_OUT_DIR)/make_vars-$(TARGET_PRODUCT).mk
SOONG_VARIABLES := $(SOONG_OUT_DIR)/soong.variables
SOONG_ANDROID_MK := $(SOONG_OUT_DIR)/Android-$(TARGET_PRODUCT).mk
@@ -42,6 +38,7 @@
$(call add_json_bool, Unbundled_build, $(TARGET_BUILD_UNBUNDLED))
$(call add_json_bool, Unbundled_build_apps, $(TARGET_BUILD_APPS))
$(call add_json_bool, Always_use_prebuilt_sdks, $(TARGET_BUILD_USE_PREBUILT_SDKS))
+$(call add_json_bool, Skip_boot_jars_check, $(SKIP_BOOT_JARS_CHECK))
$(call add_json_bool, Debuggable, $(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
$(call add_json_bool, Eng, $(filter eng,$(TARGET_BUILD_VARIANT)))
@@ -57,6 +54,9 @@
$(call add_json_str, DeviceSecondaryCpuVariant, $(TARGET_2ND_CPU_VARIANT))
$(call add_json_list, DeviceSecondaryAbi, $(TARGET_2ND_CPU_ABI) $(TARGET_2ND_CPU_ABI2))
+$(call add_json_bool, Aml_abis, $(if $(filter mainline_sdk,$(TARGET_ARCH_SUITE)),true))
+$(call add_json_bool, Ndk_abis, $(if $(filter ndk, $(TARGET_ARCH_SUITE)),true))
+
$(call add_json_str, NativeBridgeArch, $(TARGET_NATIVE_BRIDGE_ARCH))
$(call add_json_str, NativeBridgeArchVariant, $(TARGET_NATIVE_BRIDGE_ARCH_VARIANT))
$(call add_json_str, NativeBridgeCpuVariant, $(TARGET_NATIVE_BRIDGE_CPU_VARIANT))
@@ -103,7 +103,9 @@
$(call add_json_list, CFIIncludePaths, $(CFI_INCLUDE_PATHS) $(PRODUCT_CFI_INCLUDE_PATHS))
$(call add_json_list, IntegerOverflowExcludePaths, $(INTEGER_OVERFLOW_EXCLUDE_PATHS) $(PRODUCT_INTEGER_OVERFLOW_EXCLUDE_PATHS))
-$(call add_json_bool, Experimental_mte, $(filter true,$(TARGET_EXPERIMENTAL_MTE)))
+$(call add_json_list, MemtagHeapExcludePaths, $(MEMTAG_HEAP_EXCLUDE_PATHS) $(PRODUCT_MEMTAG_HEAP_EXCLUDE_PATHS))
+$(call add_json_list, MemtagHeapAsyncIncludePaths, $(MEMTAG_HEAP_ASYNC_INCLUDE_PATHS) $(PRODUCT_MEMTAG_HEAP_ASYNC_INCLUDE_PATHS))
+$(call add_json_list, MemtagHeapSyncIncludePaths, $(MEMTAG_HEAP_SYNC_INCLUDE_PATHS) $(PRODUCT_MEMTAG_HEAP_SYNC_INCLUDE_PATHS))
$(call add_json_bool, DisableScudo, $(filter true,$(PRODUCT_DISABLE_SCUDO)))
@@ -129,9 +131,10 @@
$(call add_json_str, ProductVndkVersion, $(PRODUCT_PRODUCT_VNDK_VERSION))
$(call add_json_list, ExtraVndkVersions, $(PRODUCT_EXTRA_VNDK_VERSIONS))
$(call add_json_list, DeviceSystemSdkVersions, $(BOARD_SYSTEMSDK_VERSIONS))
+$(call add_json_str, RecoverySnapshotVersion, $(RECOVERY_SNAPSHOT_VERSION))
$(call add_json_list, Platform_systemsdk_versions, $(PLATFORM_SYSTEMSDK_VERSIONS))
$(call add_json_bool, Malloc_not_svelte, $(call invert_bool,$(filter true,$(MALLOC_SVELTE))))
-$(call add_json_bool, Malloc_zero_contents, $(MALLOC_ZERO_CONTENTS))
+$(call add_json_bool, Malloc_zero_contents, $(call invert_bool,$(filter false,$(MALLOC_ZERO_CONTENTS))))
$(call add_json_bool, Malloc_pattern_fill_contents, $(MALLOC_PATTERN_FILL_CONTENTS))
$(call add_json_str, Override_rs_driver, $(OVERRIDE_RS_DRIVER))
@@ -144,6 +147,12 @@
$(call add_json_bool, VndkUseCoreVariant, $(TARGET_VNDK_USE_CORE_VARIANT))
$(call add_json_bool, VndkSnapshotBuildArtifacts, $(VNDK_SNAPSHOT_BUILD_ARTIFACTS))
+$(call add_json_bool, DirectedVendorSnapshot, $(DIRECTED_VENDOR_SNAPSHOT))
+$(call add_json_map, VendorSnapshotModules)
+$(foreach module,$(VENDOR_SNAPSHOT_MODULES),\
+ $(call add_json_bool,$(module),true))
+$(call end_json_map)
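
Assuming VENDOR_SNAPSHOT_MODULES contained, say, `libfoo libbar` (illustrative names only), the map built above would land in soong.variables roughly as `"VendorSnapshotModules": {"libfoo": true, "libbar": true}`.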
+
$(call add_json_bool, Treble_linker_namespaces, $(filter true,$(PRODUCT_TREBLE_LINKER_NAMESPACES)))
$(call add_json_bool, Enforce_vintf_manifest, $(filter true,$(PRODUCT_ENFORCE_VINTF_MANIFEST)))
@@ -170,15 +179,19 @@
$(call add_json_list, PgoAdditionalProfileDirs, $(PGO_ADDITIONAL_PROFILE_DIRS))
+$(call add_json_list, BoardReqdMaskPolicy, $(BOARD_REQD_MASK_POLICY))
$(call add_json_list, BoardVendorSepolicyDirs, $(BOARD_VENDOR_SEPOLICY_DIRS) $(BOARD_SEPOLICY_DIRS))
$(call add_json_list, BoardOdmSepolicyDirs, $(BOARD_ODM_SEPOLICY_DIRS))
$(call add_json_list, BoardVendorDlkmSepolicyDirs, $(BOARD_VENDOR_DLKM_SEPOLICY_DIRS))
$(call add_json_list, BoardOdmDlkmSepolicyDirs, $(BOARD_ODM_DLKM_SEPOLICY_DIRS))
-$(call add_json_list, BoardPlatPublicSepolicyDirs, $(BOARD_PLAT_PUBLIC_SEPOLICY_DIR))
-$(call add_json_list, BoardPlatPrivateSepolicyDirs, $(BOARD_PLAT_PRIVATE_SEPOLICY_DIR))
+# TODO: BOARD_PLAT_* dirs only kept for compatibility reasons. Will be a hard error on API level 31
+$(call add_json_list, SystemExtPublicSepolicyDirs, $(SYSTEM_EXT_PUBLIC_SEPOLICY_DIRS) $(BOARD_PLAT_PUBLIC_SEPOLICY_DIR))
+$(call add_json_list, SystemExtPrivateSepolicyDirs, $(SYSTEM_EXT_PRIVATE_SEPOLICY_DIRS) $(BOARD_PLAT_PRIVATE_SEPOLICY_DIR))
$(call add_json_list, BoardSepolicyM4Defs, $(BOARD_SEPOLICY_M4DEFS))
+$(call add_json_str, BoardSepolicyVers, $(BOARD_SEPOLICY_VERS))
$(call add_json_bool, Flatten_apex, $(filter true,$(TARGET_FLATTEN_APEX)))
+$(call add_json_bool, ForceApexSymlinkOptimization, $(filter true,$(TARGET_FORCE_APEX_SYMLINK_OPTIMIZATION)))
$(call add_json_str, DexpreoptGlobalConfig, $(DEX_PREOPT_CONFIG))
@@ -195,7 +208,6 @@
$(call add_json_list, ProductPublicSepolicyDirs, $(PRODUCT_PUBLIC_SEPOLICY_DIRS))
$(call add_json_list, ProductPrivateSepolicyDirs, $(PRODUCT_PRIVATE_SEPOLICY_DIRS))
-$(call add_json_bool, ProductCompatibleProperty, $(PRODUCT_COMPATIBLE_PROPERTY))
$(call add_json_list, TargetFSConfigGen, $(TARGET_FS_CONFIG_GEN))
@@ -212,13 +224,21 @@
$(call add_json_bool, EnforceProductPartitionInterface, $(PRODUCT_ENFORCE_PRODUCT_PARTITION_INTERFACE))
$(call add_json_str, DeviceCurrentApiLevelForVendorModules, $(BOARD_CURRENT_API_LEVEL_FOR_VENDOR_MODULES))
+$(call add_json_bool, EnforceInterPartitionJavaSdkLibrary, $(PRODUCT_ENFORCE_INTER_PARTITION_JAVA_SDK_LIBRARY))
+$(call add_json_list, InterPartitionJavaLibraryAllowList, $(PRODUCT_INTER_PARTITION_JAVA_LIBRARY_ALLOWLIST))
+
$(call add_json_bool, InstallExtraFlattenedApexes, $(PRODUCT_INSTALL_EXTRA_FLATTENED_APEXES))
+$(call add_json_bool, CompressedApex, $(PRODUCT_COMPRESSED_APEX))
+
$(call add_json_bool, BoardUsesRecoveryAsBoot, $(BOARD_USES_RECOVERY_AS_BOOT))
$(call add_json_list, BoardKernelBinaries, $(BOARD_KERNEL_BINARIES))
$(call add_json_list, BoardKernelModuleInterfaceVersions, $(BOARD_KERNEL_MODULE_INTERFACE_VERSIONS))
+$(call add_json_bool, BoardMoveRecoveryResourcesToVendorBoot, $(BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT))
+$(call add_json_str, PrebuiltHiddenApiDir, $(BOARD_PREBUILT_HIDDENAPI_DIR))
+
$(call json_end)
$(file >$(SOONG_VARIABLES).tmp,$(json_contents))
diff --git a/core/soong_droiddoc_prebuilt.mk b/core/soong_droiddoc_prebuilt.mk
index c0467df..4dc5d08 100644
--- a/core/soong_droiddoc_prebuilt.mk
+++ b/core/soong_droiddoc_prebuilt.mk
@@ -29,16 +29,6 @@
$(eval $(call copy-one-file,$(LOCAL_DROIDDOC_API_VERSIONS_XML),$(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/$(LOCAL_MODULE)_generated-api-versions.xml))
endif
-ifdef LOCAL_DROIDDOC_JDIFF_DOC_ZIP
-$(eval $(call copy-one-file,$(LOCAL_DROIDDOC_JDIFF_DOC_ZIP),$(OUT_DOCS)/$(LOCAL_MODULE)-jdiff-docs.zip))
-$(call dist-for-goals,docs,$(OUT_DOCS)/$(LOCAL_MODULE)-jdiff-docs.zip)
-
-ALL_DOCS += $(OUT_DOCS)/$(LOCAL_MODULE)-jdiff-docs.zip
-
-.PHONY: $(LOCAL_MODULE) $(LOCAL_MODULE)-jdiff
-$(LOCAL_MODULE) $(LOCAL_MODULE)-jdiff : $(OUT_DOCS)/$(LOCAL_MODULE)-jdiff-docs.zip
-endif
-
ifdef LOCAL_DROIDDOC_METADATA_ZIP
$(eval $(call copy-one-file,$(LOCAL_DROIDDOC_METADATA_ZIP),$(TARGET_OUT_COMMON_INTERMEDIATES)/PACKAGING/$(LOCAL_MODULE)-metadata.zip))
endif
diff --git a/core/soong_rust_prebuilt.mk b/core/soong_rust_prebuilt.mk
index 2f4c6e7..4cfb01f 100644
--- a/core/soong_rust_prebuilt.mk
+++ b/core/soong_rust_prebuilt.mk
@@ -60,7 +60,7 @@
ifeq ($(LOCAL_IS_HOST_MODULE) $(if $(filter EXECUTABLES SHARED_LIBRARIES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)),true,),true true)
$(copy-or-link-prebuilt-to-target)
ifneq ($(filter EXECUTABLES NATIVE_TESTS,$(LOCAL_MODULE_CLASS)),)
- [ -x $@ ] || $(call echo-error,$@,Target of symlink is not executable)
+ [ -x $@ ] || ( $(call echo-error,$@,Target of symlink is not executable); false )
endif
else
$(transform-prebuilt-to-target)
@@ -82,8 +82,19 @@
endif
endif
+create_coverage_zip :=
ifeq ($(NATIVE_COVERAGE),true)
+ create_coverage_zip := true
+endif
+
+# Until Rust supports LLVM coverage, Soong assumes GCOV coverage in both cases.
+# Therefore we should create the coverage zip with the gcno files in this case as well.
+ifeq ($(CLANG_COVERAGE),true)
+ create_coverage_zip := true
+endif
+
+ifdef create_coverage_zip
ifneq (,$(strip $(LOCAL_PREBUILT_COVERAGE_ARCHIVE)))
$(eval $(call copy-one-file,$(LOCAL_PREBUILT_COVERAGE_ARCHIVE),$(intermediates)/$(LOCAL_MODULE).zip))
ifneq ($(LOCAL_UNINSTALLABLE_MODULE),true)
diff --git a/core/sysprop.mk b/core/sysprop.mk
index 73dbec7..df27067 100644
--- a/core/sysprop.mk
+++ b/core/sysprop.mk
@@ -47,6 +47,11 @@
echo "ro.product.$(1).model=$(PRODUCT_MODEL)" >> $(2);\
echo "ro.product.$(1).name=$(TARGET_PRODUCT)" >> $(2);\
)\
+ $(if $(filter system vendor odm,$(1)),\
+ echo "ro.$(1).product.cpu.abilist=$(TARGET_CPU_ABI_LIST) " >> $(2);\
+ echo "ro.$(1).product.cpu.abilist32=$(TARGET_CPU_ABI_LIST_32_BIT)" >> $(2);\
+ echo "ro.$(1).product.cpu.abilist64=$(TARGET_CPU_ABI_LIST_64_BIT)" >> $(2);\
+ )\
echo "ro.$(1).build.date=`$(DATE_FROM_FILE)`" >> $(2);\
echo "ro.$(1).build.date.utc=`$(DATE_FROM_FILE) +%s`" >> $(2);\
echo "ro.$(1).build.fingerprint=$(BUILD_FINGERPRINT_FROM_FILE)" >> $(2);\
@@ -54,7 +59,8 @@
echo "ro.$(1).build.tags=$(BUILD_VERSION_TAGS)" >> $(2);\
echo "ro.$(1).build.type=$(TARGET_BUILD_VARIANT)" >> $(2);\
echo "ro.$(1).build.version.incremental=$(BUILD_NUMBER_FROM_FILE)" >> $(2);\
- echo "ro.$(1).build.version.release=$(PLATFORM_VERSION)" >> $(2);\
+ echo "ro.$(1).build.version.release=$(PLATFORM_VERSION_LAST_STABLE)" >> $(2);\
+ echo "ro.$(1).build.version.release_or_codename=$(PLATFORM_VERSION)" >> $(2);\
echo "ro.$(1).build.version.sdk=$(PLATFORM_SDK_VERSION)" >> $(2);\
endef
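
To illustrate the abilist properties added above: on a hypothetical arm64 device the generated vendor/build.prop would gain lines along the lines of

    ro.vendor.product.cpu.abilist=arm64-v8a,armeabi-v7a,armeabi
    ro.vendor.product.cpu.abilist32=armeabi-v7a,armeabi
    ro.vendor.product.cpu.abilist64=arm64-v8a

with the exact values taken from TARGET_CPU_ABI_LIST and its 32-/64-bit variants.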
@@ -67,7 +73,10 @@
# emitted to the output
# $(4): list of variable names each of which contains name=value pairs
# $(5): optional list of prop names to force remove from the output. Properties from both
-# $(3) and (4) are affected.
+# $(3) and $(4) are affected.
+# $(6): optional list of files to append at the end. The content of each file is emitted
+# to the output
+# $(7): optional flag to skip common properties generation
define build-properties
ALL_DEFAULT_INSTALLED_MODULES += $(2)
@@ -89,11 +98,13 @@
$(eval _option := --allow-dup)\
)
-$(2): $(POST_PROCESS_PROPS) $(INTERNAL_BUILD_ID_MAKEFILE) $(API_FINGERPRINT) $(3)
+$(2): $(POST_PROCESS_PROPS) $(INTERNAL_BUILD_ID_MAKEFILE) $(API_FINGERPRINT) $(3) $(6)
$(hide) echo Building $$@
$(hide) mkdir -p $$(dir $$@)
$(hide) rm -f $$@ && touch $$@
+ifneq ($(strip $(7)), true)
$(hide) $$(call generate-common-build-props,$(call to-lower,$(strip $(1))),$$@)
+endif
$(hide) $(foreach file,$(strip $(3)),\
if [ -f "$(file)" ]; then\
echo "" >> $$@;\
@@ -112,6 +123,10 @@
)\
)
$(hide) $(POST_PROCESS_PROPS) $$(_option) $$@ $(5)
+ $(hide) $(foreach file,$(strip $(6)),\
+ if [ -f "$(file)" ]; then\
+ cat $(file) >> $$@;\
+ fi;)
$(hide) echo "# end of file" >> $$@
endef
@@ -231,7 +246,7 @@
endef
gen_from_buildinfo_sh := $(call intermediates-dir-for,PACKAGING,system_build_prop)/buildinfo.prop
-$(gen_from_buildinfo_sh): $(INTERNAL_BUILD_ID_MAKEFILE) $(API_FINGERPRINT)
+$(gen_from_buildinfo_sh): $(INTERNAL_BUILD_ID_MAKEFILE) $(API_FINGERPRINT) | $(BUILD_DATETIME_FILE) $(BUILD_NUMBER_FILE)
$(hide) TARGET_BUILD_TYPE="$(TARGET_BUILD_VARIANT)" \
TARGET_BUILD_FLAVOR="$(TARGET_BUILD_FLAVOR)" \
TARGET_DEVICE="$(TARGET_DEVICE)" \
@@ -264,19 +279,6 @@
TARGET_CPU_ABI2="$(TARGET_CPU_ABI2)" \
bash $(BUILDINFO_SH) > $@
-ifneq ($(PRODUCT_OEM_PROPERTIES),)
-import_oem_prop := $(call intermediates-dir-for,ETC,system_build_prop)/oem.prop
-
-$(import_oem_prop):
- $(hide) echo "#" >> $@; \
- echo "# PRODUCT_OEM_PROPERTIES" >> $@; \
- echo "#" >> $@;
- $(hide) $(foreach prop,$(PRODUCT_OEM_PROPERTIES), \
- echo "import /oem/oem.prop $(prop)" >> $@;)
-else
-import_oem_prop :=
-endif
-
ifdef TARGET_SYSTEM_PROP
system_prop_file := $(TARGET_SYSTEM_PROP)
else
@@ -284,7 +286,6 @@
endif
_prop_files_ := \
- $(import_oem_prop) \
$(gen_from_buildinfo_sh) \
$(system_prop_file)
@@ -310,9 +311,14 @@
INSTALLED_BUILD_PROP_TARGET := $(TARGET_OUT)/build.prop
-$(eval $(call build-properties,system,$(INSTALLED_BUILD_PROP_TARGET),\
-$(_prop_files_),$(_prop_vars_),\
-$(_blacklist_names_)))
+$(eval $(call build-properties,\
+ system,\
+ $(INSTALLED_BUILD_PROP_TARGET),\
+ $(_prop_files_),\
+ $(_prop_vars_),\
+ $(_blacklist_names_),\
+ $(empty),\
+ $(empty)))
# -----------------------------------------------------------------
# vendor/build.prop
@@ -348,7 +354,9 @@
$(INSTALLED_VENDOR_BUILD_PROP_TARGET),\
$(_prop_files_),\
$(_prop_vars_),\
- $(PRODUCT_VENDOR_PROPERTY_BLACKLIST)))
+ $(PRODUCT_VENDOR_PROPERTY_BLACKLIST),\
+ $(empty),\
+ $(empty)))
# -----------------------------------------------------------------
# product/etc/build.prop
@@ -365,12 +373,44 @@
PRODUCT_PRODUCT_PROPERTIES
INSTALLED_PRODUCT_BUILD_PROP_TARGET := $(TARGET_OUT_PRODUCT)/etc/build.prop
+
+ifdef PRODUCT_OEM_PROPERTIES
+import_oem_prop := $(call intermediates-dir-for,ETC,import_oem_prop)/oem.prop
+
+$(import_oem_prop):
+ $(hide) echo "####################################" >> $@; \
+ echo "# PRODUCT_OEM_PROPERTIES" >> $@; \
+ echo "####################################" >> $@;
+ $(hide) $(foreach prop,$(PRODUCT_OEM_PROPERTIES), \
+ echo "import /oem/oem.prop $(prop)" >> $@;)
+
+_footers_ := $(import_oem_prop)
+else
+_footers_ :=
+endif
+
+# Skip generation of the common /product properties if the device shipped before R
+# and has no product partition. This is the first part of the check.
+ifeq ($(call math_lt,$(if $(PRODUCT_SHIPPING_API_LEVEL),$(PRODUCT_SHIPPING_API_LEVEL),30),30), true)
+ _skip_common_properties := true
+endif
+
+# The second part of the check: always generate the common properties for
+# devices with a product partition, regardless of shipping API level.
+ifneq ($(BOARD_USES_PRODUCTIMAGE),)
+ _skip_common_properties :=
+endif
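
For example, a device with PRODUCT_SHIPPING_API_LEVEL := 29 and no BOARD_USES_PRODUCTIMAGE skips the common /product properties, while the same shipping level with a product partition still generates them; a device with no declared shipping level defaults to 30 and also generates them.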
+
$(eval $(call build-properties,\
product,\
$(INSTALLED_PRODUCT_BUILD_PROP_TARGET),\
$(_prop_files_),\
$(_prop_vars_),\
- $(empty)))
+ $(empty),\
+ $(_footers_),\
+ $(_skip_common_properties)))
+
+_skip_common_properties :=
# ----------------------------------------------------------------
# odm/etc/build.prop
@@ -391,6 +431,8 @@
$(INSTALLED_ODM_BUILD_PROP_TARGET),\
$(_prop_files),\
$(_prop_vars_),\
+ $(empty),\
+ $(empty),\
$(empty)))
# ----------------------------------------------------------------
@@ -400,7 +442,12 @@
INSTALLED_VENDOR_DLKM_BUILD_PROP_TARGET := $(TARGET_OUT_VENDOR_DLKM)/etc/build.prop
$(eval $(call build-properties,\
vendor_dlkm,\
- $(INSTALLED_VENDOR_DLKM_BUILD_PROP_TARGET)))
+ $(INSTALLED_VENDOR_DLKM_BUILD_PROP_TARGET),\
+ $(empty),\
+ $(empty),\
+ $(empty),\
+ $(empty),\
+ $(empty)))
# ----------------------------------------------------------------
# odm_dlkm/etc/build.prop
@@ -409,7 +456,12 @@
INSTALLED_ODM_DLKM_BUILD_PROP_TARGET := $(TARGET_OUT_ODM_DLKM)/etc/build.prop
$(eval $(call build-properties,\
odm_dlkm,\
- $(INSTALLED_ODM_DLKM_BUILD_PROP_TARGET)))
+ $(INSTALLED_ODM_DLKM_BUILD_PROP_TARGET),\
+ $(empty),\
+ $(empty),\
+ $(empty),\
+ $(empty),\
+ $(empty)))
# -----------------------------------------------------------------
# system_ext/etc/build.prop
@@ -428,4 +480,21 @@
$(INSTALLED_SYSTEM_EXT_BUILD_PROP_TARGET),\
$(_prop_files_),\
$(_prop_vars_),\
+ $(empty),\
+ $(empty),\
+ $(empty)))
+
+# ----------------------------------------------------------------
+# ramdisk/boot/etc/build.prop
+#
+
+RAMDISK_BUILD_PROP_REL_PATH := system/etc/ramdisk/build.prop
+INSTALLED_RAMDISK_BUILD_PROP_TARGET := $(TARGET_RAMDISK_OUT)/$(RAMDISK_BUILD_PROP_REL_PATH)
+$(eval $(call build-properties,\
+ bootimage,\
+ $(INSTALLED_RAMDISK_BUILD_PROP_TARGET),\
+ $(empty),\
+ $(empty),\
+ $(empty),\
+ $(empty),\
$(empty)))
diff --git a/core/tasks/art-host-tests.mk b/core/tasks/art-host-tests.mk
index 96e2c74..d771b06 100644
--- a/core/tasks/art-host-tests.mk
+++ b/core/tasks/art-host-tests.mk
@@ -16,12 +16,27 @@
intermediates_dir := $(call intermediates-dir-for,PACKAGING,art-host-tests)
art_host_tests_zip := $(PRODUCT_OUT)/art-host-tests.zip
-$(art_host_tests_zip) : $(COMPATIBILITY.art-host-tests.FILES) $(SOONG_ZIP)
+# Get the hostside libraries to be packaged in the test zip. Unlike
+# device-tests.mk or general-tests.mk, the files are not copied to the
+# testcases directory.
+my_host_shared_lib_for_art_host_tests := $(foreach f,$(COMPATIBILITY.art-host-tests.HOST_SHARED_LIBRARY.FILES),$(strip \
+ $(eval _cmf_tuple := $(subst :, ,$(f))) \
+ $(eval _cmf_src := $(word 1,$(_cmf_tuple))) \
+ $(_cmf_src)))
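
A quick stand-alone sketch of the src:dest copy-pair handling above (the paths are invented):

FILES := out/host/lib64/libfoo.so:lib64/libfoo.so out/host/lib64/libbar.so:lib64/libbar.so
# Keep only the source half of each colon-separated copy pair.
srcs := $(foreach f,$(FILES),$(word 1,$(subst :, ,$(f))))
# Prints "out/host/lib64/libfoo.so out/host/lib64/libbar.so".
$(info $(srcs))
all: ;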
+
+$(art_host_tests_zip) : PRIVATE_HOST_SHARED_LIBS := $(my_host_shared_lib_for_art_host_tests)
+
+$(art_host_tests_zip) : $(COMPATIBILITY.art-host-tests.FILES) $(my_host_shared_lib_for_art_host_tests) $(SOONG_ZIP)
echo $(sort $(COMPATIBILITY.art-host-tests.FILES)) | tr " " "\n" > $@.list
grep $(HOST_OUT_TESTCASES) $@.list > $@-host.list || true
+ $(hide) for shared_lib in $(PRIVATE_HOST_SHARED_LIBS); do \
+ echo $$shared_lib >> $@-host-libs.list; \
+ done
grep $(TARGET_OUT_TESTCASES) $@.list > $@-target.list || true
- $(hide) $(SOONG_ZIP) -d -o $@ -P host -C $(HOST_OUT) -l $@-host.list -P target -C $(PRODUCT_OUT) -l $@-target.list
- rm -f $@.list $@-host.list $@-target.list
+ $(hide) $(SOONG_ZIP) -d -o $@ -P host -C $(HOST_OUT) -l $@-host.list \
+ -P target -C $(PRODUCT_OUT) -l $@-target.list \
+ -P host/testcases -C $(HOST_OUT) -l $@-host-libs.list
+ rm -f $@.list $@-host.list $@-target.list $@-host-libs.list
art-host-tests: $(art_host_tests_zip)
$(call dist-for-goals, art-host-tests, $(art_host_tests_zip))
diff --git a/core/tasks/boot_jars_package_check.mk b/core/tasks/boot_jars_package_check.mk
deleted file mode 100644
index c9a8e27..0000000
--- a/core/tasks/boot_jars_package_check.mk
+++ /dev/null
@@ -1,63 +0,0 @@
-# Copyright (C) 2014 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Rules to check if classes in the boot jars are from the list of allowed packages.
-#
-
-ifneq ($(SKIP_BOOT_JARS_CHECK),true)
-ifdef PRODUCT_BOOT_JARS
-
-intermediates := $(call intermediates-dir-for, PACKAGING, boot-jars-package-check,,COMMON)
-stamp := $(intermediates)/stamp
-
-# Convert the colon-separated components <apex>:<jar> to <jar>.<apex> names
-# (e.g. com.android.media:updatable-media -> updatable-media.com.android.media).
-# Special cases:
-# - for the "platform" or "system_ext" apex drop the .<apex> suffix
-# - for the ART apex select release variant
-boot_jars := $(foreach pair,$(PRODUCT_BOOT_JARS) $(PRODUCT_UPDATABLE_BOOT_JARS), \
- $(eval apex := $(call word-colon,1,$(pair))) \
- $(eval jar := $(call word-colon,2,$(pair))) \
- $(eval q := :) \
- $(eval sfx := $(q).$(apex)$(q)) \
- $(eval sfx := $(subst $(q).platform$(q),$(q)$(q),$(sfx))) \
- $(eval sfx := $(subst $(q).system_ext$(q),$(q)$(q),$(sfx))) \
- $(eval sfx := $(subst $(q).com.android.art$(q),$(q).com.android.art.release$(q),$(sfx))) \
- $(eval sfx := $(patsubst $(q)%$(q),%,$(sfx))) \
- $(jar)$(sfx))
-
-# Convert boot jar names to build paths.
-built_boot_jars := $(foreach j, $(boot_jars), \
- $(call intermediates-dir-for, JAVA_LIBRARIES, $(j),,COMMON)/classes.jar)
-
-script := build/make/core/tasks/check_boot_jars/check_boot_jars.py
-allowed_file := build/make/core/tasks/check_boot_jars/package_allowed_list.txt
-
-$(stamp): PRIVATE_BOOT_JARS := $(built_boot_jars)
-$(stamp): PRIVATE_SCRIPT := $(script)
-$(stamp): PRIVATE_ALLOWED := $(allowed_file)
-$(stamp) : $(built_boot_jars) $(script) $(allowed_file)
- @echo "Check package name for $(PRIVATE_BOOT_JARS)"
- $(hide) $(PRIVATE_SCRIPT) $(PRIVATE_ALLOWED) $(PRIVATE_BOOT_JARS)
- $(hide) mkdir -p $(dir $@) && touch $@
-
-.PHONY: check-boot-jars
-check-boot-jars : $(stamp)
-
-# Run check-boot-jars by default
-droidcore : check-boot-jars
-
-endif # PRODUCT_BOOT_JARS
-endif # SKIP_BOOT_JARS_CHECK not true
diff --git a/core/tasks/check_boot_jars/check_boot_jars.py b/core/tasks/check_boot_jars/check_boot_jars.py
deleted file mode 100755
index cf4ef27..0000000
--- a/core/tasks/check_boot_jars/check_boot_jars.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Check boot jars.
-
-Usage: check_boot_jars.py <package_allow_list_file> <jar1> <jar2> ...
-"""
-import logging
-import os.path
-import re
-import subprocess
-import sys
-
-
-# The compiled allow list RE.
-allow_list_re = None
-
-
-def LoadAllowList(filename):
- """ Load and compile allow list regular expressions from filename.
- """
- lines = []
- with open(filename, 'r') as f:
- for line in f:
- line = line.strip()
- if not line or line.startswith('#'):
- continue
- lines.append(line)
- combined_re = r'^(%s)$' % '|'.join(lines)
- global allow_list_re
- try:
- allow_list_re = re.compile(combined_re)
- except re.error:
- logging.exception(
- 'Cannot compile package allow list regular expression: %r',
- combined_re)
- allow_list_re = None
- return False
- return True
-
-
-def CheckJar(allow_list_path, jar):
- """Check a jar file.
- """
- # Get the list of files inside the jar file.
- p = subprocess.Popen(args='jar tf %s' % jar,
- stdout=subprocess.PIPE, shell=True)
- stdout, _ = p.communicate()
- if p.returncode != 0:
- return False
- items = stdout.split()
- classes = 0
- for f in items:
- if f.endswith('.class'):
- classes += 1
- package_name = os.path.dirname(f)
- package_name = package_name.replace('/', '.')
- if not package_name or not allow_list_re.match(package_name):
- print >> sys.stderr, ('Error: %s contains class file %s, whose package name %s is empty or'
- ' not in the allow list %s of packages allowed on the bootclasspath.'
- % (jar, f, package_name, allow_list_path))
- return False
- if classes == 0:
- print >> sys.stderr, ('Error: %s does not contain any class files.' % jar)
- return False
- return True
-
-
-def main(argv):
- if len(argv) < 2:
- print __doc__
- return 1
- allow_list_path = argv[0]
-
- if not LoadAllowList(allow_list_path):
- return 1
-
- passed = True
- for jar in argv[1:]:
- if not CheckJar(allow_list_path, jar):
- passed = False
- if not passed:
- return 1
-
- return 0
-
-
-if __name__ == '__main__':
- sys.exit(main(sys.argv[1:]))
diff --git a/core/tasks/check_boot_jars/package_allowed_list.txt b/core/tasks/check_boot_jars/package_allowed_list.txt
deleted file mode 100644
index 18ab427..0000000
--- a/core/tasks/check_boot_jars/package_allowed_list.txt
+++ /dev/null
@@ -1,248 +0,0 @@
-# Boot jar package name allowed list.
-# Each line is interpreted as a regular expression.
-
-###################################################
-# core-libart.jar & core-oj.jar
-java\.awt\.font
-java\.beans
-java\.io
-java\.lang
-java\.lang\.annotation
-java\.lang\.invoke
-java\.lang\.ref
-java\.lang\.reflect
-java\.math
-java\.net
-java\.nio
-java\.nio\.file
-java\.nio\.file\.spi
-java\.nio\.file\.attribute
-java\.nio\.channels
-java\.nio\.channels\.spi
-java\.nio\.charset
-java\.nio\.charset\.spi
-java\.security
-java\.security\.acl
-java\.security\.cert
-java\.security\.interfaces
-java\.security\.spec
-java\.sql
-java\.text
-java\.text\.spi
-java\.time
-java\.time\.chrono
-java\.time\.format
-java\.time\.temporal
-java\.time\.zone
-java\.util
-java\.util\.concurrent
-java\.util\.concurrent\.atomic
-java\.util\.concurrent\.locks
-java\.util\.function
-java\.util\.jar
-java\.util\.logging
-java\.util\.prefs
-java\.util\.regex
-java\.util\.spi
-java\.util\.stream
-java\.util\.zip
-# TODO: Remove javax.annotation.processing if possible, see http://b/132338110:
-javax\.annotation\.processing
-javax\.crypto
-javax\.crypto\.interfaces
-javax\.crypto\.spec
-javax\.net
-javax\.net\.ssl
-javax\.security\.auth
-javax\.security\.auth\.callback
-javax\.security\.auth\.login
-javax\.security\.auth\.x500
-javax\.security\.cert
-javax\.sql
-javax\.xml
-javax\.xml\.datatype
-javax\.xml\.namespace
-javax\.xml\.parsers
-javax\.xml\.transform
-javax\.xml\.transform\.dom
-javax\.xml\.transform\.sax
-javax\.xml\.transform\.stream
-javax\.xml\.validation
-javax\.xml\.xpath
-jdk\.internal\.util
-jdk\.internal\.vm\.annotation
-jdk\.net
-org\.w3c\.dom
-org\.w3c\.dom\.ls
-org\.w3c\.dom\.traversal
-# OpenJdk internal implementation.
-sun\.invoke\.util
-sun\.invoke\.empty
-sun\.misc
-sun\.util.*
-sun\.text.*
-sun\.security.*
-sun\.reflect.*
-sun\.nio.*
-sun\.net.*
-com\.sun\..*
-
-# TODO: Move these internal org.apache.harmony classes to libcore.*
-org\.apache\.harmony\.crypto\.internal
-org\.apache\.harmony\.dalvik
-org\.apache\.harmony\.dalvik\.ddmc
-org\.apache\.harmony\.luni\.internal\.util
-org\.apache\.harmony\.security
-org\.apache\.harmony\.security\.asn1
-org\.apache\.harmony\.security\.fortress
-org\.apache\.harmony\.security\.pkcs10
-org\.apache\.harmony\.security\.pkcs7
-org\.apache\.harmony\.security\.pkcs8
-org\.apache\.harmony\.security\.provider\.crypto
-org\.apache\.harmony\.security\.utils
-org\.apache\.harmony\.security\.x501
-org\.apache\.harmony\.security\.x509
-org\.apache\.harmony\.security\.x509\.tsp
-org\.apache\.harmony\.xml
-org\.apache\.harmony\.xml\.dom
-org\.apache\.harmony\.xml\.parsers
-
-org\.json
-org\.xmlpull\.v1
-org\.xmlpull\.v1\.sax2
-
-# TODO: jarjar org.kxml2.io to com.android org\.kxml2\.io
-org\.kxml2\.io
-org\.xml
-org\.xml\.sax
-org\.xml\.sax\.ext
-org\.xml\.sax\.helpers
-
-dalvik\..*
-libcore\..*
-android\..*
-com\.android\..*
-###################################################
-# android.test.base.jar
-junit\.extensions
-junit\.framework
-android\.test
-android\.test\.suitebuilder\.annotation
-
-
-###################################################
-# ext.jar
-# TODO: jarjar javax.sip to com.android
-javax\.sip
-javax\.sip\.address
-javax\.sip\.header
-javax\.sip\.message
-
-# TODO: jarjar org.apache.commons to com.android
-org\.apache\.commons\.codec
-org\.apache\.commons\.codec\.binary
-org\.apache\.commons\.codec\.language
-org\.apache\.commons\.codec\.net
-org\.apache\.commons\.logging
-org\.apache\.commons\.logging\.impl
-org\.apache\.http
-org\.apache\.http\.auth
-org\.apache\.http\.auth\.params
-org\.apache\.http\.client
-org\.apache\.http\.client\.entity
-org\.apache\.http\.client\.methods
-org\.apache\.http\.client\.params
-org\.apache\.http\.client\.protocol
-org\.apache\.http\.client\.utils
-org\.apache\.http\.conn
-org\.apache\.http\.conn\.params
-org\.apache\.http\.conn\.routing
-org\.apache\.http\.conn\.scheme
-org\.apache\.http\.conn\.ssl
-org\.apache\.http\.conn\.util
-org\.apache\.http\.cookie
-org\.apache\.http\.cookie\.params
-org\.apache\.http\.entity
-org\.apache\.http\.impl
-org\.apache\.http\.impl\.auth
-org\.apache\.http\.impl\.client
-org\.apache\.http\.impl\.client
-org\.apache\.http\.impl\.conn
-org\.apache\.http\.impl\.conn\.tsccm
-org\.apache\.http\.impl\.cookie
-org\.apache\.http\.impl\.entity
-org\.apache\.http\.impl\.io
-org\.apache\.http\.impl\.io
-org\.apache\.http\.io
-org\.apache\.http\.message
-org\.apache\.http\.params
-org\.apache\.http\.protocol
-org\.apache\.http\.util
-
-# TODO: jarjar gov.nist to com.android
-gov\.nist\.core
-gov\.nist\.core\.net
-gov\.nist\.javax\.sip
-gov\.nist\.javax\.sip\.address
-gov\.nist\.javax\.sip\.clientauthutils
-gov\.nist\.javax\.sip\.header
-gov\.nist\.javax\.sip\.header\.extensions
-gov\.nist\.javax\.sip\.header\.ims
-gov\.nist\.javax\.sip\.message
-gov\.nist\.javax\.sip\.parser
-gov\.nist\.javax\.sip\.parser\.extensions
-gov\.nist\.javax\.sip\.parser\.ims
-gov\.nist\.javax\.sip\.stack
-
-org\.ccil\.cowan\.tagsoup
-org\.ccil\.cowan\.tagsoup\.jaxp
-
-###################################################
-# framework.jar
-javax\.microedition\.khronos\.opengles
-javax\.microedition\.khronos\.egl
-
-android
-
-###################################################
-# apache-xml.jar
-org\.apache\.xml\.res
-org\.apache\.xml\.utils
-org\.apache\.xml\.utils\.res
-org\.apache\.xml\.dtm
-org\.apache\.xml\.dtm\.ref
-org\.apache\.xml\.dtm\.ref\.dom2dtm
-org\.apache\.xml\.dtm\.ref\.sax2dtm
-org\.apache\.xml\.serializer
-org\.apache\.xml\.serializer\.utils
-org\.apache\.xml\.serializer\.dom3
-org\.apache\.xpath
-org\.apache\.xpath\.operations
-org\.apache\.xpath\.domapi
-org\.apache\.xpath\.functions
-org\.apache\.xpath\.res
-org\.apache\.xpath\.axes
-org\.apache\.xpath\.objects
-org\.apache\.xpath\.patterns
-org\.apache\.xpath\.jaxp
-org\.apache\.xpath\.compiler
-org\.apache\.xalan
-org\.apache\.xalan\.res
-org\.apache\.xalan\.templates
-org\.apache\.xalan\.serialize
-org\.apache\.xalan\.extensions
-org\.apache\.xalan\.processor
-org\.apache\.xalan\.transformer
-org\.apache\.xalan\.xslt
-
-###################################################
-# Packages in the google namespace across all bootclasspath jars.
-com\.google\.android\..*
-com\.google\.vr\.platform.*
-com\.google\.i18n\.phonenumbers\..*
-com\.google\.i18n\.phonenumbers
-
-###################################################
-# Packages used for Android in Chrome OS
-org\.chromium\.arc
-org\.chromium\.arc\..*
diff --git a/core/tasks/cts.mk b/core/tasks/cts.mk
index cd5fa8e..fdd9591 100644
--- a/core/tasks/cts.mk
+++ b/core/tasks/cts.mk
@@ -14,9 +14,8 @@
test_suite_name := cts
test_suite_tradefed := cts-tradefed
-test_suite_dynamic_config := test/suite_harness/tools/cts-tradefed/DynamicConfig.xml
-test_suite_readme := test/suite_harness/tools/cts-tradefed/README
-include_test_suite_notice := true
+test_suite_dynamic_config := cts/tools/cts-tradefed/DynamicConfig.xml
+test_suite_readme := cts/tools/cts-tradefed/README
include $(BUILD_SYSTEM)/tasks/tools/compatibility.mk
diff --git a/core/tasks/find-shareduid-violation.mk b/core/tasks/find-shareduid-violation.mk
index 86052f2..d6885eb 100644
--- a/core/tasks/find-shareduid-violation.mk
+++ b/core/tasks/find-shareduid-violation.mk
@@ -16,8 +16,6 @@
shareduid_violation_modules_filename := $(PRODUCT_OUT)/shareduid_violation_modules.json
-find_shareduid_script := $(BUILD_SYSTEM)/tasks/find-shareduid-violation.py
-
$(shareduid_violation_modules_filename): $(INSTALLED_SYSTEMIMAGE_TARGET) \
$(INSTALLED_RAMDISK_TARGET) \
$(INSTALLED_BOOTIMAGE_TARGET) \
@@ -26,7 +24,15 @@
$(INSTALLED_PRODUCTIMAGE_TARGET) \
$(INSTALLED_SYSTEM_EXTIMAGE_TARGET)
-$(shareduid_violation_modules_filename): $(find_shareduid_script)
+$(shareduid_violation_modules_filename): $(HOST_OUT_EXECUTABLES)/find_shareduid_violation
$(shareduid_violation_modules_filename): $(AAPT2)
- $(find_shareduid_script) $(PRODUCT_OUT) $(AAPT2) > $@
+ $(HOST_OUT_EXECUTABLES)/find_shareduid_violation \
+ --product_out $(PRODUCT_OUT) \
+ --aapt $(AAPT2) \
+ --copy_out_system $(TARGET_COPY_OUT_SYSTEM) \
+ --copy_out_vendor $(TARGET_COPY_OUT_VENDOR) \
+ --copy_out_product $(TARGET_COPY_OUT_PRODUCT) \
+ --copy_out_system_ext $(TARGET_COPY_OUT_SYSTEM_EXT) \
+ > $@
+
$(call dist-for-goals,droidcore,$(shareduid_violation_modules_filename))
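
For reference, the generated report is a JSON map keyed by sharedUserId, listing the offending APKs per partition; a hypothetical entry might look like {"android.uid.example": {"system": ["Foo.apk"], "product": ["Bar.apk"]}}.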
diff --git a/core/tasks/find-shareduid-violation.py b/core/tasks/find-shareduid-violation.py
deleted file mode 100755
index 1f8e4df..0000000
--- a/core/tasks/find-shareduid-violation.py
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/usr/bin/env python3
-#
-# Copyright (C) 2019 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-import os
-import subprocess
-from glob import glob
-from collections import defaultdict
-import sys
-import json
-
-if len(sys.argv) < 3:
- product_out = os.environ["PRODUCT_OUT"]
- aapt = "aapt2"
-else:
- product_out = sys.argv[1]
- aapt = sys.argv[2]
-
-def execute(cmd):
- p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- out, err = map(lambda b: b.decode('utf-8'), p.communicate())
- return p.returncode == 0, out, err
-
-def make_aapt_cmds(file):
- return [aapt + ' dump ' + file + ' --file AndroidManifest.xml',
- aapt + ' dump xmltree ' + file + ' --file AndroidManifest.xml']
-
-def extract_shared_uid(file):
- for cmd in make_aapt_cmds(file):
- success, manifest, error_msg = execute(cmd)
- if success:
- break
- else:
- print(error_msg, file=sys.stderr)
- sys.exit()
- return None
-
- for l in manifest.split('\n'):
- if "sharedUserId" in l:
- return l.split('"')[-2]
- return None
-
-
-partitions = ["system", "vendor", "product"]
-
-shareduid_app_dict = defaultdict(list)
-
-for p in partitions:
- for f in glob(os.path.join(product_out, p, "*", "*", "*.apk")):
- apk_file = os.path.basename(f)
- shared_uid = extract_shared_uid(f)
-
- if shared_uid is None:
- continue
- shareduid_app_dict[shared_uid].append((p, apk_file))
-
-
-output = defaultdict(lambda: defaultdict(list))
-
-for uid, app_infos in shareduid_app_dict.items():
- partitions = {p for p, _ in app_infos}
- if len(partitions) > 1:
- for part in partitions:
- output[uid][part].extend([a for p, a in app_infos if p == part])
-
-print(json.dumps(output, indent=2, sort_keys=True))
diff --git a/core/tasks/host-unit-tests.mk b/core/tasks/host-unit-tests.mk
new file mode 100644
index 0000000..755b589
--- /dev/null
+++ b/core/tasks/host-unit-tests.mk
@@ -0,0 +1,50 @@
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# `host-unit-tests` shall only include hostside unit tests that don't require a device to run.
+# Included tests will be run as part of the presubmit check.
+# To add tests to the suite, do one of the following:
+# * For test modules configured with Android.bp, set attribute `test_options: { unit_test: true }`
+# * For test modules configured with mk, set `LOCAL_IS_UNIT_TEST := true` (sketched below)
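
A minimal, hypothetical Android.mk module that opts into this suite (module name and source file are placeholders; this assumes the defaults of BUILD_HOST_NATIVE_TEST are sufficient):

include $(CLEAR_VARS)
LOCAL_MODULE := libfoo_host_unit_tests
LOCAL_SRC_FILES := foo_test.cpp
# Marks the module as a unit test so it is packaged into host-unit-tests.zip.
LOCAL_IS_UNIT_TEST := true
include $(BUILD_HOST_NATIVE_TEST)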
+.PHONY: host-unit-tests
+
+intermediates_dir := $(call intermediates-dir-for,PACKAGING,host-unit-tests)
+host_unit_tests_zip := $(PRODUCT_OUT)/host-unit-tests.zip
+# Get the hostside libraries to be packaged in the test zip. Unlike
+# device-tests.mk or general-tests.mk, the files are not copied to the
+# testcases directory.
+my_host_shared_lib_for_host_unit_tests := $(foreach f,$(COMPATIBILITY.host-unit-tests.HOST_SHARED_LIBRARY.FILES),$(strip \
+ $(eval _cmf_tuple := $(subst :, ,$(f))) \
+ $(eval _cmf_src := $(word 1,$(_cmf_tuple))) \
+ $(_cmf_src)))
+
+$(host_unit_tests_zip) : PRIVATE_HOST_SHARED_LIBS := $(my_host_shared_lib_for_host_unit_tests)
+
+$(host_unit_tests_zip) : $(COMPATIBILITY.host-unit-tests.FILES) $(my_host_shared_lib_for_host_unit_tests) $(SOONG_ZIP)
+ echo $(sort $(COMPATIBILITY.host-unit-tests.FILES)) | tr " " "\n" > $@.list
+ grep $(HOST_OUT_TESTCASES) $@.list > $@-host.list || true
+ echo "" >> $@-host-libs.list
+ $(hide) for shared_lib in $(PRIVATE_HOST_SHARED_LIBS); do \
+ echo $$shared_lib >> $@-host-libs.list; \
+ done
+ grep $(TARGET_OUT_TESTCASES) $@.list > $@-target.list || true
+ $(hide) $(SOONG_ZIP) -d -o $@ -P host -C $(HOST_OUT) -l $@-host.list \
+ -P target -C $(PRODUCT_OUT) -l $@-target.list \
+ -P host/testcases -C $(HOST_OUT) -l $@-host-libs.list
+ rm -f $@.list $@-host.list $@-target.list $@-host-libs.list
+
+host-unit-tests: $(host_unit_tests_zip)
+$(call dist-for-goals, host-unit-tests, $(host_unit_tests_zip))
+
+tests: host-unit-tests
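As a quick illustration of the mk-based opt-in described in the comments at the top of this new file, a host test module would set LOCAL_IS_UNIT_TEST in its Android.mk. Only that flag comes from this change; the module name and the surrounding boilerplate below are hypothetical.

    include $(CLEAR_VARS)
    LOCAL_MODULE := example_host_unit_test  # hypothetical module name
    LOCAL_IS_UNIT_TEST := true              # opts the module into host-unit-tests
    # ...followed by the module's usual host test build rule include.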
diff --git a/core/tasks/host_init_verifier.mk b/core/tasks/host_init_verifier.mk
new file mode 100644
index 0000000..bdf996c
--- /dev/null
+++ b/core/tasks/host_init_verifier.mk
@@ -0,0 +1,56 @@
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+host_init_verifier_output := $(PRODUCT_OUT)/host_init_verifier_output.txt
+
+$(host_init_verifier_output): \
+ $(INSTALLED_SYSTEMIMAGE_TARGET) \
+ $(INSTALLED_SYSTEM_EXTIMAGE_TARGET) \
+ $(INSTALLED_VENDORIMAGE_TARGET) \
+ $(INSTALLED_ODMIMAGE_TARGET) \
+ $(INSTALLED_PRODUCTIMAGE_TARGET) \
+ $(call intermediates-dir-for,ETC,passwd_system)/passwd_system \
+ $(call intermediates-dir-for,ETC,passwd_system_ext)/passwd_system_ext \
+ $(call intermediates-dir-for,ETC,passwd_vendor)/passwd_vendor \
+ $(call intermediates-dir-for,ETC,passwd_odm)/passwd_odm \
+ $(call intermediates-dir-for,ETC,passwd_product)/passwd_product \
+ $(call intermediates-dir-for,ETC,plat_property_contexts)/plat_property_contexts \
+ $(call intermediates-dir-for,ETC,system_ext_property_contexts)/system_ext_property_contexts \
+ $(call intermediates-dir-for,ETC,product_property_contexts)/product_property_contexts \
+ $(call intermediates-dir-for,ETC,vendor_property_contexts)/vendor_property_contexts \
+ $(call intermediates-dir-for,ETC,odm_property_contexts)/odm_property_contexts
+
+# Run host_init_verifier on the partition staging directories.
+$(host_init_verifier_output): $(HOST_INIT_VERIFIER)
+ $(HOST_INIT_VERIFIER) \
+ -p $(call intermediates-dir-for,ETC,passwd_system)/passwd_system \
+ -p $(call intermediates-dir-for,ETC,passwd_system_ext)/passwd_system_ext \
+ -p $(call intermediates-dir-for,ETC,passwd_vendor)/passwd_vendor \
+ -p $(call intermediates-dir-for,ETC,passwd_odm)/passwd_odm \
+ -p $(call intermediates-dir-for,ETC,passwd_product)/passwd_product \
+ --property-contexts=$(call intermediates-dir-for,ETC,plat_property_contexts)/plat_property_contexts \
+ --property-contexts=$(call intermediates-dir-for,ETC,system_ext_property_contexts)/system_ext_property_contexts \
+ --property-contexts=$(call intermediates-dir-for,ETC,product_property_contexts)/product_property_contexts \
+ --property-contexts=$(call intermediates-dir-for,ETC,vendor_property_contexts)/vendor_property_contexts \
+ --property-contexts=$(call intermediates-dir-for,ETC,odm_property_contexts)/odm_property_contexts \
+ --out_system $(PRODUCT_OUT)/$(TARGET_COPY_OUT_SYSTEM) \
+ --out_system_ext $(PRODUCT_OUT)/$(TARGET_COPY_OUT_SYSTEM_EXT) \
+ --out_vendor $(PRODUCT_OUT)/$(TARGET_COPY_OUT_VENDOR) \
+ --out_odm $(PRODUCT_OUT)/$(TARGET_COPY_OUT_ODM) \
+ --out_product $(PRODUCT_OUT)/$(TARGET_COPY_OUT_PRODUCT) \
+ > $@
+
+$(call dist-for-goals,droidcore,$(host_init_verifier_output))
diff --git a/core/tasks/module-info.mk b/core/tasks/module-info.mk
index cf32d65..4bbfd39 100644
--- a/core/tasks/module-info.mk
+++ b/core/tasks/module-info.mk
@@ -20,6 +20,7 @@
'"srcjars": [$(foreach w,$(sort $(ALL_MODULES.$(m).SRCJARS)),"$(w)", )], ' \
'"classes_jar": [$(foreach w,$(sort $(ALL_MODULES.$(m).CLASSES_JAR)),"$(w)", )], ' \
'"test_mainline_modules": [$(foreach w,$(sort $(ALL_MODULES.$(m).TEST_MAINLINE_MODULES)),"$(w)", )], ' \
+ '"is_unit_test": "$(ALL_MODULES.$(m).IS_UNIT_TEST)", ' \
'},\n' \
) | sed -e 's/, *\]/]/g' -e 's/, *\}/ }/g' -e '$$s/,$$//' >> $@
$(hide) echo '}' >> $@
diff --git a/core/tasks/mts.mk b/core/tasks/mts.mk
index e800505..e084856 100644
--- a/core/tasks/mts.mk
+++ b/core/tasks/mts.mk
@@ -13,13 +13,20 @@
# limitations under the License.
ifneq ($(wildcard test/mts/README.md),)
-test_suite_name := mts
-test_suite_tradefed := mts-tradefed
-test_suite_readme := test/mts/README.md
-include $(BUILD_SYSTEM)/tasks/tools/compatibility.mk
+mts_test_suites :=
+mts_test_suites += mts
-.PHONY: mts
-mts: $(compatibility_zip)
-$(call dist-for-goals, mts, $(compatibility_zip))
+$(foreach module, $(mts_modules), $(eval mts_test_suites += mts-$(module)))
+
+$(foreach suite, $(mts_test_suites), \
+ $(eval test_suite_name := $(suite)) \
+ $(eval test_suite_tradefed := mts-tradefed) \
+ $(eval test_suite_readme := test/mts/README.md) \
+ $(eval include $(BUILD_SYSTEM)/tasks/tools/compatibility.mk) \
+ $(eval .PHONY: $(suite)) \
+ $(eval $(suite): $(compatibility_zip)) \
+ $(eval $(call dist-for-goals, $(suite), $(compatibility_zip))) \
+)
+
endif
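For illustration, assuming mts_modules were to contain an entry such as "wifi" (a hypothetical entry; the actual list is defined elsewhere), the loop above would evaluate, for that entry, roughly:

    test_suite_name := mts-wifi
    test_suite_tradefed := mts-tradefed
    test_suite_readme := test/mts/README.md
    include $(BUILD_SYSTEM)/tasks/tools/compatibility.mk
    .PHONY: mts-wifi
    mts-wifi: $(compatibility_zip)
    $(call dist-for-goals, mts-wifi, $(compatibility_zip))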
diff --git a/core/tasks/platform_availability_check.mk b/core/tasks/platform_availability_check.mk
index f252ff53..1524758 100644
--- a/core/tasks/platform_availability_check.mk
+++ b/core/tasks/platform_availability_check.mk
@@ -17,6 +17,9 @@
# Check whether any module that isn't available for the platform
# is installed to the platform.
+# Skip for unbundled builds that don't produce a platform image.
+ifeq (,$(TARGET_BUILD_UNBUNDLED))
+
# Filter FAKE and NON_INSTALLABLE modules out and then collect those that are not
# available for the platform
_modules_not_available_for_platform := \
@@ -41,7 +44,7 @@
# Don't error out immediately when ALLOW_MISSING_DEPENDENCIES is set.
# Instead, add a dependency on a rule that prints the error message.
define not_available_for_platform_rule
- not_installable_file := $(patsubst $(OUT_DIR)/%,$(OUT_DIR)/NOT_AVAILABLE_FOR_PLATFORM/%,$(1)))
+ not_installable_file := $(patsubst $(OUT_DIR)/%,$(OUT_DIR)/NOT_AVAILABLE_FOR_PLATFORM/%,$(1))
$(1): $$(not_installable_file)
$$(not_installable_file):
$(call echo-error,$(2),Module is requested to be installed but is not \
@@ -51,6 +54,8 @@
endef
$(foreach m,$(_modules_not_available_for_platform),\
- $(foreach i,$(ALL_MODULES.$(m).INSTALLED),\
+ $(foreach i,$(filter-out $(HOST_OUT)/%,$(ALL_MODULES.$(m).INSTALLED)),\
$(eval $(call not_available_for_platform_rule,$(i),$(m)))))
endif
+
+endif
diff --git a/core/tasks/recovery_snapshot.mk b/core/tasks/recovery_snapshot.mk
new file mode 100644
index 0000000..525273b
--- /dev/null
+++ b/core/tasks/recovery_snapshot.mk
@@ -0,0 +1,34 @@
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+current_makefile := $(lastword $(MAKEFILE_LIST))
+
+# RECOVERY_SNAPSHOT_VERSION must be set to 'current' in order to generate a recovery snapshot.
+ifeq ($(RECOVERY_SNAPSHOT_VERSION),current)
+
+.PHONY: recovery-snapshot
+recovery-snapshot: $(SOONG_RECOVERY_SNAPSHOT_ZIP)
+
+$(call dist-for-goals, recovery-snapshot, $(SOONG_RECOVERY_SNAPSHOT_ZIP))
+
+else # RECOVERY_SNAPSHOT_VERSION is NOT set to 'current'
+
+.PHONY: recovery-snapshot
+recovery-snapshot: PRIVATE_MAKEFILE := $(current_makefile)
+recovery-snapshot:
+ $(call echo-error,$(PRIVATE_MAKEFILE),\
+ "CANNOT generate Recovery snapshot. RECOVERY_SNAPSHOT_VERSION must be set to 'current'.")
+ exit 1
+
+endif # RECOVERY_SNAPSHOT_VERSION
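A sketch of how this would typically be driven, assuming RECOVERY_SNAPSHOT_VERSION is set in the device's board configuration (mirroring how BOARD_VNDK_VERSION gates the vendor snapshot; that placement is an assumption, not something this file mandates):

    # In a device BoardConfig.mk (assumed location):
    RECOVERY_SNAPSHOT_VERSION := current
    # The snapshot zip is then produced and dist'ed with something like:
    #   m dist recovery-snapshot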
diff --git a/core/tasks/tools/compatibility.mk b/core/tasks/tools/compatibility.mk
index f394b96..570a39a 100644
--- a/core/tasks/tools/compatibility.mk
+++ b/core/tasks/tools/compatibility.mk
@@ -29,15 +29,12 @@
test_suite_subdir := android-$(test_suite_name)
out_dir := $(HOST_OUT)/$(test_suite_name)/$(test_suite_subdir)
test_artifacts := $(COMPATIBILITY.$(test_suite_name).FILES)
-test_tools := $(HOST_OUT_JAVA_LIBRARIES)/hosttestlib.jar \
- $(HOST_OUT_JAVA_LIBRARIES)/tradefed.jar \
+test_tools := $(HOST_OUT_JAVA_LIBRARIES)/tradefed.jar \
$(HOST_OUT_JAVA_LIBRARIES)/tradefed-no-fwk.jar \
$(HOST_OUT_JAVA_LIBRARIES)/tradefed-test-framework.jar \
$(HOST_OUT_JAVA_LIBRARIES)/loganalysis.jar \
$(HOST_OUT_JAVA_LIBRARIES)/compatibility-host-util.jar \
- $(HOST_OUT_JAVA_LIBRARIES)/compatibility-host-util-tests.jar \
- $(HOST_OUT_JAVA_LIBRARIES)/compatibility-common-util-tests.jar \
- $(HOST_OUT_JAVA_LIBRARIES)/compatibility-tradefed-tests.jar \
+ $(HOST_OUT_JAVA_LIBRARIES)/compatibility-tradefed.jar \
$(HOST_OUT_JAVA_LIBRARIES)/$(test_suite_tradefed).jar \
$(HOST_OUT_JAVA_LIBRARIES)/$(test_suite_tradefed)-tests.jar \
$(HOST_OUT_EXECUTABLES)/$(test_suite_tradefed) \
@@ -80,10 +77,8 @@
$(HOST_OUT_NOTICE_FILES) $(TARGET_OUT_NOTICE_FILES), \
$(compatibility_zip_deps)))
-ifeq ($(include_test_suite_notice),true)
- compatibility_zip_deps += $(test_suite_notice_txt)
- compatibility_zip_resources += $(test_suite_notice_txt)
-endif
+compatibility_zip_deps += $(test_suite_notice_txt)
+compatibility_zip_resources += $(test_suite_notice_txt)
compatibility_zip := $(out_dir).zip
$(compatibility_zip): PRIVATE_OUT_DIR := $(out_dir)
@@ -112,7 +107,6 @@
test_suite_readme :=
test_suite_prebuilt_tools :=
test_suite_tools :=
-include_test_suite_notice :=
test_suite_jdk :=
test_suite_jdk_dir :=
host_shared_libs :=
diff --git a/core/tasks/tools/vts_package_utils.mk b/core/tasks/tools/vts_package_utils.mk
new file mode 100644
index 0000000..47bf29c
--- /dev/null
+++ b/core/tasks/tools/vts_package_utils.mk
@@ -0,0 +1,33 @@
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# $(1): List of target native modules to copy.
+# $(2): Copy destination directory.
+# Evaluates to a list of ":"-separated pairs src:dst.
+define target-native-copy-pairs
+$(foreach m,$(1),\
+ $(eval _built_files := $(strip $(ALL_MODULES.$(m).BUILT_INSTALLED)\
+ $(ALL_MODULES.$(m)$(TARGET_2ND_ARCH_MODULE_SUFFIX).BUILT_INSTALLED)))\
+ $(foreach i, $(_built_files),\
+ $(eval bui_ins := $(subst :,$(space),$(i)))\
+ $(eval ins := $(word 2,$(bui_ins)))\
+ $(if $(filter $(TARGET_OUT_ROOT)/%,$(ins)),\
+ $(eval bui := $(word 1,$(bui_ins)))\
+ $(eval my_copy_dest := $(patsubst data/%,DATA/%,\
+ $(patsubst system/%,DATA/%,\
+ $(patsubst $(PRODUCT_OUT)/%,%,$(ins)))))\
+ $(bui):$(2)/$(my_copy_dest))))
+endef
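A usage sketch for the helper above; the module names and destination variable are hypothetical, and only the $(1)/$(2) call shape follows from the comments:

    example_modules := libfoo binderLibTest   # hypothetical target module names
    example_copy_pairs := \
      $(call target-native-copy-pairs,$(example_modules),$(example_dest_dir))
    # example_copy_pairs now holds built:dest pairs that a copy rule can consume.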
diff --git a/core/tasks/vendor_snapshot.mk b/core/tasks/vendor_snapshot.mk
index 8234e3f..83c1379 100644
--- a/core/tasks/vendor_snapshot.mk
+++ b/core/tasks/vendor_snapshot.mk
@@ -22,6 +22,11 @@
$(call dist-for-goals, vendor-snapshot, $(SOONG_VENDOR_SNAPSHOT_ZIP))
+.PHONY: vendor-fake-snapshot
+vendor-fake-snapshot: $(SOONG_VENDOR_FAKE_SNAPSHOT_ZIP)
+
+$(call dist-for-goals, vendor-fake-snapshot, $(SOONG_VENDOR_FAKE_SNAPSHOT_ZIP):fake/$(notdir $(SOONG_VENDOR_FAKE_SNAPSHOT_ZIP)))
+
else # BOARD_VNDK_VERSION is NOT set to 'current'
.PHONY: vendor-snapshot
@@ -31,4 +36,11 @@
"CANNOT generate Vendor snapshot. BOARD_VNDK_VERSION must be set to 'current'.")
exit 1
+.PHONY: vendor-fake-snapshot
+vendor-fake-snapshot: PRIVATE_MAKEFILE := $(current_makefile)
+vendor-fake-snapshot:
+ $(call echo-error,$(PRIVATE_MAKEFILE),\
+ "CANNOT generate Vendor snapshot. BOARD_VNDK_VERSION must be set to 'current'.")
+ exit 1
+
endif # BOARD_VNDK_VERSION
diff --git a/core/tasks/vts-core-tests.mk b/core/tasks/vts-core-tests.mk
index a3247da..95c4d24 100644
--- a/core/tasks/vts-core-tests.mk
+++ b/core/tasks/vts-core-tests.mk
@@ -15,6 +15,8 @@
-include external/linux-kselftest/android/kselftest_test_list.mk
-include external/ltp/android/ltp_package_list.mk
+include $(BUILD_SYSTEM)/tasks/tools/vts_package_utils.mk
+
test_suite_name := vts
test_suite_tradefed := vts-tradefed
test_suite_readme := test/vts/tools/vts-core-tradefed/README
diff --git a/core/version_defaults.mk b/core/version_defaults.mk
index b507ca9..fe90165 100644
--- a/core/version_defaults.mk
+++ b/core/version_defaults.mk
@@ -240,7 +240,7 @@
# It must be of the form "YYYY-MM-DD" on production devices.
# It must match one of the Android Security Patch Level strings of the Public Security Bulletins.
# If there is no $PLATFORM_SECURITY_PATCH set, keep it empty.
- PLATFORM_SECURITY_PATCH := 2020-09-05
+ PLATFORM_SECURITY_PATCH := 2021-01-05
endif
.KATI_READONLY := PLATFORM_SECURITY_PATCH
diff --git a/envsetup.sh b/envsetup.sh
index a3b07a7..c03e2cb 100644
--- a/envsetup.sh
+++ b/envsetup.sh
@@ -33,7 +33,9 @@
- allmod: List all modules.
- gomod: Go to the directory containing a module.
- pathmod: Get the directory containing a module.
-- refreshmod: Refresh list of modules for allmod/gomod/pathmod.
+- outmod: Gets the location of a module's installed outputs.
+- installmod: Installs a module's built APK with adb.
+- refreshmod: Refresh list of modules for allmod/gomod/pathmod/outmod/installmod.
- syswrite: Remount partitions (e.g. system.img) as writable, rebooting if necessary.
Environment options:
@@ -307,6 +309,9 @@
unset ANDROID_HOST_OUT
export ANDROID_HOST_OUT=$(get_abs_build_var HOST_OUT)
+ unset ANDROID_SOONG_HOST_OUT
+ export ANDROID_SOONG_HOST_OUT=$(get_abs_build_var SOONG_HOST_OUT)
+
unset ANDROID_HOST_OUT_TESTCASES
export ANDROID_HOST_OUT_TESTCASES=$(get_abs_build_var HOST_OUT_TESTCASES)
@@ -318,6 +323,22 @@
#export HOST_EXTRACFLAGS="-I "$T/system/kernel_headers/host_include
}
+function bazel()
+{
+ local T="$(gettop)"
+ if [ ! "$T" ]; then
+ echo "Couldn't locate the top of the tree. Try setting TOP."
+ return
+ fi
+
+ if which bazel &>/dev/null; then
+ >&2 echo "NOTE: bazel() function sourced from envsetup.sh is being used instead of $(which bazel)"
+ >&2 echo
+ fi
+
+ "$T/tools/bazel" "$@"
+}
+
function printconfig()
{
local T=$(gettop)
@@ -392,7 +413,10 @@
fi
complete -F _lunch lunch
+ complete -F _complete_android_module_names pathmod
complete -F _complete_android_module_names gomod
+ complete -F _complete_android_module_names outmod
+ complete -F _complete_android_module_names installmod
complete -F _complete_android_module_names m
}
@@ -769,7 +793,7 @@
local TOPFILE=build/make/core/envsetup.mk
if [ -n "$TOP" -a -f "$TOP/$TOPFILE" ] ; then
# The following circumlocution ensures we remove symlinks from TOP.
- (cd $TOP; PWD= /bin/pwd)
+ (cd "$TOP"; PWD= /bin/pwd)
else
if [ -f $TOPFILE ] ; then
# The following circumlocution (repeated below as well) ensures
@@ -779,13 +803,13 @@
else
local HERE=$PWD
local T=
- while [ \( ! \( -f $TOPFILE \) \) -a \( $PWD != "/" \) ]; do
+ while [ \( ! \( -f $TOPFILE \) \) -a \( "$PWD" != "/" \) ]; do
\cd ..
T=`PWD= /bin/pwd -P`
done
- \cd $HERE
+ \cd "$HERE"
if [ -f "$T/$TOPFILE" ]; then
- echo $T
+ echo "$T"
fi
fi
fi
@@ -1355,13 +1379,12 @@
mkdir -p $ANDROID_PRODUCT_OUT || return 1
# Note, can't use absolute path because of the way make works.
- m out/target/product/$(get_build_var TARGET_DEVICE)/module-info.json \
+ m $(get_build_var PRODUCT_OUT)/module-info.json \
> $ANDROID_PRODUCT_OUT/module-info.json.build.log 2>&1
}
-# List all modules for the current device, as cached in module-info.json. If any build change is
-# made and it should be reflected in the output, you should run 'refreshmod' first.
-function allmod() {
+# Verifies that module-info.json exists, creating it if it doesn't.
+function verifymodinfo() {
if [ ! "$ANDROID_PRODUCT_OUT" ]; then
echo "No ANDROID_PRODUCT_OUT. Try running 'lunch' first." >&2
return 1
@@ -1371,6 +1394,12 @@
echo "Could not find module-info.json. It will only be built once, and it can be updated with 'refreshmod'" >&2
refreshmod || return 1
fi
+}
+
+# List all modules for the current device, as cached in module-info.json. If any build change is
+# made and it should be reflected in the output, you should run 'refreshmod' first.
+function allmod() {
+ verifymodinfo || return 1
python -c "import json; print('\n'.join(sorted(json.load(open('$ANDROID_PRODUCT_OUT/module-info.json')).keys())))"
}
@@ -1378,20 +1407,12 @@
# Get the path of a specific module in the android tree, as cached in module-info.json. If any build change
# is made, and it should be reflected in the output, you should run 'refreshmod' first.
function pathmod() {
- if [ ! "$ANDROID_PRODUCT_OUT" ]; then
- echo "No ANDROID_PRODUCT_OUT. Try running 'lunch' first." >&2
- return 1
- fi
-
if [[ $# -ne 1 ]]; then
echo "usage: pathmod <module>" >&2
return 1
fi
- if [ ! -f "$ANDROID_PRODUCT_OUT/module-info.json" ]; then
- echo "Could not find module-info.json. It will only be built once, and it can be updated with 'refreshmod'" >&2
- refreshmod || return 1
- fi
+ verifymodinfo || return 1
local relpath=$(python -c "import json, os
module = '$1'
@@ -1423,6 +1444,59 @@
cd $path
}
+# Gets the list of a module's installed outputs, as cached in module-info.json.
+# If any build change is made, and it should be reflected in the output, you should run 'refreshmod' first.
+function outmod() {
+ if [[ $# -ne 1 ]]; then
+ echo "usage: outmod <module>" >&2
+ return 1
+ fi
+
+ verifymodinfo || return 1
+
+ local relpath
+ relpath=$(python -c "import json, os
+module = '$1'
+module_info = json.load(open('$ANDROID_PRODUCT_OUT/module-info.json'))
+if module not in module_info:
+ exit(1)
+for output in module_info[module]['installed']:
+ print(os.path.join('$ANDROID_BUILD_TOP', output))" 2>/dev/null)
+
+ if [ $? -ne 0 ]; then
+ echo "Could not find module '$1' (try 'refreshmod' if there have been build changes?)" >&2
+ return 1
+ elif [ ! -z "$relpath" ]; then
+ echo "$relpath"
+ fi
+}
+
+# adb install a module's apk, as cached in module-info.json. If any build change
+# is made, and it should be reflected in the output, you should run 'refreshmod' first.
+# Usage: installmod [adb install arguments] <module>
+# For example: installmod -r Dialer -> adb install -r /path/to/Dialer.apk
+function installmod() {
+ if [[ $# -eq 0 ]]; then
+ echo "usage: installmod [adb install arguments] <module>" >&2
+ return 1
+ fi
+
+ local _path
+ _path=$(outmod ${@:$#:1})
+ if [ $? -ne 0 ]; then
+ return 1
+ fi
+
+ _path=$(echo "$_path" | grep -E \\.apk$ | head -n 1)
+ if [ -z "$_path" ]; then
+    echo "Module '${@:$#:1}' does not produce a file ending with .apk (try 'refreshmod' if there have been build changes?)" >&2
+ return 1
+ fi
+ local length=$(( $# - 1 ))
+ echo adb install ${@:1:$length} $_path
+ adb install ${@:1:$length} $_path
+}
+
function _complete_android_module_names() {
local word=${COMP_WORDS[COMP_CWORD]}
COMPREPLY=( $(allmod | grep -E "^$word") )
@@ -1600,25 +1674,26 @@
# This allows loading only approved vendorsetup.sh files
function source_vendorsetup() {
unset VENDOR_PYTHONPATH
+ local T="$(gettop)"
allowed=
- for f in $(find -L device vendor product -maxdepth 4 -name 'allowed-vendorsetup_sh-files' 2>/dev/null | sort); do
+ for f in $(cd "$T" && find -L device vendor product -maxdepth 4 -name 'allowed-vendorsetup_sh-files' 2>/dev/null | sort); do
if [ -n "$allowed" ]; then
echo "More than one 'allowed_vendorsetup_sh-files' file found, not including any vendorsetup.sh files:"
echo " $allowed"
echo " $f"
return
fi
- allowed="$f"
+ allowed="$T/$f"
done
allowed_files=
[ -n "$allowed" ] && allowed_files=$(cat "$allowed")
for dir in device vendor product; do
- for f in $(test -d $dir && \
+ for f in $(cd "$T" && test -d $dir && \
find -L $dir -maxdepth 4 -name 'vendorsetup.sh' 2>/dev/null | sort); do
if [[ -z "$allowed" || "$allowed_files" =~ $f ]]; then
- echo "including $f"; . "$f"
+ echo "including $f"; . "$T/$f"
else
echo "ignoring $f, not in $allowed"
fi
diff --git a/rbesetup.sh b/rbesetup.sh
index 0182bfd..ec39e6e 100644
--- a/rbesetup.sh
+++ b/rbesetup.sh
@@ -65,7 +65,11 @@
function _set_rbe_vars() {
unset USE_GOMA
export USE_RBE="true"
- export RBE_CXX_EXEC_STRATEGY="remote_local_fallback"
+ export RBE_CXX_EXEC_STRATEGY="racing"
+ export RBE_JAVAC_EXEC_STRATEGY="racing"
+ export RBE_R8_EXEC_STRATEGY="racing"
+ export RBE_D8_EXEC_STRATEGY="racing"
+ export RBE_use_unified_cas_ops="true"
export RBE_JAVAC=1
export RBE_R8=1
export RBE_D8=1
diff --git a/target/board/BoardConfigEmuCommon.mk b/target/board/BoardConfigEmuCommon.mk
index e9fb096..342abd7 100644
--- a/target/board/BoardConfigEmuCommon.mk
+++ b/target/board/BoardConfigEmuCommon.mk
@@ -33,8 +33,8 @@
# emulator needs super.img
BOARD_BUILD_SUPER_IMAGE_BY_DEFAULT := true
- # 3G + header
- BOARD_SUPER_PARTITION_SIZE := 3229614080
+ # 4G + 8M
+ BOARD_SUPER_PARTITION_SIZE := 4303355904
BOARD_SUPER_PARTITION_GROUPS := emulator_dynamic_partitions
ifeq ($(QEMU_USE_SYSTEM_EXT_PARTITIONS),true)
@@ -56,8 +56,8 @@
vendor
endif
- # 3G
- BOARD_EMULATOR_DYNAMIC_PARTITIONS_SIZE := 3221225472
+ # 4G
+ BOARD_EMULATOR_DYNAMIC_PARTITIONS_SIZE := 4294967296
# in build environment to speed up make -j
ifeq ($(QEMU_DISABLE_AVB),true)
@@ -73,11 +73,11 @@
endif
#vendor boot
-TARGET_NO_VENDOR_BOOT := false
BOARD_INCLUDE_DTB_IN_BOOTIMG := false
BOARD_BOOT_HEADER_VERSION := 3
BOARD_MKBOOTIMG_ARGS += --header_version $(BOARD_BOOT_HEADER_VERSION)
BOARD_VENDOR_BOOTIMAGE_PARTITION_SIZE := 0x06000000
+BOARD_RAMDISK_USE_LZ4 := true
# Enable chain partition for system.
BOARD_AVB_SYSTEM_KEY_PATH := external/avb/test/data/testkey_rsa2048.pem
diff --git a/target/board/BoardConfigGsiCommon.mk b/target/board/BoardConfigGsiCommon.mk
index e34dc23..a2150ad 100644
--- a/target/board/BoardConfigGsiCommon.mk
+++ b/target/board/BoardConfigGsiCommon.mk
@@ -49,6 +49,10 @@
BOARD_GSI_DYNAMIC_PARTITIONS_SIZE := 3221225472
endif
+# TODO(b/123695868, b/146149698):
+# This flag is set by mainline but isn't desired for GSI
+BOARD_BLUETOOTH_BDROID_BUILDCFG_INCLUDE_DIR :=
+
# Enable chain partition for boot, mainly for GKI images.
BOARD_AVB_BOOT_KEY_PATH := external/avb/test/data/testkey_rsa2048.pem
BOARD_AVB_BOOT_ALGORITHM := SHA256_RSA2048
diff --git a/target/board/BoardConfigModuleCommon.mk b/target/board/BoardConfigModuleCommon.mk
new file mode 100644
index 0000000..24c01a5
--- /dev/null
+++ b/target/board/BoardConfigModuleCommon.mk
@@ -0,0 +1,6 @@
+# BoardConfigModuleCommon.mk
+#
+# Common compile-time settings for module builds.
+
+# Required for all module devices.
+TARGET_USES_64_BIT_BINDER := true
diff --git a/target/board/BoardConfigPixelCommon.mk b/target/board/BoardConfigPixelCommon.mk
new file mode 100644
index 0000000..a970fec
--- /dev/null
+++ b/target/board/BoardConfigPixelCommon.mk
@@ -0,0 +1,18 @@
+# BoardConfigPixelCommon.mk
+#
+# Common compile-time definitions for Pixel devices.
+
+# Using sha256 for dm-verity partitions. b/156162446
+# system, system_other, system_ext and product.
+BOARD_AVB_SYSTEM_ADD_HASHTREE_FOOTER_ARGS += --hash_algorithm sha256
+BOARD_AVB_SYSTEM_OTHER_ADD_HASHTREE_FOOTER_ARGS += --hash_algorithm sha256
+BOARD_AVB_SYSTEM_EXT_ADD_HASHTREE_FOOTER_ARGS += --hash_algorithm sha256
+BOARD_AVB_PRODUCT_ADD_HASHTREE_FOOTER_ARGS += --hash_algorithm sha256
+
+# vendor and odm.
+BOARD_AVB_VENDOR_ADD_HASHTREE_FOOTER_ARGS += --hash_algorithm sha256
+BOARD_AVB_ODM_ADD_HASHTREE_FOOTER_ARGS += --hash_algorithm sha256
+
+# vendor_dlkm and odm_dlkm.
+BOARD_AVB_VENDOR_DLKM_ADD_HASHTREE_FOOTER_ARGS += --hash_algorithm sha256
+BOARD_AVB_ODM_DLKM_ADD_HASHTREE_FOOTER_ARGS += --hash_algorithm sha256
diff --git a/target/board/emulator_arm64/BoardConfig.mk b/target/board/emulator_arm64/BoardConfig.mk
index b34ccb4..9293625 100644
--- a/target/board/emulator_arm64/BoardConfig.mk
+++ b/target/board/emulator_arm64/BoardConfig.mk
@@ -19,11 +19,7 @@
TARGET_CPU_VARIANT := generic
TARGET_CPU_ABI := arm64-v8a
-TARGET_2ND_ARCH := arm
-TARGET_2ND_CPU_ABI := armeabi-v7a
-TARGET_2ND_CPU_ABI2 := armeabi
-
-ifneq ($(TARGET_BUILD_APPS)$(filter cts sdk vts10,$(MAKECMDGOALS)),)
+ifneq ($(TARGET_BUILD_APPS)$(filter cts sdk,$(MAKECMDGOALS)),)
# DO NOT USE
# DO NOT USE
#
@@ -56,7 +52,6 @@
include build/make/target/board/BoardConfigEmuCommon.mk
TARGET_NO_KERNEL := false
-TARGET_NO_VENDOR_BOOT := false
BOARD_USES_RECOVERY_AS_BOOT := true
BOARD_BOOTIMAGE_PARTITION_SIZE := 0x02000000
diff --git a/target/board/emulator_arm64/device.mk b/target/board/emulator_arm64/device.mk
index 73dc2f4..dc84192 100644
--- a/target/board/emulator_arm64/device.mk
+++ b/target/board/emulator_arm64/device.mk
@@ -19,7 +19,7 @@
# Cuttlefish has GKI kernel prebuilts, so use those for the GKI boot.img.
ifeq ($(TARGET_PREBUILT_KERNEL),)
- LOCAL_KERNEL := device/google/cuttlefish_kernel/5.4-arm64/kernel
+ LOCAL_KERNEL := kernel/prebuilts/5.4/arm64/kernel-5.4-lz4
else
LOCAL_KERNEL := $(TARGET_PREBUILT_KERNEL)
endif
diff --git a/target/board/generic_arm64/BoardConfig.mk b/target/board/generic_arm64/BoardConfig.mk
index c45a8ab..30c033d 100644
--- a/target/board/generic_arm64/BoardConfig.mk
+++ b/target/board/generic_arm64/BoardConfig.mk
@@ -23,7 +23,7 @@
TARGET_2ND_CPU_ABI := armeabi-v7a
TARGET_2ND_CPU_ABI2 := armeabi
-ifneq ($(TARGET_BUILD_APPS)$(filter cts sdk vts10,$(MAKECMDGOALS)),)
+ifneq ($(TARGET_BUILD_APPS)$(filter cts sdk,$(MAKECMDGOALS)),)
# DO NOT USE
# DO NOT USE
#
@@ -54,10 +54,6 @@
include build/make/target/board/BoardConfigGsiCommon.mk
-TARGET_NO_KERNEL := false
-TARGET_NO_VENDOR_BOOT := true
-BOARD_USES_RECOVERY_AS_BOOT := true
-
BOARD_KERNEL-4.19-GZ_BOOTIMAGE_PARTITION_SIZE := 47185920
BOARD_KERNEL-5.4_BOOTIMAGE_PARTITION_SIZE := 67108864
BOARD_KERNEL-5.4-ALLSYMS_BOOTIMAGE_PARTITION_SIZE := 67108864
@@ -65,6 +61,12 @@
BOARD_KERNEL-5.4-GZ-ALLSYMS_BOOTIMAGE_PARTITION_SIZE := 47185920
BOARD_KERNEL-5.4-LZ4_BOOTIMAGE_PARTITION_SIZE := 53477376
BOARD_KERNEL-5.4-LZ4-ALLSYMS_BOOTIMAGE_PARTITION_SIZE := 53477376
+BOARD_KERNEL-5.10_BOOTIMAGE_PARTITION_SIZE := 67108864
+BOARD_KERNEL-5.10-ALLSYMS_BOOTIMAGE_PARTITION_SIZE := 67108864
+BOARD_KERNEL-5.10-GZ_BOOTIMAGE_PARTITION_SIZE := 47185920
+BOARD_KERNEL-5.10-GZ-ALLSYMS_BOOTIMAGE_PARTITION_SIZE := 47185920
+BOARD_KERNEL-5.10-LZ4_BOOTIMAGE_PARTITION_SIZE := 53477376
+BOARD_KERNEL-5.10-LZ4-ALLSYMS_BOOTIMAGE_PARTITION_SIZE := 53477376
BOARD_KERNEL-MAINLINE_BOOTIMAGE_PARTITION_SIZE := 67108864
BOARD_KERNEL-MAINLINE-GZ_BOOTIMAGE_PARTITION_SIZE := 47185920
BOARD_KERNEL-MAINLINE-LZ4_BOOTIMAGE_PARTITION_SIZE := 53477376
@@ -75,12 +77,36 @@
BOARD_BOOT_HEADER_VERSION := 3
BOARD_MKBOOTIMG_ARGS += --header_version $(BOARD_BOOT_HEADER_VERSION)
-BOARD_KERNEL_BINARIES := kernel-4.19-gz kernel-5.4 kernel-5.4-gz kernel-5.4-lz4 \
- kernel-mainline kernel-mainline-gz kernel-mainline-lz4
+BOARD_KERNEL_BINARIES := \
+ kernel-4.19-gz \
+ kernel-5.4 kernel-5.4-gz kernel-5.4-lz4 \
+ kernel-5.10 kernel-5.10-gz kernel-5.10-lz4 \
+ kernel-mainline kernel-mainline-gz kernel-mainline-lz4 \
+
ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
-BOARD_KERNEL_BINARIES += kernel-5.4-allsyms kernel-5.4-gz-allsyms kernel-5.4-lz4-allsyms
+BOARD_KERNEL_BINARIES += \
+ kernel-5.4-allsyms kernel-5.4-gz-allsyms kernel-5.4-lz4-allsyms \
+ kernel-5.10-allsyms kernel-5.10-gz-allsyms kernel-5.10-lz4-allsyms \
+
endif
-BOARD_KERNEL_MODULE_INTERFACE_VERSIONS := 5.4-android12-0
+
+# Boot image
+BOARD_USES_RECOVERY_AS_BOOT :=
+TARGET_NO_KERNEL := false
+BOARD_USES_GENERIC_KERNEL_IMAGE := true
+BOARD_KERNEL_MODULE_INTERFACE_VERSIONS := \
+ 5.4-android12-0 \
+ 5.10-android12-0 \
+
+# Copy boot image in $OUT to target files. This is defined for targets where
+# the installed GKI APEXes are built from source.
+BOARD_COPY_BOOT_IMAGE_TO_TARGET_FILES := true
+
+# No vendor_boot
+BOARD_MOVE_RECOVERY_RESOURCES_TO_VENDOR_BOOT :=
+
+# No recovery
+BOARD_EXCLUDE_KERNEL_FROM_RECOVERY_IMAGE :=
# Some vendors still haven't cleaned up all device specific directories under
# root!
diff --git a/target/board/generic_arm64/README.txt b/target/board/generic_arm64/README.txt
index 21c71d7..8711a14 100644
--- a/target/board/generic_arm64/README.txt
+++ b/target/board/generic_arm64/README.txt
@@ -1,5 +1,7 @@
The "generic_arm64" product defines a non-hardware-specific arm64 target
-without a kernel or bootloader.
+without a bootloader.
+
+It is also the target to build the generic kernel image (GKI).
It is not a product "base class"; no other products inherit
from it or use it in any way.
diff --git a/target/board/generic_arm64/device.mk b/target/board/generic_arm64/device.mk
index 1585fbd..37c0f25 100644
--- a/target/board/generic_arm64/device.mk
+++ b/target/board/generic_arm64/device.mk
@@ -15,17 +15,29 @@
#
PRODUCT_COPY_FILES += \
- kernel/prebuilts/4.19/arm64/Image.gz:kernel-4.19-gz \
- device/google/cuttlefish_kernel/5.4-arm64/kernel-5.4:kernel-5.4 \
- device/google/cuttlefish_kernel/5.4-arm64/kernel-5.4-gz:kernel-5.4-gz \
- device/google/cuttlefish_kernel/5.4-arm64/kernel-5.4-lz4:kernel-5.4-lz4 \
- kernel/prebuilts/mainline/arm64/kernel-mainline:kernel-mainline \
- kernel/prebuilts/mainline/arm64/kernel-mainline-gz:kernel-mainline-gz \
- kernel/prebuilts/mainline/arm64/kernel-mainline-lz4:kernel-mainline-lz4
+ kernel/prebuilts/4.19/arm64/kernel-4.19-gz:kernel-4.19-gz \
+ kernel/prebuilts/5.4/arm64/kernel-5.4:kernel-5.4 \
+ kernel/prebuilts/5.4/arm64/kernel-5.4-gz:kernel-5.4-gz \
+ kernel/prebuilts/5.4/arm64/kernel-5.4-lz4:kernel-5.4-lz4 \
+ kernel/prebuilts/5.10/arm64/kernel-5.10:kernel-5.10 \
+ kernel/prebuilts/5.10/arm64/kernel-5.10-gz:kernel-5.10-gz \
+ kernel/prebuilts/5.10/arm64/kernel-5.10-lz4:kernel-5.10-lz4 \
+ kernel/prebuilts/mainline/arm64/kernel-mainline-allsyms:kernel-mainline \
+ kernel/prebuilts/mainline/arm64/kernel-mainline-gz-allsyms:kernel-mainline-gz \
+ kernel/prebuilts/mainline/arm64/kernel-mainline-lz4-allsyms:kernel-mainline-lz4
ifneq (,$(filter userdebug eng,$(TARGET_BUILD_VARIANT)))
PRODUCT_COPY_FILES += \
- device/google/cuttlefish_kernel/5.4-arm64/kernel-5.4:kernel-5.4-allsyms \
- device/google/cuttlefish_kernel/5.4-arm64/kernel-5.4-gz:kernel-5.4-gz-allsyms \
- device/google/cuttlefish_kernel/5.4-arm64/kernel-5.4-lz4:kernel-5.4-lz4-allsyms
+ kernel/prebuilts/5.4/arm64/kernel-5.4:kernel-5.4-allsyms \
+ kernel/prebuilts/5.4/arm64/kernel-5.4-gz:kernel-5.4-gz-allsyms \
+ kernel/prebuilts/5.4/arm64/kernel-5.4-lz4:kernel-5.4-lz4-allsyms \
+ kernel/prebuilts/5.10/arm64/kernel-5.10:kernel-5.10-allsyms \
+ kernel/prebuilts/5.10/arm64/kernel-5.10-gz:kernel-5.10-gz-allsyms \
+ kernel/prebuilts/5.10/arm64/kernel-5.10-lz4:kernel-5.10-lz4-allsyms \
+
endif
+
+PRODUCT_BUILD_VENDOR_BOOT_IMAGE := false
+PRODUCT_BUILD_RECOVERY_IMAGE := false
+
+$(call inherit-product, $(SRC_TARGET_DIR)/product/generic_ramdisk.mk)
diff --git a/target/board/generic_arm64/sepolicy/OWNERS b/target/board/generic_arm64/sepolicy/OWNERS
index ff29677..6dc2b86 100644
--- a/target/board/generic_arm64/sepolicy/OWNERS
+++ b/target/board/generic_arm64/sepolicy/OWNERS
@@ -1,8 +1 @@
-alanstokes@google.com
-bowgotsai@google.com
-jbires@google.com
-jeffv@google.com
-jgalenson@google.com
-sspatil@google.com
-tomcherry@google.com
-trong@google.com
+include platform/system/sepolicy:/OWNERS
diff --git a/target/board/generic_arm64_ab/BoardConfig.mk b/target/board/generic_arm64_ab/BoardConfig.mk
deleted file mode 100644
index 7c91607..0000000
--- a/target/board/generic_arm64_ab/BoardConfig.mk
+++ /dev/null
@@ -1,39 +0,0 @@
-#
-# Copyright (C) 2017 The Android Open-Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-include build/make/target/board/BoardConfigGsiCommon.mk
-
-TARGET_ARCH := arm64
-TARGET_ARCH_VARIANT := armv8-a
-TARGET_CPU_ABI := arm64-v8a
-TARGET_CPU_ABI2 :=
-TARGET_CPU_VARIANT := generic
-
-TARGET_2ND_ARCH := arm
-TARGET_2ND_ARCH_VARIANT := armv8-a
-TARGET_2ND_CPU_ABI := armeabi-v7a
-TARGET_2ND_CPU_ABI2 := armeabi
-TARGET_2ND_CPU_VARIANT := generic
-
-# TODO(jiyong) These might be SoC specific.
-BOARD_ROOT_EXTRA_FOLDERS += firmware firmware/radio persist
-BOARD_ROOT_EXTRA_SYMLINKS += /vendor/lib/dsp:/dsp
-BOARD_ROOT_EXTRA_SYMLINKS += /vendor/firmware_mnt/image:/firmware/image
-BOARD_ROOT_EXTRA_SYMLINKS += /vendor/firmware_mnt/verinfo:/firmware/verinfo
-
-# TODO(b/36764215): remove this setting when the generic system image
-# no longer has QCOM-specific directories under /.
-BOARD_SEPOLICY_DIRS += build/make/target/board/generic_arm64/sepolicy
diff --git a/target/board/generic_arm_ab/BoardConfig.mk b/target/board/generic_arm_ab/BoardConfig.mk
deleted file mode 100644
index 21b763c..0000000
--- a/target/board/generic_arm_ab/BoardConfig.mk
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# Copyright (C) 2017 The Android Open-Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-include build/make/target/board/BoardConfigGsiCommon.mk
-
-TARGET_ARCH := arm
-TARGET_ARCH_VARIANT := armv7-a-neon
-TARGET_CPU_ABI := armeabi-v7a
-TARGET_CPU_ABI2 := armeabi
-TARGET_CPU_VARIANT := generic
-
-# Legacy GSI keeps 32 bits binder for 32 bits CPU Arch
-TARGET_USES_64_BIT_BINDER := false
-
-# TODO(jiyong) These might be SoC specific.
-BOARD_ROOT_EXTRA_FOLDERS += firmware firmware/radio persist
-BOARD_ROOT_EXTRA_SYMLINKS += /vendor/lib/dsp:/dsp
-BOARD_ROOT_EXTRA_SYMLINKS += /vendor/firmware_mnt/image:/firmware/image
-BOARD_ROOT_EXTRA_SYMLINKS += /vendor/firmware_mnt/verinfo:/firmware/verinfo
-
-# TODO(b/36764215): remove this setting when the generic system image
-# no longer has QCOM-specific directories under /.
-BOARD_SEPOLICY_DIRS += build/make/target/board/generic_arm64/sepolicy
diff --git a/target/board/mainline_arm64/sepolicy/OWNERS b/target/board/mainline_arm64/sepolicy/OWNERS
index ff29677..6dc2b86 100644
--- a/target/board/mainline_arm64/sepolicy/OWNERS
+++ b/target/board/mainline_arm64/sepolicy/OWNERS
@@ -1,8 +1 @@
-alanstokes@google.com
-bowgotsai@google.com
-jbires@google.com
-jeffv@google.com
-jgalenson@google.com
-sspatil@google.com
-tomcherry@google.com
-trong@google.com
+include platform/system/sepolicy:/OWNERS
diff --git a/core/tasks/apidiff.mk b/target/board/mainline_sdk/BoardConfig.mk
similarity index 76%
copy from core/tasks/apidiff.mk
copy to target/board/mainline_sdk/BoardConfig.mk
index 76e4749..84f8b2d 100644
--- a/core/tasks/apidiff.mk
+++ b/target/board/mainline_sdk/BoardConfig.mk
@@ -1,4 +1,4 @@
-# Copyright (C) 2017 The Android Open Source Project
+# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,11 +11,10 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-#
-# Rules for building API diffs.
#
-.PHONY: api-diff
+TARGET_ARCH_SUITE := mainline_sdk
-api-diff: api-stubs-docs-jdiff
+HOST_CROSS_OS := linux_bionic
+HOST_CROSS_ARCH := x86_64
+HOST_CROSS_2ND_ARCH :=
diff --git a/target/board/mainline_sdk/README.md b/target/board/mainline_sdk/README.md
new file mode 100644
index 0000000..714f797
--- /dev/null
+++ b/target/board/mainline_sdk/README.md
@@ -0,0 +1,2 @@
+This device is suitable for a Soong-only build that builds for all the architectures
+needed for mainline module SDK prebuilts.
diff --git a/target/product/virtual_ab_ota_retrofit_compression.mk b/target/board/module_arm/BoardConfig.mk
similarity index 67%
copy from target/product/virtual_ab_ota_retrofit_compression.mk
copy to target/board/module_arm/BoardConfig.mk
index 8059f75..3f35c06 100644
--- a/target/product/virtual_ab_ota_retrofit_compression.mk
+++ b/target/board/module_arm/BoardConfig.mk
@@ -1,5 +1,4 @@
-#
-# Copyright (C) 2020 The Android Open-Source Project
+# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,9 +13,10 @@
# limitations under the License.
#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota_retrofit.mk)
+include build/make/target/board/BoardConfigModuleCommon.mk
-PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.compression.enabled=true
-
-PRODUCT_PACKAGES += snapuserd_ramdisk
-
+TARGET_ARCH := arm
+TARGET_ARCH_VARIANT := armv7-a-neon
+TARGET_CPU_VARIANT := generic
+TARGET_CPU_ABI := armeabi-v7a
+TARGET_CPU_ABI2 := armeabi
diff --git a/target/board/module_arm/README.md b/target/board/module_arm/README.md
new file mode 100644
index 0000000..b893573
--- /dev/null
+++ b/target/board/module_arm/README.md
@@ -0,0 +1,2 @@
+This device is suitable for an unbundled module targeted specifically to an arm
+device.
diff --git a/core/tasks/apidiff.mk b/target/board/module_arm/device.mk
similarity index 73%
copy from core/tasks/apidiff.mk
copy to target/board/module_arm/device.mk
index 76e4749..cceb987 100644
--- a/core/tasks/apidiff.mk
+++ b/target/board/module_arm/device.mk
@@ -1,4 +1,5 @@
-# Copyright (C) 2017 The Android Open Source Project
+#
+# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,11 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-#
-# Rules for building API diffs.
#
-.PHONY: api-diff
-
-api-diff: api-stubs-docs-jdiff
+$(call inherit-product, build/make/target/product/default_art_config.mk)
+$(call inherit-product, build/make/target/product/languages_default.mk)
diff --git a/target/board/module_arm64/BoardConfig.mk b/target/board/module_arm64/BoardConfig.mk
new file mode 100644
index 0000000..3700056
--- /dev/null
+++ b/target/board/module_arm64/BoardConfig.mk
@@ -0,0 +1,27 @@
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+include build/make/target/board/BoardConfigModuleCommon.mk
+
+TARGET_ARCH := arm64
+TARGET_ARCH_VARIANT := armv8-a
+TARGET_CPU_VARIANT := generic
+TARGET_CPU_ABI := arm64-v8a
+
+TARGET_2ND_ARCH := arm
+TARGET_2ND_ARCH_VARIANT := armv8-a
+TARGET_2ND_CPU_ABI := armeabi-v7a
+TARGET_2ND_CPU_ABI2 := armeabi
+TARGET_2ND_CPU_VARIANT := generic
diff --git a/target/board/module_arm64/README.md b/target/board/module_arm64/README.md
new file mode 100644
index 0000000..cb36fbf
--- /dev/null
+++ b/target/board/module_arm64/README.md
@@ -0,0 +1,3 @@
+This device is suitable for an unbundled module targeted specifically to an
+arm64 device. 32-bit binaries built with this product will not be suitable for a
+32-bit arm device.
diff --git a/target/product/virtual_ab_ota_retrofit_compression.mk b/target/board/module_arm64/device.mk
similarity index 67%
copy from target/product/virtual_ab_ota_retrofit_compression.mk
copy to target/board/module_arm64/device.mk
index 8059f75..0d4c543 100644
--- a/target/product/virtual_ab_ota_retrofit_compression.mk
+++ b/target/board/module_arm64/device.mk
@@ -1,5 +1,5 @@
#
-# Copyright (C) 2020 The Android Open-Source Project
+# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,9 +14,6 @@
# limitations under the License.
#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota_retrofit.mk)
-
-PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.compression.enabled=true
-
-PRODUCT_PACKAGES += snapuserd_ramdisk
-
+$(call inherit-product, build/make/target/product/default_art_config.mk)
+$(call inherit-product, build/make/target/product/core_64_bit.mk)
+$(call inherit-product, build/make/target/product/languages_default.mk)
diff --git a/target/board/generic_x86_ab/BoardConfig.mk b/target/board/module_x86/BoardConfig.mk
similarity index 75%
rename from target/board/generic_x86_ab/BoardConfig.mk
rename to target/board/module_x86/BoardConfig.mk
index 53acffd..a93ac97 100644
--- a/target/board/generic_x86_ab/BoardConfig.mk
+++ b/target/board/module_x86/BoardConfig.mk
@@ -1,5 +1,4 @@
-#
-# Copyright (C) 2017 The Android Open-Source Project
+# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,11 +13,8 @@
# limitations under the License.
#
-include build/make/target/board/BoardConfigGsiCommon.mk
+include build/make/target/board/BoardConfigModuleCommon.mk
TARGET_CPU_ABI := x86
TARGET_ARCH := x86
TARGET_ARCH_VARIANT := x86
-
-# Legacy GSI keeps 32 bits binder for 32 bits CPU Arch
-TARGET_USES_64_BIT_BINDER := false
diff --git a/target/board/module_x86/README.md b/target/board/module_x86/README.md
new file mode 100644
index 0000000..10866b7
--- /dev/null
+++ b/target/board/module_x86/README.md
@@ -0,0 +1,2 @@
+This device is suitable for an unbundled module targeted specifically to an
+x86 device.
diff --git a/core/tasks/apidiff.mk b/target/board/module_x86/device.mk
similarity index 73%
copy from core/tasks/apidiff.mk
copy to target/board/module_x86/device.mk
index 76e4749..cceb987 100644
--- a/core/tasks/apidiff.mk
+++ b/target/board/module_x86/device.mk
@@ -1,4 +1,5 @@
-# Copyright (C) 2017 The Android Open Source Project
+#
+# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,11 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-#
-# Rules for building API diffs.
#
-.PHONY: api-diff
-
-api-diff: api-stubs-docs-jdiff
+$(call inherit-product, build/make/target/product/default_art_config.mk)
+$(call inherit-product, build/make/target/product/languages_default.mk)
diff --git a/target/board/generic_x86_64_ab/BoardConfig.mk b/target/board/module_x86_64/BoardConfig.mk
similarity index 86%
rename from target/board/generic_x86_64_ab/BoardConfig.mk
rename to target/board/module_x86_64/BoardConfig.mk
index 1dd5e48..1ed3be0 100644
--- a/target/board/generic_x86_64_ab/BoardConfig.mk
+++ b/target/board/module_x86_64/BoardConfig.mk
@@ -1,5 +1,4 @@
-#
-# Copyright (C) 2017 The Android Open-Source Project
+# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +13,7 @@
# limitations under the License.
#
-include build/make/target/board/BoardConfigGsiCommon.mk
+include build/make/target/board/BoardConfigModuleCommon.mk
TARGET_CPU_ABI := x86_64
TARGET_ARCH := x86_64
diff --git a/target/board/module_x86_64/README.md b/target/board/module_x86_64/README.md
new file mode 100644
index 0000000..3377baa
--- /dev/null
+++ b/target/board/module_x86_64/README.md
@@ -0,0 +1,3 @@
+This device is suitable for an unbundled module targeted specifically to an
+x86_64 device. 32-bit binaries built with this product will not be suitable for
+a 32-bit x86 device.
diff --git a/target/product/virtual_ab_ota_retrofit_compression.mk b/target/board/module_x86_64/device.mk
similarity index 67%
copy from target/product/virtual_ab_ota_retrofit_compression.mk
copy to target/board/module_x86_64/device.mk
index 8059f75..0d4c543 100644
--- a/target/product/virtual_ab_ota_retrofit_compression.mk
+++ b/target/board/module_x86_64/device.mk
@@ -1,5 +1,5 @@
#
-# Copyright (C) 2020 The Android Open-Source Project
+# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,9 +14,6 @@
# limitations under the License.
#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota_retrofit.mk)
-
-PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.compression.enabled=true
-
-PRODUCT_PACKAGES += snapuserd_ramdisk
-
+$(call inherit-product, build/make/target/product/default_art_config.mk)
+$(call inherit-product, build/make/target/product/core_64_bit.mk)
+$(call inherit-product, build/make/target/product/languages_default.mk)
diff --git a/target/product/AndroidProducts.mk b/target/product/AndroidProducts.mk
index 61a7583..c27badc 100644
--- a/target/product/AndroidProducts.mk
+++ b/target/product/AndroidProducts.mk
@@ -44,13 +44,9 @@
else
PRODUCT_MAKEFILES := \
$(LOCAL_DIR)/aosp_64bitonly_x86_64.mk \
- $(LOCAL_DIR)/aosp_arm64_ab.mk \
$(LOCAL_DIR)/aosp_arm64.mk \
- $(LOCAL_DIR)/aosp_arm_ab.mk \
$(LOCAL_DIR)/aosp_arm.mk \
- $(LOCAL_DIR)/aosp_x86_64_ab.mk \
$(LOCAL_DIR)/aosp_x86_64.mk \
- $(LOCAL_DIR)/aosp_x86_ab.mk \
$(LOCAL_DIR)/aosp_x86_arm.mk \
$(LOCAL_DIR)/aosp_x86.mk \
$(LOCAL_DIR)/full.mk \
@@ -61,7 +57,6 @@
$(LOCAL_DIR)/generic_system_x86_64.mk \
$(LOCAL_DIR)/generic_system_x86_arm.mk \
$(LOCAL_DIR)/generic_x86.mk \
- $(LOCAL_DIR)/gsi_arm64.mk \
$(LOCAL_DIR)/mainline_system_arm64.mk \
$(LOCAL_DIR)/mainline_system_x86.mk \
$(LOCAL_DIR)/mainline_system_x86_64.mk \
@@ -77,6 +72,8 @@
endif
+PRODUCT_MAKEFILES += $(LOCAL_DIR)/mainline_sdk.mk
+
COMMON_LUNCH_CHOICES := \
aosp_arm64-eng \
aosp_arm-eng \
diff --git a/target/product/aosp_arm64_ab.mk b/target/product/aosp_arm64_ab.mk
deleted file mode 100644
index 5510e1b..0000000
--- a/target/product/aosp_arm64_ab.mk
+++ /dev/null
@@ -1,58 +0,0 @@
-#
-# Copyright (C) 2017 The Android Open-Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# PRODUCT_VENDOR_PROPERTIES cannot be used here because sysprops will be at
-# /vendor/[build|default].prop when build split is on. In order to have sysprops
-# on the generic system image, place them in build/make/target/board/
-# gsi_system.prop.
-
-# aosp_arm64_ab-userdebug is a Legacy GSI for the devices with:
-# - ARM 64 bits user space
-# - 64 bits binder interface
-# - system-as-root
-
-#
-# All components inherited here go to system image
-# (The system image of Legacy GSI is not CSI)
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/generic_system.mk)
-
-# Enable mainline checking for excat this product name
-ifeq (aosp_arm64_ab,$(TARGET_PRODUCT))
-PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := relaxed
-endif
-
-#
-# All components inherited here go to system_ext image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/handheld_system_ext.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_system_ext.mk)
-
-#
-# All components inherited here go to product image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_product.mk)
-
-#
-# Special settings for GSI releasing
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/legacy_gsi_release.mk)
-
-PRODUCT_NAME := aosp_arm64_ab
-PRODUCT_DEVICE := generic_arm64_ab
-PRODUCT_BRAND := Android
-PRODUCT_MODEL := AOSP on ARM64
diff --git a/target/product/aosp_arm_ab.mk b/target/product/aosp_arm_ab.mk
deleted file mode 100644
index 7e060440..0000000
--- a/target/product/aosp_arm_ab.mk
+++ /dev/null
@@ -1,57 +0,0 @@
-#
-# Copyright (C) 2017 The Android Open-Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# PRODUCT_VENDOR_PROPERTIES cannot be used here because sysprops will be at
-# /vendor/[build|default].prop when build split is on. In order to have sysprops
-# on the generic system image, place them in build/make/target/board/
-# gsi_system.prop.
-
-# aosp_arm_ab-userdebug is a Legacy GSI for the devices with:
-# - ARM 32 bits user space
-# - 32 bits binder interface
-# - system-as-root
-
-#
-# All components inherited here go to system image
-# (The system image of Legacy GSI is not CSI)
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/generic_system.mk)
-
-# Enable mainline checking for excat this product name
-ifeq (aosp_arm_ab,$(TARGET_PRODUCT))
-PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := relaxed
-endif
-
-#
-# All components inherited here go to system_ext image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/handheld_system_ext.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_system_ext.mk)
-
-#
-# All components inherited here go to product image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_product.mk)
-
-#
-# Special settings for GSI releasing
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/legacy_gsi_release.mk)
-
-PRODUCT_NAME := aosp_arm_ab
-PRODUCT_DEVICE := generic_arm_ab
-PRODUCT_BRAND := Android
-PRODUCT_MODEL := AOSP on ARM32
diff --git a/target/product/aosp_x86_64_ab.mk b/target/product/aosp_x86_64_ab.mk
deleted file mode 100644
index c31545d..0000000
--- a/target/product/aosp_x86_64_ab.mk
+++ /dev/null
@@ -1,58 +0,0 @@
-#
-# Copyright (C) 2017 The Android Open-Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# PRODUCT_VENDOR_PROPERTIES cannot be used here because sysprops will be at
-# /vendor/[build|default].prop when build split is on. In order to have sysprops
-# on the generic system image, place them in build/make/target/board/
-# gsi_system.prop.
-
-# aosp_x86_64_ab-userdebug is a Legacy GSI for the devices with:
-# - x86 64 bits user space
-# - 64 bits binder interface
-# - system-as-root
-
-#
-# All components inherited here go to system image
-# (The system image of Legacy GSI is not CSI)
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/generic_system.mk)
-
-# Enable mainline checking for excat this product name
-ifeq (aosp_x86_64_ab,$(TARGET_PRODUCT))
-PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := relaxed
-endif
-
-#
-# All components inherited here go to system_ext image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/handheld_system_ext.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_system_ext.mk)
-
-#
-# All components inherited here go to product image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_product.mk)
-
-#
-# Special settings for GSI releasing
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/legacy_gsi_release.mk)
-
-PRODUCT_NAME := aosp_x86_64_ab
-PRODUCT_DEVICE := generic_x86_64_ab
-PRODUCT_BRAND := Android
-PRODUCT_MODEL := AOSP on x86_64
diff --git a/target/product/aosp_x86_ab.mk b/target/product/aosp_x86_ab.mk
deleted file mode 100644
index 2f02dd1..0000000
--- a/target/product/aosp_x86_ab.mk
+++ /dev/null
@@ -1,58 +0,0 @@
-#
-# Copyright (C) 2017 The Android Open-Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# PRODUCT_VENDOR_PROPERTIES cannot be used here because sysprops will be at
-# /vendor/[build|default].prop when build split is on. In order to have sysprops
-# on the generic system image, place them in build/make/target/board/
-# gsi_system.prop.
-
-# aosp_x86_ab-userdebug is a Legacy GSI for the devices with:
-# - x86 32 bits user space
-# - 32 bits binder interface
-# - system-as-root
-
-#
-# All components inherited here go to system image
-# (The system image of Legacy GSI is not CSI)
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/generic_system.mk)
-
-# Enable mainline checking for excat this product name
-ifeq (aosp_x86_ab,$(TARGET_PRODUCT))
-PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := relaxed
-endif
-
-#
-# All components inherited here go to system_ext image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/handheld_system_ext.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_system_ext.mk)
-
-#
-# All components inherited here go to product image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_product.mk)
-
-#
-# Special settings for GSI releasing
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/legacy_gsi_release.mk)
-
-PRODUCT_NAME := aosp_x86_ab
-PRODUCT_DEVICE := generic_x86_ab
-PRODUCT_BRAND := Android
-PRODUCT_MODEL := AOSP on x86
diff --git a/target/product/base_product.mk b/target/product/base_product.mk
index 2ed550c..5446064 100644
--- a/target/product/base_product.mk
+++ b/target/product/base_product.mk
@@ -16,8 +16,11 @@
# Base modules and settings for the product partition.
PRODUCT_PACKAGES += \
+ fs_config_dirs_product \
+ fs_config_files_product \
group_product \
ModuleMetadata \
passwd_product \
product_compatibility_matrix.xml \
product_manifest.xml \
+ selinux_policy_product \
diff --git a/target/product/base_system.mk b/target/product/base_system.mk
index b97d98d..c7ae1f0 100644
--- a/target/product/base_system.mk
+++ b/target/product/base_system.mk
@@ -38,7 +38,6 @@
bcc \
blank_screen \
blkid \
- service-blobstore \
bmgr \
bootanimation \
bootstat \
@@ -117,8 +116,8 @@
iptables \
ip-up-vpn \
javax.obex \
- service-jobscheduler \
keystore \
+ keystore2 \
credstore \
ld.mc \
libaaudio \
@@ -250,6 +249,7 @@
shell_and_utilities_system \
sm \
snapshotctl \
+ snapuserd \
SoundPicker \
storaged \
surfaceflinger \
@@ -281,6 +281,23 @@
system_manifest.xml \
system_compatibility_matrix.xml \
+# HWASAN runtime for SANITIZE_TARGET=hwaddress builds
+ifneq (,$(filter hwaddress,$(SANITIZE_TARGET)))
+ PRODUCT_PACKAGES += \
+ libclang_rt.hwasan-aarch64-android.bootstrap
+endif
+
+# Jacoco agent JARS to be built and installed, if any.
+ifeq ($(EMMA_INSTRUMENT),true)
+ ifneq ($(EMMA_INSTRUMENT_STATIC),true)
+ # For instrumented build, if Jacoco is not being included statically
+ # in instrumented packages then include Jacoco classes into the
+ # bootclasspath.
+ PRODUCT_PACKAGES += jacocoagent
+ PRODUCT_BOOT_JARS += jacocoagent
+ endif # EMMA_INSTRUMENT_STATIC
+endif # EMMA_INSTRUMENT
+
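A minimal sketch of how a coverage build might opt into the Jacoco path described above; whether EMMA_INSTRUMENT is set in the environment or in a product makefile is an assumption about the particular build setup:

```
# Hypothetical product makefile fragment (assumes these flags are honored as
# described in the Jacoco comment above).
EMMA_INSTRUMENT := true
# Leave EMMA_INSTRUMENT_STATIC unset so that base_system.mk adds jacocoagent
# to PRODUCT_PACKAGES and PRODUCT_BOOT_JARS instead of statically linking it.
EMMA_INSTRUMENT_STATIC :=
```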
# Host tools to install
PRODUCT_HOST_PACKAGES += \
BugReport \
@@ -317,47 +334,14 @@
tz_version_host \
tz_version_host_tzdata_apex \
-ifeq ($(ART_APEX_JARS),)
-$(error ART_APEX_JARS is empty; cannot initialize PRODUCT_BOOT_JARS variable)
-endif
-
-# The order matters for runtime class lookup performance.
-PRODUCT_BOOT_JARS := \
- $(ART_APEX_JARS) \
- framework-minus-apex \
- ext \
- com.android.i18n:core-icu4j \
- telephony-common \
- voip-common \
- ims-common
-
-PRODUCT_UPDATABLE_BOOT_JARS := \
- com.android.conscrypt:conscrypt \
- com.android.media:updatable-media \
- com.android.mediaprovider:framework-mediaprovider \
- com.android.os.statsd:framework-statsd \
- com.android.permission:framework-permission \
- com.android.sdkext:framework-sdkextensions \
- com.android.wifi:framework-wifi \
- com.android.tethering:framework-tethering
PRODUCT_COPY_FILES += \
system/core/rootdir/init.usb.rc:system/etc/init/hw/init.usb.rc \
system/core/rootdir/init.usb.configfs.rc:system/etc/init/hw/init.usb.configfs.rc \
system/core/rootdir/etc/hosts:system/etc/hosts
-# Add the compatibility library that is needed when android.test.base
-# is removed from the bootclasspath.
-# Default to excluding android.test.base from the bootclasspath.
-ifneq ($(REMOVE_ATB_FROM_BCP),false)
-PRODUCT_PACKAGES += framework-atb-backward-compatibility
-PRODUCT_BOOT_JARS += framework-atb-backward-compatibility
-else
-PRODUCT_BOOT_JARS += android.test.base
-endif
-
PRODUCT_COPY_FILES += system/core/rootdir/init.zygote32.rc:system/etc/init/hw/init.zygote32.rc
-PRODUCT_SYSTEM_PROPERTIES += ro.zygote?=zygote32
+PRODUCT_VENDOR_PROPERTIES += ro.zygote?=zygote32
PRODUCT_SYSTEM_PROPERTIES += debug.atrace.tags.enableflags=0
PRODUCT_SYSTEM_PROPERTIES += persist.traced.enable=1
@@ -368,6 +352,7 @@
PRODUCT_PACKAGES_DEBUG := \
adb_keys \
arping \
+ dmuserd \
gdbserver \
idlcli \
init-debug.rc \
@@ -377,6 +362,8 @@
logpersist.start \
logtagd.rc \
procrank \
+ profcollectd \
+ profcollectctl \
remount \
showmap \
sqlite3 \
diff --git a/target/product/base_system_ext.mk b/target/product/base_system_ext.mk
index b67549a..852d7ca 100644
--- a/target/product/base_system_ext.mk
+++ b/target/product/base_system_ext.mk
@@ -16,6 +16,9 @@
# Base modules and settings for the system_ext partition.
PRODUCT_PACKAGES += \
+ fs_config_dirs_system_ext \
+ fs_config_files_system_ext \
group_system_ext \
- system_ext_manifest.xml \
passwd_system_ext \
+ selinux_policy_system_ext \
+ system_ext_manifest.xml \
diff --git a/target/product/cfi-common.mk b/target/product/cfi-common.mk
index 42edd92..925d70e 100644
--- a/target/product/cfi-common.mk
+++ b/target/product/cfi-common.mk
@@ -17,8 +17,8 @@
# This is a set of common components to enable CFI for (across
# compatible product configs)
PRODUCT_CFI_INCLUDE_PATHS := \
+ device/generic/goldfish/wifi/wpa_supplicant_8_lib \
device/google/cuttlefish/guest/libs/wpa_supplicant_8_lib \
- device/google/wahoo/wifi_offload \
external/tinyxml2 \
external/wpa_supplicant_8 \
frameworks/av/camera \
@@ -28,7 +28,8 @@
hardware/broadcom/wlan/bcmdhd/wpa_supplicant_8_lib \
hardware/interfaces/nfc \
hardware/qcom/wlan/qcwcn/wpa_supplicant_8_lib \
- harware/interfaces/keymaster \
+ hardware/interfaces/keymaster \
+ hardware/interfaces/security \
system/bt \
system/chre \
system/core/libnetutils \
diff --git a/target/product/core_64_bit.mk b/target/product/core_64_bit.mk
index 7fa6ed2..322fa80 100644
--- a/target/product/core_64_bit.mk
+++ b/target/product/core_64_bit.mk
@@ -27,7 +27,7 @@
# Set the zygote property to select the 64-bit primary, 32-bit secondary script
# This line must be parsed before the one in core_minimal.mk
-PRODUCT_SYSTEM_PROPERTIES += ro.zygote=zygote64_32
+PRODUCT_VENDOR_PROPERTIES += ro.zygote=zygote64_32
TARGET_SUPPORTS_32_BIT_APPS := true
TARGET_SUPPORTS_64_BIT_APPS := true
diff --git a/target/product/core_64_bit_only.mk b/target/product/core_64_bit_only.mk
index 63beea9..53c9c74 100644
--- a/target/product/core_64_bit_only.mk
+++ b/target/product/core_64_bit_only.mk
@@ -24,7 +24,7 @@
# Set the zygote property to select the 64-bit script.
# This line must be parsed before the one in core_minimal.mk
-PRODUCT_SYSTEM_PROPERTIES += ro.zygote=zygote64
+PRODUCT_VENDOR_PROPERTIES += ro.zygote=zygote64
TARGET_SUPPORTS_32_BIT_APPS := false
TARGET_SUPPORTS_64_BIT_APPS := true
diff --git a/target/product/default_art_config.mk b/target/product/default_art_config.mk
new file mode 100644
index 0000000..131ba31
--- /dev/null
+++ b/target/product/default_art_config.mk
@@ -0,0 +1,58 @@
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+ifeq ($(ART_APEX_JARS),)
+ $(error ART_APEX_JARS is empty; cannot initialize PRODUCT_BOOT_JARS variable)
+endif
+
+# The order matters for runtime class lookup performance.
+PRODUCT_BOOT_JARS := \
+ $(ART_APEX_JARS) \
+ framework-minus-apex \
+ ext \
+ com.android.i18n:core-icu4j \
+ telephony-common \
+ voip-common \
+ ims-common
+
+PRODUCT_UPDATABLE_BOOT_JARS := \
+ com.android.conscrypt:conscrypt \
+ com.android.media:updatable-media \
+ com.android.mediaprovider:framework-mediaprovider \
+ com.android.os.statsd:framework-statsd \
+ com.android.permission:framework-permission \
+ com.android.sdkext:framework-sdkextensions \
+ com.android.wifi:framework-wifi \
+ com.android.tethering:framework-tethering
+
+# Add the compatibility library that is needed when android.test.base
+# is removed from the bootclasspath.
+# Default to excluding android.test.base from the bootclasspath.
+ifneq ($(REMOVE_ATB_FROM_BCP),false)
+ PRODUCT_PACKAGES += framework-atb-backward-compatibility
+ PRODUCT_BOOT_JARS += framework-atb-backward-compatibility
+else
+ PRODUCT_BOOT_JARS += android.test.base
+endif
+
+# Minimal configuration for running dex2oat (default argument values).
+# PRODUCT_USES_DEFAULT_ART_CONFIG must be true to enable boot image compilation.
+PRODUCT_USES_DEFAULT_ART_CONFIG := true
+PRODUCT_SYSTEM_PROPERTIES += \
+ dalvik.vm.image-dex2oat-Xms=64m \
+ dalvik.vm.image-dex2oat-Xmx=64m \
+ dalvik.vm.dex2oat-Xms=64m \
+ dalvik.vm.dex2oat-Xmx=512m \
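As a hedged illustration of the variables defined above, a downstream product makefile might extend the boot classpath roughly as follows; the module and APEX names are placeholders, not real modules:

```
# Hypothetical device makefile fragment; default_art_config.mk is normally
# pulled in via runtime_libart.mk.
$(call inherit-product, $(SRC_TARGET_DIR)/product/default_art_config.mk)

# Order still matters for runtime class lookup performance.
PRODUCT_BOOT_JARS += example-framework-jar

# Updatable (APEX) boot jars use the <apex name>:<jar name> format.
PRODUCT_UPDATABLE_BOOT_JARS += com.example.widget:framework-widget
```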
diff --git a/target/product/emulator_vendor.mk b/target/product/emulator_vendor.mk
index 89c3f3a..4d46358 100644
--- a/target/product/emulator_vendor.mk
+++ b/target/product/emulator_vendor.mk
@@ -26,7 +26,7 @@
PRODUCT_PACKAGES += \
vndk-sp
-PRODUCT_PACKAGE_OVERLAYS := device/generic/goldfish/overlay
+DEVICE_PACKAGE_OVERLAYS := device/generic/goldfish/overlay
PRODUCT_CHARACTERISTICS := emulator
diff --git a/target/product/generic_ramdisk.mk b/target/product/generic_ramdisk.mk
new file mode 100644
index 0000000..ae81329
--- /dev/null
+++ b/target/product/generic_ramdisk.mk
@@ -0,0 +1,39 @@
+#
+# Copyright (C) 2020 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This makefile installs the contents of the generic ramdisk.
+# Inherit from this makefile to declare that the product uses the generic ramdisk.
+# This makefile also checks that other makefiles do not install anything else
+# to the ramdisk.
+
+# Ramdisk
+PRODUCT_PACKAGES += \
+ init_first_stage \
+
+# Debug ramdisk
+PRODUCT_PACKAGES += \
+ userdebug_plat_sepolicy.cil \
+
+_my_paths := \
+ $(TARGET_COPY_OUT_RAMDISK)/ \
+ $(TARGET_COPY_OUT_DEBUG_RAMDISK)/ \
+ system/usr/share/zoneinfo/tz_version \
+ system/usr/share/zoneinfo/tzdata \
+
+# We use the "relaxed" version here because tzdata / tz_version is only produced
+# by this makefile on a subset of devices.
+# TODO: remove this
+$(call require-artifacts-in-path-relaxed, $(_my_paths), )
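A short, hypothetical sketch of a device makefile that opts into the generic ramdisk declared above (the device path and fstab name are placeholders):

```
# Hypothetical device makefile: declare that this product uses the generic
# ramdisk and therefore installs nothing of its own there.
$(call inherit-product, $(SRC_TARGET_DIR)/product/generic_ramdisk.mk)

# Something like the following would now be rejected by the artifact path
# requirement enforced above:
#   PRODUCT_COPY_FILES += device/acme/rocket/fstab:$(TARGET_COPY_OUT_RAMDISK)/fstab
```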
diff --git a/target/product/generic_system.mk b/target/product/generic_system.mk
index 731a450..9580ade 100644
--- a/target/product/generic_system.mk
+++ b/target/product/generic_system.mk
@@ -121,9 +121,6 @@
PRODUCT_ENFORCE_RRO_TARGETS := *
-# TODO(b/150820813) Settings depends on static overlay, remove this after eliminating the dependency.
-PRODUCT_ENFORCE_RRO_EXEMPTED_TARGETS := Settings
-
PRODUCT_NAME := generic_system
PRODUCT_BRAND := generic
diff --git a/target/product/generic_system_arm64.mk b/target/product/generic_system_arm64.mk
index b8b12c3..2c64479 100644
--- a/target/product/generic_system_arm64.mk
+++ b/target/product/generic_system_arm64.mk
@@ -38,9 +38,6 @@
PRODUCT_SHIPPING_API_LEVEL := 29
-# TODO(b/137033385): change this back to "all"
-PRODUCT_RESTRICT_VENDOR_FILES := owner
-
PRODUCT_NAME := generic_system_arm64
PRODUCT_DEVICE := mainline_arm64
PRODUCT_BRAND := generic
diff --git a/target/product/generic_system_x86.mk b/target/product/generic_system_x86.mk
index dddcb7e..cf38a98 100644
--- a/target/product/generic_system_x86.mk
+++ b/target/product/generic_system_x86.mk
@@ -37,9 +37,6 @@
PRODUCT_SHIPPING_API_LEVEL := 29
-# TODO(b/137033385): change this back to "all"
-PRODUCT_RESTRICT_VENDOR_FILES := owner
-
PRODUCT_NAME := generic_system_x86
PRODUCT_DEVICE := mainline_x86
PRODUCT_BRAND := generic
diff --git a/target/product/generic_system_x86_64.mk b/target/product/generic_system_x86_64.mk
index 1ca9678..5f3829b 100644
--- a/target/product/generic_system_x86_64.mk
+++ b/target/product/generic_system_x86_64.mk
@@ -38,8 +38,6 @@
PRODUCT_SHIPPING_API_LEVEL := 29
-PRODUCT_RESTRICT_VENDOR_FILES := all
-
PRODUCT_NAME := generic_system_x86_64
PRODUCT_DEVICE := mainline_x86_64
PRODUCT_BRAND := generic
diff --git a/target/product/generic_system_x86_arm.mk b/target/product/generic_system_x86_arm.mk
index a62fb9b..923f32d 100644
--- a/target/product/generic_system_x86_arm.mk
+++ b/target/product/generic_system_x86_arm.mk
@@ -37,9 +37,6 @@
PRODUCT_SHIPPING_API_LEVEL := 29
-# TODO(b/137033385): change this back to "all"
-PRODUCT_RESTRICT_VENDOR_FILES := owner
-
PRODUCT_NAME := generic_system_x86_arm
PRODUCT_DEVICE := mainline_x86_arm
PRODUCT_BRAND := generic
diff --git a/target/product/generic_x86.mk b/target/product/generic_x86.mk
index 0274b5b..eeb8216 100644
--- a/target/product/generic_x86.mk
+++ b/target/product/generic_x86.mk
@@ -17,7 +17,7 @@
# This is a generic phone product that isn't specialized for a specific device.
# It includes the base Android platform.
-include $(SRC_TARGET_DIR)/product/generic.mk
+$(call inherit-product, $(SRC_TARGET_DIR)/product/generic.mk)
# Overrides
PRODUCT_BRAND := generic_x86
diff --git a/target/product/gsi/Android.mk b/target/product/gsi/Android.mk
index b4df5fe..f21fe16 100644
--- a/target/product/gsi/Android.mk
+++ b/target/product/gsi/Android.mk
@@ -161,7 +161,10 @@
include $(BUILD_PHONY_PACKAGE)
include $(CLEAR_VARS)
-_vndk_versions := $(PRODUCT_EXTRA_VNDK_VERSIONS)
+_vndk_versions :=
+ifeq ($(filter com.android.vndk.current.on_vendor, $(PRODUCT_PACKAGES)),)
+ _vndk_versions += $(PRODUCT_EXTRA_VNDK_VERSIONS)
+endif
ifneq ($(BOARD_VNDK_VERSION),current)
_vndk_versions += $(BOARD_VNDK_VERSION)
endif
diff --git a/target/product/gsi/current.txt b/target/product/gsi/current.txt
index baf1382..2ca6687 100644
--- a/target/product/gsi/current.txt
+++ b/target/product/gsi/current.txt
@@ -18,8 +18,13 @@
LLNDK: libsync.so
LLNDK: libvndksupport.so
LLNDK: libvulkan.so
-VNDK-SP: android.hardware.common-V1-ndk_platform.so
-VNDK-SP: android.hardware.graphics.common-V1-ndk_platform.so
+VNDK-SP: android.hardware.common-V2-ndk_platform.so
+VNDK-SP: android.hardware.common-unstable-ndk_platform.so
+VNDK-SP: android.hardware.common.fmq-V1-ndk_platform.so
+VNDK-SP: android.hardware.common.fmq-ndk_platform.so
+VNDK-SP: android.hardware.common.fmq-unstable-ndk_platform.so
+VNDK-SP: android.hardware.graphics.common-V2-ndk_platform.so
+VNDK-SP: android.hardware.graphics.common-unstable-ndk_platform.so
VNDK-SP: android.hardware.graphics.common@1.0.so
VNDK-SP: android.hardware.graphics.common@1.1.so
VNDK-SP: android.hardware.graphics.common@1.2.so
@@ -42,6 +47,7 @@
VNDK-SP: libc++.so
VNDK-SP: libcompiler_rt.so
VNDK-SP: libcutils.so
+VNDK-SP: libdmabufheap.so
VNDK-SP: libgralloctypes.so
VNDK-SP: libhardware.so
VNDK-SP: libhidlbase.so
@@ -55,7 +61,11 @@
VNDK-SP: libutilscallstack.so
VNDK-SP: libz.so
VNDK-core: android.hardware.audio.common@2.0.so
+VNDK-core: android.hardware.authsecret-V1-ndk_platform.so
+VNDK-core: android.hardware.authsecret-ndk_platform.so
+VNDK-core: android.hardware.authsecret-unstable-ndk_platform.so
VNDK-core: android.hardware.automotive.occupant_awareness-V1-ndk_platform.so
+VNDK-core: android.hardware.automotive.occupant_awareness-ndk_platform.so
VNDK-core: android.hardware.configstore-utils.so
VNDK-core: android.hardware.configstore@1.0.so
VNDK-core: android.hardware.configstore@1.1.so
@@ -65,22 +75,51 @@
VNDK-core: android.hardware.graphics.allocator@4.0.so
VNDK-core: android.hardware.graphics.bufferqueue@1.0.so
VNDK-core: android.hardware.graphics.bufferqueue@2.0.so
+VNDK-core: android.hardware.health.storage-V1-ndk_platform.so
+VNDK-core: android.hardware.health.storage-ndk_platform.so
+VNDK-core: android.hardware.health.storage-unstable-ndk_platform.so
VNDK-core: android.hardware.identity-V2-ndk_platform.so
+VNDK-core: android.hardware.identity-ndk_platform.so
VNDK-core: android.hardware.keymaster-V2-ndk_platform.so
+VNDK-core: android.hardware.keymaster-ndk_platform.so
VNDK-core: android.hardware.light-V1-ndk_platform.so
+VNDK-core: android.hardware.light-ndk_platform.so
VNDK-core: android.hardware.media.bufferpool@2.0.so
VNDK-core: android.hardware.media.omx@1.0.so
VNDK-core: android.hardware.media@1.0.so
+VNDK-core: android.hardware.memtrack-V1-ndk_platform.so
+VNDK-core: android.hardware.memtrack-ndk_platform.so
+VNDK-core: android.hardware.memtrack-unstable-ndk_platform.so
VNDK-core: android.hardware.memtrack@1.0.so
+VNDK-core: android.hardware.oemlock-V1-ndk_platform.so
+VNDK-core: android.hardware.oemlock-ndk_platform.so
+VNDK-core: android.hardware.oemlock-unstable-ndk_platform.so
VNDK-core: android.hardware.power-V1-ndk_platform.so
+VNDK-core: android.hardware.power-ndk_platform.so
VNDK-core: android.hardware.rebootescrow-V1-ndk_platform.so
+VNDK-core: android.hardware.rebootescrow-ndk_platform.so
+VNDK-core: android.hardware.security.keymint-V1-ndk_platform.so
+VNDK-core: android.hardware.security.keymint-ndk_platform.so
+VNDK-core: android.hardware.security.keymint-unstable-ndk_platform.so
+VNDK-core: android.hardware.security.secureclock-V1-ndk_platform.so
+VNDK-core: android.hardware.security.secureclock-ndk_platform.so
+VNDK-core: android.hardware.security.secureclock-unstable-ndk_platform.so
+VNDK-core: android.hardware.security.sharedsecret-V1-ndk_platform.so
+VNDK-core: android.hardware.security.sharedsecret-ndk_platform.so
+VNDK-core: android.hardware.security.sharedsecret-unstable-ndk_platform.so
VNDK-core: android.hardware.soundtrigger@2.0-core.so
VNDK-core: android.hardware.soundtrigger@2.0.so
VNDK-core: android.hardware.vibrator-V1-ndk_platform.so
+VNDK-core: android.hardware.vibrator-ndk_platform.so
+VNDK-core: android.hardware.weaver-V1-ndk_platform.so
+VNDK-core: android.hardware.weaver-ndk_platform.so
+VNDK-core: android.hardware.weaver-unstable-ndk_platform.so
VNDK-core: android.hidl.token@1.0-utils.so
VNDK-core: android.hidl.token@1.0.so
+VNDK-core: android.system.keystore2-V1-ndk_platform.so
+VNDK-core: android.system.keystore2-ndk_platform.so
+VNDK-core: android.system.keystore2-unstable-ndk_platform.so
VNDK-core: android.system.suspend@1.0.so
-VNDK-core: libadf.so
VNDK-core: libaudioroute.so
VNDK-core: libaudioutils.so
VNDK-core: libbinder.so
@@ -142,3 +181,70 @@
VNDK-private: libcompiler_rt.so
VNDK-private: libft2.so
VNDK-private: libgui.so
+VNDK-product: android.hardware.audio.common@2.0.so
+VNDK-product: android.hardware.configstore@1.0.so
+VNDK-product: android.hardware.configstore@1.1.so
+VNDK-product: android.hardware.graphics.allocator@2.0.so
+VNDK-product: android.hardware.graphics.allocator@3.0.so
+VNDK-product: android.hardware.graphics.allocator@4.0.so
+VNDK-product: android.hardware.graphics.bufferqueue@1.0.so
+VNDK-product: android.hardware.graphics.bufferqueue@2.0.so
+VNDK-product: android.hardware.graphics.common@1.0.so
+VNDK-product: android.hardware.graphics.common@1.1.so
+VNDK-product: android.hardware.graphics.common@1.2.so
+VNDK-product: android.hardware.graphics.mapper@2.0.so
+VNDK-product: android.hardware.graphics.mapper@2.1.so
+VNDK-product: android.hardware.graphics.mapper@3.0.so
+VNDK-product: android.hardware.graphics.mapper@4.0.so
+VNDK-product: android.hardware.media.bufferpool@2.0.so
+VNDK-product: android.hardware.media.omx@1.0.so
+VNDK-product: android.hardware.media@1.0.so
+VNDK-product: android.hardware.memtrack@1.0.so
+VNDK-product: android.hardware.renderscript@1.0.so
+VNDK-product: android.hardware.soundtrigger@2.0.so
+VNDK-product: android.hidl.memory.token@1.0.so
+VNDK-product: android.hidl.memory@1.0.so
+VNDK-product: android.hidl.safe_union@1.0.so
+VNDK-product: android.hidl.token@1.0.so
+VNDK-product: android.system.suspend@1.0.so
+VNDK-product: libaudioutils.so
+VNDK-product: libbacktrace.so
+VNDK-product: libbase.so
+VNDK-product: libc++.so
+VNDK-product: libcamera_metadata.so
+VNDK-product: libcap.so
+VNDK-product: libcompiler_rt.so
+VNDK-product: libcrypto.so
+VNDK-product: libcurl.so
+VNDK-product: libcutils.so
+VNDK-product: libevent.so
+VNDK-product: libexpat.so
+VNDK-product: libfmq.so
+VNDK-product: libhidlbase.so
+VNDK-product: libhidlmemory.so
+VNDK-product: libion.so
+VNDK-product: libjpeg.so
+VNDK-product: libjsoncpp.so
+VNDK-product: libldacBT_abr.so
+VNDK-product: libldacBT_enc.so
+VNDK-product: liblz4.so
+VNDK-product: liblzma.so
+VNDK-product: libminijail.so
+VNDK-product: libnl.so
+VNDK-product: libpcre2.so
+VNDK-product: libpiex.so
+VNDK-product: libpng.so
+VNDK-product: libprocessgroup.so
+VNDK-product: libprocinfo.so
+VNDK-product: libspeexresampler.so
+VNDK-product: libssl.so
+VNDK-product: libtinyalsa.so
+VNDK-product: libtinyxml2.so
+VNDK-product: libunwindstack.so
+VNDK-product: libutils.so
+VNDK-product: libutilscallstack.so
+VNDK-product: libwifi-system-iface.so
+VNDK-product: libxml2.so
+VNDK-product: libyuv.so
+VNDK-product: libz.so
+VNDK-product: libziparchive.so
diff --git a/target/product/gsi_arm64.mk b/target/product/gsi_arm64.mk
deleted file mode 100644
index 1043a85..0000000
--- a/target/product/gsi_arm64.mk
+++ /dev/null
@@ -1,46 +0,0 @@
-#
-# Copyright (C) 2019 The Android Open Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#
-# All components inherited here go to system image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/core_64_bit.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/generic_system.mk)
-
-# Enable mainline checking
-PRODUCT_ENFORCE_ARTIFACT_PATH_REQUIREMENTS := relaxed
-
-#
-# All components inherited here go to system_ext image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/handheld_system_ext.mk)
-$(call inherit-product, $(SRC_TARGET_DIR)/product/telephony_system_ext.mk)
-
-#
-# All components inherited here go to product image
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/aosp_product.mk)
-
-#
-# Special settings for GSI releasing
-#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/gsi_release.mk)
-
-
-PRODUCT_NAME := gsi_arm64
-PRODUCT_DEVICE := gsi_arm64
-PRODUCT_BRAND := generic
-PRODUCT_MODEL := GSI on ARM64
diff --git a/target/product/gsi_release.mk b/target/product/gsi_release.mk
index 241b6ba..2c74ce0 100644
--- a/target/product/gsi_release.mk
+++ b/target/product/gsi_release.mk
@@ -37,9 +37,6 @@
# Enable dynamic partition size
PRODUCT_USE_DYNAMIC_PARTITION_SIZE := true
-# Needed by Pi newly launched device to pass VtsTrebleSysProp on GSI
-PRODUCT_COMPATIBLE_PROPERTY_OVERRIDE := true
-
# GSI targets should install "unflattened" APEXes in /system
TARGET_FLATTEN_APEX := false
diff --git a/target/product/legacy_gsi_release.mk b/target/product/legacy_gsi_release.mk
deleted file mode 100644
index 09b96fb..0000000
--- a/target/product/legacy_gsi_release.mk
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-# Copyright (C) 2020 The Android Open-Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-include $(SRC_TARGET_DIR)/product/gsi_release.mk
-
-# Legacy GSI support additional O-MR1 interface
-PRODUCT_EXTRA_VNDK_VERSIONS += 27
-
-# Legacy GSI relax the compatible property checking
-PRODUCT_COMPATIBLE_PROPERTY_OVERRIDE := false
diff --git a/core/tasks/apidiff.mk b/target/product/mainline_sdk.mk
similarity index 79%
rename from core/tasks/apidiff.mk
rename to target/product/mainline_sdk.mk
index 76e4749..343aed6 100644
--- a/core/tasks/apidiff.mk
+++ b/target/product/mainline_sdk.mk
@@ -1,4 +1,4 @@
-# Copyright (C) 2017 The Android Open Source Project
+# Copyright (C) 2020 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -11,11 +11,8 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-#
-# Rules for building API diffs.
#
-.PHONY: api-diff
-
-api-diff: api-stubs-docs-jdiff
+PRODUCT_NAME := mainline_sdk
+PRODUCT_BRAND := Android
+PRODUCT_DEVICE := mainline_sdk
diff --git a/target/product/media_system.mk b/target/product/media_system.mk
index 1004dc5..4ebec51 100644
--- a/target/product/media_system.mk
+++ b/target/product/media_system.mk
@@ -58,7 +58,6 @@
# The values should be of the format <apex name>:<jar name>
PRODUCT_UPDATABLE_SYSTEM_SERVER_JARS := \
com.android.permission:service-permission \
- com.android.wifi:service-wifi \
com.android.ipsec:android.net.ipsec.ike \
PRODUCT_COPY_FILES += \
diff --git a/target/product/media_vendor.mk b/target/product/media_vendor.mk
index 7d4af64..ef009ad 100644
--- a/target/product/media_vendor.mk
+++ b/target/product/media_vendor.mk
@@ -23,4 +23,3 @@
# /vendor packages
PRODUCT_PACKAGES += \
libaudiopreprocessing \
- libwebrtc_audio_preprocessing \
diff --git a/target/product/runtime_libart.mk b/target/product/runtime_libart.mk
index 7633abe..e655d51 100644
--- a/target/product/runtime_libart.mk
+++ b/target/product/runtime_libart.mk
@@ -16,6 +16,8 @@
# Provides a functioning ART environment without Android frameworks
+$(call inherit-product, $(SRC_TARGET_DIR)/product/default_art_config.mk)
+
# Additional mixins to the boot classpath.
PRODUCT_PACKAGES += \
android.test.base \
@@ -30,8 +32,8 @@
# ART APEX module.
# Note that this package includes the minimal boot classpath JARs (listed in
# ART_APEX_JARS), which should no longer be added directly to PRODUCT_PACKAGES.
-PRODUCT_PACKAGES += com.android.art
-PRODUCT_HOST_PACKAGES += com.android.art
+PRODUCT_PACKAGES += com.android.art-autoselect
+PRODUCT_HOST_PACKAGES += com.android.art-autoselect
# Certificates.
PRODUCT_PACKAGES += \
@@ -41,10 +43,6 @@
hiddenapi-package-whitelist.xml \
PRODUCT_SYSTEM_PROPERTIES += \
- dalvik.vm.image-dex2oat-Xms=64m \
- dalvik.vm.image-dex2oat-Xmx=64m \
- dalvik.vm.dex2oat-Xms=64m \
- dalvik.vm.dex2oat-Xmx=512m \
dalvik.vm.usejit=true \
dalvik.vm.usejitprofiles=true \
dalvik.vm.dexopt.secondary=true \
@@ -70,6 +68,11 @@
# or if it is empty speed-profile is equivalent to (quicken + empty app image).
PRODUCT_SYSTEM_PROPERTIES += \
pm.dexopt.install?=speed-profile \
+ pm.dexopt.install-fast?=skip \
+ pm.dexopt.install-bulk?=speed-profile \
+ pm.dexopt.install-bulk-secondary?=verify \
+ pm.dexopt.install-bulk-downgraded?=verify \
+ pm.dexopt.install-bulk-secondary-downgraded?=extract \
pm.dexopt.bg-dexopt?=speed-profile \
pm.dexopt.ab-ota?=speed-profile \
pm.dexopt.inactive?=verify \
@@ -92,4 +95,14 @@
dalvik.vm.minidebuginfo=true \
dalvik.vm.dex2oat-minidebuginfo=true
-PRODUCT_USES_DEFAULT_ART_CONFIG := true
+# Two other device configs are consulted by IORap in addition to "ro.iorapd.enable".
+# IORap is off by default and starts only when
+# (https://source.corp.google.com/android/system/iorap/iorapd.rc?q=iorapd.rc)
+#
+# * "ro.iorapd.enable" is true (not merely unset), and
+# * one of the device configs is true.
+#
+# "ro.iorapd.enable" therefore has to be set to true so that iorapd can be started.
+PRODUCT_SYSTEM_PROPERTIES += \
+ ro.iorapd.enable?=true
+
diff --git a/target/product/sdk_phone_x86.mk b/target/product/sdk_phone_x86.mk
index 63671dc..9096ff3 100644
--- a/target/product/sdk_phone_x86.mk
+++ b/target/product/sdk_phone_x86.mk
@@ -50,8 +50,8 @@
$(call inherit-product, $(SRC_TARGET_DIR)/board/emulator_x86/device.mk)
# Define the host tools and libs that are parts of the SDK.
--include sdk/build/product_sdk.mk
--include development/build/product_sdk.mk
+$(call inherit-product-if-exists, sdk/build/product_sdk.mk)
+$(call inherit-product-if-exists, development/build/product_sdk.mk)
# Overrides
PRODUCT_BRAND := Android
diff --git a/target/product/sdk_phone_x86_64.mk b/target/product/sdk_phone_x86_64.mk
index c5a6245..161043b 100644
--- a/target/product/sdk_phone_x86_64.mk
+++ b/target/product/sdk_phone_x86_64.mk
@@ -51,8 +51,8 @@
$(call inherit-product, $(SRC_TARGET_DIR)/board/emulator_x86_64/device.mk)
# Define the host tools and libs that are parts of the SDK.
--include sdk/build/product_sdk.mk
--include development/build/product_sdk.mk
+$(call inherit-product-if-exists, sdk/build/product_sdk.mk)
+$(call inherit-product-if-exists, development/build/product_sdk.mk)
# Overrides
PRODUCT_BRAND := Android
diff --git a/target/product/virtual_ab_ota.mk b/target/product/virtual_ab_ota.mk
new file mode 120000
index 0000000..16f7329
--- /dev/null
+++ b/target/product/virtual_ab_ota.mk
@@ -0,0 +1 @@
+virtual_ab_ota/launch.mk
\ No newline at end of file
diff --git a/target/product/virtual_ab_ota/README.md b/target/product/virtual_ab_ota/README.md
new file mode 100644
index 0000000..2d40c03
--- /dev/null
+++ b/target/product/virtual_ab_ota/README.md
@@ -0,0 +1,16 @@
+# Virtual A/B makefiles
+
+Devices that use Virtual A/B must inherit from one of the makefiles in this directory.
+
+## Structure
+
+```
+launch.mk
+ |- retrofit.mk
+ |- plus_non_ab.mk
+
+launch_with_vendor_ramdisk.mk
+ |- compression.mk
+
+compression_retrofit.mk
+```
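To make the structure above concrete, here is a sketch of how a hypothetical device launching with a vendor_boot partition and compressed Virtual A/B might use these files (only the leaf makefile needs to be inherited, since compression.mk itself inherits launch_with_vendor_ramdisk.mk):

```
# device/acme/rocket/device.mk (hypothetical)
$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota/compression.mk)
```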
diff --git a/target/product/virtual_ab_ota_retrofit_compression.mk b/target/product/virtual_ab_ota/compression.mk
similarity index 81%
rename from target/product/virtual_ab_ota_retrofit_compression.mk
rename to target/product/virtual_ab_ota/compression.mk
index 8059f75..8301047 100644
--- a/target/product/virtual_ab_ota_retrofit_compression.mk
+++ b/target/product/virtual_ab_ota/compression.mk
@@ -14,9 +14,11 @@
# limitations under the License.
#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota_retrofit.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota/launch_with_vendor_ramdisk.mk)
PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.compression.enabled=true
-
-PRODUCT_PACKAGES += snapuserd_ramdisk
-
+PRODUCT_VIRTUAL_AB_COMPRESSION := true
+PRODUCT_PACKAGES += \
+ snapuserd.vendor_ramdisk \
+ snapuserd \
+ snapuserd.recovery
diff --git a/target/product/virtual_ab_ota_retrofit_compression.mk b/target/product/virtual_ab_ota/compression_retrofit.mk
similarity index 62%
copy from target/product/virtual_ab_ota_retrofit_compression.mk
copy to target/product/virtual_ab_ota/compression_retrofit.mk
index 8059f75..6c29cba 100644
--- a/target/product/virtual_ab_ota_retrofit_compression.mk
+++ b/target/product/virtual_ab_ota/compression_retrofit.mk
@@ -14,9 +14,15 @@
# limitations under the License.
#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota_retrofit.mk)
-
PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.compression.enabled=true
+PRODUCT_VIRTUAL_AB_COMPRESSION := true
-PRODUCT_PACKAGES += snapuserd_ramdisk
-
+# For devices that are not GKI-capable (e.g. they do not have vendor_boot),
+# snapuserd.ramdisk is included rather than snapuserd.vendor_ramdisk.
+# When using virtual_ab_ota_compression_retrofit.mk, either
+# virtual_ab_ota.mk or virtual_ab_ota_retrofit.mk must be inherited
+# as well.
+PRODUCT_PACKAGES += \
+ snapuserd.ramdisk \
+ snapuserd \
+ snapuserd.recovery
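Per the comment above, compression_retrofit.mk does not pull in a launch or retrofit makefile by itself; a hedged sketch of a retrofit device combining the two (the device itself is hypothetical):

```
# Hypothetical retrofit device makefile fragment.
$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota/retrofit.mk)
$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota/compression_retrofit.mk)
```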
diff --git a/target/product/virtual_ab_ota.mk b/target/product/virtual_ab_ota/launch.mk
similarity index 100%
rename from target/product/virtual_ab_ota.mk
rename to target/product/virtual_ab_ota/launch.mk
diff --git a/target/product/virtual_ab_ota_retrofit_compression.mk b/target/product/virtual_ab_ota/launch_with_vendor_ramdisk.mk
similarity index 66%
copy from target/product/virtual_ab_ota_retrofit_compression.mk
copy to target/product/virtual_ab_ota/launch_with_vendor_ramdisk.mk
index 8059f75..bc81b33 100644
--- a/target/product/virtual_ab_ota_retrofit_compression.mk
+++ b/target/product/virtual_ab_ota/launch_with_vendor_ramdisk.mk
@@ -14,9 +14,13 @@
# limitations under the License.
#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota_retrofit.mk)
+# Devices launching with Virtual A/B that have a vendor_boot partition should
+# inherit from this makefile instead of launch.mk.
-PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.compression.enabled=true
+PRODUCT_VIRTUAL_AB_OTA := true
-PRODUCT_PACKAGES += snapuserd_ramdisk
+PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.enabled=true
+PRODUCT_PACKAGES += \
+ linker.vendor_ramdisk \
+ e2fsck.vendor_ramdisk \
diff --git a/target/product/virtual_ab_ota_plus_non_ab.mk b/target/product/virtual_ab_ota/plus_non_ab.mk
similarity index 97%
rename from target/product/virtual_ab_ota_plus_non_ab.mk
rename to target/product/virtual_ab_ota/plus_non_ab.mk
index 99a10ed..820fa1e 100644
--- a/target/product/virtual_ab_ota_plus_non_ab.mk
+++ b/target/product/virtual_ab_ota/plus_non_ab.mk
@@ -14,7 +14,7 @@
# limitations under the License.
#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota/launch.mk)
PRODUCT_OTA_FORCE_NON_AB_PACKAGE := true
diff --git a/target/product/virtual_ab_ota_retrofit.mk b/target/product/virtual_ab_ota/retrofit.mk
similarity index 97%
rename from target/product/virtual_ab_ota_retrofit.mk
rename to target/product/virtual_ab_ota/retrofit.mk
index 3416a4f..93b42b7 100644
--- a/target/product/virtual_ab_ota_retrofit.mk
+++ b/target/product/virtual_ab_ota/retrofit.mk
@@ -14,7 +14,7 @@
# limitations under the License.
#
-$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota.mk)
+$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota/launch.mk)
PRODUCT_VIRTUAL_AB_OTA_RETROFIT := true
diff --git a/target/product/virtual_ab_ota_compression.mk b/target/product/virtual_ab_ota_compression.mk
deleted file mode 100644
index c4849be..0000000
--- a/target/product/virtual_ab_ota_compression.mk
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Copyright (C) 2020 The Android Open-Source Project
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-$(call inherit-product, $(SRC_TARGET_DIR)/product/virtual_ab_ota.mk)
-
-PRODUCT_VENDOR_PROPERTIES += ro.virtual_ab.compression.enabled=true
-
-PRODUCT_PACKAGES += snapuserd_ramdisk
diff --git a/target/product/virtual_ab_ota_plus_non_ab.mk b/target/product/virtual_ab_ota_plus_non_ab.mk
new file mode 120000
index 0000000..4979957
--- /dev/null
+++ b/target/product/virtual_ab_ota_plus_non_ab.mk
@@ -0,0 +1 @@
+virtual_ab_ota/plus_non_ab.mk
\ No newline at end of file
diff --git a/target/product/virtual_ab_ota_retrofit.mk b/target/product/virtual_ab_ota_retrofit.mk
new file mode 120000
index 0000000..1e16ca8
--- /dev/null
+++ b/target/product/virtual_ab_ota_retrofit.mk
@@ -0,0 +1 @@
+virtual_ab_ota/retrofit.mk
\ No newline at end of file
diff --git a/tools/build-license-metadata.sh b/tools/build-license-metadata.sh
new file mode 100755
index 0000000..3bad358
--- /dev/null
+++ b/tools/build-license-metadata.sh
@@ -0,0 +1,312 @@
+#!/bin/sh
+
+set -u
+
+ME=$(basename $0)
+
+USAGE="Usage: ${ME} {options}
+
+Builds a license metadata specification and outputs it to stdout or {outfile}.
+
+The available options are:
+
+-k kind... license kinds
+-c condition... license conditions
+-p package... license package name
+-n notice... license notice file
+-d dependency... license metadata file dependency
+-t target... targets
+-m target:installed... map dependent targets to their installed names
+-is_container preserves dependent target names when given
+-o outfile output file
+"
+
+# Global flag variables
+license_kinds=
+license_conditions=
+license_package_name=
+license_notice=
+license_deps=
+targets=
+installmap=
+is_container=false
+ofile=
+
+# Global variables
+depfiles=" "
+effective_conditions=
+
+
+# Exits with a message.
+#
+# When the exit status is 2, assumes a usage error and outputs the usage message
+# to stderr before outputting the specific error message to stderr.
+#
+# Parameters:
+# Optional numeric exit status (defaults to 2, i.e. a usage error.)
+# Remaining args treated as an error message sent to stderr.
+die() {
+ lstatus=2
+ case "${1:-}" in *[^0-9]*) ;; *) lstatus="$1"; shift ;; esac
+ case "${lstatus}" in 2) echo "${USAGE}" >&2; echo >&2 ;; esac
+ if [ -n "$*" ]; then
+ echo -e "$*\n" >&2
+ fi
+ exit $lstatus
+}
+
+
+# Sets the flag variables based on the command-line.
+#
+# invoke with: process_args "$@"
+process_args() {
+ lcurr_flag=
+ while [ "$#" -gt '0' ]; do
+ case "${1}" in
+ -h)
+ echo "${USAGE}"
+ exit 0
+ ;;
+ -k)
+ lcurr_flag=kind
+ ;;
+ -c)
+ lcurr_flag=condition
+ ;;
+ -p)
+ lcurr_flag=package
+ ;;
+ -n)
+ lcurr_flag=notice
+ ;;
+ -d)
+ lcurr_flag=dependency
+ ;;
+ -t)
+ lcurr_flag=target
+ ;;
+ -m)
+ lcurr_flag=installmap
+ ;;
+ -o)
+ lcurr_flag=ofile
+ ;;
+ -is_container)
+ lcurr_flag=
+ is_container=true
+ ;;
+ -*)
+ die "Unknown flag: \"${1}\""
+ ;;
+ *)
+ case "${lcurr_flag}" in
+ kind)
+ license_kinds="${license_kinds}${license_kinds:+ }${1}"
+ ;;
+ condition)
+ license_conditions="${license_conditions}${license_conditions:+ }${1}"
+ ;;
+ package)
+ license_package_name="${license_package_name}${license_package_name:+ }${1}"
+ ;;
+ notice)
+ license_notice="${license_notice}${license_notice:+ }${1}"
+ ;;
+ dependency)
+ license_deps="${license_deps}${license_deps:+ }${1}"
+ ;;
+ target)
+ targets="${targets}${targets:+ }${1}"
+ ;;
+ installmap)
+ installmap="${installmap}${installmap:+ }${1}"
+ ;;
+ ofile)
+ if [ -n "${ofile}" ]; then
+ die "Output file -o appears twice as \"${ofile}\" and \"${1}\""
+ fi
+ ofile="${1}"
+ ;;
+ *)
+ die "Must precede argument \"${1}\" with type flag."
+ ;;
+ esac
+ ;;
+ esac
+ shift
+ done
+}
+
+# Reads a license metadata file from stdin, and outputs the named dependencies.
+#
+# No parameters.
+extract_deps() {
+ awk '$1 == "dep_name:" { sub(/^"/, "", $2); sub(/"$/, "", $2); print $2; }'
+}
+
+# Populates the depfiles variable identifying dependency files.
+#
+# Starting with the dependencies enumerated in license_deps, calculates the
+# transitive closure of all dependencies.
+#
+# Dependency names ending in .meta_module indirectly reference license
+# metadata with 1 license metadata filename per line.
+#
+# No parameters; no output.
+read_deps() {
+ lnewdeps=
+ for d in ${license_deps}; do
+ case "${d}" in
+ *.meta_module)
+ lnewdeps="${lnewdeps}${lnewdeps:+ }"$(cat "${d}") ;;
+ *)
+ lnewdeps="${lnewdeps}${lnewdeps:+ }${d}" ;;
+ esac
+ done
+ lnewdeps=$(echo "${lnewdeps}" | tr ' ' '\n' | sort -u)
+ lalldeps=
+ ldeps=
+ lmod=
+ ldep=
+ while [ "${#lnewdeps}" -gt '0' ]; do
+ ldeps="${lnewdeps}"
+ lnewdeps=
+ for ldep in ${ldeps}; do
+ depfiles="${depfiles}${ldep} "
+ lalldeps="${lalldeps}${lalldeps:+ }"$(cat "${ldep}" | extract_deps)
+ done
+ lalldeps=$(for d in ${lalldeps}; do echo "${d}"; done | sort -u)
+ for d in ${lalldeps}; do
+ ldeps="${d}"
+ case "${d}" in *.meta_module) ldeps=$(cat "${d}") ;; esac
+ for lmod in ${ldeps}; do
+ if ! expr "${depfiles}" : ".* ${lmod} .*" >/dev/null 2>&1; then
+ lnewdeps="${lnewdeps}${lnewdeps:+ }${lmod}"
+ fi
+ done
+ done
+ lalldeps=
+ done
+}
+
+# Returns the effective license conditions for the current license metadata.
+#
+# If a module is restricted or links in a restricted module, the effective
+# license has a restricted condition.
+calculate_effective_conditions() {
+ lconditions="${license_conditions}"
+ case "${license_conditions}" in
+ *restricted*) : do nothing ;;
+ *)
+ for d in ${depfiles}; do
+ if cat "${d}" | egrep -q 'effective_condition\s*:.*restricted' ; then
+ lconditions="${lconditions}${lconditions:+ }restricted"
+ fi
+ done
+ ;;
+ esac
+ echo "${lconditions}"
+}
+
+
+process_args "$@"
+
+if [ -n "${ofile}" ]; then
+ # truncate the output file before appending results
+ : >"${ofile}"
+else
+ ofile=/dev/stdout
+fi
+
+# spit out the license metadata file content
+(
+ echo 'license_package_name: "'${license_package_name}'"'
+ for kind in ${license_kinds}; do
+ echo 'license_kind: "'${kind}'"'
+ done
+ for condition in ${license_conditions}; do
+ echo 'license_condition: "'${condition}'"'
+ done
+ for f in ${license_notice}; do
+ echo 'license_text: "'${f}'"'
+ done
+ echo "is_container: ${is_container}"
+ for t in ${targets}; do
+ echo 'target: "'${t}'"'
+ done
+ for m in ${installmap}; do
+ echo 'install_map: "'${m}'"'
+ done
+) >>"${ofile}"
+read_deps
+effective_conditions=$(calculate_effective_conditions)
+for condition in ${effective_conditions}; do
+ echo 'effective_condition: "'${condition}'"'
+done >>"${ofile}"
+for dep in ${depfiles}; do
+ echo 'dep {'
+ cat "${dep}" | \
+ awk -v name="${dep}" '
+ function strip_type() {
+ $1 = ""
+ sub(/^\s*/, "")
+ }
+ BEGIN {
+ print " dep_name: " name
+ }
+ $1 == "license_package_name:" {
+ strip_type()
+ print " dep_package_name: "$0
+ }
+ $1 == "dep_name:" {
+ print " dep_sub_dep: "$2
+ }
+ $1 == "license_kind:" {
+ print " dep_license_kind: "$2
+ }
+ $1 == "license_condition:" {
+ print " dep_license_condition: "$2
+ }
+ $1 == "is_container:" {
+ print " dep_is_container: "$2
+ }
+ $1 == "license_text:" {
+ strip_type()
+ print " dep_license_text: "$0
+ }
+ $1 == "target:" {
+ print " dep_target: "$2
+ }
+ $1 == "install_map:" {
+ print " dep_install_map: "$2
+ }
+ '
+ # The restricted license kind is contagious to all linked dependencies.
+ dep_conditions=$(echo $(
+ cat "${dep}" | awk '
+ $1 == "effective_condition:" {
+ $1 = ""
+ sub(/^\s*/, "")
+ gsub(/"/, "")
+ print
+ }
+ '
+ ))
+ for condition in ${dep_conditions}; do
+ echo ' dep_effective_condition: "'${condition}'"'
+ done
+ if ! ${is_container}; then
+ case "${dep_conditions}" in
+ *restricted*) : already restricted -- nothing to inherit ;;
+ *)
+ case "${effective_conditions}" in
+ *restricted*)
+ # "contagious" restricted infects everything linked to restricted
+ echo ' dep_effective_condition: "restricted"'
+ ;;
+ esac
+ ;;
+ esac
+ fi
+ echo '}'
+done >>"${ofile}"
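A rough sketch of how a build rule might invoke this script using the flags documented in its usage text; the rule, license kind, notice path, and target names here are illustrative assumptions rather than the build system's real integration:

```
# Hypothetical make rule (recipe lines are tab-indented); it only demonstrates
# the documented flags of build-license-metadata.sh.
example_meta := $(OUT_DIR)/example.meta_lic

$(example_meta):
	build/make/tools/build-license-metadata.sh \
	  -k SPDX-license-identifier-Apache-2.0 \
	  -c notice \
	  -p "Example Package" \
	  -n path/to/NOTICE \
	  -t example_target \
	  -o $@
```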
diff --git a/tools/buildinfo.sh b/tools/buildinfo.sh
index 9bee115..f27ed8c 100755
--- a/tools/buildinfo.sh
+++ b/tools/buildinfo.sh
@@ -35,9 +35,6 @@
if [ -n "$TARGET_CPU_ABI2" ] ; then
echo "ro.product.cpu.abi2=$TARGET_CPU_ABI2"
fi
-echo "ro.product.cpu.abilist=$TARGET_CPU_ABI_LIST"
-echo "ro.product.cpu.abilist32=$TARGET_CPU_ABI_LIST_32_BIT"
-echo "ro.product.cpu.abilist64=$TARGET_CPU_ABI_LIST_64_BIT"
if [ -n "$PRODUCT_DEFAULT_LOCALE" ] ; then
echo "ro.product.locale=$PRODUCT_DEFAULT_LOCALE"
diff --git a/tools/fs_config/Android.mk b/tools/fs_config/Android.mk
index 5fb68b8..c338462 100644
--- a/tools/fs_config/Android.mk
+++ b/tools/fs_config/Android.mk
@@ -34,8 +34,6 @@
$(if $(BOARD_USES_ODMIMAGE)$(BOARD_ODMIMAGE_FILE_SYSTEM_TYPE),odm) \
$(if $(BOARD_USES_VENDOR_DLKMIMAGE)$(BOARD_VENDOR_DLKMIMAGE_FILE_SYSTEM_TYPE),vendor_dlkm) \
$(if $(BOARD_USES_ODM_DLKMIMAGE)$(BOARD_ODM_DLKMIMAGE_FILE_SYSTEM_TYPE),odm_dlkm) \
- $(if $(BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE),product) \
- $(if $(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE),system_ext) \
)
##################################
@@ -45,11 +43,12 @@
LOCAL_MODULE := fs_config_dirs
LOCAL_REQUIRED_MODULES := \
- fs_config_dirs_system \
- $(foreach t,$(fs_config_generate_extra_partition_list),$(LOCAL_MODULE)_$(t))
+ fs_config_dirs_system \
+ fs_config_dirs_system_ext \
+ fs_config_dirs_product \
+ fs_config_dirs_nonsystem
include $(BUILD_PHONY_PACKAGE)
-
##################################
# Generate the <p>/etc/fs_config_files binary files for each partition.
# Add fs_config_files to PRODUCT_PACKAGES in the device make file to enable.
@@ -58,27 +57,69 @@
LOCAL_MODULE := fs_config_files
LOCAL_REQUIRED_MODULES := \
fs_config_files_system \
- $(foreach t,$(fs_config_generate_extra_partition_list),$(LOCAL_MODULE)_$(t))
+ fs_config_files_system_ext \
+ fs_config_files_product \
+ fs_config_files_nonsystem
+include $(BUILD_PHONY_PACKAGE)
+
+##################################
+# Generate the system_ext/etc/fs_config_dirs binary file for the target if the
+# system_ext partition is generated. Add fs_config_dirs or fs_config_dirs_system_ext
+# to PRODUCT_PACKAGES in the device make file to enable.
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := fs_config_dirs_system_ext
+LOCAL_REQUIRED_MODULES := $(if $(BOARD_USES_SYSTEM_EXTIMAGE)$(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE),_fs_config_dirs_system_ext)
+include $(BUILD_PHONY_PACKAGE)
+
+##################################
+# Generate the system_ext/etc/fs_config_files binary file for the target if the
+# system_ext partition is generated. Add fs_config_files or fs_config_files_system_ext
+# to PRODUCT_PACKAGES in the device make file to enable.
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := fs_config_files_system_ext
+LOCAL_REQUIRED_MODULES := $(if $(BOARD_USES_SYSTEM_EXTIMAGE)$(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE),_fs_config_files_system_ext)
+include $(BUILD_PHONY_PACKAGE)
+
+##################################
+# Generate the product/etc/fs_config_dirs binary file for the target if the
+# product partition is generated. Add fs_config_dirs or fs_config_dirs_product
+# to PRODUCT_PACKAGES in the device make file to enable.
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := fs_config_dirs_product
+LOCAL_REQUIRED_MODULES := $(if $(BOARD_USES_PRODUCTIMAGE)$(BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE),_fs_config_dirs_product)
+include $(BUILD_PHONY_PACKAGE)
+
+##################################
+# Generate the product/etc/fs_config_files binary file for the target if the
+# product partition is generated. Add fs_config_files or fs_config_files_product
+# to PRODUCT_PACKAGES in the device make file to enable.
+include $(CLEAR_VARS)
+
+LOCAL_MODULE := fs_config_files_product
+LOCAL_REQUIRED_MODULES := $(if $(BOARD_USES_PRODUCTIMAGE)$(BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE),_fs_config_files_product)
include $(BUILD_PHONY_PACKAGE)
##################################
# Generate the <p>/etc/fs_config_dirs binary files for all enabled partitions
-# excluding /system. Add fs_config_dirs_nonsystem to PRODUCT_PACKAGES in the
-# device make file to enable.
+# excluding /system, /system_ext and /product. Add fs_config_dirs_nonsystem to
+# PRODUCT_PACKAGES in the device make file to enable.
include $(CLEAR_VARS)
LOCAL_MODULE := fs_config_dirs_nonsystem
-LOCAL_REQUIRED_MODULES := $(foreach t,$(fs_config_generate_extra_partition_list),fs_config_dirs_$(t))
+LOCAL_REQUIRED_MODULES := $(foreach t,$(fs_config_generate_extra_partition_list),_fs_config_dirs_$(t))
include $(BUILD_PHONY_PACKAGE)
##################################
# Generate the <p>/etc/fs_config_files binary files for all enabled partitions
-# excluding /system. Add fs_config_files_nonsystem to PRODUCT_PACKAGES in the
-# device make file to enable.
+# excluding /system, /system_ext and /product. Add fs_config_files_nonsystem to
+# PRODUCT_PACKAGES in the device make file to enable.
include $(CLEAR_VARS)
LOCAL_MODULE := fs_config_files_nonsystem
-LOCAL_REQUIRED_MODULES := $(foreach t,$(fs_config_generate_extra_partition_list),fs_config_files_$(t))
+LOCAL_REQUIRED_MODULES := $(foreach t,$(fs_config_generate_extra_partition_list),_fs_config_files_$(t))
include $(BUILD_PHONY_PACKAGE)
##################################
@@ -134,11 +175,11 @@
ifneq ($(filter vendor,$(fs_config_generate_extra_partition_list)),)
##################################
# Generate the vendor/etc/fs_config_dirs binary file for the target
-# Add fs_config_dirs or fs_config_dirs_vendor to PRODUCT_PACKAGES in
-# the device make file to enable.
+# Add fs_config_dirs or fs_config_dirs_nonsystem to PRODUCT_PACKAGES
+# in the device make file to enable
include $(CLEAR_VARS)
-LOCAL_MODULE := fs_config_dirs_vendor
+LOCAL_MODULE := _fs_config_dirs_vendor
LOCAL_MODULE_CLASS := ETC
LOCAL_INSTALLED_MODULE_STEM := fs_config_dirs
LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR)/etc
@@ -158,11 +199,11 @@
##################################
# Generate the vendor/etc/fs_config_files binary file for the target
-# Add fs_config_files or fs_config_files_vendor to PRODUCT_PACKAGES in
-# the device make file to enable
+# Add fs_config_files or fs_config_files_nonsystem to PRODUCT_PACKAGES
+# in the device make file to enable
include $(CLEAR_VARS)
-LOCAL_MODULE := fs_config_files_vendor
+LOCAL_MODULE := _fs_config_files_vendor
LOCAL_MODULE_CLASS := ETC
LOCAL_INSTALLED_MODULE_STEM := fs_config_files
LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR)/etc
@@ -185,11 +226,11 @@
ifneq ($(filter oem,$(fs_config_generate_extra_partition_list)),)
##################################
# Generate the oem/etc/fs_config_dirs binary file for the target
-# Add fs_config_dirs or fs_config_dirs_oem to PRODUCT_PACKAGES in
-# the device make file to enable
+# Add fs_config_dirs or fs_config_dirs_nonsystem to PRODUCT_PACKAGES
+# in the device make file to enable
include $(CLEAR_VARS)
-LOCAL_MODULE := fs_config_dirs_oem
+LOCAL_MODULE := _fs_config_dirs_oem
LOCAL_MODULE_CLASS := ETC
LOCAL_INSTALLED_MODULE_STEM := fs_config_dirs
LOCAL_MODULE_PATH := $(TARGET_OUT_OEM)/etc
@@ -209,11 +250,11 @@
##################################
# Generate the oem/etc/fs_config_files binary file for the target
-# Add fs_config_files or fs_config_files_oem to PRODUCT_PACKAGES in
-# the device make file to enable
+# Add fs_config_files or fs_config_files_nonsystem to PRODUCT_PACKAGES
+# in the device make file to enable
include $(CLEAR_VARS)
-LOCAL_MODULE := fs_config_files_oem
+LOCAL_MODULE := _fs_config_files_oem
LOCAL_MODULE_CLASS := ETC
LOCAL_INSTALLED_MODULE_STEM := fs_config_files
LOCAL_MODULE_PATH := $(TARGET_OUT_OEM)/etc
@@ -236,11 +277,11 @@
ifneq ($(filter odm,$(fs_config_generate_extra_partition_list)),)
##################################
# Generate the odm/etc/fs_config_dirs binary file for the target
-# Add fs_config_dirs or fs_config_dirs_odm to PRODUCT_PACKAGES in
-# the device make file to enable
+# Add fs_config_dirs or fs_config_dirs_nonsystem to PRODUCT_PACKAGES
+# in the device make file to enable
include $(CLEAR_VARS)
-LOCAL_MODULE := fs_config_dirs_odm
+LOCAL_MODULE := _fs_config_dirs_odm
LOCAL_MODULE_CLASS := ETC
LOCAL_INSTALLED_MODULE_STEM := fs_config_dirs
LOCAL_MODULE_PATH := $(TARGET_OUT_ODM)/etc
@@ -260,11 +301,11 @@
##################################
# Generate the odm/etc/fs_config_files binary file for the target
-# Add fs_config_files of fs_config_files_odm to PRODUCT_PACKAGES in
-# the device make file to enable
+# Add fs_config_files or fs_config_files_nonsystem to PRODUCT_PACKAGES
+# in the device make file to enable
include $(CLEAR_VARS)
-LOCAL_MODULE := fs_config_files_odm
+LOCAL_MODULE := _fs_config_files_odm
LOCAL_MODULE_CLASS := ETC
LOCAL_INSTALLED_MODULE_STEM := fs_config_files
LOCAL_MODULE_PATH := $(TARGET_OUT_ODM)/etc
@@ -287,11 +328,11 @@
ifneq ($(filter vendor_dlkm,$(fs_config_generate_extra_partition_list)),)
##################################
# Generate the vendor_dlkm/etc/fs_config_dirs binary file for the target
-# Add fs_config_dirs or fs_config_dirs_vendor_dlkm to PRODUCT_PACKAGES in
+# Add fs_config_dirs or fs_config_dirs_nonsystem to PRODUCT_PACKAGES in
# the device make file to enable
include $(CLEAR_VARS)
-LOCAL_MODULE := fs_config_dirs_vendor_dlkm
+LOCAL_MODULE := _fs_config_dirs_vendor_dlkm
LOCAL_MODULE_CLASS := ETC
LOCAL_INSTALLED_MODULE_STEM := fs_config_dirs
LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR_DLKM)/etc
@@ -311,11 +352,11 @@
##################################
# Generate the vendor_dlkm/etc/fs_config_files binary file for the target
-# Add fs_config_files of fs_config_files_vendor_dlkm to PRODUCT_PACKAGES in
+# Add fs_config_files or fs_config_files_nonsystem to PRODUCT_PACKAGES in
# the device make file to enable
include $(CLEAR_VARS)
-LOCAL_MODULE := fs_config_files_vendor_dlkm
+LOCAL_MODULE := _fs_config_files_vendor_dlkm
LOCAL_MODULE_CLASS := ETC
LOCAL_INSTALLED_MODULE_STEM := fs_config_files
LOCAL_MODULE_PATH := $(TARGET_OUT_VENDOR_DLKM)/etc
@@ -338,11 +379,11 @@
ifneq ($(filter odm_dlkm,$(fs_config_generate_extra_partition_list)),)
##################################
# Generate the odm_dlkm/etc/fs_config_dirs binary file for the target
-# Add fs_config_dirs or fs_config_dirs_odm_dlkm to PRODUCT_PACKAGES in
-# the device make file to enable
+# Add fs_config_dirs or fs_config_dirs_nonsystem to PRODUCT_PACKAGES
+# in the device make file to enable
include $(CLEAR_VARS)
-LOCAL_MODULE := fs_config_dirs_odm_dlkm
+LOCAL_MODULE := _fs_config_dirs_odm_dlkm
LOCAL_MODULE_CLASS := ETC
LOCAL_INSTALLED_MODULE_STEM := fs_config_dirs
LOCAL_MODULE_PATH := $(TARGET_OUT_ODM_DLKM)/etc
@@ -362,11 +403,11 @@
##################################
# Generate the odm_dlkm/etc/fs_config_files binary file for the target
-# Add fs_config_files of fs_config_files_odm_dlkm to PRODUCT_PACKAGES in
-# the device make file to enable
+# Add fs_config_files or fs_config_files_nonsystem to PRODUCT_PACKAGES
+# in the device make file to enable
include $(CLEAR_VARS)
-LOCAL_MODULE := fs_config_files_odm_dlkm
+LOCAL_MODULE := _fs_config_files_odm_dlkm
LOCAL_MODULE_CLASS := ETC
LOCAL_INSTALLED_MODULE_STEM := fs_config_files
LOCAL_MODULE_PATH := $(TARGET_OUT_ODM_DLKM)/etc
@@ -386,14 +427,14 @@
endif
-ifneq ($(filter product,$(fs_config_generate_extra_partition_list)),)
+ifneq ($(BOARD_USES_PRODUCTIMAGE)$(BOARD_PRODUCTIMAGE_FILE_SYSTEM_TYPE),)
##################################
# Generate the product/etc/fs_config_dirs binary file for the target
# Add fs_config_dirs or fs_config_dirs_product to PRODUCT_PACKAGES in
# the device make file to enable
include $(CLEAR_VARS)
-LOCAL_MODULE := fs_config_dirs_product
+LOCAL_MODULE := _fs_config_dirs_product
LOCAL_MODULE_CLASS := ETC
LOCAL_INSTALLED_MODULE_STEM := fs_config_dirs
LOCAL_MODULE_PATH := $(TARGET_OUT_PRODUCT)/etc
@@ -413,11 +454,11 @@
##################################
# Generate the product/etc/fs_config_files binary file for the target
-# Add fs_config_files of fs_config_files_product to PRODUCT_PACKAGES in
+# Add fs_config_files or fs_config_files_product to PRODUCT_PACKAGES in
# the device make file to enable
include $(CLEAR_VARS)
-LOCAL_MODULE := fs_config_files_product
+LOCAL_MODULE := _fs_config_files_product
LOCAL_MODULE_CLASS := ETC
LOCAL_INSTALLED_MODULE_STEM := fs_config_files
LOCAL_MODULE_PATH := $(TARGET_OUT_PRODUCT)/etc
@@ -436,14 +477,14 @@
$(or $(PRIVATE_TARGET_FS_CONFIG_GEN),/dev/null)
endif
-ifneq ($(filter system_ext,$(fs_config_generate_extra_partition_list)),)
+ifneq ($(BOARD_USES_SYSTEM_EXTIMAGE)$(BOARD_SYSTEM_EXTIMAGE_FILE_SYSTEM_TYPE),)
##################################
# Generate the system_ext/etc/fs_config_dirs binary file for the target
# Add fs_config_dirs or fs_config_dirs_system_ext to PRODUCT_PACKAGES in
# the device make file to enable
include $(CLEAR_VARS)
-LOCAL_MODULE := fs_config_dirs_system_ext
+LOCAL_MODULE := _fs_config_dirs_system_ext
LOCAL_MODULE_CLASS := ETC
LOCAL_INSTALLED_MODULE_STEM := fs_config_dirs
LOCAL_MODULE_PATH := $(TARGET_OUT_SYSTEM_EXT)/etc
@@ -463,11 +504,11 @@
##################################
# Generate the system_ext/etc/fs_config_files binary file for the target
-# Add fs_config_files of fs_config_files_system_ext to PRODUCT_PACKAGES in
+# Add fs_config_files or fs_config_files_system_ext to PRODUCT_PACKAGES in
# the device make file to enable
include $(CLEAR_VARS)
-LOCAL_MODULE := fs_config_files_system_ext
+LOCAL_MODULE := _fs_config_files_system_ext
LOCAL_MODULE_CLASS := ETC
LOCAL_INSTALLED_MODULE_STEM := fs_config_files
LOCAL_MODULE_PATH := $(TARGET_OUT_SYSTEM_EXT)/etc
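The comments in this file repeatedly state that the phony fs_config packages are enabled through PRODUCT_PACKAGES; a minimal hypothetical device makefile fragment doing so:

```
# Install the generated fs_config files and dirs for /system, /system_ext,
# /product and all other enabled partitions.
PRODUCT_PACKAGES += \
    fs_config_dirs \
    fs_config_files
```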
diff --git a/tools/fs_config/OWNERS b/tools/fs_config/OWNERS
index 5599644..7529cb9 100644
--- a/tools/fs_config/OWNERS
+++ b/tools/fs_config/OWNERS
@@ -1,2 +1 @@
-tomcherry@google.com
-salyzyn@google.com
+include platform/system/core:/janitors/OWNERS
diff --git a/tools/fs_config/fs_config.go b/tools/fs_config/fs_config.go
index 16bcefa..4c324fb 100644
--- a/tools/fs_config/fs_config.go
+++ b/tools/fs_config/fs_config.go
@@ -48,10 +48,10 @@
path := android.PathForModuleGen(ctx, "empty")
t.paths = android.Paths{path}
- rule := android.NewRuleBuilder()
+ rule := android.NewRuleBuilder(pctx, ctx)
rule.Command().Text("rm -rf").Output(path)
rule.Command().Text("touch").Output(path)
- rule.Build(pctx, ctx, "fs_config_empty", "create empty file")
+ rule.Build("fs_config_empty", "create empty file")
}
}
diff --git a/tools/fs_config/fs_config_generator.py b/tools/fs_config/fs_config_generator.py
index 940a398..098fde6 100755
--- a/tools/fs_config/fs_config_generator.py
+++ b/tools/fs_config/fs_config_generator.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python2
"""Generates config files for Android file system properties.
This script is used for generating configuration files for configuring
diff --git a/tools/product_config/Android.bp b/tools/product_config/Android.bp
new file mode 100644
index 0000000..287ed5a
--- /dev/null
+++ b/tools/product_config/Android.bp
@@ -0,0 +1,23 @@
+java_defaults {
+ name: "product-config-defaults",
+ srcs: ["src/**/*.java"],
+}
+
+java_binary_host {
+ name: "product-config",
+ defaults: ["product-config-defaults"],
+ manifest: "MANIFEST.MF"
+}
+
+java_test_host {
+ name: "product-config-test",
+ defaults: ["product-config-defaults"],
+ srcs: [
+ "test/**/*.java",
+ ],
+ static_libs: [
+ "junit"
+ ],
+ test_suites: ["general-tests"]
+}
+
diff --git a/tools/product_config/MANIFEST.MF b/tools/product_config/MANIFEST.MF
new file mode 100644
index 0000000..db88df3
--- /dev/null
+++ b/tools/product_config/MANIFEST.MF
@@ -0,0 +1,2 @@
+Manifest-Version: 1.0
+Main-Class: com.android.build.config.Main
diff --git a/tools/product_config/TEST_MAPPING b/tools/product_config/TEST_MAPPING
new file mode 100644
index 0000000..d3568f1
--- /dev/null
+++ b/tools/product_config/TEST_MAPPING
@@ -0,0 +1,7 @@
+{
+ "presubmit": [
+ {
+ "name": "product_config_test"
+ }
+ ]
+}
diff --git a/tools/product_config/src/com/android/build/config/ErrorReporter.java b/tools/product_config/src/com/android/build/config/ErrorReporter.java
new file mode 100644
index 0000000..f382b4e
--- /dev/null
+++ b/tools/product_config/src/com/android/build/config/ErrorReporter.java
@@ -0,0 +1,263 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.build.config;
+
+import java.lang.reflect.Field;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Base class for reporting errors.
+ */
+public class ErrorReporter {
+ /**
+ * List of Entries that have occurred.
+ */
+ // Also used as the lock for this object.
+ private final ArrayList<Entry> mEntries = new ArrayList<Entry>();
+
+ /**
+ * The categories that are for this Errors object.
+ */
+ private Map<Integer, Category> mCategories;
+
+ /**
+ * Whether there has been a warning or an error yet.
+ */
+ private boolean mHadWarningOrError;
+
+ /**
+ * Whether there has been an error yet.
+ */
+ private boolean mHadError;
+
+ /**
+ * Whether errors are errors, warnings or hidden.
+ */
+ public static enum Level {
+ HIDDEN("hidden"),
+ WARNING("warning"),
+ ERROR("error");
+
+ private final String mLabel;
+
+ Level(String label) {
+ mLabel = label;
+ }
+
+ String getLabel() {
+ return mLabel;
+ }
+ }
+
+ /**
+ * The available error codes.
+ */
+ public class Category {
+ private final int mCode;
+ private boolean mIsLevelSettable;
+ private Level mLevel;
+ private String mHelp;
+
+ /**
+ * Construct a Category object.
+ */
+ public Category(int code, boolean isLevelSettable, Level level, String help) {
+ if (!isLevelSettable && level != Level.ERROR) {
+ throw new RuntimeException("Don't have WARNING or HIDDEN without isLevelSettable");
+ }
+ mCode = code;
+ mIsLevelSettable = isLevelSettable;
+ mLevel = level;
+ mHelp = help;
+ }
+
+ /**
+ * Get the numeric code for the Category, which can be used to set the level.
+ */
+ public int getCode() {
+ return mCode;
+ }
+
+ /**
+ * Get whether the level of this Category can be changed.
+ */
+ public boolean isLevelSettable() {
+ return mIsLevelSettable;
+ }
+
+ /**
+ * Set the level of this category.
+ */
+ public void setLevel(Level level) {
+ if (!mIsLevelSettable) {
+ throw new RuntimeException("Can't set level for error " + mCode);
+ }
+ mLevel = level;
+ }
+
+ /**
+ * Return the level, including any overrides.
+ */
+ public Level getLevel() {
+ return mLevel;
+ }
+
+ /**
+ * Return the category's help text.
+ */
+ public String getHelp() {
+ return mHelp;
+ }
+ }
+
+ /**
+ * An instance of an error happening.
+ */
+ public class Entry {
+ private final Category mCategory;
+ private final Position mPosition;
+ private final String mMessage;
+
+ Entry(Category category, Position position, String message) {
+ mCategory = category;
+ mPosition = position;
+ mMessage = message;
+ }
+
+ public Category getCategory() {
+ return mCategory;
+ }
+
+ public Position getPosition() {
+ return mPosition;
+ }
+
+ public String getMessage() {
+ return mMessage;
+ }
+ }
+
+ private void initLocked() {
+ if (mCategories == null) {
+ HashMap<Integer, Category> categories = new HashMap<Integer, Category>();
+ for (Field field: getClass().getFields()) {
+ if (Category.class.isAssignableFrom(field.getType())) {
+ Category category = null;
+ try {
+ category = (Category)field.get(this);
+ } catch (IllegalAccessException ex) {
+ // Wrap and rethrow, this is always on this class, so it's
+ // our programming error if this happens.
+ throw new RuntimeException("Categories on Errors should be public.", ex);
+ }
+ Category prev = categories.put(category.getCode(), category);
+ if (prev != null) {
+ throw new RuntimeException("Duplicate categories with code "
+ + category.getCode());
+ }
+ }
+ }
+ mCategories = Collections.unmodifiableMap(categories);
+ }
+ }
+
+ /**
+ * Returns a map of the category codes to the categories.
+ */
+ public Map<Integer, Category> getCategories() {
+ synchronized (mEntries) {
+ initLocked();
+ return mCategories;
+ }
+ }
+
+ /**
+ * Add an error with no source position.
+ */
+ public void add(Category category, String message) {
+ add(category, new Position(), message);
+ }
+
+ /**
+ * Add an error.
+ */
+ public void add(Category category, Position pos, String message) {
+ synchronized (mEntries) {
+ initLocked();
+ if (mCategories.get(category.getCode()) != category) {
+ throw new RuntimeException("Errors.Category used from the wrong Errors object.");
+ }
+ mEntries.add(new Entry(category, pos, message));
+ final Level level = category.getLevel();
+ if (level == Level.WARNING || level == Level.ERROR) {
+ mHadWarningOrError = true;
+ }
+ if (level == Level.ERROR) {
+ mHadError = true;
+ }
+ }
+ }
+
+ /**
+ * Returns whether there has been a warning or an error yet.
+ */
+ public boolean hadWarningOrError() {
+ synchronized (mEntries) {
+ return mHadWarningOrError;
+ }
+ }
+
+ /**
+ * Returns whether there has been an error yet.
+ */
+ public boolean hadError() {
+ synchronized (mEntries) {
+ return mHadError;
+ }
+ }
+
+ /**
+ * Returns a list of all entries that were added.
+ */
+ public List<Entry> getEntries() {
+ synchronized (mEntries) {
+ return new ArrayList<Entry>(mEntries);
+ }
+ }
+
+ /**
+ * Prints the errors.
+ */
+ public void printErrors(PrintStream out) {
+ synchronized (mEntries) {
+ for (Entry entry: mEntries) {
+ final Category category = entry.getCategory();
+ final Level level = category.getLevel();
+ if (level == Level.HIDDEN) {
+ continue;
+ }
+ out.println(entry.getPosition() + "[" + level.getLabel() + " "
+ + category.getCode() + "] " + entry.getMessage());
+ }
+ }
+ }
+}
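
A minimal, hypothetical sketch of how a pass could report a problem through the
ErrorReporter class added above. The DemoErrors class, its category code 100, and
the file/line/message values are invented for illustration; ErrorReporter,
Category, Level, Position, add() and printErrors() are the APIs introduced in this
change.

    package com.android.build.config;

    public class DemoErrors extends ErrorReporter {
        // A warning whose level can still be changed from the command line
        // (isLevelSettable == true).
        public final Category WARNING_DEMO = new Category(100, true, Level.WARNING,
                "Demonstration warning.");

        public static void main(String[] args) {
            DemoErrors errors = new DemoErrors();
            errors.add(errors.WARNING_DEMO,
                    new Position("device/acme/product.mk", 42),
                    "Suspicious assignment.");
            // Prints: device/acme/product.mk:42: [warning 100] Suspicious assignment.
            errors.printErrors(System.err);
        }
    }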
diff --git a/tools/product_config/src/com/android/build/config/Errors.java b/tools/product_config/src/com/android/build/config/Errors.java
new file mode 100644
index 0000000..63792c8
--- /dev/null
+++ b/tools/product_config/src/com/android/build/config/Errors.java
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.build.config;
+
+import java.lang.reflect.Field;
+import java.io.PrintStream;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Error constants and error reporting.
+ * <p>
+ * <b>Naming Convention:</b>
+ * <ul>
+ * <li>ERROR_ for Categories with isLevelSettable false and Level.ERROR
+ * <li>WARNING_ for Categories with isLevelSettable true and default WARNING or HIDDEN
+ * <li>Don't have isLevelSettable false with a level other than ERROR. (The constructor asserts this.)
+ * </ul>
+ */
+public class Errors extends ErrorReporter {
+
+ public final Category ERROR_COMMAND_LINE = new Category(1, false, Level.ERROR,
+ "Error on the command line.");
+
+ public final Category WARNING_UNKNOWN_COMMAND_LINE_ERROR = new Category(2, true, Level.HIDDEN,
+ "Passing unknown errors on the command line. Hidden by default for\n"
+ + "forward compatibility.");
+}
diff --git a/tools/product_config/src/com/android/build/config/Main.java b/tools/product_config/src/com/android/build/config/Main.java
new file mode 100644
index 0000000..7669742
--- /dev/null
+++ b/tools/product_config/src/com/android/build/config/Main.java
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.build.config;
+
+public class Main {
+ private final Errors mErrors;
+ private final Options mOptions;
+
+ public Main(Errors errors, Options options) {
+ mErrors = errors;
+ mOptions = options;
+ }
+
+ void run() {
+ System.out.println("Hello World");
+
+ // TODO: Check the build environment to make sure we're running in a real
+ // build environment, e.g. actually inside a source tree, with TARGET_PRODUCT
+ // and TARGET_BUILD_VARIANT defined, etc.
+
+ // TODO: Run kati and extract the variables and convert all that into starlark files.
+
+ // TODO: Run starlark with all the generated ones and the hand written ones.
+
+ // TODO: Get the variables that were defined in starlark and use that to write
+ // out the make, soong and bazel input files.
+ }
+
+ public static void main(String[] args) {
+ Errors errors = new Errors();
+
+ Options options = Options.parse(errors, args);
+ if (errors.hadError()) {
+ Options.printHelp(System.err);
+ System.err.println();
+ errors.printErrors(System.err);
+ System.exit(1);
+ }
+
+ switch (options.getAction()) {
+ case DEFAULT:
+ (new Main(errors, options)).run();
+ errors.printErrors(System.err);
+ return;
+ case HELP:
+ Options.printHelp(System.out);
+ return;
+ }
+ }
+}
diff --git a/tools/product_config/src/com/android/build/config/Options.java b/tools/product_config/src/com/android/build/config/Options.java
new file mode 100644
index 0000000..494b947
--- /dev/null
+++ b/tools/product_config/src/com/android/build/config/Options.java
@@ -0,0 +1,158 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.build.config;
+
+import java.io.PrintStream;
+import java.util.TreeMap;
+
+public class Options {
+ public enum Action {
+ DEFAULT,
+ HELP
+ }
+
+ private Action mAction = Action.DEFAULT;
+
+ public Action getAction() {
+ return mAction;
+ }
+
+ public static void printHelp(PrintStream out) {
+ out.println("usage: product_config");
+ out.println();
+ out.println("OPTIONS");
+ out.println(" --hide ERROR_ID Suppress this error.");
+ out.println(" --error ERROR_ID Make this ERROR_ID a fatal error.");
+ out.println(" --help -h This message.");
+ out.println(" --warning ERROR_ID Make this ERROR_ID a warning.");
+ out.println();
+ out.println("ERRORS");
+ out.println(" The following are the errors that can be controlled on the");
+ out.println(" commandline with the --hide --warning --error flags.");
+
+ TreeMap<Integer, Errors.Category> sorted = new TreeMap<Integer, Errors.Category>((new Errors()).getCategories());
+
+ for (final Errors.Category category: sorted.values()) {
+ if (category.isLevelSettable()) {
+ out.println(String.format(" %-3d %s", category.getCode(),
+ category.getHelp().replace("\n", "\n ")));
+ }
+ }
+ }
+
+ static class Parser {
+ private class ParseException extends Exception {
+ public ParseException(String message) {
+ super(message);
+ }
+ }
+
+ private Errors mErrors;
+ private String[] mArgs;
+ private Options mResult = new Options();
+ private int mIndex;
+
+ public Parser(Errors errors, String[] args) {
+ mErrors = errors;
+ mArgs = args;
+ }
+
+ public Options parse() {
+ try {
+ while (mIndex < mArgs.length) {
+ final String arg = mArgs[mIndex];
+
+ if ("--hide".equals(arg)) {
+ handleErrorCode(arg, Errors.Level.HIDDEN);
+ } else if ("--error".equals(arg)) {
+ handleErrorCode(arg, Errors.Level.ERROR);
+ } else if ("--help".equals(arg) || "-h".equals(arg)) {
+ // Help overrides any other action unless an error was already
+ // recorded, and parsing stops here either way.
+ if (!mErrors.hadError()) {
+ mResult.mAction = Action.HELP;
+ }
+ return mResult;
+ } else if ("--warning".equals(arg)) {
+ handleErrorCode(arg, Errors.Level.WARNING);
+ } else {
+ throw new ParseException("Unknown command line argument: " + arg);
+ }
+
+ mIndex++;
+ }
+ } catch (ParseException ex) {
+ mErrors.add(mErrors.ERROR_COMMAND_LINE, ex.getMessage());
+ }
+
+ return mResult;
+ }
+
+ private void addWarning(Errors.Category category, String message) {
+ mErrors.add(category, message);
+ }
+
+ private String getNextNonFlagArg() {
+ if (mIndex == mArgs.length - 1) {
+ return null;
+ }
+ if (mArgs[mIndex + 1].startsWith("-")) {
+ return null;
+ }
+ mIndex++;
+ return mArgs[mIndex];
+ }
+
+ private int requireNextNumberArg(String arg) throws ParseException {
+ final String val = getNextNonFlagArg();
+ if (val == null) {
+ throw new ParseException(arg + " requires a numeric argument.");
+ }
+ try {
+ return Integer.parseInt(val);
+ } catch (NumberFormatException ex) {
+ throw new ParseException(arg + " requires a numeric argument. found: " + val);
+ }
+ }
+
+ private void handleErrorCode(String arg, Errors.Level level) throws ParseException {
+ final int code = requireNextNumberArg(arg);
+ final Errors.Category category = mErrors.getCategories().get(code);
+ if (category == null) {
+ mErrors.add(mErrors.WARNING_UNKNOWN_COMMAND_LINE_ERROR,
+ "Unknown error code: " + code);
+ return;
+ }
+ if (!category.isLevelSettable()) {
+ mErrors.add(mErrors.ERROR_COMMAND_LINE, "Can't set level for error " + code);
+ return;
+ }
+ category.setLevel(level);
+ }
+ }
+
+ /**
+ * Parse the arguments and return an options object.
+ * <p>
+ * Updates errors with the hidden / warning / error levels.
+ * <p>
+ * Adds errors encountered to Errors object.
+ */
+ public static Options parse(Errors errors, String[] args) {
+ return (new Parser(errors, args)).parse();
+ }
+}
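
A small hypothetical sketch of the flag handling above: --hide, --warning and
--error each take a category code and adjust that category's level on the Errors
object before any real work runs. Only the OptionsDemo class name is invented;
the rest is the API from this change.

    package com.android.build.config;

    public class OptionsDemo {
        public static void main(String[] args) {
            Errors errors = new Errors();
            // "--error 2" promotes category 2 (WARNING_UNKNOWN_COMMAND_LINE_ERROR,
            // hidden by default) to a fatal error.
            Options options = Options.parse(errors, new String[] {"--error", "2"});
            System.out.println(options.getAction());                                  // DEFAULT
            System.out.println(errors.WARNING_UNKNOWN_COMMAND_LINE_ERROR.getLevel()); // ERROR
        }
    }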
diff --git a/tools/product_config/src/com/android/build/config/Position.java b/tools/product_config/src/com/android/build/config/Position.java
new file mode 100644
index 0000000..7953942
--- /dev/null
+++ b/tools/product_config/src/com/android/build/config/Position.java
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.build.config;
+
+/**
+ * Position in a source file.
+ */
+public class Position implements Comparable<Position> {
+ /**
+ * Sentinel line number for when there is no known line number.
+ */
+ public static final int NO_LINE = -1;
+
+ private final String mFile;
+ private final int mLine;
+
+ public Position() {
+ mFile = null;
+ mLine = NO_LINE;
+ }
+
+ public Position(String file) {
+ mFile = file;
+ mLine = NO_LINE;
+ }
+
+ public Position(String file, int line) {
+ if (line < NO_LINE) {
+ throw new IllegalArgumentException("Negative line number. file=" + file
+ + " line=" + line);
+ }
+ mFile = file;
+ mLine = line;
+ }
+
+ public int compareTo(Position that) {
+ int result = mFile.compareTo(that.mFile);
+ if (result != 0) {
+ return result;
+ }
+ return mLine - that.mLine;
+ }
+
+ public String getFile() {
+ return mFile;
+ }
+
+ public int getLine() {
+ return mLine;
+ }
+
+ @Override
+ public String toString() {
+ if (mFile == null && mLine == NO_LINE) {
+ return "";
+ } else if (mFile == null && mLine != NO_LINE) {
+ return "<unknown>:" + mLine + ": ";
+ } else if (mFile != null && mLine == NO_LINE) {
+ return mFile + ": ";
+ } else { // if (mFile != null && mLine != NO_LINE)
+ return mFile + ':' + mLine + ": ";
+ }
+ }
+}
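
For reference, a hypothetical snippet showing the prefix Position.toString()
produces for each file/line combination; the PositionDemo class, file name and
line numbers are made up.

    package com.android.build.config;

    public class PositionDemo {
        public static void main(String[] args) {
            System.out.println("[" + new Position() + "]");                        // []
            System.out.println("[" + new Position("AndroidProducts.mk") + "]");    // [AndroidProducts.mk: ]
            System.out.println("[" + new Position("AndroidProducts.mk", 7) + "]"); // [AndroidProducts.mk:7: ]
            System.out.println("[" + new Position(null, 7) + "]");                 // [<unknown>:7: ]
        }
    }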
diff --git a/tools/product_config/test/com/android/build/config/ErrorReporterTest.java b/tools/product_config/test/com/android/build/config/ErrorReporterTest.java
new file mode 100644
index 0000000..2cde476
--- /dev/null
+++ b/tools/product_config/test/com/android/build/config/ErrorReporterTest.java
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.build.config;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.HashSet;
+import java.util.List;
+
+public class ErrorReporterTest {
+ /**
+ * Test that errors can be recorded and retrieved.
+ */
+ @Test
+ public void testAdding() {
+ TestErrors errors = new TestErrors();
+
+ errors.add(errors.ERROR, new Position("a", 12), "Errrororrrr");
+
+ Assert.assertTrue(errors.hadWarningOrError());
+ Assert.assertTrue(errors.hadError());
+
+ List<TestErrors.Entry> entries = errors.getEntries();
+ Assert.assertEquals(1, entries.size());
+
+ TestErrors.Entry entry = entries.get(0);
+ Assert.assertEquals(errors.ERROR, entry.getCategory());
+ Assert.assertEquals("a", entry.getPosition().getFile());
+ Assert.assertEquals(12, entry.getPosition().getLine());
+ Assert.assertEquals("Errrororrrr", entry.getMessage());
+
+ Assert.assertNotEquals("", errors.getErrorMessages());
+ }
+
+ /**
+ * Test that not adding an error doesn't record errors.
+ */
+ @Test
+ public void testNoError() {
+ TestErrors errors = new TestErrors();
+
+ Assert.assertFalse(errors.hadWarningOrError());
+ Assert.assertFalse(errors.hadError());
+ Assert.assertEquals("", errors.getErrorMessages());
+ }
+
+ /**
+ * Test that not adding a warning doesn't record errors.
+ */
+ @Test
+ public void testWarning() {
+ TestErrors errors = new TestErrors();
+
+ errors.add(errors.WARNING, "Waaaaarninggggg");
+
+ Assert.assertTrue(errors.hadWarningOrError());
+ Assert.assertFalse(errors.hadError());
+ Assert.assertNotEquals("", errors.getErrorMessages());
+ }
+
+ /**
+ * Test that hidden warnings don't report.
+ */
+ @Test
+ public void testHidden() {
+ TestErrors errors = new TestErrors();
+
+ errors.add(errors.HIDDEN, "Hidddeennn");
+
+ Assert.assertFalse(errors.hadWarningOrError());
+ Assert.assertFalse(errors.hadError());
+ Assert.assertEquals("", errors.getErrorMessages());
+ }
+
+ /**
+ * Test changing an error level.
+ */
+ @Test
+ public void testSetLevel() {
+ TestErrors errors = new TestErrors();
+ Assert.assertEquals(TestErrors.Level.ERROR, errors.ERROR.getLevel());
+
+ errors.ERROR.setLevel(TestErrors.Level.WARNING);
+
+ Assert.assertEquals(TestErrors.Level.WARNING, errors.ERROR.getLevel());
+ }
+
+ /**
+ * Test that changing a fixed error fails.
+ */
+ @Test
+ public void testSetLevelFails() {
+ TestErrors errors = new TestErrors();
+ Assert.assertEquals(TestErrors.Level.ERROR, errors.ERROR_FIXED.getLevel());
+
+ boolean exceptionThrown = false;
+ try {
+ errors.ERROR_FIXED.setLevel(TestErrors.Level.WARNING);
+ } catch (RuntimeException ex) {
+ exceptionThrown = true;
+ }
+
+ Assert.assertTrue(exceptionThrown);
+ Assert.assertEquals(TestErrors.Level.ERROR, errors.ERROR_FIXED.getLevel());
+ }
+}
diff --git a/tools/product_config/test/com/android/build/config/OptionsTest.java b/tools/product_config/test/com/android/build/config/OptionsTest.java
new file mode 100644
index 0000000..2c36322
--- /dev/null
+++ b/tools/product_config/test/com/android/build/config/OptionsTest.java
@@ -0,0 +1,114 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.build.config;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class OptionsTest {
+ @Test
+ public void testErrorMissingLast() {
+ final Errors errors = new Errors();
+
+ final Options options = Options.parse(errors, new String[] {
+ "--error"
+ });
+
+ Assert.assertNotEquals("", TestErrors.getErrorMessages(errors));
+ Assert.assertEquals(Options.Action.DEFAULT, options.getAction());
+ TestErrors.assertHasEntry(errors.ERROR_COMMAND_LINE, errors);
+ }
+
+ @Test
+ public void testErrorMissingNotLast() {
+ final Errors errors = new Errors();
+
+ final Options options = Options.parse(errors, new String[] {
+ "--error", "--warning", "2"
+ });
+
+ Assert.assertNotEquals("", TestErrors.getErrorMessages(errors));
+ Assert.assertEquals(Options.Action.DEFAULT, options.getAction());
+ TestErrors.assertHasEntry(errors.ERROR_COMMAND_LINE, errors);
+ }
+
+ @Test
+ public void testErrorNotNumeric() {
+ final Errors errors = new Errors();
+
+ final Options options = Options.parse(errors, new String[] {
+ "--error", "notgood"
+ });
+
+ Assert.assertNotEquals("", TestErrors.getErrorMessages(errors));
+ Assert.assertEquals(Options.Action.DEFAULT, options.getAction());
+ TestErrors.assertHasEntry(errors.ERROR_COMMAND_LINE, errors);
+ }
+
+ @Test
+ public void testErrorInvalidError() {
+ final Errors errors = new Errors();
+
+ final Options options = Options.parse(errors, new String[] {
+ "--error", "50000"
+ });
+
+ Assert.assertEquals("", TestErrors.getErrorMessages(errors));
+ Assert.assertEquals(Options.Action.DEFAULT, options.getAction());
+ TestErrors.assertHasEntry(errors.WARNING_UNKNOWN_COMMAND_LINE_ERROR, errors);
+ }
+
+ @Test
+ public void testErrorOne() {
+ final Errors errors = new Errors();
+
+ final Options options = Options.parse(errors, new String[] {
+ "--error", "2"
+ });
+
+ Assert.assertEquals("", TestErrors.getErrorMessages(errors));
+ Assert.assertEquals(Options.Action.DEFAULT, options.getAction());
+ Assert.assertFalse(errors.hadWarningOrError());
+ }
+
+ @Test
+ public void testWarningOne() {
+ final Errors errors = new Errors();
+
+ final Options options = Options.parse(errors, new String[] {
+ "--warning", "2"
+ });
+
+ Assert.assertEquals("", TestErrors.getErrorMessages(errors));
+ Assert.assertEquals(Options.Action.DEFAULT, options.getAction());
+ Assert.assertFalse(errors.hadWarningOrError());
+ }
+
+ @Test
+ public void testHideOne() {
+ final Errors errors = new Errors();
+
+ final Options options = Options.parse(errors, new String[] {
+ "--hide", "2"
+ });
+
+ Assert.assertEquals("", TestErrors.getErrorMessages(errors));
+ Assert.assertEquals(Options.Action.DEFAULT, options.getAction());
+ Assert.assertFalse(errors.hadWarningOrError());
+ }
+}
+
diff --git a/tools/product_config/test/com/android/build/config/TestErrors.java b/tools/product_config/test/com/android/build/config/TestErrors.java
new file mode 100644
index 0000000..dde88b0
--- /dev/null
+++ b/tools/product_config/test/com/android/build/config/TestErrors.java
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package com.android.build.config;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
+import java.io.UnsupportedEncodingException;
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Errors for testing.
+ */
+public class TestErrors extends ErrorReporter {
+
+ public static final int ERROR_CODE = 1;
+
+ public final Category ERROR = new Category(ERROR_CODE, true, Level.ERROR,
+ "An error.");
+
+ public static final int WARNING_CODE = 2;
+
+ public final Category WARNING = new Category(WARNING_CODE, true, Level.WARNING,
+ "A warning.");
+
+ public static final int HIDDEN_CODE = 3;
+
+ public final Category HIDDEN = new Category(HIDDEN_CODE, true, Level.HIDDEN,
+ "A hidden warning.");
+
+ public static final int ERROR_FIXED_CODE = 4;
+
+ public final Category ERROR_FIXED = new Category(ERROR_FIXED_CODE, false, Level.ERROR,
+ "An error that can't have its level changed.");
+
+ public void assertHasEntry(Errors.Category category) {
+ assertHasEntry(category, this);
+ }
+
+ public String getErrorMessages() {
+ return getErrorMessages(this);
+ }
+
+ public static void assertHasEntry(Errors.Category category, ErrorReporter errors) {
+ StringBuilder found = new StringBuilder();
+ for (Errors.Entry entry: errors.getEntries()) {
+ if (entry.getCategory() == category) {
+ return;
+ }
+ found.append(' ');
+ found.append(entry.getCategory().getCode());
+ }
+ throw new AssertionError("No error category " + category.getCode() + " found."
+ + " Found category codes were:" + found);
+ }
+
+ public static String getErrorMessages(ErrorReporter errors) {
+ final ByteArrayOutputStream stream = new ByteArrayOutputStream();
+ try {
+ errors.printErrors(new PrintStream(stream, true, StandardCharsets.UTF_8.name()));
+ } catch (UnsupportedEncodingException ex) {
+ // utf-8 is always supported
+ }
+ return new String(stream.toByteArray(), StandardCharsets.UTF_8);
+ }
+}
+
diff --git a/tools/releasetools/Android.bp b/tools/releasetools/Android.bp
index 45e0514..473d854 100644
--- a/tools/releasetools/Android.bp
+++ b/tools/releasetools/Android.bp
@@ -28,6 +28,8 @@
"add_img_to_target_files.py",
],
libs: [
+ "ota_metadata_proto",
+ "releasetools_apex_utils",
"releasetools_build_image",
"releasetools_build_super_image",
"releasetools_common",
@@ -49,6 +51,7 @@
required: [
"blk_alloc_to_base_fs",
"e2fsck",
+ "mkerofsimage.sh",
"mkuserimg_mke2fs",
"simg2img",
"tune2fs",
@@ -121,10 +124,15 @@
"releasetools_check_target_files_vintf",
"releasetools_common",
"releasetools_verity_utils",
+ "apex_manifest",
],
required: [
"brillo_update_payload",
"checkvintf",
+ "lz4",
+ "toybox",
+ "unpack_bootimg",
+ "deapexer",
],
target: {
darwin: {
@@ -165,6 +173,8 @@
"apex_utils.py",
],
libs: [
+ "apex_manifest",
+ "ota_metadata_proto",
"releasetools_common",
],
}
@@ -215,6 +225,8 @@
"lz4",
"mkbootfs",
"signapk",
+ "toybox",
+ "unpack_bootimg",
],
}
@@ -275,6 +287,8 @@
"lz4",
"mkbootfs",
"signapk",
+ "toybox",
+ "unpack_bootimg",
],
}
@@ -365,6 +379,32 @@
],
}
+python_defaults {
+ name: "releasetools_find_shareduid_violation_defaults",
+ srcs: [
+ "find_shareduid_violation.py",
+ ],
+ libs: [
+ "releasetools_common",
+ ],
+}
+
+python_binary_host {
+ name: "find_shareduid_violation",
+ defaults: [
+ "releasetools_binary_defaults",
+ "releasetools_find_shareduid_violation_defaults",
+ ],
+}
+
+python_library_host {
+ name: "releasetools_find_shareduid_violation",
+ defaults: [
+ "releasetools_find_shareduid_violation_defaults",
+ "releasetools_library_defaults",
+ ],
+}
+
python_binary_host {
name: "make_recovery_patch",
defaults: ["releasetools_binary_defaults"],
@@ -399,11 +439,13 @@
"releasetools_build_super_image",
"releasetools_check_target_files_vintf",
"releasetools_common",
+ "releasetools_find_shareduid_violation",
"releasetools_img_from_target_files",
"releasetools_ota_from_target_files",
],
required: [
"checkvintf",
+ "host_init_verifier",
],
target: {
darwin: {
@@ -501,12 +543,15 @@
"releasetools_build_super_image",
"releasetools_check_target_files_vintf",
"releasetools_common",
+ "releasetools_find_shareduid_violation",
"releasetools_img_from_target_files",
"releasetools_ota_from_target_files",
"releasetools_verity_utils",
],
data: [
"testdata/**/*",
+ ":com.android.apex.compressed.v1",
+ ":com.android.apex.compressed.v1_original",
],
target: {
darwin: {
@@ -514,6 +559,9 @@
enabled: false,
},
},
+ required: [
+ "deapexer",
+ ],
}
python_test_host {
@@ -533,7 +581,9 @@
embedded_launcher: false,
},
},
- test_suites: ["general-tests"],
+ test_options: {
+ unit_test: true,
+ },
}
python_test_host {
@@ -550,5 +600,7 @@
embedded_launcher: false,
},
},
- test_suites: ["general-tests"],
+ test_options: {
+ unit_test: true,
+ },
}
diff --git a/tools/releasetools/TEST_MAPPING b/tools/releasetools/TEST_MAPPING
deleted file mode 100644
index 0af0f04..0000000
--- a/tools/releasetools/TEST_MAPPING
+++ /dev/null
@@ -1,12 +0,0 @@
-{
- "presubmit": [
- {
- "name": "releasetools_test",
- "host": true
- },
- {
- "name": "releasetools_py3_test",
- "host": true
- }
- ]
-}
diff --git a/tools/releasetools/add_img_to_target_files.py b/tools/releasetools/add_img_to_target_files.py
index eb041ec..7839b47 100644
--- a/tools/releasetools/add_img_to_target_files.py
+++ b/tools/releasetools/add_img_to_target_files.py
@@ -51,6 +51,7 @@
import os
import shlex
import shutil
+import stat
import sys
import uuid
import zipfile
@@ -61,6 +62,9 @@
import rangelib
import sparse_img
import verity_utils
+import ota_metadata_pb2
+
+from apex_utils import GetApexInfoFromTargetFiles
if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr)
@@ -93,13 +97,13 @@
name: The name of the output file, regardless of the final destination.
"""
- def __init__(self, output_zip, input_dir, prefix, name):
+ def __init__(self, output_zip, input_dir, *args):
# We write the intermediate output file under the given input_dir, even if
# the final destination is a zip archive.
- self.name = os.path.join(input_dir, prefix, name)
+ self.name = os.path.join(input_dir, *args)
self._output_zip = output_zip
if self._output_zip:
- self._zip_name = os.path.join(prefix, name)
+ self._zip_name = os.path.join(*args)
def Write(self):
if self._output_zip:
@@ -178,7 +182,6 @@
block_list = OutputFile(output_zip, OPTIONS.input_tmp, "IMAGES", "system.map")
CreateImage(OPTIONS.input_tmp, OPTIONS.info_dict, "system", img,
block_list=block_list)
-
return img.name
@@ -330,6 +333,9 @@
# AVB-sign the image as needed.
if OPTIONS.info_dict.get("avb_enable") == "true":
+ # Signing requires +w
+ os.chmod(img.name, os.stat(img.name).st_mode | stat.S_IWUSR)
+
avbtool = OPTIONS.info_dict["avb_avbtool"]
part_size = OPTIONS.info_dict["dtbo_size"]
# The AVB hash footer will be replaced if already present.
@@ -750,6 +756,22 @@
os.path.join(OPTIONS.input_tmp, "IMAGES",
"{}.img".format(partition_name))))
+def AddApexInfo(output_zip):
+ apex_infos = GetApexInfoFromTargetFiles(OPTIONS.input_tmp)
+ apex_metadata_proto = ota_metadata_pb2.ApexMetadata()
+ apex_metadata_proto.apex_info.extend(apex_infos)
+ apex_info_bytes = apex_metadata_proto.SerializeToString()
+
+ output_file = os.path.join(OPTIONS.input_tmp, "META", "apex_info.pb")
+ with open(output_file, "wb") as ofile:
+ ofile.write(apex_info_bytes)
+ if output_zip:
+ arc_name = "META/apex_info.pb"
+ if arc_name in output_zip.namelist():
+ OPTIONS.replace_updated_files_list.append(arc_name)
+ else:
+ common.ZipWrite(output_zip, output_file, arc_name)
+
def AddImagesToTargetFiles(filename):
"""Creates and adds images (boot/recovery/system/...) to a target_files.zip.
@@ -910,6 +932,8 @@
banner("system_other")
AddSystemOther(output_zip)
+ AddApexInfo(output_zip)
+
if not OPTIONS.is_signing:
banner("userdata")
AddUserdata(output_zip)
@@ -959,8 +983,9 @@
if item not in vbmeta_vendor.split()]
vbmeta_partitions.append("vbmeta_vendor")
- banner("vbmeta")
- AddVBMeta(output_zip, partitions, "vbmeta", vbmeta_partitions)
+ if OPTIONS.info_dict.get("avb_building_vbmeta_image") == "true":
+ banner("vbmeta")
+ AddVBMeta(output_zip, partitions, "vbmeta", vbmeta_partitions)
if OPTIONS.info_dict.get("use_dynamic_partitions") == "true":
banner("super_empty")
diff --git a/tools/releasetools/apex_utils.py b/tools/releasetools/apex_utils.py
index 8783f25..5019b84 100644
--- a/tools/releasetools/apex_utils.py
+++ b/tools/releasetools/apex_utils.py
@@ -21,7 +21,12 @@
import shutil
import zipfile
+import apex_manifest
import common
+from common import UnzipTemp, RunAndCheckOutput, MakeTempFile, OPTIONS
+
+import ota_metadata_pb2
+
logger = logging.getLogger(__name__)
@@ -29,6 +34,8 @@
APEX_PAYLOAD_IMAGE = 'apex_payload.img'
+APEX_PUBKEY = 'apex_pubkey'
+
class ApexInfoError(Exception):
"""An Exception raised during Apex Information command."""
@@ -49,7 +56,10 @@
def __init__(self, apex_path, key_passwords, codename_to_api_level_map):
self.apex_path = apex_path
- self.key_passwords = key_passwords
+ if not key_passwords:
+ self.key_passwords = dict()
+ else:
+ self.key_passwords = key_passwords
self.codename_to_api_level_map = codename_to_api_level_map
self.debugfs_path = os.path.join(
OPTIONS.search_path, "bin", "debugfs_static")
@@ -66,7 +76,7 @@
if not os.path.exists(self.debugfs_path):
raise ApexSigningError(
"Couldn't find location of debugfs_static: " +
- "Path {} does not exist. ".format(debugfs_path) +
+ "Path {} does not exist. ".format(self.debugfs_path) +
"Make sure bin/debugfs_static can be found in -p <path>")
list_cmd = ['deapexer', '--debugfs_path',
self.debugfs_path, 'list', self.apex_path]
@@ -102,7 +112,7 @@
if not os.path.exists(self.debugfs_path):
raise ApexSigningError(
"Couldn't find location of debugfs_static: " +
- "Path {} does not exist. ".format(debugfs_path) +
+ "Path {} does not exist. ".format(self.debugfs_path) +
"Make sure bin/debugfs_static can be found in -p <path>")
payload_dir = common.MakeTempDir()
extract_cmd = ['deapexer', '--debugfs_path',
@@ -124,8 +134,9 @@
# signed apk file.
unsigned_apk = common.MakeTempFile()
os.rename(apk_path, unsigned_apk)
- common.SignFile(unsigned_apk, apk_path, key_name, self.key_passwords,
- codename_to_api_level_map=self.codename_to_api_level_map)
+ common.SignFile(
+ unsigned_apk, apk_path, key_name, self.key_passwords.get(key_name),
+ codename_to_api_level_map=self.codename_to_api_level_map)
has_signed_apk = True
return payload_dir, has_signed_apk
@@ -177,7 +188,7 @@
# Add the payload image back to the apex file.
common.ZipDelete(self.apex_path, APEX_PAYLOAD_IMAGE)
- with zipfile.ZipFile(self.apex_path, 'a') as output_apex:
+ with zipfile.ZipFile(self.apex_path, 'a', allowZip64=True) as output_apex:
common.ZipWrite(output_apex, payload_img, APEX_PAYLOAD_IMAGE,
compress_type=zipfile.ZIP_STORED)
return self.apex_path
@@ -297,13 +308,13 @@
return payload_info
-def SignApex(avbtool, apex_data, payload_key, container_key, container_pw,
- apk_keys, codename_to_api_level_map,
- no_hashtree, signing_args=None):
- """Signs the current APEX with the given payload/container keys.
+def SignUncompressedApex(avbtool, apex_file, payload_key, container_key,
+ container_pw, apk_keys, codename_to_api_level_map,
+ no_hashtree, signing_args=None):
+ """Signs the current uncompressed APEX with the given payload/container keys.
Args:
- apex_data: Raw APEX data.
+ apex_file: Uncompressed APEX file.
payload_key: The path to payload signing key (w/ extension).
container_key: The path to container signing key (w/o extension).
container_pw: The matching password of the container_key, or None.
@@ -315,12 +326,6 @@
Returns:
The path to the signed APEX file.
"""
- apex_file = common.MakeTempFile(prefix='apex-', suffix='.apex')
- with open(apex_file, 'wb') as apex_fp:
- apex_fp.write(apex_data)
-
- APEX_PUBKEY = 'apex_pubkey'
-
# 1. Extract the apex payload image and sign the containing apk files. Repack
# the apex file after signing.
apk_signer = ApexApkSigner(apex_file, container_pw,
@@ -351,7 +356,7 @@
common.ZipDelete(apex_file, APEX_PAYLOAD_IMAGE)
if APEX_PUBKEY in zip_items:
common.ZipDelete(apex_file, APEX_PUBKEY)
- apex_zip = zipfile.ZipFile(apex_file, 'a')
+ apex_zip = zipfile.ZipFile(apex_file, 'a', allowZip64=True)
common.ZipWrite(apex_zip, payload_file, arcname=APEX_PAYLOAD_IMAGE)
common.ZipWrite(apex_zip, payload_public_key, arcname=APEX_PUBKEY)
common.ZipClose(apex_zip)
@@ -367,12 +372,213 @@
extra_signapk_args = OPTIONS.extra_signapk_args[:]
extra_signapk_args.extend(['-a', '4096'])
+ password = container_pw.get(container_key) if container_pw else None
common.SignFile(
aligned_apex,
signed_apex,
container_key,
- container_pw,
+ password,
codename_to_api_level_map=codename_to_api_level_map,
extra_signapk_args=extra_signapk_args)
return signed_apex
+
+
+def SignCompressedApex(avbtool, apex_file, payload_key, container_key,
+ container_pw, apk_keys, codename_to_api_level_map,
+ no_hashtree, signing_args=None):
+ """Signs the current compressed APEX with the given payload/container keys.
+
+ Args:
+ apex_file: Path to the compressed APEX file.
+ payload_key: The path to payload signing key (w/ extension).
+ container_key: The path to container signing key (w/o extension).
+ container_pw: The matching password of the container_key, or None.
+ apk_keys: A dict that holds the signing keys for apk files.
+ codename_to_api_level_map: A dict that maps from codename to API level.
+ no_hashtree: Don't include hashtree in the signed APEX.
+ signing_args: Additional args to be passed to the payload signer.
+
+ Returns:
+ The path to the signed APEX file.
+ """
+ debugfs_path = os.path.join(OPTIONS.search_path, 'bin', 'debugfs_static')
+
+ # 1. Decompress original_apex inside compressed apex.
+ original_apex_file = common.MakeTempFile(prefix='original-apex-',
+ suffix='.apex')
+ # Decompression target path should not exist
+ os.remove(original_apex_file)
+ common.RunAndCheckOutput(['deapexer', '--debugfs_path', debugfs_path,
+ 'decompress', '--input', apex_file,
+ '--output', original_apex_file])
+
+ # 2. Sign original_apex
+ signed_original_apex_file = SignUncompressedApex(
+ avbtool,
+ original_apex_file,
+ payload_key,
+ container_key,
+ container_pw,
+ apk_keys,
+ codename_to_api_level_map,
+ no_hashtree,
+ signing_args)
+
+ # 3. Compress signed original apex.
+ compressed_apex_file = common.MakeTempFile(prefix='apex-container-',
+ suffix='.capex')
+ common.RunAndCheckOutput(['apex_compression_tool',
+ 'compress',
+ '--apex_compression_tool_path', os.getenv('PATH'),
+ '--input', signed_original_apex_file,
+ '--output', compressed_apex_file])
+
+ # 4. Align apex
+ aligned_apex = common.MakeTempFile(prefix='apex-container-', suffix='.capex')
+ common.RunAndCheckOutput(['zipalign', '-f', '4096', compressed_apex_file,
+ aligned_apex])
+
+ # 5. Sign the APEX container with container_key.
+ signed_apex = common.MakeTempFile(prefix='apex-container-', suffix='.capex')
+
+ # Specify the 4K alignment when calling SignApk.
+ extra_signapk_args = OPTIONS.extra_signapk_args[:]
+ extra_signapk_args.extend(['-a', '4096'])
+
+ password = container_pw.get(container_key) if container_pw else None
+ common.SignFile(
+ aligned_apex,
+ signed_apex,
+ container_key,
+ password,
+ codename_to_api_level_map=codename_to_api_level_map,
+ extra_signapk_args=extra_signapk_args)
+
+ return signed_apex
+
+
+def SignApex(avbtool, apex_data, payload_key, container_key, container_pw,
+ apk_keys, codename_to_api_level_map,
+ no_hashtree, signing_args=None):
+ """Signs the current APEX with the given payload/container keys.
+
+ Args:
+ apex_data: Raw APEX data.
+ payload_key: The path to payload signing key (w/ extension).
+ container_key: The path to container signing key (w/o extension).
+ container_pw: The matching password of the container_key, or None.
+ apk_keys: A dict that holds the signing keys for apk files.
+ codename_to_api_level_map: A dict that maps from codename to API level.
+ no_hashtree: Don't include hashtree in the signed APEX.
+ signing_args: Additional args to be passed to the payload signer.
+
+ Returns:
+ The path to the signed APEX file.
+ """
+ apex_file = common.MakeTempFile(prefix='apex-container-', suffix='.apex')
+ with open(apex_file, 'wb') as output_fp:
+ output_fp.write(apex_data)
+
+ debugfs_path = os.path.join(OPTIONS.search_path, 'bin', 'debugfs_static')
+ cmd = ['deapexer', '--debugfs_path', debugfs_path,
+ 'info', '--print-type', apex_file]
+
+ try:
+ apex_type = common.RunAndCheckOutput(cmd).strip()
+ if apex_type == 'UNCOMPRESSED':
+ return SignUncompressedApex(
+ avbtool,
+ apex_file,
+ payload_key=payload_key,
+ container_key=container_key,
+ container_pw=None,
+ codename_to_api_level_map=codename_to_api_level_map,
+ no_hashtree=no_hashtree,
+ apk_keys=apk_keys,
+ signing_args=signing_args)
+ elif apex_type == 'COMPRESSED':
+ return SignCompressedApex(
+ avbtool,
+ apex_file,
+ payload_key=payload_key,
+ container_key=container_key,
+ container_pw=None,
+ codename_to_api_level_map=codename_to_api_level_map,
+ no_hashtree=no_hashtree,
+ apk_keys=apk_keys,
+ signing_args=signing_args)
+ else:
+ # TODO(b/172912232): support signing compressed apex
+ raise ApexInfoError('Unsupported apex type {}'.format(apex_type))
+
+ except common.ExternalError as e:
+ raise ApexInfoError(
+ 'Failed to get type for {}:\n{}'.format(apex_file, e))
+
+def GetApexInfoFromTargetFiles(input_file):
+ """
+ Get information about system APEX stored in the input_file zip
+
+ Args:
+ input_file: The filename of the target build target-files zip or directory.
+
+ Returns:
+ A list of ota_metadata_pb2.ApexInfo() populated using the APEXes stored in
+ the /system partition of the input_file.
+ """
+
+ # Extract the apex files so that we can run checks on them
+ if not isinstance(input_file, str):
+ raise RuntimeError("must pass filepath to target-files zip or directory")
+
+ if os.path.isdir(input_file):
+ tmp_dir = input_file
+ else:
+ tmp_dir = UnzipTemp(input_file, ["SYSTEM/apex/*"])
+ target_dir = os.path.join(tmp_dir, "SYSTEM/apex/")
+
+ apex_infos = []
+ for apex_filename in os.listdir(target_dir):
+ apex_filepath = os.path.join(target_dir, apex_filename)
+ if not os.path.isfile(apex_filepath) or \
+ not zipfile.is_zipfile(apex_filepath):
+ logger.info("Skipping %s because it's not a zipfile", apex_filepath)
+ continue
+ apex_info = ota_metadata_pb2.ApexInfo()
+ # Open the apex file to retrieve information
+ manifest = apex_manifest.fromApex(apex_filepath)
+ apex_info.package_name = manifest.name
+ apex_info.version = manifest.version
+ # Check if the file is compressed or not
+ debugfs_path = "debugfs"
+ if OPTIONS.search_path:
+ debugfs_path = os.path.join(OPTIONS.search_path, "bin", "debugfs_static")
+ deapexer = 'deapexer'
+ if OPTIONS.search_path:
+ deapexer_path = os.path.join(OPTIONS.search_path, "deapexer")
+ if os.path.isfile(deapexer_path):
+ deapexer = deapexer_path
+ apex_type = RunAndCheckOutput([
+ deapexer, "--debugfs_path", debugfs_path,
+ 'info', '--print-type', apex_filepath]).rstrip()
+ if apex_type == 'COMPRESSED':
+ apex_info.is_compressed = True
+ elif apex_type == 'UNCOMPRESSED':
+ apex_info.is_compressed = False
+ else:
+ raise RuntimeError('Not an APEX file: ' + apex_type)
+
+ # Decompress compressed APEX to determine its size
+ if apex_info.is_compressed:
+ decompressed_file_path = MakeTempFile(prefix="decompressed-",
+ suffix=".apex")
+ # Decompression target path should not exist
+ os.remove(decompressed_file_path)
+ RunAndCheckOutput([deapexer, 'decompress', '--input', apex_filepath,
+ '--output', decompressed_file_path])
+ apex_info.decompressed_size = os.path.getsize(decompressed_file_path)
+
+ apex_infos.append(apex_info)
+
+ return apex_infos
diff --git a/tools/releasetools/build_image.py b/tools/releasetools/build_image.py
index 9cc072f..820c128 100755
--- a/tools/releasetools/build_image.py
+++ b/tools/releasetools/build_image.py
@@ -250,6 +250,7 @@
run_e2fsck = False
needs_projid = prop_dict.get("needs_projid", 0)
needs_casefold = prop_dict.get("needs_casefold", 0)
+ needs_compress = prop_dict.get("needs_compress", 0)
if fs_type.startswith("ext"):
build_command = [prop_dict["ext_mkuserimg"]]
@@ -295,6 +296,18 @@
build_command.extend(["--inode_size", "256"])
if "selinux_fc" in prop_dict:
build_command.append(prop_dict["selinux_fc"])
+ elif fs_type.startswith("erofs"):
+ build_command = ["mkerofsimage.sh"]
+ build_command.extend([in_dir, out_file])
+ if "erofs_sparse_flag" in prop_dict:
+ build_command.extend([prop_dict["erofs_sparse_flag"]])
+ build_command.extend(["-m", prop_dict["mount_point"]])
+ if target_out:
+ build_command.extend(["-d", target_out])
+ if fs_config:
+ build_command.extend(["-C", fs_config])
+ if "selinux_fc" in prop_dict:
+ build_command.extend(["-c", prop_dict["selinux_fc"]])
elif fs_type.startswith("squash"):
build_command = ["mksquashfsimage.sh"]
build_command.extend([in_dir, out_file])
@@ -337,6 +350,17 @@
build_command.append("--prjquota")
if (needs_casefold):
build_command.append("--casefold")
+ if (needs_compress or prop_dict.get("system_fs_compress") == "true"):
+ build_command.append("--compression")
+ if (prop_dict.get("system_fs_compress") == "true"):
+ build_command.append("--sldc")
+ if (prop_dict.get("system_f2fs_sldc_flags") == None):
+ build_command.append(str(0))
+ else:
+ sldc_flags_str = prop_dict.get("system_f2fs_sldc_flags")
+ sldc_flags = sldc_flags_str.split()
+ build_command.append(str(len(sldc_flags)))
+ build_command.extend(sldc_flags)
else:
raise BuildImageError(
"Error: unknown filesystem type: {}".format(fs_type))
@@ -402,7 +426,7 @@
fs_type = prop_dict.get("fs_type", "")
fs_spans_partition = True
- if fs_type.startswith("squash"):
+ if fs_type.startswith("squash") or fs_type.startswith("erofs"):
fs_spans_partition = False
# Get a builder for creating an image that's to be verified by Verified Boot,
@@ -412,7 +436,16 @@
if (prop_dict.get("use_dynamic_partition_size") == "true" and
"partition_size" not in prop_dict):
# If partition_size is not defined, use output of `du' + reserved_size.
- size = GetDiskUsage(in_dir)
+ # For a compressed filesystem, use the compressed image size to avoid wasting space.
+ if fs_type.startswith("erofs"):
+ tmp_dict = prop_dict.copy()
+ if "erofs_sparse_flag" in tmp_dict:
+ tmp_dict.pop("erofs_sparse_flag")
+ BuildImageMkfs(in_dir, tmp_dict, out_file, target_out, fs_config)
+ size = GetDiskUsage(out_file)
+ os.remove(out_file)
+ else:
+ size = GetDiskUsage(in_dir)
logger.info(
"The tree size of %s is %d MB.", in_dir, size // BYTES_IN_MB)
# If not specified, give us 16MB margin for GetDiskUsage error ...
@@ -529,7 +562,10 @@
common_props = (
"extfs_sparse_flag",
+ "erofs_sparse_flag",
"squashfs_sparse_flag",
+ "system_fs_compress",
+ "system_f2fs_sldc_flags",
"f2fs_sparse_flag",
"skip_fsck",
"ext_mkuserimg",
@@ -610,6 +646,7 @@
copy_prop("userdata_selinux_fc", "selinux_fc")
copy_prop("needs_casefold", "needs_casefold")
copy_prop("needs_projid", "needs_projid")
+ copy_prop("needs_compress", "needs_compress")
elif mount_point == "cache":
copy_prop("cache_fs_type", "fs_type")
copy_prop("cache_size", "partition_size")
diff --git a/tools/releasetools/check_ota_package_signature.py b/tools/releasetools/check_ota_package_signature.py
index 0d990f1..58510a5 100755
--- a/tools/releasetools/check_ota_package_signature.py
+++ b/tools/releasetools/check_ota_package_signature.py
@@ -140,7 +140,7 @@
def VerifyAbOtaPayload(cert, package):
"""Verifies the payload and metadata signatures in an A/B OTA payload."""
- package_zip = zipfile.ZipFile(package, 'r')
+ package_zip = zipfile.ZipFile(package, 'r', allowZip64=True)
if 'payload.bin' not in package_zip.namelist():
common.ZipClose(package_zip)
return
diff --git a/tools/releasetools/check_target_files_vintf.py b/tools/releasetools/check_target_files_vintf.py
index 0edefac..a2ddfe7 100755
--- a/tools/releasetools/check_target_files_vintf.py
+++ b/tools/releasetools/check_target_files_vintf.py
@@ -100,10 +100,7 @@
'PRODUCT_OTA_ENFORCE_VINTF_KERNEL_REQUIREMENTS is not set')
return []
- with open(version_path) as f:
- version = f.read().strip()
-
- return ['--kernel', '{}:{}'.format(version, config_path)]
+ return ['--kernel', '{}:{}'.format(version_path, config_path)]
def CheckVintfFromExtractedTargetFiles(input_tmp, info_dict=None):
@@ -252,7 +249,7 @@
if os.path.isdir(target_files):
return os.path.isdir(os.path.join(target_files, "VENDOR"))
if zipfile.is_zipfile(target_files):
- return HasPartition(zipfile.ZipFile(target_files), "vendor")
+ return HasPartition(zipfile.ZipFile(target_files, allowZip64=True), "vendor")
raise ValueError("Unknown target_files argument")
return (HasVendorPartition(target_files) and
diff --git a/tools/releasetools/common.py b/tools/releasetools/common.py
index c77d8c6..26c4ae8 100644
--- a/tools/releasetools/common.py
+++ b/tools/releasetools/common.py
@@ -128,6 +128,12 @@
'odm_dlkm',
]
+# Partitions with a build.prop file
+PARTITIONS_WITH_BUILD_PROP = PARTITIONS_WITH_CARE_MAP + ['boot']
+
+# See sysprop.mk. If the file is moved, add the new search path here; don't
+# remove existing search paths.
+RAMDISK_BUILD_PROP_REL_PATHS = ['system/etc/ramdisk/build.prop']
class ErrorCode(object):
"""Define error_codes for failures that happen during the actual
@@ -217,6 +223,25 @@
def SetHostToolLocation(tool_name, location):
OPTIONS.host_tools[tool_name] = location
+def FindHostToolPath(tool_name):
+ """Finds the path to the host tool.
+
+ Args:
+ tool_name: name of the tool to find
+ Returns:
+ Path to the tool if it is registered in the host_tools map or exists in
+ the same directory as this binary; otherwise, tool_name itself.
+ """
+ if tool_name in OPTIONS.host_tools:
+ return OPTIONS.host_tools[tool_name]
+
+ my_dir = os.path.dirname(os.path.realpath(sys.argv[0]))
+ tool_path = os.path.join(my_dir, tool_name)
+ if os.path.exists(tool_path):
+ return tool_path
+
+ return tool_name
def Run(args, verbose=None, **kwargs):
"""Creates and returns a subprocess.Popen object.
@@ -240,12 +265,10 @@
if 'universal_newlines' not in kwargs:
kwargs['universal_newlines'] = True
- # If explicitly set host tool location before, use that location to avoid
- # PATH violation. Make a copy of args in case client relies on the content
- # of args later.
- if args and args[0] in OPTIONS.host_tools:
+ if args:
+ # Make a copy of args in case client relies on the content of args later.
args = args[:]
- args[0] = OPTIONS.host_tools[args[0]]
+ args[0] = FindHostToolPath(args[0])
# Don't log any if caller explicitly says so.
if verbose:
@@ -400,7 +423,7 @@
"3.2.2. Build Parameters.".format(fingerprint))
self._partition_fingerprints = {}
- for partition in PARTITIONS_WITH_CARE_MAP:
+ for partition in PARTITIONS_WITH_BUILD_PROP:
try:
fingerprint = self.CalculatePartitionFingerprint(partition)
check_fingerprint(fingerprint)
@@ -408,7 +431,7 @@
except ExternalError:
continue
if "system" in self._partition_fingerprints:
- # system_other is not included in PARTITIONS_WITH_CARE_MAP, but does
+ # system_other is not included in PARTITIONS_WITH_BUILD_PROP, but does
# need a fingerprint when creating the image.
self._partition_fingerprints[
"system_other"] = self._partition_fingerprints["system"]
@@ -456,12 +479,16 @@
def GetPartitionBuildProp(self, prop, partition):
"""Returns the inquired build property for the provided partition."""
+
+ # Boot image uses ro.[product.]bootimage instead of boot.
+ prop_partition = "bootimage" if partition == "boot" else partition
+
# If provided a partition for this property, only look within that
# partition's build.prop.
if prop in BuildInfo._RO_PRODUCT_RESOLVE_PROPS:
- prop = prop.replace("ro.product", "ro.product.{}".format(partition))
+ prop = prop.replace("ro.product", "ro.product.{}".format(prop_partition))
else:
- prop = prop.replace("ro.", "ro.{}.".format(partition))
+ prop = prop.replace("ro.", "ro.{}.".format(prop_partition))
prop_val = self._GetRawBuildProp(prop, partition)
if prop_val is not None:
@@ -527,6 +554,27 @@
return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_LEGACY
return BuildInfo._RO_PRODUCT_PROPS_DEFAULT_SOURCE_ORDER_CURRENT
+ def _GetPlatformVersion(self):
+ version_sdk = self.GetBuildProp("ro.build.version.sdk")
+ # init code switches to version_release_or_codename (see b/158483506). After
+ # API finalization, release_or_codename will be the same as release. This
+ # is the best effort to support pre-S dev stage builds.
+ if int(version_sdk) >= 30:
+ try:
+ return self.GetBuildProp("ro.build.version.release_or_codename")
+ except ExternalError:
+ logger.warning('Failed to find ro.build.version.release_or_codename')
+
+ return self.GetBuildProp("ro.build.version.release")
+
+ def _GetPartitionPlatformVersion(self, partition):
+ try:
+ return self.GetPartitionBuildProp("ro.build.version.release_or_codename",
+ partition)
+ except ExternalError:
+ return self.GetPartitionBuildProp("ro.build.version.release",
+ partition)
+
def GetOemProperty(self, key):
if self.oem_props is not None and key in self.oem_props:
return self.oem_dicts[0][key]
@@ -543,7 +591,7 @@
self.GetPartitionBuildProp("ro.product.brand", partition),
self.GetPartitionBuildProp("ro.product.name", partition),
self.GetPartitionBuildProp("ro.product.device", partition),
- self.GetPartitionBuildProp("ro.build.version.release", partition),
+ self._GetPartitionPlatformVersion(partition),
self.GetPartitionBuildProp("ro.build.id", partition),
self.GetPartitionBuildProp(
"ro.build.version.incremental", partition),
@@ -559,7 +607,7 @@
self.GetBuildProp("ro.product.brand"),
self.GetBuildProp("ro.product.name"),
self.GetBuildProp("ro.product.device"),
- self.GetBuildProp("ro.build.version.release"),
+ self._GetPlatformVersion(),
self.GetBuildProp("ro.build.id"),
self.GetBuildProp("ro.build.version.incremental"),
self.GetBuildProp("ro.build.type"),
@@ -611,6 +659,20 @@
raise KeyError(fn)
+def ExtractFromInputFile(input_file, fn):
+ """Extracts the contents of fn from input zipfile or directory into a file."""
+ if isinstance(input_file, zipfile.ZipFile):
+ tmp_file = MakeTempFile(os.path.basename(fn))
+ with open(tmp_file, 'w') as f:
+ f.write(input_file.read(fn))
+ return tmp_file
+ else:
+ file = os.path.join(input_file, *fn.split("/"))
+ if not os.path.exists(file):
+ raise KeyError(fn)
+ return file
+
+
def LoadInfoDict(input_file, repacking=False):
"""Loads the key/value pairs from the given input target_files.
@@ -715,7 +777,7 @@
# Tries to load the build props for all partitions with care_map, including
# system and vendor.
- for partition in PARTITIONS_WITH_CARE_MAP:
+ for partition in PARTITIONS_WITH_BUILD_PROP:
partition_prop = "{}.build.prop".format(partition)
d[partition_prop] = PartitionBuildProps.FromInputFile(
input_file, partition)
@@ -725,7 +787,7 @@
# hash / hashtree footers.
if d.get("avb_enable") == "true":
build_info = BuildInfo(d)
- for partition in PARTITIONS_WITH_CARE_MAP:
+ for partition in PARTITIONS_WITH_BUILD_PROP:
fingerprint = build_info.GetPartitionFingerprint(partition)
if fingerprint:
d["avb_{}_salt".format(partition)] = sha256(fingerprint.encode()).hexdigest()
@@ -801,6 +863,39 @@
@staticmethod
def FromInputFile(input_file, name, placeholder_values=None):
"""Loads the build.prop file and builds the attributes."""
+
+ if name == "boot":
+ data = PartitionBuildProps._ReadBootPropFile(input_file)
+ else:
+ data = PartitionBuildProps._ReadPartitionPropFile(input_file, name)
+
+ props = PartitionBuildProps(input_file, name, placeholder_values)
+ props._LoadBuildProp(data)
+ return props
+
+ @staticmethod
+ def _ReadBootPropFile(input_file):
+ """
+ Read build.prop for boot image from input_file.
+ Return empty string if not found.
+ """
+ try:
+ boot_img = ExtractFromInputFile(input_file, 'IMAGES/boot.img')
+ except KeyError:
+ logger.warning('Failed to read IMAGES/boot.img')
+ return ''
+ prop_file = GetBootImageBuildProp(boot_img)
+ if prop_file is None:
+ return ''
+ with open(prop_file) as f:
+ return f.read()
+
+ @staticmethod
+ def _ReadPartitionPropFile(input_file, name):
+ """
+ Read build.prop for name from input_file.
+ Return empty string if not found.
+ """
data = ''
for prop_file in ['{}/etc/build.prop'.format(name.upper()),
'{}/build.prop'.format(name.upper())]:
@@ -809,9 +904,15 @@
break
except KeyError:
logger.warning('Failed to read %s', prop_file)
+ return data
- props = PartitionBuildProps(input_file, name, placeholder_values)
- props._LoadBuildProp(data)
+ @staticmethod
+ def FromBuildPropFile(name, build_prop_file):
+ """Constructs an instance from a build prop file."""
+
+ props = PartitionBuildProps("unknown", name)
+ with open(build_prop_file) as f:
+ props._LoadBuildProp(f.read())
return props
def _LoadBuildProp(self, data):
@@ -1003,15 +1104,35 @@
Returns:
The merged dynamic partition info dictionary.
"""
- merged_dict = {}
+
+ def uniq_concat(a, b):
+ combined = set(a.split(" "))
+ combined.update(set(b.split(" ")))
+ combined = [item.strip() for item in combined if item.strip()]
+ return " ".join(sorted(combined))
+
+ if (framework_dict.get("use_dynamic_partitions") !=
+ "true") or (vendor_dict.get("use_dynamic_partitions") != "true"):
+ raise ValueError("Both dictionaries must have use_dynamic_partitions=true")
+
+ merged_dict = {"use_dynamic_partitions": "true"}
+
+ merged_dict["dynamic_partition_list"] = uniq_concat(
+ framework_dict.get("dynamic_partition_list", ""),
+ vendor_dict.get("dynamic_partition_list", ""))
+
+ # Super block devices are defined by the vendor dict.
+ if "super_block_devices" in vendor_dict:
+ merged_dict["super_block_devices"] = vendor_dict["super_block_devices"]
+ for block_device in merged_dict["super_block_devices"].split(" "):
+ key = "super_%s_device_size" % block_device
+ if key not in vendor_dict:
+ raise ValueError("Vendor dict does not contain required key %s." % key)
+ merged_dict[key] = vendor_dict[key]
+
# Partition groups and group sizes are defined by the vendor dict because
# these values may vary for each board that uses a shared system image.
merged_dict["super_partition_groups"] = vendor_dict["super_partition_groups"]
- framework_dynamic_partition_list = framework_dict.get(
- "dynamic_partition_list", "")
- vendor_dynamic_partition_list = vendor_dict.get("dynamic_partition_list", "")
- merged_dict["dynamic_partition_list"] = ("%s %s" % (
- framework_dynamic_partition_list, vendor_dynamic_partition_list)).strip()
for partition_group in merged_dict["super_partition_groups"].split(" "):
# Set the partition group's size using the value from the vendor dict.
key = "super_%s_group_size" % partition_group
@@ -1022,18 +1143,104 @@
# Set the partition group's partition list using a concatenation of the
# framework and vendor partition lists.
key = "super_%s_partition_list" % partition_group
- merged_dict[key] = (
- "%s %s" %
- (framework_dict.get(key, ""), vendor_dict.get(key, ""))).strip()
+ merged_dict[key] = uniq_concat(
+ framework_dict.get(key, ""), vendor_dict.get(key, ""))
- # Pick virtual ab related flags from vendor dict, if defined.
- if "virtual_ab" in vendor_dict.keys():
- merged_dict["virtual_ab"] = vendor_dict["virtual_ab"]
- if "virtual_ab_retrofit" in vendor_dict.keys():
- merged_dict["virtual_ab_retrofit"] = vendor_dict["virtual_ab_retrofit"]
+ # Various other flags should be copied from the vendor dict, if defined.
+ for key in ("virtual_ab", "virtual_ab_retrofit", "lpmake",
+ "super_metadata_device", "super_partition_error_limit",
+ "super_partition_size"):
+ if key in vendor_dict.keys():
+ merged_dict[key] = vendor_dict[key]
+
return merged_dict
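
For illustration (not part of the patch): the uniq_concat helper drives most of the merge above. A self-contained sketch with hypothetical framework and vendor dictionaries shows the de-duplicated, sorted result.

# Standalone sketch of the partition-list merging; dictionaries are made up.
def uniq_concat(a, b):
  combined = {item.strip() for item in a.split(" ") + b.split(" ") if item.strip()}
  return " ".join(sorted(combined))

framework = {"dynamic_partition_list": "system system_ext product"}
vendor = {"dynamic_partition_list": "vendor product odm"}
print(uniq_concat(framework["dynamic_partition_list"],
                  vendor["dynamic_partition_list"]))
# -> "odm product system system_ext vendor"
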
+def PartitionMapFromTargetFiles(target_files_dir):
+ """Builds a map from partition -> path within an extracted target files directory."""
+ # Keep possible_subdirs in sync with build/make/core/board_config.mk.
+ possible_subdirs = {
+ "system": ["SYSTEM"],
+ "vendor": ["VENDOR", "SYSTEM/vendor"],
+ "product": ["PRODUCT", "SYSTEM/product"],
+ "system_ext": ["SYSTEM_EXT", "SYSTEM/system_ext"],
+ "odm": ["ODM", "VENDOR/odm", "SYSTEM/vendor/odm"],
+ "vendor_dlkm": [
+ "VENDOR_DLKM", "VENDOR/vendor_dlkm", "SYSTEM/vendor/vendor_dlkm"
+ ],
+ "odm_dlkm": ["ODM_DLKM", "VENDOR/odm_dlkm", "SYSTEM/vendor/odm_dlkm"],
+ }
+ partition_map = {}
+ for partition, subdirs in possible_subdirs.items():
+ for subdir in subdirs:
+ if os.path.exists(os.path.join(target_files_dir, subdir)):
+ partition_map[partition] = subdir
+ break
+ return partition_map
+
+
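
Usage sketch (editorial, not part of the patch), assuming the patched common.py is importable and that the directory path below is a placeholder for an already-extracted target-files package.

import common  # releasetools module patched above (assumed on PYTHONPATH)

target_files_dir = "/tmp/extracted_target_files"  # placeholder path
partition_map = common.PartitionMapFromTargetFiles(target_files_dir)
# e.g. {"system": "SYSTEM", "vendor": "VENDOR", "product": "PRODUCT"}
for partition, subdir in sorted(partition_map.items()):
  print("%s -> %s" % (partition, subdir))
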
+def SharedUidPartitionViolations(uid_dict, partition_groups):
+ """Checks for APK sharedUserIds that cross partition group boundaries.
+
+ This uses a single or merged build's shareduid_violation_modules.json
+ output file, as generated by find_shareduid_violation.py or
+ core/tasks/find-shareduid-violation.mk.
+
+ An error is defined as a sharedUserId that is found in a set of partitions
+ that span more than one partition group.
+
+ Args:
+ uid_dict: A dictionary created by using the standard json module to read a
+ complete shareduid_violation_modules.json file.
+ partition_groups: A list of groups, where each group is a list of
+ partitions.
+
+ Returns:
+ A list of error messages.
+ """
+ errors = []
+ for uid, partitions in uid_dict.items():
+ found_in_groups = [
+ group for group in partition_groups
+ if set(partitions.keys()) & set(group)
+ ]
+ if len(found_in_groups) > 1:
+ errors.append(
+ "APK sharedUserId \"%s\" found across partition groups in partitions \"%s\""
+ % (uid, ",".join(sorted(partitions.keys()))))
+ return errors
+
+
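
For illustration (not part of the patch): a toy uid_dict in the shape written by find_shareduid_violation.py (sharedUserId -> partition -> APK list). Only the entry spanning both partition groups is reported; all values are invented.

import common  # releasetools module patched above (assumed importable)

uid_dict = {
    "android.uid.foo": {"system": ["FooSystem.apk"], "vendor": ["FooVendor.apk"]},
    "android.uid.bar": {"system": ["Bar.apk"], "product": ["Bar2.apk"]},
}
partition_groups = [["system", "system_ext", "product"], ["vendor", "odm"]]
for error in common.SharedUidPartitionViolations(uid_dict, partition_groups):
  print(error)  # only android.uid.foo crosses the two groups
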
+def RunHostInitVerifier(product_out, partition_map):
+ """Runs host_init_verifier on the init rc files within partitions.
+
+ host_init_verifier searches the etc/init path within each partition.
+
+ Args:
+ product_out: PRODUCT_OUT directory, containing partition directories.
+ partition_map: A map of partition name -> relative path within product_out.
+ """
+ allowed_partitions = ("system", "system_ext", "product", "vendor", "odm")
+ cmd = ["host_init_verifier"]
+ for partition, path in partition_map.items():
+ if partition not in allowed_partitions:
+ raise ExternalError("Unable to call host_init_verifier for partition %s" %
+ partition)
+ cmd.extend(["--out_%s" % partition, os.path.join(product_out, path)])
+ # Add --property-contexts if the file exists on the partition.
+ property_contexts = "%s_property_contexts" % (
+ "plat" if partition == "system" else partition)
+ property_contexts_path = os.path.join(product_out, path, "etc", "selinux",
+ property_contexts)
+ if os.path.exists(property_contexts_path):
+ cmd.append("--property-contexts=%s" % property_contexts_path)
+ # Add the passwd file if the file exists on the partition.
+ passwd_path = os.path.join(product_out, path, "etc", "passwd")
+ if os.path.exists(passwd_path):
+ cmd.extend(["-p", passwd_path])
+ return RunAndCheckOutput(cmd)
+
+
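
Usage sketch (editorial, not part of the patch); host_init_verifier must be on PATH, which is the case inside a lunch'd build shell, and the PRODUCT_OUT path below is a placeholder.

import common  # releasetools module patched above (assumed importable)

product_out = "out/target/product/generic"  # placeholder PRODUCT_OUT
partition_map = {"system": "system", "vendor": "vendor", "product": "product"}
common.RunHostInitVerifier(product_out=product_out,
                           partition_map=partition_map)
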
def AppendAVBSigningArgs(cmd, partition):
"""Append signing arguments for avbtool."""
# e.g., "--key path/to/signing_key --algorithm SHA256_RSA4096"
@@ -1257,23 +1464,27 @@
for building the requested image.
"""
+ if info_dict is None:
+ info_dict = OPTIONS.info_dict
+
# "boot" or "recovery", without extension.
partition_name = os.path.basename(sourcedir).lower()
+ kernel = None
if partition_name == "recovery":
- kernel = "kernel"
+ if info_dict.get("exclude_kernel_from_recovery_image") == "true":
+ logger.info("Excluded kernel binary from recovery image.")
+ else:
+ kernel = "kernel"
else:
kernel = image_name.replace("boot", "kernel")
kernel = kernel.replace(".img", "")
- if not os.access(os.path.join(sourcedir, kernel), os.F_OK):
+ if kernel and not os.access(os.path.join(sourcedir, kernel), os.F_OK):
return None
if has_ramdisk and not os.access(os.path.join(sourcedir, "RAMDISK"), os.F_OK):
return None
- if info_dict is None:
- info_dict = OPTIONS.info_dict
-
img = tempfile.NamedTemporaryFile()
if has_ramdisk:
@@ -1283,7 +1494,9 @@
# use MKBOOTIMG from environ, or "mkbootimg" if empty or not set
mkbootimg = os.getenv('MKBOOTIMG') or "mkbootimg"
- cmd = [mkbootimg, "--kernel", os.path.join(sourcedir, kernel)]
+ cmd = [mkbootimg]
+ if kernel:
+ cmd += ["--kernel", os.path.join(sourcedir, kernel)]
fn = os.path.join(sourcedir, "second")
if os.access(fn, os.F_OK):
@@ -1501,6 +1714,24 @@
cmd.extend(["--vendor_ramdisk", ramdisk_img.name])
cmd.extend(["--vendor_boot", img.name])
+ ramdisk_fragment_imgs = []
+ fn = os.path.join(sourcedir, "vendor_ramdisk_fragments")
+ if os.access(fn, os.F_OK):
+ ramdisk_fragments = shlex.split(open(fn).read().rstrip("\n"))
+ for ramdisk_fragment in ramdisk_fragments:
+ fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment, "mkbootimg_args")
+ cmd.extend(shlex.split(open(fn).read().rstrip("\n")))
+ fn = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment, "prebuilt_ramdisk")
+ # Use prebuilt image if found, else create ramdisk from supplied files.
+ if os.access(fn, os.F_OK):
+ ramdisk_fragment_pathname = fn
+ else:
+ ramdisk_fragment_root = os.path.join(sourcedir, "RAMDISK_FRAGMENTS", ramdisk_fragment)
+ ramdisk_fragment_img = _MakeRamdisk(ramdisk_fragment_root, lz4_ramdisks=use_lz4)
+ ramdisk_fragment_imgs.append(ramdisk_fragment_img)
+ ramdisk_fragment_pathname = ramdisk_fragment_img.name
+ cmd.extend(["--vendor_ramdisk_fragment", ramdisk_fragment_pathname])
+
RunAndCheckOutput(cmd)
# AVB: if enabled, calculate and add hash.
@@ -1518,6 +1749,8 @@
img.seek(os.SEEK_SET, 0)
data = img.read()
+ for f in ramdisk_fragment_imgs:
+ f.close()
ramdisk_img.close()
img.close()
@@ -1568,7 +1801,7 @@
cmd = ["unzip", "-o", "-q", filename, "-d", dirname]
if patterns is not None:
# Filter out non-matching patterns. unzip will complain otherwise.
- with zipfile.ZipFile(filename) as input_zip:
+ with zipfile.ZipFile(filename, allowZip64=True) as input_zip:
names = input_zip.namelist()
filtered = [
pattern for pattern in patterns if fnmatch.filter(names, pattern)]
@@ -3410,3 +3643,75 @@
comment('Move partition %s from default to %s' %
(p, u.tgt_group))
append('move %s %s' % (p, u.tgt_group))
+
+
+def GetBootImageBuildProp(boot_img):
+ """
+ Get build.prop from ramdisk within the boot image
+
+ Args:
+ boot_img: the boot image file. Ramdisk must be compressed with lz4 format.
+
+ Return:
+ An extracted file that stores properties in the boot image.
+ """
+ tmp_dir = MakeTempDir('boot_', suffix='.img')
+ try:
+ RunAndCheckOutput(['unpack_bootimg', '--boot_img', boot_img, '--out', tmp_dir])
+ ramdisk = os.path.join(tmp_dir, 'ramdisk')
+ if not os.path.isfile(ramdisk):
+ logger.warning('Unable to get boot image build props: no ramdisk in boot')
+ return None
+ uncompressed_ramdisk = os.path.join(tmp_dir, 'uncompressed_ramdisk')
+ RunAndCheckOutput(['lz4', '-d', ramdisk, uncompressed_ramdisk])
+
+ abs_uncompressed_ramdisk = os.path.abspath(uncompressed_ramdisk)
+ extracted_ramdisk = MakeTempDir('extracted_ramdisk')
+ # Use "toybox cpio" instead of "cpio" because the latter invokes cpio from
+ # the host environment.
+ RunAndCheckOutput(['toybox', 'cpio', '-F', abs_uncompressed_ramdisk, '-i'],
+ cwd=extracted_ramdisk)
+
+ for search_path in RAMDISK_BUILD_PROP_REL_PATHS:
+ prop_file = os.path.join(extracted_ramdisk, search_path)
+ if os.path.isfile(prop_file):
+ return prop_file
+ logger.warning('Unable to get boot image build props: no %s in ramdisk', search_path)
+
+ return None
+
+ except ExternalError as e:
+ logger.warning('Unable to get boot image build props: %s', e)
+ return None
+
+
+def GetBootImageTimestamp(boot_img):
+ """
+ Get timestamp from ramdisk within the boot image
+
+ Args:
+ boot_img: the boot image file. Ramdisk must be compressed with lz4 format.
+
+ Return:
+ An integer that corresponds to the timestamp of the boot image, or None
+ if the file has an unknown format. Raises an exception if an unexpected
+ error occurs.
+ """
+ prop_file = GetBootImageBuildProp(boot_img)
+ if not prop_file:
+ return None
+
+ props = PartitionBuildProps.FromBuildPropFile('boot', prop_file)
+ if props is None:
+ return None
+
+ try:
+ timestamp = props.GetProp('ro.bootimage.build.date.utc')
+ if timestamp:
+ return int(timestamp)
+ logger.warning('Unable to get boot image timestamp: ro.bootimage.build.date.utc is undefined')
+ return None
+
+ except ExternalError as e:
+ logger.warning('Unable to get boot image timestamp: %s', e)
+ return None
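
Usage sketch (editorial, not part of the patch); this shells out to unpack_bootimg, lz4 and toybox, so it only works inside a configured build environment, and the boot.img path is a placeholder.

import common  # releasetools module patched above (assumed importable)

common.InitLogging()
# Placeholder path; the ramdisk inside the image must be lz4-compressed.
timestamp = common.GetBootImageTimestamp("out/target/product/generic/boot.img")
if timestamp is None:
  print("could not determine boot image timestamp")
else:
  print("ro.bootimage.build.date.utc = %d" % timestamp)
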
diff --git a/tools/releasetools/find_shareduid_violation.py b/tools/releasetools/find_shareduid_violation.py
new file mode 100755
index 0000000..35acde3
--- /dev/null
+++ b/tools/releasetools/find_shareduid_violation.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2019 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+"""Find APK sharedUserId violators.
+
+Usage: find_shareduid_violation [args]
+
+ --product_out
+ PRODUCT_OUT directory
+
+ --aapt
+ Path to aapt or aapt2
+
+ --copy_out_system
+ TARGET_COPY_OUT_SYSTEM
+
+ --copy_out_vendor
+ TARGET_COPY_OUT_VENDOR
+
+ --copy_out_product
+ TARGET_COPY_OUT_PRODUCT
+
+ --copy_out_system_ext
+ TARGET_COPY_OUT_SYSTEM_EXT
+"""
+
+import json
+import logging
+import os
+import re
+import subprocess
+import sys
+
+from collections import defaultdict
+from glob import glob
+
+import common
+
+logger = logging.getLogger(__name__)
+
+OPTIONS = common.OPTIONS
+OPTIONS.product_out = os.environ.get("PRODUCT_OUT")
+OPTIONS.aapt = "aapt2"
+OPTIONS.copy_out_system = "system"
+OPTIONS.copy_out_vendor = "vendor"
+OPTIONS.copy_out_product = "product"
+OPTIONS.copy_out_system_ext = "system_ext"
+
+
+def execute(cmd):
+ p = subprocess.Popen(
+ cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = map(lambda b: b.decode("utf-8"), p.communicate())
+ return p.returncode == 0, out, err
+
+
+def make_aapt_cmds(aapt, apk):
+ return [
+ aapt + " dump " + apk + " --file AndroidManifest.xml",
+ aapt + " dump xmltree " + apk + " --file AndroidManifest.xml"
+ ]
+
+
+def extract_shared_uid(aapt, apk):
+ for cmd in make_aapt_cmds(aapt, apk):
+ success, manifest, error_msg = execute(cmd)
+ if success:
+ break
+ else:
+ logger.error(error_msg)
+ sys.exit()
+
+ pattern = re.compile(r"sharedUserId.*=\"([^\"]*)")
+
+ for line in manifest.split("\n"):
+ match = pattern.search(line)
+ if match:
+ return match.group(1)
+ return None
+
+
+def FindShareduidViolation(product_out, partition_map, aapt="aapt2"):
+ """Find sharedUserId violators in the given partitions.
+
+ Args:
+ product_out: The base directory containing the partition directories.
+ partition_map: A map of partition name -> directory name.
+ aapt: The name of the aapt binary. Defaults to aapt2.
+
+ Returns:
+ A string containing a JSON object describing the shared UIDs.
+ """
+ shareduid_app_dict = defaultdict(lambda: defaultdict(list))
+
+ for part, location in partition_map.items():
+ for f in glob(os.path.join(product_out, location, "*", "*", "*.apk")):
+ apk_file = os.path.basename(f)
+ shared_uid = extract_shared_uid(aapt, f)
+
+ if shared_uid is None:
+ continue
+ shareduid_app_dict[shared_uid][part].append(apk_file)
+
+ # Only output sharedUserId values that appear in >1 partition.
+ output = {}
+ for uid, partitions in shareduid_app_dict.items():
+ if len(partitions) > 1:
+ output[uid] = shareduid_app_dict[uid]
+
+ return json.dumps(output, indent=2, sort_keys=True)
+
+
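
Usage sketch of the library entry point (editorial, not part of the patch); aapt2 must be on PATH and the PRODUCT_OUT path and partition layout below are placeholders.

import find_shareduid_violation  # the new module added above (assumed importable)

violations_json = find_shareduid_violation.FindShareduidViolation(
    "out/target/product/generic",        # placeholder PRODUCT_OUT
    {"system": "system", "vendor": "vendor"})
print(violations_json)  # JSON: sharedUserId -> partition -> [apk, ...]
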
+def main():
+ common.InitLogging()
+
+ def option_handler(o, a):
+ if o == "--product_out":
+ OPTIONS.product_out = a
+ elif o == "--aapt":
+ OPTIONS.aapt = a
+ elif o == "--copy_out_system":
+ OPTIONS.copy_out_system = a
+ elif o == "--copy_out_vendor":
+ OPTIONS.copy_out_vendor = a
+ elif o == "--copy_out_product":
+ OPTIONS.copy_out_product = a
+ elif o == "--copy_out_system_ext":
+ OPTIONS.copy_out_system_ext = a
+ else:
+ return False
+ return True
+
+ args = common.ParseOptions(
+ sys.argv[1:],
+ __doc__,
+ extra_long_opts=[
+ "product_out=",
+ "aapt=",
+ "copy_out_system=",
+ "copy_out_vendor=",
+ "copy_out_product=",
+ "copy_out_system_ext=",
+ ],
+ extra_option_handler=option_handler)
+
+ if args:
+ common.Usage(__doc__)
+ sys.exit(1)
+
+ partition_map = {
+ "system": OPTIONS.copy_out_system,
+ "vendor": OPTIONS.copy_out_vendor,
+ "product": OPTIONS.copy_out_product,
+ "system_ext": OPTIONS.copy_out_system_ext,
+ }
+
+ print(
+ FindShareduidViolation(OPTIONS.product_out, partition_map, OPTIONS.aapt))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/tools/releasetools/merge_target_files.py b/tools/releasetools/merge_target_files.py
index bfd2f90..9360d7b 100755
--- a/tools/releasetools/merge_target_files.py
+++ b/tools/releasetools/merge_target_files.py
@@ -16,11 +16,15 @@
#
"""This script merges two partial target files packages.
-One package contains framework files, and the other contains vendor files.
-It produces a complete target files package that can be used to generate an
-OTA package.
+One input package contains framework files, and the other contains vendor files.
-Usage: merge_target_files.py [args]
+This script produces a complete, merged target files package:
+ - This package can be used to generate a flashable IMG package.
+ See --output-img.
+ - This package can be used to generate an OTA package. See --output-ota.
+ - The merged package is checked for compatibility between the two inputs.
+
+Usage: merge_target_files [args]
--framework-target-files framework-target-files-zip-archive
The input target files package containing framework bits. This is a zip
@@ -70,6 +74,10 @@
--rebuild_recovery
Deprecated; does nothing.
+ --allow-duplicate-apkapex-keys
+ If provided, duplicate APK/APEX keys are ignored and the value from the
+ framework is used.
+
--keep-tmp
Keep temporary files for debugging purposes.
"""
@@ -77,6 +85,7 @@
from __future__ import print_function
import fnmatch
+import json
import logging
import os
import re
@@ -90,6 +99,7 @@
import check_target_files_vintf
import common
import img_from_target_files
+import find_shareduid_violation
import ota_from_target_files
logger = logging.getLogger(__name__)
@@ -110,6 +120,8 @@
OPTIONS.output_super_empty = None
# TODO(b/132730255): Remove this option.
OPTIONS.rebuild_recovery = False
+# TODO(b/150582573): Remove this option.
+OPTIONS.allow_duplicate_apkapex_keys = False
OPTIONS.keep_tmp = False
# In an item list (framework or vendor), we may see entries that select whole
@@ -149,16 +161,9 @@
'SYSTEM/*',
)
-# FRAMEWORK_EXTRACT_SPECIAL_ITEM_LIST is a list of items to extract from the
-# partial framework target files package that need some special processing, such
-# as some sort of combination with items from the partial vendor target files
-# package.
-
-FRAMEWORK_EXTRACT_SPECIAL_ITEM_LIST = ('META/*',)
-
# DEFAULT_FRAMEWORK_MISC_INFO_KEYS is a list of keys to obtain from the
-# framework instance of META/misc_info.txt. The remaining keys from the
-# vendor instance.
+# framework instance of META/misc_info.txt. The remaining keys should come
+# from the vendor instance.
DEFAULT_FRAMEWORK_MISC_INFO_KEYS = (
'avb_system_hashtree_enable',
@@ -199,13 +204,6 @@
'VENDOR/*',
)
-# VENDOR_EXTRACT_SPECIAL_ITEM_LIST is a list of items to extract from the
-# partial vendor target files package that need some special processing, such as
-# some sort of combination with items from the partial framework target files
-# package.
-
-VENDOR_EXTRACT_SPECIAL_ITEM_LIST = ('META/*',)
-
# The merge config lists should not attempt to extract items from both
# builds for any of the following partitions. The partitions in
# SINGLE_BUILD_PARTITIONS should come entirely from a single build (either
@@ -314,8 +312,8 @@
framework_item_list: The list of items to extract from the partial framework
target files package as is.
framework_misc_info_keys: A list of keys to obtain from the framework
- instance of META/misc_info.txt. The remaining keys from the vendor
- instance.
+ instance of META/misc_info.txt. The remaining keys should come from the
+ vendor instance.
vendor_item_list: The list of items to extract from the partial vendor
target files package as is.
@@ -340,10 +338,15 @@
'this script.')
has_error = True
+ # Check that partitions only come from one input.
for partition in SINGLE_BUILD_PARTITIONS:
- in_framework = any(
- item.startswith(partition) for item in framework_item_list)
- in_vendor = any(item.startswith(partition) for item in vendor_item_list)
+ image_path = 'IMAGES/{}.img'.format(partition.lower().replace('/', ''))
+ in_framework = (
+ any(item.startswith(partition) for item in framework_item_list) or
+ image_path in framework_item_list)
+ in_vendor = (
+ any(item.startswith(partition) for item in vendor_item_list) or
+ image_path in vendor_item_list)
if in_framework and in_vendor:
logger.error(
'Cannot extract items from %s for both the framework and vendor'
@@ -369,8 +372,8 @@
framework directory and the vendor directory, placing the merged result in the
output directory. The precondition is that the files are already extracted.
The post condition is that the output META/ab_partitions.txt contains the
- merged content. The format for each ab_partitions.txt a one partition name per
- line. The output file contains the union of the parition names.
+ merged content. The format for each ab_partitions.txt is one partition name
+ per line. The output file contains the union of the partition names.
Args:
framework_target_files_temp_dir: The name of a directory containing the
@@ -423,8 +426,8 @@
create the output target files package after all the special cases are
processed.
framework_misc_info_keys: A list of keys to obtain from the framework
- instance of META/misc_info.txt. The remaining keys from the vendor
- instance.
+ instance of META/misc_info.txt. The remaining keys should come from the
+ vendor instance.
"""
misc_info_path = ['META', 'misc_info.txt']
@@ -453,6 +456,12 @@
# false in the partial builds to prevent duplicate building of super.img.
merged_dict['build_super_partition'] = 'true'
+ # If AVB is enabled then ensure that we build vbmeta.img.
+ # Partial builds with AVB enabled may set PRODUCT_BUILD_VBMETA_IMAGE=false to
+ # skip building an incomplete vbmeta.img.
+ if merged_dict.get('avb_enable') == 'true':
+ merged_dict['avb_building_vbmeta_image'] = 'true'
+
# Replace <image>_selinux_fc values with framework or vendor file_contexts.bin
# depending on which dictionary the key came from.
# Only the file basename is required because all selinux_fc properties are
@@ -526,6 +535,7 @@
Args:
item_list: A list of items in a target files package.
+
Returns:
A set of partitions extracted from the list of items.
"""
@@ -547,7 +557,6 @@
output_target_files_dir,
framework_partition_set,
vendor_partition_set, file_name):
-
"""Performs special processing for META/apexkeys.txt or META/apkcerts.txt.
This function merges the contents of the META/apexkeys.txt or
@@ -597,7 +606,12 @@
if partition_tag in partition_set:
if key in merged_dict:
- raise ValueError('Duplicate key %s' % key)
+ if OPTIONS.allow_duplicate_apkapex_keys:
+ # TODO(b/150582573) Always raise on duplicates.
+ logger.warning('Duplicate key %s' % key)
+ continue
+ else:
+ raise ValueError('Duplicate key %s' % key)
merged_dict[key] = value
@@ -647,8 +661,7 @@
def process_special_cases(framework_target_files_temp_dir,
vendor_target_files_temp_dir,
output_target_files_temp_dir,
- framework_misc_info_keys,
- framework_partition_set,
+ framework_misc_info_keys, framework_partition_set,
vendor_partition_set):
"""Performs special-case processing for certain target files items.
@@ -664,8 +677,8 @@
create the output target files package after all the special cases are
processed.
framework_misc_info_keys: A list of keys to obtain from the framework
- instance of META/misc_info.txt. The remaining keys from the vendor
- instance.
+ instance of META/misc_info.txt. The remaining keys should come from the
+ vendor instance.
framework_partition_set: Partitions that are considered framework
partitions. Used to filter apexkeys.txt and apkcerts.txt.
vendor_partition_set: Partitions that are considered vendor partitions. Used
@@ -711,26 +724,6 @@
file_name='apexkeys.txt')
-def files_from_path(target_path, extra_args=None):
- """Gets files under given path.
-
- Get (sub)files from given target path and return sorted list.
-
- Args:
- target_path: Target path to get subfiles.
- extra_args: List of extra argument for find command. Optional.
-
- Returns:
- Sorted files and directories list.
- """
-
- find_command = ['find', target_path] + (extra_args or [])
- find_process = common.Run(find_command, stdout=subprocess.PIPE, verbose=False)
- return common.RunAndCheckOutput(['sort'],
- stdin=find_process.stdout,
- verbose=False)
-
-
def create_merged_package(temp_dir, framework_target_files, framework_item_list,
vendor_target_files, vendor_item_list,
framework_misc_info_keys, rebuild_recovery):
@@ -752,64 +745,42 @@
target files package as is, meaning these items will land in the output
target files package exactly as they appear in the input partial vendor
target files package.
- framework_misc_info_keys: The list of keys to obtain from the framework
- instance of META/misc_info.txt. The remaining keys from the vendor
- instance.
+ framework_misc_info_keys: A list of keys to obtain from the framework
+ instance of META/misc_info.txt. The remaining keys should come from the
+ vendor instance.
rebuild_recovery: If true, rebuild the recovery patch used by non-A/B
devices and write it to the system image.
Returns:
Path to merged package under temp directory.
"""
+ # Extract "as is" items from the input framework and vendor partial target
+ # files packages directly into the output temporary directory, since these items
+ # do not need special case processing.
- # Create directory names that we'll use when we extract files from framework,
- # and vendor, and for zipping the final output.
-
- framework_target_files_temp_dir = os.path.join(temp_dir, 'framework')
- vendor_target_files_temp_dir = os.path.join(temp_dir, 'vendor')
output_target_files_temp_dir = os.path.join(temp_dir, 'output')
-
- # Extract "as is" items from the input framework partial target files package.
- # We extract them directly into the output temporary directory since the
- # items do not need special case processing.
-
extract_items(
target_files=framework_target_files,
target_files_temp_dir=output_target_files_temp_dir,
extract_item_list=framework_item_list)
-
- # Extract "as is" items from the input vendor partial target files package. We
- # extract them directly into the output temporary directory since the items
- # do not need special case processing.
-
extract_items(
target_files=vendor_target_files,
target_files_temp_dir=output_target_files_temp_dir,
extract_item_list=vendor_item_list)
- # Extract "special" items from the input framework partial target files
- # package. We extract these items to different directory since they require
- # special processing before they will end up in the output directory.
-
+ # Perform special case processing on META/* items.
+ # After this function completes successfully, all the files we need to create
+ # the output target files package are in place.
+ framework_target_files_temp_dir = os.path.join(temp_dir, 'framework')
+ vendor_target_files_temp_dir = os.path.join(temp_dir, 'vendor')
extract_items(
target_files=framework_target_files,
target_files_temp_dir=framework_target_files_temp_dir,
- extract_item_list=FRAMEWORK_EXTRACT_SPECIAL_ITEM_LIST)
-
- # Extract "special" items from the input vendor partial target files package.
- # We extract these items to different directory since they require special
- # processing before they will end up in the output directory.
-
+ extract_item_list=('META/*',))
extract_items(
target_files=vendor_target_files,
target_files_temp_dir=vendor_target_files_temp_dir,
- extract_item_list=VENDOR_EXTRACT_SPECIAL_ITEM_LIST)
-
- # Now that the temporary directories contain all the extracted files, perform
- # special case processing on any items that need it. After this function
- # completes successfully, all the files we need to create the output target
- # files package are in place.
-
+ extract_item_list=('META/*',))
process_special_cases(
framework_target_files_temp_dir=framework_target_files_temp_dir,
vendor_target_files_temp_dir=vendor_target_files_temp_dir,
@@ -835,8 +806,10 @@
# Regenerate IMAGES in the target directory.
- add_img_args = ['--verbose']
- add_img_args.append('--add_missing')
+ add_img_args = [
+ '--verbose',
+ '--add_missing',
+ ]
# TODO(b/132730255): Remove this if statement.
if rebuild_recovery:
add_img_args.append('--rebuild_recovery')
@@ -889,6 +862,15 @@
output_zip = os.path.abspath(output_file)
output_target_files_meta_dir = os.path.join(source_dir, 'META')
+ def files_from_path(target_path, extra_args=None):
+ """Gets files under the given path and return a sorted list."""
+ find_command = ['find', target_path] + (extra_args or [])
+ find_process = common.Run(
+ find_command, stdout=subprocess.PIPE, verbose=False)
+ return common.RunAndCheckOutput(['sort'],
+ stdin=find_process.stdout,
+ verbose=False)
+
meta_content = files_from_path(output_target_files_meta_dir)
other_content = files_from_path(
source_dir,
@@ -937,9 +919,9 @@
target files package as is, meaning these items will land in the output
target files package exactly as they appear in the input partial framework
target files package.
- framework_misc_info_keys: The list of keys to obtain from the framework
- instance of META/misc_info.txt. The remaining keys from the vendor
- instance.
+ framework_misc_info_keys: A list of keys to obtain from the framework
+ instance of META/misc_info.txt. The remaining keys should come from the
+ vendor instance.
vendor_target_files: The name of the zip archive containing the vendor
partial target files package.
vendor_item_list: The list of items to extract from the partial vendor
@@ -967,7 +949,44 @@
rebuild_recovery)
if not check_target_files_vintf.CheckVintf(output_target_files_temp_dir):
- raise RuntimeError("Incompatible VINTF metadata")
+ raise RuntimeError('Incompatible VINTF metadata')
+
+ partition_map = common.PartitionMapFromTargetFiles(
+ output_target_files_temp_dir)
+
+ # Generate and check for cross-partition violations of sharedUserId
+ # values in APKs. This requires the input target-files packages to contain
+ # *.apk files.
+ shareduid_violation_modules = os.path.join(
+ output_target_files_temp_dir, 'META', 'shareduid_violation_modules.json')
+ with open(shareduid_violation_modules, 'w') as f:
+ violation = find_shareduid_violation.FindShareduidViolation(
+ output_target_files_temp_dir, partition_map)
+
+ # Write the output to a file to enable debugging.
+ f.write(violation)
+
+ # Check for violations across the input builds' partition groups.
+ framework_partitions = item_list_to_partition_set(framework_item_list)
+ vendor_partitions = item_list_to_partition_set(vendor_item_list)
+ shareduid_errors = common.SharedUidPartitionViolations(
+ json.loads(violation), [framework_partitions, vendor_partitions])
+ if shareduid_errors:
+ for error in shareduid_errors:
+ logger.error(error)
+ raise ValueError('sharedUserId APK error. See %s' %
+ shareduid_violation_modules)
+
+ # Run host_init_verifier on the combined init rc files.
+ filtered_partitions = {
+ partition: path
+ for partition, path in partition_map.items()
+ # host_init_verifier checks only the following partitions:
+ if partition in ['system', 'system_ext', 'product', 'vendor', 'odm']
+ }
+ common.RunHostInitVerifier(
+ product_out=output_target_files_temp_dir,
+ partition_map=filtered_partitions)
generate_images(output_target_files_temp_dir, rebuild_recovery)
@@ -1075,8 +1094,10 @@
OPTIONS.output_img = a
elif o == '--output-super-empty':
OPTIONS.output_super_empty = a
- elif o == '--rebuild_recovery': # TODO(b/132730255): Warn
+ elif o == '--rebuild_recovery': # TODO(b/132730255): Warn
OPTIONS.rebuild_recovery = True
+ elif o == '--allow-duplicate-apkapex-keys':
+ OPTIONS.allow_duplicate_apkapex_keys = True
elif o == '--keep-tmp':
OPTIONS.keep_tmp = True
else:
@@ -1104,6 +1125,7 @@
'output-img=',
'output-super-empty=',
'rebuild_recovery',
+ 'allow-duplicate-apkapex-keys',
'keep-tmp',
],
extra_option_handler=option_handler)
diff --git a/tools/releasetools/ota_from_target_files.py b/tools/releasetools/ota_from_target_files.py
index 2833397..41644d8 100755
--- a/tools/releasetools/ota_from_target_files.py
+++ b/tools/releasetools/ota_from_target_files.py
@@ -85,6 +85,13 @@
If not set, generates A/B package for A/B device and non-A/B package for
non-A/B device.
+ -o (--oem_settings) <main_file[,additional_files...]>
+ Comma separated list of files used to specify the expected OEM-specific
+ properties on the OEM partition of the intended device. Multiple expected
+ values can be used by providing multiple files. Only the first dict will
+ be used to compute fingerprint, while the rest will be used to assert
+ OEM-specific properties.
+
Non-A/B OTA specific options
-b (--binary) <file>
@@ -114,13 +121,6 @@
builds for an incremental package. This option is only meaningful when -i
is specified.
- -o (--oem_settings) <main_file[,additional_files...]>
- Comma seperated list of files used to specify the expected OEM-specific
- properties on the OEM partition of the intended device. Multiple expected
- values can be used by providing multiple files. Only the first dict will
- be used to compute fingerprint, while the rest will be used to assert
- OEM-specific properties.
-
--oem_no_mount
For devices with OEM-specific properties but without an OEM partition, do
not mount the OEM partition in the updater-script. This should be very
@@ -202,26 +202,38 @@
ones. Should only be used if caller knows it's safe to do so (e.g. all the
postinstall work is to dexopt apps and a data wipe will happen immediately
after). Only meaningful when generating A/B OTAs.
+
+ --partial "<PARTITION> [<PARTITION>[...]]"
+ Generate partial updates, overriding ab_partitions list with the given
+ list.
+
+ --custom_image <custom_partition=custom_image>
+ Use the specified custom_image to update custom_partition when generating
+ an A/B OTA package. e.g. "--custom_image oem=oem.img --custom_image
+ cus=cus_test.img"
"""
from __future__ import print_function
import logging
import multiprocessing
+import os
import os.path
+import re
import shlex
import shutil
import struct
+import subprocess
import sys
import zipfile
import common
import ota_utils
+from ota_utils import (UNZIP_PATTERN, FinalizeMetadata, GetPackageMetadata,
+ PropertyFiles)
import target_files_diff
from check_target_files_vintf import CheckVintfIfTrebleEnabled
from non_ab_ota import GenerateNonAbOtaPackage
-from ota_utils import (UNZIP_PATTERN, FinalizeMetadata, GetPackageMetadata,
- PropertyFiles)
if sys.hexversion < 0x02070000:
print("Python 2.7 or newer is required.", file=sys.stderr)
@@ -257,7 +269,9 @@
OPTIONS.skip_postinstall = False
OPTIONS.skip_compatibility_check = False
OPTIONS.disable_fec_computation = False
-
+OPTIONS.disable_verity_computation = False
+OPTIONS.partial = None
+OPTIONS.custom_images = {}
POSTINSTALL_CONFIG = 'META/postinstall_config.txt'
DYNAMIC_PARTITION_INFO = 'META/dynamic_partitions_info.txt'
@@ -389,6 +403,8 @@
cmd.extend(["--source_image", source_file])
if OPTIONS.disable_fec_computation:
cmd.extend(["--disable_fec_computation", "true"])
+ if OPTIONS.disable_verity_computation:
+ cmd.extend(["--disable_verity_computation", "true"])
cmd.extend(additional_args)
self._Run(cmd)
@@ -593,6 +609,48 @@
return (payload_offset, metadata_total)
+def UpdatesInfoForSpecialUpdates(content, partitions_filter,
+ delete_keys=None):
+ """ Updates info file for secondary payload generation, partial update, etc.
+
+ Scan each line in the info file, and remove the unwanted partitions from
+ the dynamic partition list in the related properties. e.g.
+ "super_google_dynamic_partitions_partition_list=system vendor product"
+ will become "super_google_dynamic_partitions_partition_list=system".
+
+ Args:
+ content: The content of the input info file. e.g. misc_info.txt.
+ partitions_filter: A function to filter the desired partitions from a given
+ list
+ delete_keys: A list of keys to delete in the info file
+
+ Returns:
+ A string of the updated info content.
+ """
+
+ output_list = []
+ # The suffix in partition_list variables that follows the name of the
+ # partition group.
+ list_suffix = 'partition_list'
+ for line in content.splitlines():
+ if line.startswith('#') or '=' not in line:
+ output_list.append(line)
+ continue
+ key, value = line.strip().split('=', 1)
+
+ if delete_keys and key in delete_keys:
+ pass
+ elif key.endswith(list_suffix):
+ partitions = value.split()
+ # TODO for partial update, partitions in the same group must be all
+ # updated or all omitted
+ partitions = filter(partitions_filter, partitions)
+ output_list.append('{}={}'.format(key, ' '.join(partitions)))
+ else:
+ output_list.append(line)
+ return '\n'.join(output_list)
+
+
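
For illustration (not part of the patch): filtering a made-up misc_info.txt snippet down to the system partition drops the other partitions from the *_partition_list value and removes virtual_ab, while unrelated keys pass through unchanged.

import ota_from_target_files as ota  # module patched above (assumed importable)

content = ("virtual_ab=true\n"
           "super_google_dynamic_partitions_partition_list=system vendor product\n"
           "ab_update=true\n")
print(ota.UpdatesInfoForSpecialUpdates(
    content, lambda p: p == "system", delete_keys=["virtual_ab"]))
# -> super_google_dynamic_partitions_partition_list=system
#    ab_update=true
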
def GetTargetFilesZipForSecondaryImages(input_file, skip_postinstall=False):
"""Returns a target-files.zip file for generating secondary payload.
@@ -614,49 +672,20 @@
"""
def GetInfoForSecondaryImages(info_file):
- """Updates info file for secondary payload generation.
-
- Scan each line in the info file, and remove the unwanted partitions from
- the dynamic partition list in the related properties. e.g.
- "super_google_dynamic_partitions_partition_list=system vendor product"
- will become "super_google_dynamic_partitions_partition_list=system".
-
- Args:
- info_file: The input info file. e.g. misc_info.txt.
-
- Returns:
- A string of the updated info content.
- """
-
- output_list = []
+ """Updates info file for secondary payload generation."""
with open(info_file) as f:
- lines = f.read().splitlines()
-
- # The suffix in partition_list variables that follows the name of the
- # partition group.
- LIST_SUFFIX = 'partition_list'
- for line in lines:
- if line.startswith('#') or '=' not in line:
- output_list.append(line)
- continue
- key, value = line.strip().split('=', 1)
- if key == 'dynamic_partition_list' or key.endswith(LIST_SUFFIX):
- partitions = value.split()
- partitions = [partition for partition in partitions if partition
- not in SECONDARY_PAYLOAD_SKIPPED_IMAGES]
- output_list.append('{}={}'.format(key, ' '.join(partitions)))
- elif key in ['virtual_ab', "virtual_ab_retrofit"]:
- # Remove virtual_ab flag from secondary payload so that OTA client
- # don't use snapshots for secondary update
- pass
- else:
- output_list.append(line)
- return '\n'.join(output_list)
+ content = f.read()
+ # Remove virtual_ab flag from secondary payload so that OTA client
+ # don't use snapshots for secondary update
+ delete_keys = ['virtual_ab', "virtual_ab_retrofit"]
+ return UpdatesInfoForSpecialUpdates(
+ content, lambda p: p not in SECONDARY_PAYLOAD_SKIPPED_IMAGES,
+ delete_keys)
target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
target_zip = zipfile.ZipFile(target_file, 'w', allowZip64=True)
- with zipfile.ZipFile(input_file, 'r') as input_zip:
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
infolist = input_zip.infolist()
input_tmp = common.UnzipTemp(input_file, UNZIP_PATTERN)
@@ -719,7 +748,7 @@
The filename of target-files.zip that doesn't contain postinstall config.
"""
# We should only make a copy if postinstall_config entry exists.
- with zipfile.ZipFile(input_file, 'r') as input_zip:
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
if POSTINSTALL_CONFIG not in input_zip.namelist():
return input_file
@@ -728,6 +757,80 @@
common.ZipDelete(target_file, POSTINSTALL_CONFIG)
return target_file
+def ParseInfoDict(target_file_path):
+ with zipfile.ZipFile(target_file_path, 'r', allowZip64=True) as zfp:
+ return common.LoadInfoDict(zfp)
+
+def GetTargetFilesZipForPartialUpdates(input_file, ab_partitions):
+ """Returns a target-files.zip for partial ota update package generation.
+
+ This function modifies ab_partitions list with the desired partitions before
+ calling the brillo_update_payload script. It also cleans up the reference to
+ the excluded partitions in the info file, e.g misc_info.txt.
+
+ Args:
+ input_file: The input target-files.zip filename.
+ ab_partitions: A list of partitions to include in the partial update
+
+ Returns:
+ The filename of target-files.zip used for partial ota update.
+ """
+
+ def AddImageForPartition(partition_name):
+ """Add the archive name for a given partition to the copy list."""
+ for prefix in ['IMAGES', 'RADIO']:
+ image_path = '{}/{}.img'.format(prefix, partition_name)
+ if image_path in namelist:
+ copy_entries.append(image_path)
+ map_path = '{}/{}.map'.format(prefix, partition_name)
+ if map_path in namelist:
+ copy_entries.append(map_path)
+ return
+
+ raise ValueError("Cannot find {} in input zipfile".format(partition_name))
+
+ with zipfile.ZipFile(input_file, allowZip64=True) as input_zip:
+ original_ab_partitions = input_zip.read(
+ AB_PARTITIONS).decode().splitlines()
+ namelist = input_zip.namelist()
+
+ unrecognized_partitions = [partition for partition in ab_partitions if
+ partition not in original_ab_partitions]
+ if unrecognized_partitions:
+ raise ValueError("Unrecognized partitions when generating partial updates",
+ unrecognized_partitions)
+
+ logger.info("Generating partial updates for %s", ab_partitions)
+
+ copy_entries = ['META/update_engine_config.txt']
+ for partition_name in ab_partitions:
+ AddImageForPartition(partition_name)
+
+ # Use zip2zip to avoid extracting the zipfile.
+ partial_target_file = common.MakeTempFile(suffix='.zip')
+ cmd = ['zip2zip', '-i', input_file, '-o', partial_target_file]
+ cmd.extend(['{}:{}'.format(name, name) for name in copy_entries])
+ common.RunAndCheckOutput(cmd)
+
+ partial_target_zip = zipfile.ZipFile(partial_target_file, 'a',
+ allowZip64=True)
+ with zipfile.ZipFile(input_file, allowZip64=True) as input_zip:
+ common.ZipWriteStr(partial_target_zip, 'META/ab_partitions.txt',
+ '\n'.join(ab_partitions))
+ for info_file in ['META/misc_info.txt', DYNAMIC_PARTITION_INFO]:
+ if info_file not in input_zip.namelist():
+ logger.warning('Cannot find %s in input zipfile', info_file)
+ continue
+ content = input_zip.read(info_file).decode()
+ modified_info = UpdatesInfoForSpecialUpdates(
+ content, lambda p: p in ab_partitions)
+ common.ZipWriteStr(partial_target_zip, info_file, modified_info)
+
+ # TODO(xunchang) handle 'META/care_map.pb', 'META/postinstall_config.txt'
+ common.ZipClose(partial_target_zip)
+
+ return partial_target_file
+
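
Usage sketch (editorial, not part of the patch); zip2zip must be on PATH and the input zip below is a placeholder for a real target-files package containing the requested partitions.

import ota_from_target_files as ota  # module patched above (assumed importable)

partial_zip = ota.GetTargetFilesZipForPartialUpdates(
    "target-files.zip",            # placeholder input package
    ["system", "product"])         # partitions to keep in the partial update
print("partial target-files written to", partial_zip)
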
def GetTargetFilesZipForRetrofitDynamicPartitions(input_file,
super_block_devices,
@@ -754,7 +857,7 @@
target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
shutil.copyfile(input_file, target_file)
- with zipfile.ZipFile(input_file) as input_zip:
+ with zipfile.ZipFile(input_file, allowZip64=True) as input_zip:
namelist = input_zip.namelist()
input_tmp = common.UnzipTemp(input_file, RETROFIT_DAP_UNZIP_PATTERN)
@@ -778,7 +881,7 @@
with open(new_ab_partitions, 'w') as f:
for partition in ab_partitions:
if (partition in dynamic_partition_list and
- partition not in super_block_devices):
+ partition not in super_block_devices):
logger.info("Dropping %s from ab_partitions.txt", partition)
continue
f.write(partition + "\n")
@@ -814,6 +917,108 @@
return target_file
+def GetTargetFilesZipForCustomImagesUpdates(input_file, custom_images):
+ """Returns a target-files.zip for custom partitions update.
+
+ This function modifies ab_partitions list with the desired custom partitions
+ and puts the custom images into the target target-files.zip.
+
+ Args:
+ input_file: The input target-files.zip filename.
+ custom_images: A map of custom partitions and custom images.
+
+ Returns:
+ The filename of a target-files.zip which has renamed the custom images in
+ the IMAGES/ directory to their partition names.
+ """
+ # Use zip2zip to avoid extracting the zipfile.
+ target_file = common.MakeTempFile(prefix="targetfiles-", suffix=".zip")
+ cmd = ['zip2zip', '-i', input_file, '-o', target_file]
+
+ with zipfile.ZipFile(input_file, allowZip64=True) as input_zip:
+ namelist = input_zip.namelist()
+
+ # Write {custom_image}.img as {custom_partition}.img.
+ for custom_partition, custom_image in custom_images.items():
+ default_custom_image = '{}.img'.format(custom_partition)
+ if default_custom_image != custom_image:
+ logger.info("Update custom partition '%s' with '%s'",
+ custom_partition, custom_image)
+ # Default custom image needs to be deleted first.
+ namelist.remove('IMAGES/{}'.format(default_custom_image))
+ # IMAGES/{custom_image}.img:IMAGES/{custom_partition}.img.
+ cmd.extend(['IMAGES/{}:IMAGES/{}'.format(custom_image,
+ default_custom_image)])
+
+ cmd.extend(['{}:{}'.format(name, name) for name in namelist])
+ common.RunAndCheckOutput(cmd)
+
+ return target_file
+
+def GeneratePartitionTimestampFlags(partition_state):
+ partition_timestamps = [
+ part.partition_name + ":" + part.version
+ for part in partition_state]
+ return ["--partition_timestamps", ",".join(partition_timestamps)]
+
+def GeneratePartitionTimestampFlagsDowngrade(
+ pre_partition_state, post_partition_state):
+ assert pre_partition_state is not None
+ partition_timestamps = {}
+ for part in pre_partition_state:
+ partition_timestamps[part.partition_name] = part.version
+ for part in post_partition_state:
+ partition_timestamps[part.partition_name] = \
+ max(part.version, partition_timestamps[part.partition_name])
+ return [
+ "--partition_timestamps",
+ ",".join([key + ":" + val for (key, val) in partition_timestamps.items()])
+ ]
+
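
For illustration (not part of the patch): the two helpers above only need objects exposing partition_name and version; the real callers pass ota_metadata_pb2.PartitionState messages, modeled here with a stand-in namedtuple and invented timestamps.

from collections import namedtuple

import ota_from_target_files as ota  # module patched above (assumed importable)

# Stand-in for ota_metadata_pb2.PartitionState with only the fields used here.
Part = namedtuple("Part", ["partition_name", "version"])

post = [Part("boot", "1628000000"), Part("system", "1628000001")]
print(ota.GeneratePartitionTimestampFlags(post))
# -> ['--partition_timestamps', 'boot:1628000000,system:1628000001']
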
+def IsSparseImage(filepath):
+ with open(filepath, 'rb') as fp:
+ # Magic for android sparse image format
+ # https://source.android.com/devices/bootloader/images
+ return fp.read(4) == b'\x3A\xFF\x26\xED'
+
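
For illustration (not part of the patch): the sparse check only compares the first four bytes against the Android sparse-image magic (0xED26FF3A stored little-endian); a self-contained sketch writes just that magic to a temporary file.

import struct
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as fp:
  fp.write(struct.pack("<I", 0xED26FF3A))  # little-endian sparse magic
  path = fp.name
with open(path, "rb") as fp:
  print(fp.read(4) == b"\x3A\xFF\x26\xED")  # True
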
+def SupportsMainlineGkiUpdates(target_file):
+ """Return True if the build supports MainlineGKIUpdates.
+
+ This function scans the product.img file in IMAGES/ directory for
+ pattern |*/apex/com.android.gki.*.apex|. If there are files
+ matching this pattern, conclude that the build supports mainline
+ GKI and return True.
+
+ Args:
+ target_file: Path to a target_file.zip, or an extracted directory
+ Return:
+ True if this build supports Mainline GKI Updates.
+ """
+ if target_file is None:
+ return False
+ if os.path.isfile(target_file):
+ target_file = common.UnzipTemp(target_file, ["IMAGES/product.img"])
+ if not os.path.isdir(target_file):
+ assert os.path.isdir(target_file), \
+ "{} must be a path to zip archive or dir containing extracted"\
+ " target_files".format(target_file)
+ image_file = os.path.join(target_file, "IMAGES", "product.img")
+
+ if not os.path.isfile(image_file):
+ return False
+
+ if IsSparseImage(image_file):
+ # Unsparse the image
+ tmp_img = common.MakeTempFile(suffix=".img")
+ subprocess.check_output(["simg2img", image_file, tmp_img])
+ image_file = tmp_img
+
+ cmd = ["debugfs_static", "-R", "ls -p /apex", image_file]
+ output = subprocess.check_output(cmd).decode()
+
+ pattern = re.compile(r"com\.android\.gki\..*\.apex")
+ return pattern.search(output) is not None
+
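
Usage sketch (editorial, not part of the patch); this shells out to debugfs_static, and to simg2img for sparse images, so it only works in a configured build environment, and the input path is a placeholder.

import ota_from_target_files as ota  # module patched above (assumed importable)

if ota.SupportsMainlineGkiUpdates("target-files.zip"):  # placeholder path
  print("build ships a com.android.gki.*.apex; --full_boot will be passed")
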
def GenerateAbOtaPackage(target_file, output_file, source_file=None):
"""Generates an Android OTA package that has A/B update payload."""
# Stage the output zip package for package signing.
@@ -822,7 +1027,8 @@
else:
staging_file = output_file
output_zip = zipfile.ZipFile(staging_file, "w",
- compression=zipfile.ZIP_DEFLATED)
+ compression=zipfile.ZIP_DEFLATED,
+ allowZip64=True)
if source_file is not None:
assert "ab_partitions" in OPTIONS.source_info_dict, \
@@ -831,45 +1037,70 @@
"META/ab_partitions.txt is required for ab_update."
target_info = common.BuildInfo(OPTIONS.target_info_dict, OPTIONS.oem_dicts)
source_info = common.BuildInfo(OPTIONS.source_info_dict, OPTIONS.oem_dicts)
+ vendor_prop = source_info.info_dict.get("vendor.build.prop")
+ if vendor_prop and \
+ vendor_prop.GetProp("ro.virtual_ab.compression.enabled") == "true":
+ # TODO(zhangkelvin) Remove this once FEC on VABC is supported
+ logger.info("Virtual AB Compression enabled, disabling FEC")
+ OPTIONS.disable_fec_computation = True
+ OPTIONS.disable_verity_computation = True
else:
assert "ab_partitions" in OPTIONS.info_dict, \
"META/ab_partitions.txt is required for ab_update."
target_info = common.BuildInfo(OPTIONS.info_dict, OPTIONS.oem_dicts)
source_info = None
+ additional_args = []
+
+ # Prepare custom images.
+ if OPTIONS.custom_images:
+ target_file = GetTargetFilesZipForCustomImagesUpdates(
+ target_file, OPTIONS.custom_images)
+
if OPTIONS.retrofit_dynamic_partitions:
target_file = GetTargetFilesZipForRetrofitDynamicPartitions(
target_file, target_info.get("super_block_devices").strip().split(),
target_info.get("dynamic_partition_list").strip().split())
+ elif OPTIONS.partial:
+ target_file = GetTargetFilesZipForPartialUpdates(target_file,
+ OPTIONS.partial)
+ additional_args += ["--is_partial_update", "true"]
elif OPTIONS.skip_postinstall:
target_file = GetTargetFilesZipWithoutPostinstallConfig(target_file)
# Target_file may have been modified, reparse ab_partitions
with zipfile.ZipFile(target_file, allowZip64=True) as zfp:
target_info.info_dict['ab_partitions'] = zfp.read(
- AB_PARTITIONS).strip().split("\n")
+ AB_PARTITIONS).decode().strip().split("\n")
# Metadata to comply with Android OTA package format.
metadata = GetPackageMetadata(target_info, source_info)
# Generate payload.
payload = Payload()
- partition_timestamps = []
+ partition_timestamps_flags = []
# Enforce a max timestamp this payload can be applied on top of.
if OPTIONS.downgrade:
max_timestamp = source_info.GetBuildProp("ro.build.date.utc")
+ partition_timestamps_flags = GeneratePartitionTimestampFlagsDowngrade(
+ metadata.precondition.partition_state,
+ metadata.postcondition.partition_state
+ )
else:
max_timestamp = str(metadata.postcondition.timestamp)
- partition_timestamps = [
- part.partition_name + ":" + part.version
- for part in metadata.postcondition.partition_state]
- additional_args = ["--max_timestamp", max_timestamp]
- if partition_timestamps:
- additional_args.extend(
- ["--partition_timestamps", ",".join(
- partition_timestamps)]
- )
+ partition_timestamps_flags = GeneratePartitionTimestampFlags(
+ metadata.postcondition.partition_state)
- payload.Generate(target_file, source_file, additional_args)
+ additional_args += ["--max_timestamp", max_timestamp]
+
+ if SupportsMainlineGkiUpdates(source_file):
+ logger.warning("Detected build with mainline GKI, include full boot image.")
+ additional_args.extend(["--full_boot", "true"])
+
+ payload.Generate(
+ target_file,
+ source_file,
+ additional_args + partition_timestamps_flags
+ )
# Sign the payload.
payload_signer = PayloadSigner()
@@ -887,15 +1118,16 @@
target_file, OPTIONS.skip_postinstall)
secondary_payload = Payload(secondary=True)
secondary_payload.Generate(secondary_target_file,
- additional_args=additional_args)
+ additional_args=["--max_timestamp",
+ max_timestamp])
secondary_payload.Sign(payload_signer)
secondary_payload.WriteToZip(output_zip)
# If dm-verity is supported for the device, copy contents of care_map
# into A/B OTA package.
- target_zip = zipfile.ZipFile(target_file, "r")
+ target_zip = zipfile.ZipFile(target_file, "r", allowZip64=True)
if (target_info.get("verity") == "true" or
- target_info.get("avb_enable") == "true"):
+ target_info.get("avb_enable") == "true"):
care_map_list = [x for x in ["care_map.pb", "care_map.txt"] if
"META/" + x in target_zip.namelist()]
@@ -910,6 +1142,15 @@
else:
logger.warning("Cannot find care map file in target_file package")
+ # Copy apex_info.pb over to generated OTA package.
+ try:
+ apex_info_entry = target_zip.getinfo("META/apex_info.pb")
+ with target_zip.open(apex_info_entry, "r") as zfp:
+ common.ZipWriteStr(output_zip, "apex_info.pb", zfp.read(),
+ compress_type=zipfile.ZIP_STORED)
+ except KeyError:
+ logger.warning("target_file doesn't contain apex_info.pb %s", target_file)
+
common.ZipClose(target_zip)
CheckVintfIfTrebleEnabled(target_file, target_info)
@@ -1002,10 +1243,20 @@
OPTIONS.output_metadata_path = a
elif o == "--disable_fec_computation":
OPTIONS.disable_fec_computation = True
+ elif o == "--disable_verity_computation":
+ OPTIONS.disable_verity_computation = True
elif o == "--force_non_ab":
OPTIONS.force_non_ab = True
elif o == "--boot_variable_file":
OPTIONS.boot_variable_file = a
+ elif o == "--partial":
+ partitions = a.split()
+ if not partitions:
+ raise ValueError("Cannot parse partitions in {}".format(a))
+ OPTIONS.partial = partitions
+ elif o == "--custom_image":
+ custom_partition, custom_image = a.split("=")
+ OPTIONS.custom_images[custom_partition] = custom_image
else:
return False
return True
@@ -1042,8 +1293,11 @@
"skip_compatibility_check",
"output_metadata_path=",
"disable_fec_computation",
+ "disable_verity_computation",
"force_non_ab",
"boot_variable_file=",
+ "partial=",
+ "custom_image=",
], extra_option_handler=option_handler)
if len(args) != 2:
@@ -1052,13 +1306,6 @@
common.InitLogging()
- if OPTIONS.downgrade:
- # We should only allow downgrading incrementals (as opposed to full).
- # Otherwise the device may go back from arbitrary build with this full
- # OTA package.
- if OPTIONS.incremental_source is None:
- raise ValueError("Cannot generate downgradable full OTAs")
-
# Load the build info dicts from the zip directly or the extracted input
# directory. We don't need to unzip the entire target-files zips, because they
# won't be needed for A/B OTAs (brillo_update_payload does that on its own).
@@ -1069,21 +1316,44 @@
if OPTIONS.extracted_input is not None:
OPTIONS.info_dict = common.LoadInfoDict(OPTIONS.extracted_input)
else:
- with zipfile.ZipFile(args[0], 'r') as input_zip:
- OPTIONS.info_dict = common.LoadInfoDict(input_zip)
+ OPTIONS.info_dict = ParseInfoDict(args[0])
+
+ if OPTIONS.downgrade:
+ # We should only allow downgrading incrementals (as opposed to full).
+ # Otherwise the device may go back from arbitrary build with this full
+ # OTA package.
+ if OPTIONS.incremental_source is None:
+ raise ValueError("Cannot generate downgradable full OTAs")
+
+
+ # TODO(xunchang) for retrofit and partial updates, maybe we should rebuild the
+ # target-file and reload the info_dict. So the info will be consistent with
+ # the modified target-file.
logger.info("--- target info ---")
common.DumpInfoDict(OPTIONS.info_dict)
+
# Load the source build dict if applicable.
if OPTIONS.incremental_source is not None:
OPTIONS.target_info_dict = OPTIONS.info_dict
- with zipfile.ZipFile(OPTIONS.incremental_source, 'r') as source_zip:
- OPTIONS.source_info_dict = common.LoadInfoDict(source_zip)
+ OPTIONS.source_info_dict = ParseInfoDict(OPTIONS.incremental_source)
logger.info("--- source info ---")
common.DumpInfoDict(OPTIONS.source_info_dict)
+ if OPTIONS.partial:
+ OPTIONS.info_dict['ab_partitions'] = \
+ list(
+ set(OPTIONS.info_dict['ab_partitions']) & set(OPTIONS.partial)
+ )
+ if OPTIONS.source_info_dict:
+ OPTIONS.source_info_dict['ab_partitions'] = \
+ list(
+ set(OPTIONS.source_info_dict['ab_partitions']) &
+ set(OPTIONS.partial)
+ )
+
# Load OEM dicts if provided.
OPTIONS.oem_dicts = _LoadOemDicts(OPTIONS.oem_source)
@@ -1091,7 +1361,7 @@
# use_dynamic_partitions but target build does.
if (OPTIONS.source_info_dict and
OPTIONS.source_info_dict.get("use_dynamic_partitions") != "true" and
- OPTIONS.target_info_dict.get("use_dynamic_partitions") == "true"):
+ OPTIONS.target_info_dict.get("use_dynamic_partitions") == "true"):
if OPTIONS.target_info_dict.get("dynamic_partition_retrofit") != "true":
raise common.ExternalError(
"Expect to generate incremental OTA for retrofitting dynamic "
@@ -1107,7 +1377,8 @@
ab_update = OPTIONS.info_dict.get("ab_update") == "true"
allow_non_ab = OPTIONS.info_dict.get("allow_non_ab") == "true"
if OPTIONS.force_non_ab:
- assert allow_non_ab, "--force_non_ab only allowed on devices that supports non-A/B"
+ assert allow_non_ab,\
+        "--force_non_ab only allowed on devices that support non-A/B"
assert ab_update, "--force_non_ab only allowed on A/B devices"
generate_ab = not OPTIONS.force_non_ab and ab_update
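
As a quick illustration of the --partial and --custom_image handling added above, here is a standalone sketch (not part of the change; the function name and demo values are hypothetical): the partition list is split on whitespace, intersected with ab_partitions, and each --custom_image argument is parsed as a partition=image pair.

# Sketch: filter ab_partitions down to the requested --partial set and collect
# --custom_image overrides, mirroring the option handler and intersection logic
# in the hunks above.
def apply_partial_and_custom(info_dict, partial_arg, custom_image_args):
  partitions = partial_arg.split()
  if not partitions:
    raise ValueError("Cannot parse partitions in {}".format(partial_arg))
  # Keep only the A/B partitions that are part of the partial update.
  info_dict['ab_partitions'] = list(
      set(info_dict['ab_partitions']) & set(partitions))
  # Each --custom_image argument is a "partition=image" pair.
  custom_images = {}
  for arg in custom_image_args:
    custom_partition, custom_image = arg.split("=")
    custom_images[custom_partition] = custom_image
  return info_dict, custom_images

if __name__ == '__main__':
  demo = {'ab_partitions': ['boot', 'system', 'vendor', 'product']}
  info, customs = apply_partial_and_custom(
      demo, "system vendor", ["vendor=vendor_custom.img"])
  print(sorted(info['ab_partitions']))  # ['system', 'vendor']
  print(customs)                        # {'vendor': 'vendor_custom.img'}
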
diff --git a/tools/releasetools/ota_metadata.proto b/tools/releasetools/ota_metadata.proto
index 20d3091..5da8b84 100644
--- a/tools/releasetools/ota_metadata.proto
+++ b/tools/releasetools/ota_metadata.proto
@@ -16,8 +16,8 @@
// If you change this file,
// Please update ota_metadata_pb2.py by executing
-// protoc ota_metadata.proto --python_out $ANDROID_BUILD_TOP/build/tools/releasetools
-
+// protoc ota_metadata.proto --python_out
+// $ANDROID_BUILD_TOP/build/tools/releasetools
syntax = "proto3";
@@ -65,6 +65,19 @@
repeated PartitionState partition_state = 7;
}
+message ApexInfo {
+ string package_name = 1;
+ int64 version = 2;
+ bool is_compressed = 3;
+ int64 decompressed_size = 4;
+}
+
+// Just a container to hold repeated apex_info, so that we can easily serialize
+// a list of apex_info to string.
+message ApexMetadata {
+ repeated ApexInfo apex_info = 1;
+}
+
// The metadata of an OTA package. It contains the information of the package
// and prerequisite to install the update correctly.
message OtaMetadata {
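
To make the intent of the new ApexMetadata container concrete, here is a small usage sketch (an illustrative example, not part of the change) with the generated ota_metadata_pb2 module; the package name and sizes below are made up.

# Sketch: ApexMetadata holds a repeated ApexInfo field so a whole list can be
# serialized to a single string, e.g. for a META/apex_info.pb entry.
import ota_metadata_pb2

apex = ota_metadata_pb2.ApexInfo()
apex.package_name = 'com.android.apex.compressed'  # hypothetical values
apex.version = 1
apex.is_compressed = True
apex.decompressed_size = 4096

metadata = ota_metadata_pb2.ApexMetadata()
metadata.apex_info.extend([apex])

serialized = metadata.SerializeToString()
roundtrip = ota_metadata_pb2.ApexMetadata.FromString(serialized)
assert roundtrip.apex_info[0].package_name == apex.package_name
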
diff --git a/tools/releasetools/ota_metadata_pb2.py b/tools/releasetools/ota_metadata_pb2.py
index ff2b2c5..27cc930 100644
--- a/tools/releasetools/ota_metadata_pb2.py
+++ b/tools/releasetools/ota_metadata_pb2.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: ota_metadata.proto
-
+"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
@@ -18,7 +18,8 @@
package='build.tools.releasetools',
syntax='proto3',
serialized_options=b'H\003',
- serialized_pb=b'\n\x12ota_metadata.proto\x12\x18\x62uild.tools.releasetools\"X\n\x0ePartitionState\x12\x16\n\x0epartition_name\x18\x01 \x01(\t\x12\x0e\n\x06\x64\x65vice\x18\x02 \x03(\t\x12\r\n\x05\x62uild\x18\x03 \x03(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\"\xce\x01\n\x0b\x44\x65viceState\x12\x0e\n\x06\x64\x65vice\x18\x01 \x03(\t\x12\r\n\x05\x62uild\x18\x02 \x03(\t\x12\x19\n\x11\x62uild_incremental\x18\x03 \x01(\t\x12\x11\n\ttimestamp\x18\x04 \x01(\x03\x12\x11\n\tsdk_level\x18\x05 \x01(\t\x12\x1c\n\x14security_patch_level\x18\x06 \x01(\t\x12\x41\n\x0fpartition_state\x18\x07 \x03(\x0b\x32(.build.tools.releasetools.PartitionState\"\xe1\x03\n\x0bOtaMetadata\x12;\n\x04type\x18\x01 \x01(\x0e\x32-.build.tools.releasetools.OtaMetadata.OtaType\x12\x0c\n\x04wipe\x18\x02 \x01(\x08\x12\x11\n\tdowngrade\x18\x03 \x01(\x08\x12P\n\x0eproperty_files\x18\x04 \x03(\x0b\x32\x38.build.tools.releasetools.OtaMetadata.PropertyFilesEntry\x12;\n\x0cprecondition\x18\x05 \x01(\x0b\x32%.build.tools.releasetools.DeviceState\x12<\n\rpostcondition\x18\x06 \x01(\x0b\x32%.build.tools.releasetools.DeviceState\x12#\n\x1bretrofit_dynamic_partitions\x18\x07 \x01(\x08\x12\x16\n\x0erequired_cache\x18\x08 \x01(\x03\x1a\x34\n\x12PropertyFilesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"4\n\x07OtaType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02\x41\x42\x10\x01\x12\t\n\x05\x42LOCK\x10\x02\x12\t\n\x05\x42RICK\x10\x03\x42\x02H\x03\x62\x06proto3'
+ create_key=_descriptor._internal_create_key,
+ serialized_pb=b'\n\x12ota_metadata.proto\x12\x18\x62uild.tools.releasetools\"X\n\x0ePartitionState\x12\x16\n\x0epartition_name\x18\x01 \x01(\t\x12\x0e\n\x06\x64\x65vice\x18\x02 \x03(\t\x12\r\n\x05\x62uild\x18\x03 \x03(\t\x12\x0f\n\x07version\x18\x04 \x01(\t\"\xce\x01\n\x0b\x44\x65viceState\x12\x0e\n\x06\x64\x65vice\x18\x01 \x03(\t\x12\r\n\x05\x62uild\x18\x02 \x03(\t\x12\x19\n\x11\x62uild_incremental\x18\x03 \x01(\t\x12\x11\n\ttimestamp\x18\x04 \x01(\x03\x12\x11\n\tsdk_level\x18\x05 \x01(\t\x12\x1c\n\x14security_patch_level\x18\x06 \x01(\t\x12\x41\n\x0fpartition_state\x18\x07 \x03(\x0b\x32(.build.tools.releasetools.PartitionState\"c\n\x08\x41pexInfo\x12\x14\n\x0cpackage_name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\x03\x12\x15\n\ris_compressed\x18\x03 \x01(\x08\x12\x19\n\x11\x64\x65\x63ompressed_size\x18\x04 \x01(\x03\"E\n\x0c\x41pexMetadata\x12\x35\n\tapex_info\x18\x01 \x03(\x0b\x32\".build.tools.releasetools.ApexInfo\"\x98\x04\n\x0bOtaMetadata\x12;\n\x04type\x18\x01 \x01(\x0e\x32-.build.tools.releasetools.OtaMetadata.OtaType\x12\x0c\n\x04wipe\x18\x02 \x01(\x08\x12\x11\n\tdowngrade\x18\x03 \x01(\x08\x12P\n\x0eproperty_files\x18\x04 \x03(\x0b\x32\x38.build.tools.releasetools.OtaMetadata.PropertyFilesEntry\x12;\n\x0cprecondition\x18\x05 \x01(\x0b\x32%.build.tools.releasetools.DeviceState\x12<\n\rpostcondition\x18\x06 \x01(\x0b\x32%.build.tools.releasetools.DeviceState\x12#\n\x1bretrofit_dynamic_partitions\x18\x07 \x01(\x08\x12\x16\n\x0erequired_cache\x18\x08 \x01(\x03\x12\x35\n\tapex_info\x18\t \x03(\x0b\x32\".build.tools.releasetools.ApexInfo\x1a\x34\n\x12PropertyFilesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"4\n\x07OtaType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x06\n\x02\x41\x42\x10\x01\x12\t\n\x05\x42LOCK\x10\x02\x12\t\n\x05\x42RICK\x10\x03\x42\x02H\x03\x62\x06proto3'
)
@@ -28,28 +29,33 @@
full_name='build.tools.releasetools.OtaMetadata.OtaType',
filename=None,
file=DESCRIPTOR,
+ create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='UNKNOWN', index=0, number=0,
serialized_options=None,
- type=None),
+ type=None,
+ create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='AB', index=1, number=1,
serialized_options=None,
- type=None),
+ type=None,
+ create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='BLOCK', index=2, number=2,
serialized_options=None,
- type=None),
+ type=None,
+ create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='BRICK', index=3, number=3,
serialized_options=None,
- type=None),
+ type=None,
+ create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
- serialized_start=777,
- serialized_end=829,
+ serialized_start=1004,
+ serialized_end=1056,
)
_sym_db.RegisterEnumDescriptor(_OTAMETADATA_OTATYPE)
@@ -60,6 +66,7 @@
filename=None,
file=DESCRIPTOR,
containing_type=None,
+ create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='partition_name', full_name='build.tools.releasetools.PartitionState.partition_name', index=0,
@@ -67,28 +74,28 @@
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='device', full_name='build.tools.releasetools.PartitionState.device', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='build', full_name='build.tools.releasetools.PartitionState.build', index=2,
number=3, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='version', full_name='build.tools.releasetools.PartitionState.version', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
@@ -112,6 +119,7 @@
filename=None,
file=DESCRIPTOR,
containing_type=None,
+ create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='device', full_name='build.tools.releasetools.DeviceState.device', index=0,
@@ -119,49 +127,49 @@
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='build', full_name='build.tools.releasetools.DeviceState.build', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='build_incremental', full_name='build.tools.releasetools.DeviceState.build_incremental', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='timestamp', full_name='build.tools.releasetools.DeviceState.timestamp', index=3,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='sdk_level', full_name='build.tools.releasetools.DeviceState.sdk_level', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='security_patch_level', full_name='build.tools.releasetools.DeviceState.security_patch_level', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='partition_state', full_name='build.tools.releasetools.DeviceState.partition_state', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
@@ -179,12 +187,98 @@
)
+_APEXINFO = _descriptor.Descriptor(
+ name='ApexInfo',
+ full_name='build.tools.releasetools.ApexInfo',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='package_name', full_name='build.tools.releasetools.ApexInfo.package_name', index=0,
+ number=1, type=9, cpp_type=9, label=1,
+ has_default_value=False, default_value=b"".decode('utf-8'),
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='version', full_name='build.tools.releasetools.ApexInfo.version', index=1,
+ number=2, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='is_compressed', full_name='build.tools.releasetools.ApexInfo.is_compressed', index=2,
+ number=3, type=8, cpp_type=7, label=1,
+ has_default_value=False, default_value=False,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='decompressed_size', full_name='build.tools.releasetools.ApexInfo.decompressed_size', index=3,
+ number=4, type=3, cpp_type=2, label=1,
+ has_default_value=False, default_value=0,
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=347,
+ serialized_end=446,
+)
+
+
+_APEXMETADATA = _descriptor.Descriptor(
+ name='ApexMetadata',
+ full_name='build.tools.releasetools.ApexMetadata',
+ filename=None,
+ file=DESCRIPTOR,
+ containing_type=None,
+ create_key=_descriptor._internal_create_key,
+ fields=[
+ _descriptor.FieldDescriptor(
+ name='apex_info', full_name='build.tools.releasetools.ApexMetadata.apex_info', index=0,
+ number=1, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ ],
+ extensions=[
+ ],
+ nested_types=[],
+ enum_types=[
+ ],
+ serialized_options=None,
+ is_extendable=False,
+ syntax='proto3',
+ extension_ranges=[],
+ oneofs=[
+ ],
+ serialized_start=448,
+ serialized_end=517,
+)
+
+
_OTAMETADATA_PROPERTYFILESENTRY = _descriptor.Descriptor(
name='PropertyFilesEntry',
full_name='build.tools.releasetools.OtaMetadata.PropertyFilesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
+ create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='build.tools.releasetools.OtaMetadata.PropertyFilesEntry.key', index=0,
@@ -192,14 +286,14 @@
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='value', full_name='build.tools.releasetools.OtaMetadata.PropertyFilesEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
@@ -212,8 +306,8 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=723,
- serialized_end=775,
+ serialized_start=950,
+ serialized_end=1002,
)
_OTAMETADATA = _descriptor.Descriptor(
@@ -222,6 +316,7 @@
filename=None,
file=DESCRIPTOR,
containing_type=None,
+ create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='build.tools.releasetools.OtaMetadata.type', index=0,
@@ -229,56 +324,63 @@
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='wipe', full_name='build.tools.releasetools.OtaMetadata.wipe', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='downgrade', full_name='build.tools.releasetools.OtaMetadata.downgrade', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='property_files', full_name='build.tools.releasetools.OtaMetadata.property_files', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='precondition', full_name='build.tools.releasetools.OtaMetadata.precondition', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='postcondition', full_name='build.tools.releasetools.OtaMetadata.postcondition', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='retrofit_dynamic_partitions', full_name='build.tools.releasetools.OtaMetadata.retrofit_dynamic_partitions', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='required_cache', full_name='build.tools.releasetools.OtaMetadata.required_cache', index=7,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
- serialized_options=None, file=DESCRIPTOR),
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
+ _descriptor.FieldDescriptor(
+ name='apex_info', full_name='build.tools.releasetools.OtaMetadata.apex_info', index=8,
+ number=9, type=11, cpp_type=10, label=3,
+ has_default_value=False, default_value=[],
+ message_type=None, enum_type=None, containing_type=None,
+ is_extension=False, extension_scope=None,
+ serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
@@ -292,19 +394,23 @@
extension_ranges=[],
oneofs=[
],
- serialized_start=348,
- serialized_end=829,
+ serialized_start=520,
+ serialized_end=1056,
)
_DEVICESTATE.fields_by_name['partition_state'].message_type = _PARTITIONSTATE
+_APEXMETADATA.fields_by_name['apex_info'].message_type = _APEXINFO
_OTAMETADATA_PROPERTYFILESENTRY.containing_type = _OTAMETADATA
_OTAMETADATA.fields_by_name['type'].enum_type = _OTAMETADATA_OTATYPE
_OTAMETADATA.fields_by_name['property_files'].message_type = _OTAMETADATA_PROPERTYFILESENTRY
_OTAMETADATA.fields_by_name['precondition'].message_type = _DEVICESTATE
_OTAMETADATA.fields_by_name['postcondition'].message_type = _DEVICESTATE
+_OTAMETADATA.fields_by_name['apex_info'].message_type = _APEXINFO
_OTAMETADATA_OTATYPE.containing_type = _OTAMETADATA
DESCRIPTOR.message_types_by_name['PartitionState'] = _PARTITIONSTATE
DESCRIPTOR.message_types_by_name['DeviceState'] = _DEVICESTATE
+DESCRIPTOR.message_types_by_name['ApexInfo'] = _APEXINFO
+DESCRIPTOR.message_types_by_name['ApexMetadata'] = _APEXMETADATA
DESCRIPTOR.message_types_by_name['OtaMetadata'] = _OTAMETADATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
@@ -322,6 +428,20 @@
})
_sym_db.RegisterMessage(DeviceState)
+ApexInfo = _reflection.GeneratedProtocolMessageType('ApexInfo', (_message.Message,), {
+ 'DESCRIPTOR' : _APEXINFO,
+ '__module__' : 'ota_metadata_pb2'
+ # @@protoc_insertion_point(class_scope:build.tools.releasetools.ApexInfo)
+ })
+_sym_db.RegisterMessage(ApexInfo)
+
+ApexMetadata = _reflection.GeneratedProtocolMessageType('ApexMetadata', (_message.Message,), {
+ 'DESCRIPTOR' : _APEXMETADATA,
+ '__module__' : 'ota_metadata_pb2'
+ # @@protoc_insertion_point(class_scope:build.tools.releasetools.ApexMetadata)
+ })
+_sym_db.RegisterMessage(ApexMetadata)
+
OtaMetadata = _reflection.GeneratedProtocolMessageType('OtaMetadata', (_message.Message,), {
'PropertyFilesEntry' : _reflection.GeneratedProtocolMessageType('PropertyFilesEntry', (_message.Message,), {
diff --git a/tools/releasetools/ota_package_parser.py b/tools/releasetools/ota_package_parser.py
index 331122b..1e733b9 100755
--- a/tools/releasetools/ota_package_parser.py
+++ b/tools/releasetools/ota_package_parser.py
@@ -215,7 +215,7 @@
logging.basicConfig(level=logging.INFO, format=logging_format)
try:
- with zipfile.ZipFile(args.ota_package, 'r') as package:
+ with zipfile.ZipFile(args.ota_package, 'r', allowZip64=True) as package:
package_parser = OtaPackageParser(package)
package_parser.Analyze()
except:
diff --git a/tools/releasetools/ota_utils.py b/tools/releasetools/ota_utils.py
index d444d41..6bbcc92 100644
--- a/tools/releasetools/ota_utils.py
+++ b/tools/releasetools/ota_utils.py
@@ -14,14 +14,16 @@
import copy
import itertools
+import logging
import os
import zipfile
import ota_metadata_pb2
from common import (ZipDelete, ZipClose, OPTIONS, MakeTempFile,
ZipWriteStr, BuildInfo, LoadDictionaryFromFile,
- SignFile, PARTITIONS_WITH_CARE_MAP, PartitionBuildProps)
+ SignFile, PARTITIONS_WITH_BUILD_PROP, PartitionBuildProps)
+logger = logging.getLogger(__name__)
OPTIONS.no_signing = False
OPTIONS.force_non_ab = False
@@ -38,7 +40,6 @@
METADATA_PROTO_NAME = 'META-INF/com/android/metadata.pb'
UNZIP_PATTERN = ['IMAGES/*', 'META/*', 'OTA/*', 'RADIO/*']
-
def FinalizeMetadata(metadata, input_file, output_file, needed_property_files):
"""Finalizes the metadata and signs an A/B OTA package.
@@ -62,7 +63,7 @@
def ComputeAllPropertyFiles(input_file, needed_property_files):
# Write the current metadata entry with placeholders.
- with zipfile.ZipFile(input_file) as input_zip:
+ with zipfile.ZipFile(input_file, allowZip64=True) as input_zip:
for property_files in needed_property_files:
metadata.property_files[property_files.name] = property_files.Compute(
input_zip)
@@ -70,7 +71,7 @@
if METADATA_NAME in namelist or METADATA_PROTO_NAME in namelist:
ZipDelete(input_file, [METADATA_NAME, METADATA_PROTO_NAME])
- output_zip = zipfile.ZipFile(input_file, 'a')
+ output_zip = zipfile.ZipFile(input_file, 'a', allowZip64=True)
WriteMetadata(metadata, output_zip)
ZipClose(output_zip)
@@ -82,7 +83,7 @@
return prelim_signing
def FinalizeAllPropertyFiles(prelim_signing, needed_property_files):
- with zipfile.ZipFile(prelim_signing) as prelim_signing_zip:
+ with zipfile.ZipFile(prelim_signing, allowZip64=True) as prelim_signing_zip:
for property_files in needed_property_files:
metadata.property_files[property_files.name] = property_files.Finalize(
prelim_signing_zip,
@@ -108,7 +109,7 @@
# Replace the METADATA entry.
ZipDelete(prelim_signing, [METADATA_NAME, METADATA_PROTO_NAME])
- output_zip = zipfile.ZipFile(prelim_signing, 'a')
+ output_zip = zipfile.ZipFile(prelim_signing, 'a', allowZip64=True)
WriteMetadata(metadata, output_zip)
ZipClose(output_zip)
@@ -119,7 +120,7 @@
SignOutput(prelim_signing, output_file)
# Reopen the final signed zip to double check the streaming metadata.
- with zipfile.ZipFile(output_file) as output_zip:
+ with zipfile.ZipFile(output_file, allowZip64=True) as output_zip:
for property_files in needed_property_files:
property_files.Verify(
output_zip, metadata.property_files[property_files.name].strip())
@@ -173,7 +174,7 @@
# delta_generator will error out on unused timestamps,
# so only generate timestamps for dynamic partitions
# used in OTA update.
- for partition in sorted(set(PARTITIONS_WITH_CARE_MAP) & ab_partitions):
+ for partition in sorted(set(PARTITIONS_WITH_BUILD_PROP) & ab_partitions):
partition_prop = build_info.info_dict.get(
'{}.build.prop'.format(partition))
# Skip if the partition is missing, or it doesn't have a build.prop
@@ -359,11 +360,11 @@
# Reload the info_dict as some build properties may change their values
# based on the value of ro.boot* properties.
info_dict = copy.deepcopy(default_build_info.info_dict)
- for partition in PARTITIONS_WITH_CARE_MAP:
+ for partition in PARTITIONS_WITH_BUILD_PROP:
partition_prop_key = "{}.build.prop".format(partition)
input_file = info_dict[partition_prop_key].input_file
if isinstance(input_file, zipfile.ZipFile):
- with zipfile.ZipFile(input_file.filename) as input_zip:
+ with zipfile.ZipFile(input_file.filename, allowZip64=True) as input_zip:
info_dict[partition_prop_key] = \
PartitionBuildProps.FromInputFile(input_zip, partition,
placeholder_values)
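
The allowZip64=True additions throughout this file (and in the test files below) all serve the same purpose: enabling ZIP64 extensions so archives larger than 4 GiB, or with very large entries, can be handled. A minimal sketch of the pattern follows; note that recent Python 3 already defaults allowZip64 to True, so the explicit flag mostly documents intent.

# Sketch: write and re-read an entry with ZIP64 explicitly enabled, mirroring
# the zipfile.ZipFile(..., allowZip64=True) calls in the hunks above.
import os
import tempfile
import zipfile

zip_path = os.path.join(tempfile.mkdtemp(), 'demo.zip')
with zipfile.ZipFile(zip_path, 'w', allowZip64=True) as zf:
  zf.writestr('payload.bin', os.urandom(1024))

with zipfile.ZipFile(zip_path, 'r', allowZip64=True) as zf:
  assert len(zf.read('payload.bin')) == 1024
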
diff --git a/tools/releasetools/sign_target_files_apks.py b/tools/releasetools/sign_target_files_apks.py
index b4646b7..176e258 100755
--- a/tools/releasetools/sign_target_files_apks.py
+++ b/tools/releasetools/sign_target_files_apks.py
@@ -515,7 +515,7 @@
data,
payload_key,
container_key,
- key_passwords[container_key],
+ key_passwords,
apk_keys,
codename_to_api_level_map,
no_hashtree=True,
@@ -626,6 +626,10 @@
elif filename in ["META/care_map.pb", "META/care_map.txt"]:
pass
+ # Skip apex_info.pb because we sign/modify apexes
+ elif filename == "META/apex_info.pb":
+ pass
+
# Updates system_other.avbpubkey in /product/etc/.
elif filename in (
"PRODUCT/etc/security/avb/system_other.avbpubkey",
@@ -813,7 +817,7 @@
keys: A list of public keys to use during OTA package verification.
"""
temp_file = io.BytesIO()
- certs_zip = zipfile.ZipFile(temp_file, "w")
+ certs_zip = zipfile.ZipFile(temp_file, "w", allowZip64=True)
for k in keys:
common.ZipWrite(certs_zip, k)
common.ZipClose(certs_zip)
@@ -1294,7 +1298,7 @@
common.InitLogging()
- input_zip = zipfile.ZipFile(args[0], "r")
+ input_zip = zipfile.ZipFile(args[0], "r", allowZip64=True)
output_zip = zipfile.ZipFile(args[1], "w",
compression=zipfile.ZIP_DEFLATED,
allowZip64=True)
diff --git a/tools/releasetools/test_add_img_to_target_files.py b/tools/releasetools/test_add_img_to_target_files.py
index efa60b6..6b7a7db 100644
--- a/tools/releasetools/test_add_img_to_target_files.py
+++ b/tools/releasetools/test_add_img_to_target_files.py
@@ -93,10 +93,10 @@
# Set up the output zip.
output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(output_file, 'w') as output_zip:
+ with zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
AddPackRadioImages(output_zip, images)
- with zipfile.ZipFile(output_file, 'r') as verify_zip:
+ with zipfile.ZipFile(output_file, 'r', allowZip64=True) as verify_zip:
for image in images:
self.assertIn('IMAGES/' + image + '.img', verify_zip.namelist())
@@ -344,12 +344,12 @@
image_paths = self._test_AddCareMapForAbOta()
output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(output_file, 'w') as output_zip:
+ with zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
AddCareMapForAbOta(output_zip, ['system', 'vendor'], image_paths)
care_map_name = "META/care_map.pb"
temp_dir = common.MakeTempDir()
- with zipfile.ZipFile(output_file, 'r') as verify_zip:
+ with zipfile.ZipFile(output_file, 'r', allowZip64=True) as verify_zip:
self.assertTrue(care_map_name in verify_zip.namelist())
verify_zip.extract(care_map_name, path=temp_dir)
@@ -367,7 +367,7 @@
image_paths = self._test_AddCareMapForAbOta()
output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(output_file, 'w') as output_zip:
+ with zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
# Create an existing META/care_map.pb entry.
common.ZipWriteStr(output_zip, 'META/care_map.pb',
'fake care_map.pb')
diff --git a/tools/releasetools/test_apex_utils.py b/tools/releasetools/test_apex_utils.py
index 339ddc7..71f6433 100644
--- a/tools/releasetools/test_apex_utils.py
+++ b/tools/releasetools/test_apex_utils.py
@@ -174,8 +174,8 @@
@test_utils.SkipIfExternalToolsUnavailable()
def test_ApexApkSigner_noAssetDir(self):
no_asset = common.MakeTempFile(suffix='.apex')
- with zipfile.ZipFile(no_asset, 'w') as output_zip:
- with zipfile.ZipFile(self.apex_with_apk, 'r') as input_zip:
+ with zipfile.ZipFile(no_asset, 'w', allowZip64=True) as output_zip:
+ with zipfile.ZipFile(self.apex_with_apk, 'r', allowZip64=True) as input_zip:
name_list = input_zip.namelist()
for name in name_list:
if not name.startswith('assets'):
diff --git a/tools/releasetools/test_common.py b/tools/releasetools/test_common.py
index 81ee53d..ecd759c 100644
--- a/tools/releasetools/test_common.py
+++ b/tools/releasetools/test_common.py
@@ -15,6 +15,7 @@
#
import copy
+import json
import os
import subprocess
import tempfile
@@ -363,7 +364,7 @@
self.assertEqual(int(expected_stat.st_mtime), int(new_stat.st_mtime))
# Reopen the zip file to verify.
- zip_file = zipfile.ZipFile(zip_file_name, "r")
+ zip_file = zipfile.ZipFile(zip_file_name, "r", allowZip64=True)
# Verify the timestamp.
info = zip_file.getinfo(arcname)
@@ -399,7 +400,7 @@
arcname = arcname[1:]
zip_file.close()
- zip_file = zipfile.ZipFile(zip_file_name, "w")
+ zip_file = zipfile.ZipFile(zip_file_name, "w", allowZip64=True)
try:
sha1_hash = sha1()
@@ -431,7 +432,7 @@
zip_file_name = zip_file.name
zip_file.close()
- zip_file = zipfile.ZipFile(zip_file_name, "w")
+ zip_file = zipfile.ZipFile(zip_file_name, "w", allowZip64=True)
try:
expected_compress_type = extra_args.get("compress_type",
@@ -475,7 +476,7 @@
arcname_large = arcname_large[1:]
zip_file.close()
- zip_file = zipfile.ZipFile(zip_file_name, "w")
+ zip_file = zipfile.ZipFile(zip_file_name, "w", allowZip64=True)
try:
sha1_hash = sha1()
@@ -599,7 +600,7 @@
try:
random_string = os.urandom(1024)
- zip_file = zipfile.ZipFile(zip_file_name, "w")
+ zip_file = zipfile.ZipFile(zip_file_name, "w", allowZip64=True)
# Default perms should be 0o644 when passing the filename.
common.ZipWriteStr(zip_file, "foo", random_string)
# Honor the specified perms.
@@ -644,7 +645,7 @@
try:
common.ZipDelete(zip_file.name, 'Test2')
- with zipfile.ZipFile(zip_file.name, 'r') as check_zip:
+ with zipfile.ZipFile(zip_file.name, 'r', allowZip64=True) as check_zip:
entries = check_zip.namelist()
self.assertTrue('Test1' in entries)
self.assertFalse('Test2' in entries)
@@ -652,21 +653,21 @@
self.assertRaises(
common.ExternalError, common.ZipDelete, zip_file.name, 'Test2')
- with zipfile.ZipFile(zip_file.name, 'r') as check_zip:
+ with zipfile.ZipFile(zip_file.name, 'r', allowZip64=True) as check_zip:
entries = check_zip.namelist()
self.assertTrue('Test1' in entries)
self.assertFalse('Test2' in entries)
self.assertTrue('Test3' in entries)
common.ZipDelete(zip_file.name, ['Test3'])
- with zipfile.ZipFile(zip_file.name, 'r') as check_zip:
+ with zipfile.ZipFile(zip_file.name, 'r', allowZip64=True) as check_zip:
entries = check_zip.namelist()
self.assertTrue('Test1' in entries)
self.assertFalse('Test2' in entries)
self.assertFalse('Test3' in entries)
common.ZipDelete(zip_file.name, ['Test1', 'Test2'])
- with zipfile.ZipFile(zip_file.name, 'r') as check_zip:
+ with zipfile.ZipFile(zip_file.name, 'r', allowZip64=True) as check_zip:
entries = check_zip.namelist()
self.assertFalse('Test1' in entries)
self.assertFalse('Test2' in entries)
@@ -834,7 +835,7 @@
if additional is None:
additional = []
target_files = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
target_files_zip.writestr('META/apkcerts.txt', apkcerts_txt)
for entry in additional:
target_files_zip.writestr(entry, '')
@@ -842,7 +843,7 @@
def test_ReadApkCerts_NoncompressedApks(self):
target_files = self._write_apkcerts_txt(self.APKCERTS_TXT1)
- with zipfile.ZipFile(target_files, 'r') as input_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as input_zip:
certmap, ext = common.ReadApkCerts(input_zip)
self.assertDictEqual(self.APKCERTS_CERTMAP1, certmap)
@@ -855,7 +856,7 @@
self.APKCERTS_TXT2,
['Compressed1.apk.gz', 'Compressed3.apk'])
- with zipfile.ZipFile(target_files, 'r') as input_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as input_zip:
certmap, ext = common.ReadApkCerts(input_zip)
self.assertDictEqual(self.APKCERTS_CERTMAP2, certmap)
@@ -865,7 +866,7 @@
target_files = self._write_apkcerts_txt(
self.APKCERTS_TXT3, ['Compressed4.apk.xz'])
- with zipfile.ZipFile(target_files, 'r') as input_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as input_zip:
certmap, ext = common.ReadApkCerts(input_zip)
self.assertDictEqual(self.APKCERTS_CERTMAP3, certmap)
@@ -876,7 +877,7 @@
self.APKCERTS_TXT1 + self.APKCERTS_TXT2,
['Compressed1.apk.gz', 'Compressed3.apk'])
- with zipfile.ZipFile(target_files, 'r') as input_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as input_zip:
certmap, ext = common.ReadApkCerts(input_zip)
certmap_merged = self.APKCERTS_CERTMAP1.copy()
@@ -889,7 +890,7 @@
self.APKCERTS_TXT2 + self.APKCERTS_TXT3,
['Compressed1.apk.gz', 'Compressed4.apk.xz'])
- with zipfile.ZipFile(target_files, 'r') as input_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as input_zip:
self.assertRaises(ValueError, common.ReadApkCerts, input_zip)
def test_ReadApkCerts_MismatchingKeys(self):
@@ -899,12 +900,12 @@
)
target_files = self._write_apkcerts_txt(malformed_apkcerts_txt)
- with zipfile.ZipFile(target_files, 'r') as input_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as input_zip:
self.assertRaises(ValueError, common.ReadApkCerts, input_zip)
def test_ReadApkCerts_WithWithoutOptionalFields(self):
target_files = self._write_apkcerts_txt(self.APKCERTS_TXT4)
- with zipfile.ZipFile(target_files, 'r') as input_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as input_zip:
certmap, ext = common.ReadApkCerts(input_zip)
self.assertDictEqual(self.APKCERTS_CERTMAP4, certmap)
@@ -973,7 +974,7 @@
@test_utils.SkipIfExternalToolsUnavailable()
def test_GetSparseImage_emptyBlockMapFile(self):
target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
- with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
target_files_zip.write(
test_utils.construct_sparse_image([
(0xCAC1, 6),
@@ -985,7 +986,7 @@
target_files_zip.writestr('SYSTEM/file2', os.urandom(4096 * 3))
tempdir = common.UnzipTemp(target_files)
- with zipfile.ZipFile(target_files, 'r') as input_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as input_zip:
sparse_image = common.GetSparseImage('system', tempdir, input_zip, False)
self.assertDictEqual(
@@ -995,6 +996,55 @@
},
sparse_image.file_map)
+ def test_PartitionMapFromTargetFiles(self):
+ target_files_dir = common.MakeTempDir()
+ os.makedirs(os.path.join(target_files_dir, 'SYSTEM'))
+ os.makedirs(os.path.join(target_files_dir, 'SYSTEM', 'vendor'))
+ os.makedirs(os.path.join(target_files_dir, 'PRODUCT'))
+ os.makedirs(os.path.join(target_files_dir, 'SYSTEM', 'product'))
+ os.makedirs(os.path.join(target_files_dir, 'SYSTEM', 'vendor', 'odm'))
+ os.makedirs(os.path.join(target_files_dir, 'VENDOR_DLKM'))
+ partition_map = common.PartitionMapFromTargetFiles(target_files_dir)
+ self.assertDictEqual(
+ partition_map,
+ {
+ 'system': 'SYSTEM',
+ 'vendor': 'SYSTEM/vendor',
+ # Prefer PRODUCT over SYSTEM/product
+ 'product': 'PRODUCT',
+ 'odm': 'SYSTEM/vendor/odm',
+ 'vendor_dlkm': 'VENDOR_DLKM',
+ # No system_ext or odm_dlkm
+ })
+
+ def test_SharedUidPartitionViolations(self):
+ uid_dict = {
+ 'android.uid.phone': {
+ 'system': ['system_phone.apk'],
+ 'system_ext': ['system_ext_phone.apk'],
+ },
+ 'android.uid.wifi': {
+ 'vendor': ['vendor_wifi.apk'],
+ 'odm': ['odm_wifi.apk'],
+ },
+ }
+ errors = common.SharedUidPartitionViolations(
+ uid_dict, [('system', 'system_ext'), ('vendor', 'odm')])
+ self.assertEqual(errors, [])
+
+ def test_SharedUidPartitionViolations_Violation(self):
+ uid_dict = {
+ 'android.uid.phone': {
+ 'system': ['system_phone.apk'],
+ 'vendor': ['vendor_phone.apk'],
+ },
+ }
+ errors = common.SharedUidPartitionViolations(
+ uid_dict, [('system', 'system_ext'), ('vendor', 'odm')])
+ self.assertIn(
+ ('APK sharedUserId "android.uid.phone" found across partition groups '
+ 'in partitions "system,vendor"'), errors)
+
def test_GetSparseImage_missingImageFile(self):
self.assertRaises(
AssertionError, common.GetSparseImage, 'system2', self.testdata_dir,
@@ -1006,7 +1056,7 @@
@test_utils.SkipIfExternalToolsUnavailable()
def test_GetSparseImage_missingBlockMapFile(self):
target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
- with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
target_files_zip.write(
test_utils.construct_sparse_image([
(0xCAC1, 6),
@@ -1017,7 +1067,7 @@
target_files_zip.writestr('SYSTEM/file2', os.urandom(4096 * 3))
tempdir = common.UnzipTemp(target_files)
- with zipfile.ZipFile(target_files, 'r') as input_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as input_zip:
self.assertRaises(
AssertionError, common.GetSparseImage, 'system', tempdir, input_zip,
False)
@@ -1026,7 +1076,7 @@
def test_GetSparseImage_sharedBlocks_notAllowed(self):
"""Tests the case of having overlapping blocks but disallowed."""
target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
- with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
target_files_zip.write(
test_utils.construct_sparse_image([(0xCAC2, 16)]),
arcname='IMAGES/system.img')
@@ -1040,7 +1090,7 @@
target_files_zip.writestr('SYSTEM/file2', os.urandom(4096 * 3))
tempdir = common.UnzipTemp(target_files)
- with zipfile.ZipFile(target_files, 'r') as input_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as input_zip:
self.assertRaises(
AssertionError, common.GetSparseImage, 'system', tempdir, input_zip,
False)
@@ -1049,7 +1099,7 @@
def test_GetSparseImage_sharedBlocks_allowed(self):
"""Tests the case for target using BOARD_EXT4_SHARE_DUP_BLOCKS := true."""
target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
- with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
# Construct an image with a care_map of "0-5 9-12".
target_files_zip.write(
test_utils.construct_sparse_image([(0xCAC2, 16)]),
@@ -1064,7 +1114,7 @@
target_files_zip.writestr('SYSTEM/file2', os.urandom(4096 * 3))
tempdir = common.UnzipTemp(target_files)
- with zipfile.ZipFile(target_files, 'r') as input_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as input_zip:
sparse_image = common.GetSparseImage('system', tempdir, input_zip, True)
self.assertDictEqual(
@@ -1094,7 +1144,7 @@
def test_GetSparseImage_incompleteRanges(self):
"""Tests the case of ext4 images with holes."""
target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
- with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
target_files_zip.write(
test_utils.construct_sparse_image([(0xCAC2, 16)]),
arcname='IMAGES/system.img')
@@ -1108,7 +1158,7 @@
target_files_zip.writestr('SYSTEM/file2', os.urandom(4096 * 3))
tempdir = common.UnzipTemp(target_files)
- with zipfile.ZipFile(target_files, 'r') as input_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as input_zip:
sparse_image = common.GetSparseImage('system', tempdir, input_zip, False)
self.assertEqual(
@@ -1119,7 +1169,7 @@
@test_utils.SkipIfExternalToolsUnavailable()
def test_GetSparseImage_systemRootImage_filenameWithExtraLeadingSlash(self):
target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
- with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
target_files_zip.write(
test_utils.construct_sparse_image([(0xCAC2, 16)]),
arcname='IMAGES/system.img')
@@ -1136,7 +1186,7 @@
target_files_zip.writestr('SYSTEM/app/file3', os.urandom(4096 * 4))
tempdir = common.UnzipTemp(target_files)
- with zipfile.ZipFile(target_files, 'r') as input_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as input_zip:
sparse_image = common.GetSparseImage('system', tempdir, input_zip, False)
self.assertEqual(
@@ -1149,7 +1199,7 @@
@test_utils.SkipIfExternalToolsUnavailable()
def test_GetSparseImage_systemRootImage_nonSystemFiles(self):
target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
- with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
target_files_zip.write(
test_utils.construct_sparse_image([(0xCAC2, 16)]),
arcname='IMAGES/system.img')
@@ -1163,7 +1213,7 @@
target_files_zip.writestr('ROOT/init.rc', os.urandom(4096 * 4))
tempdir = common.UnzipTemp(target_files)
- with zipfile.ZipFile(target_files, 'r') as input_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as input_zip:
sparse_image = common.GetSparseImage('system', tempdir, input_zip, False)
self.assertEqual(
@@ -1174,7 +1224,7 @@
@test_utils.SkipIfExternalToolsUnavailable()
def test_GetSparseImage_fileNotFound(self):
target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
- with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
target_files_zip.write(
test_utils.construct_sparse_image([(0xCAC2, 16)]),
arcname='IMAGES/system.img')
@@ -1186,7 +1236,7 @@
target_files_zip.writestr('SYSTEM/file1', os.urandom(4096 * 7))
tempdir = common.UnzipTemp(target_files)
- with zipfile.ZipFile(target_files, 'r') as input_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as input_zip:
self.assertRaises(
AssertionError, common.GetSparseImage, 'system', tempdir, input_zip,
False)
@@ -1274,7 +1324,7 @@
@staticmethod
def _test_LoadInfoDict_createTargetFiles(info_dict, fstab_path):
target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
- with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
info_values = ''.join(
['{}={}\n'.format(k, v) for k, v in sorted(info_dict.items())])
common.ZipWriteStr(target_files_zip, 'META/misc_info.txt', info_values)
@@ -1294,7 +1344,7 @@
target_files = self._test_LoadInfoDict_createTargetFiles(
self.INFO_DICT_DEFAULT,
'BOOT/RAMDISK/system/etc/recovery.fstab')
- with zipfile.ZipFile(target_files, 'r') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as target_files_zip:
loaded_dict = common.LoadInfoDict(target_files_zip)
self.assertEqual(3, loaded_dict['recovery_api_version'])
self.assertEqual(2, loaded_dict['fstab_version'])
@@ -1305,7 +1355,7 @@
target_files = self._test_LoadInfoDict_createTargetFiles(
self.INFO_DICT_DEFAULT,
'BOOT/RAMDISK/etc/recovery.fstab')
- with zipfile.ZipFile(target_files, 'r') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as target_files_zip:
loaded_dict = common.LoadInfoDict(target_files_zip)
self.assertEqual(3, loaded_dict['recovery_api_version'])
self.assertEqual(2, loaded_dict['fstab_version'])
@@ -1346,7 +1396,7 @@
target_files = self._test_LoadInfoDict_createTargetFiles(
info_dict,
'RECOVERY/RAMDISK/system/etc/recovery.fstab')
- with zipfile.ZipFile(target_files, 'r') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as target_files_zip:
loaded_dict = common.LoadInfoDict(target_files_zip)
self.assertEqual(3, loaded_dict['recovery_api_version'])
self.assertEqual(2, loaded_dict['fstab_version'])
@@ -1362,7 +1412,7 @@
target_files = self._test_LoadInfoDict_createTargetFiles(
info_dict,
'RECOVERY/RAMDISK/system/etc/recovery.fstab')
- with zipfile.ZipFile(target_files, 'r') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as target_files_zip:
loaded_dict = common.LoadInfoDict(target_files_zip)
self.assertEqual(3, loaded_dict['recovery_api_version'])
self.assertEqual(2, loaded_dict['fstab_version'])
@@ -1376,7 +1426,7 @@
target_files = self._test_LoadInfoDict_createTargetFiles(
info_dict,
'RECOVERY/RAMDISK/system/etc/recovery.fstab')
- with zipfile.ZipFile(target_files, 'r') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as target_files_zip:
loaded_dict = common.LoadInfoDict(target_files_zip)
self.assertEqual(3, loaded_dict['recovery_api_version'])
self.assertEqual(2, loaded_dict['fstab_version'])
@@ -1388,7 +1438,7 @@
self.INFO_DICT_DEFAULT,
'BOOT/RAMDISK/system/etc/recovery.fstab')
common.ZipDelete(target_files, 'META/misc_info.txt')
- with zipfile.ZipFile(target_files, 'r') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as target_files_zip:
self.assertRaises(ValueError, common.LoadInfoDict, target_files_zip)
@test_utils.SkipIfExternalToolsUnavailable()
@@ -1412,19 +1462,23 @@
target_files = self._test_LoadInfoDict_createTargetFiles(
self.INFO_DICT_DEFAULT,
'BOOT/RAMDISK/system/etc/recovery.fstab')
- with zipfile.ZipFile(target_files, 'r') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'r', allowZip64=True) as target_files_zip:
self.assertRaises(
AssertionError, common.LoadInfoDict, target_files_zip, True)
def test_MergeDynamicPartitionInfoDicts_ReturnsMergedDict(self):
framework_dict = {
+ 'use_dynamic_partitions': 'true',
'super_partition_groups': 'group_a',
'dynamic_partition_list': 'system',
'super_group_a_partition_list': 'system',
}
vendor_dict = {
+ 'use_dynamic_partitions': 'true',
'super_partition_groups': 'group_a group_b',
'dynamic_partition_list': 'vendor product',
+ 'super_block_devices': 'super',
+ 'super_super_device_size': '3000',
'super_group_a_partition_list': 'vendor',
'super_group_a_group_size': '1000',
'super_group_b_partition_list': 'product',
@@ -1434,8 +1488,11 @@
framework_dict=framework_dict,
vendor_dict=vendor_dict)
expected_merged_dict = {
+ 'use_dynamic_partitions': 'true',
'super_partition_groups': 'group_a group_b',
- 'dynamic_partition_list': 'system vendor product',
+ 'dynamic_partition_list': 'product system vendor',
+ 'super_block_devices': 'super',
+ 'super_super_device_size': '3000',
'super_group_a_partition_list': 'system vendor',
'super_group_a_group_size': '1000',
'super_group_b_partition_list': 'product',
@@ -1445,12 +1502,14 @@
def test_MergeDynamicPartitionInfoDicts_IgnoringFrameworkGroupSize(self):
framework_dict = {
+ 'use_dynamic_partitions': 'true',
'super_partition_groups': 'group_a',
'dynamic_partition_list': 'system',
'super_group_a_partition_list': 'system',
'super_group_a_group_size': '5000',
}
vendor_dict = {
+ 'use_dynamic_partitions': 'true',
'super_partition_groups': 'group_a group_b',
'dynamic_partition_list': 'vendor product',
'super_group_a_partition_list': 'vendor',
@@ -1462,8 +1521,9 @@
framework_dict=framework_dict,
vendor_dict=vendor_dict)
expected_merged_dict = {
+ 'use_dynamic_partitions': 'true',
'super_partition_groups': 'group_a group_b',
- 'dynamic_partition_list': 'system vendor product',
+ 'dynamic_partition_list': 'product system vendor',
'super_group_a_partition_list': 'system vendor',
'super_group_a_group_size': '1000',
'super_group_b_partition_list': 'product',
@@ -1704,7 +1764,7 @@
@staticmethod
def get_op_list(output_path):
- with zipfile.ZipFile(output_path) as output_zip:
+ with zipfile.ZipFile(output_path, allowZip64=True) as output_zip:
with output_zip.open('dynamic_partitions_op_list') as op_list:
return [line.decode().strip() for line in op_list.readlines()
if not line.startswith(b'#')]
@@ -1724,7 +1784,7 @@
MockBlockDifference("vendor", FakeSparseImage(1 * GiB))]
dp_diff = common.DynamicPartitionsDifference(target_info, block_diffs)
- with zipfile.ZipFile(self.output_path, 'w') as output_zip:
+ with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
self.assertEqual(str(self.script).strip(), """
@@ -1772,7 +1832,7 @@
dp_diff = common.DynamicPartitionsDifference(target_info,
block_diffs=[],
source_info_dict=source_info)
- with zipfile.ZipFile(self.output_path, 'w') as output_zip:
+ with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
lines = self.get_op_list(self.output_path)
@@ -1816,7 +1876,7 @@
dp_diff = common.DynamicPartitionsDifference(target_info, block_diffs,
source_info_dict=source_info)
- with zipfile.ZipFile(self.output_path, 'w') as output_zip:
+ with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
metadata_idx = self.script.lines.index(
@@ -1887,7 +1947,7 @@
dp_diff = common.DynamicPartitionsDifference(target_info, block_diffs,
source_info_dict=source_info)
- with zipfile.ZipFile(self.output_path, 'w') as output_zip:
+ with zipfile.ZipFile(self.output_path, 'w', allowZip64=True) as output_zip:
dp_diff.WriteScript(self.script, output_zip, write_verify_script=True)
self.assertNotIn("block_image_update", str(self.script),
@@ -1910,7 +1970,7 @@
@staticmethod
def _BuildZipFile(entries):
input_file = common.MakeTempFile(prefix='target_files-', suffix='.zip')
- with zipfile.ZipFile(input_file, 'w') as input_zip:
+ with zipfile.ZipFile(input_file, 'w', allowZip64=True) as input_zip:
for name, content in entries.items():
input_zip.writestr(name, content)
@@ -1927,7 +1987,7 @@
'ODM/etc/build.prop': '\n'.join(build_prop),
})
- with zipfile.ZipFile(input_file, 'r') as input_zip:
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
placeholder_values = {
'ro.boot.product.device_name': ['std', 'pro']
}
@@ -1959,7 +2019,7 @@
'ODM/etc/build_pro.prop': '\n'.join(build_pro_prop),
})
- with zipfile.ZipFile(input_file, 'r') as input_zip:
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
placeholder_values = {
'ro.boot.product.device_name': 'std'
}
@@ -1974,7 +2034,7 @@
'ro.product.odm.name': 'product1',
}, partition_props.build_props)
- with zipfile.ZipFile(input_file, 'r') as input_zip:
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
placeholder_values = {
'ro.boot.product.device_name': 'pro'
}
@@ -1995,7 +2055,7 @@
'ODM/etc/build.prop': '\n'.join(build_prop),
})
- with zipfile.ZipFile(input_file, 'r') as input_zip:
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
partition_props = common.PartitionBuildProps.FromInputFile(
input_zip, 'odm')
@@ -2038,7 +2098,7 @@
'ODM/etc/build_product2.prop': '\n'.join(product2_prop),
})
- with zipfile.ZipFile(input_file, 'r') as input_zip:
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
placeholder_values = {
'ro.boot.product.device_name': 'std',
'ro.boot.product.product_name': 'product1',
@@ -2055,7 +2115,7 @@
'ro.product.odm.name': 'product1'
}, partition_props.build_props)
- with zipfile.ZipFile(input_file, 'r') as input_zip:
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
placeholder_values = {
'ro.boot.product.device_name': 'pro',
'ro.boot.product.product_name': 'product2',
@@ -2089,7 +2149,7 @@
'ODM/etc/build_pro.prop': '\n'.join(build_pro_prop),
})
- with zipfile.ZipFile(input_file, 'r') as input_zip:
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
placeholder_values = {
'ro.boot.product.device_name': 'std',
}
@@ -2126,7 +2186,7 @@
'ODM/etc/build_product2.prop': '\n'.join(product2_prop),
})
- with zipfile.ZipFile(input_file, 'r') as input_zip:
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip:
placeholder_values = {
'ro.boot.product.device_name': 'std',
'ro.boot.product.product_name': 'product1',
diff --git a/tools/releasetools/test_merge_target_files.py b/tools/releasetools/test_merge_target_files.py
index ff8593b..7ea7f96 100644
--- a/tools/releasetools/test_merge_target_files.py
+++ b/tools/releasetools/test_merge_target_files.py
@@ -117,6 +117,15 @@
DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
vendor_item_list))
+ def test_validate_config_lists_ReturnsFalseIfSharedExtractedPartitionImage(
+ self):
+ vendor_item_list = list(DEFAULT_VENDOR_ITEM_LIST)
+ vendor_item_list.append('IMAGES/system.img')
+ self.assertFalse(
+ validate_config_lists(DEFAULT_FRAMEWORK_ITEM_LIST,
+ DEFAULT_FRAMEWORK_MISC_INFO_KEYS,
+ vendor_item_list))
+
def test_validate_config_lists_ReturnsFalseIfBadSystemMiscInfoKeys(self):
for bad_key in ['dynamic_partition_list', 'super_partition_groups']:
framework_misc_info_keys = list(DEFAULT_FRAMEWORK_MISC_INFO_KEYS)
@@ -144,8 +153,7 @@
process_apex_keys_apk_certs_common(framework_dir, vendor_dir, output_dir,
set(['product', 'system', 'system_ext']),
- set(['odm', 'vendor']),
- 'apexkeys.txt')
+ set(['odm', 'vendor']), 'apexkeys.txt')
merged_entries = []
merged_path = os.path.join(self.testdata_dir, 'apexkeys_merge.txt')
@@ -180,8 +188,7 @@
self.assertRaises(ValueError, process_apex_keys_apk_certs_common,
framework_dir, conflict_dir, output_dir,
set(['product', 'system', 'system_ext']),
- set(['odm', 'vendor']),
- 'apexkeys.txt')
+ set(['odm', 'vendor']), 'apexkeys.txt')
def test_process_apex_keys_apk_certs_HandlesApkCertsSyntax(self):
output_dir = common.MakeTempDir()
@@ -201,8 +208,7 @@
process_apex_keys_apk_certs_common(framework_dir, vendor_dir, output_dir,
set(['product', 'system', 'system_ext']),
- set(['odm', 'vendor']),
- 'apkcerts.txt')
+ set(['odm', 'vendor']), 'apkcerts.txt')
merged_entries = []
merged_path = os.path.join(self.testdata_dir, 'apkcerts_merge.txt')
diff --git a/tools/releasetools/test_ota_from_target_files.py b/tools/releasetools/test_ota_from_target_files.py
index 84cd4c8..b556b3a 100644
--- a/tools/releasetools/test_ota_from_target_files.py
+++ b/tools/releasetools/test_ota_from_target_files.py
@@ -27,17 +27,20 @@
FinalizeMetadata, GetPackageMetadata, PropertyFiles)
from ota_from_target_files import (
_LoadOemDicts, AbOtaPropertyFiles,
+ GetTargetFilesZipForCustomImagesUpdates,
+ GetTargetFilesZipForPartialUpdates,
GetTargetFilesZipForSecondaryImages,
GetTargetFilesZipWithoutPostinstallConfig,
Payload, PayloadSigner, POSTINSTALL_CONFIG,
StreamingPropertyFiles, AB_PARTITIONS)
+from apex_utils import GetApexInfoFromTargetFiles
from test_utils import PropertyFilesTestCase
-def construct_target_files(secondary=False):
+def construct_target_files(secondary=False, compressedApex=False):
"""Returns a target-files.zip file for generating OTA packages."""
target_files = common.MakeTempFile(prefix='target_files-', suffix='.zip')
- with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
# META/update_engine_config.txt
target_files_zip.writestr(
'META/update_engine_config.txt',
@@ -76,6 +79,11 @@
target_files_zip.writestr('IMAGES/system_other.img',
os.urandom(len("system_other")))
+ if compressedApex:
+ apex_file_name = 'com.android.apex.compressed.v1.capex'
+ apex_file = os.path.join(test_utils.get_current_dir(), apex_file_name)
+ target_files_zip.write(apex_file, 'SYSTEM/apex/' + apex_file_name)
+
return target_files
@@ -272,6 +280,21 @@
},
metadata)
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_GetApexInfoFromTargetFiles(self):
+ target_files = construct_target_files(compressedApex=True)
+ apex_infos = GetApexInfoFromTargetFiles(target_files)
+ self.assertEqual(len(apex_infos), 1)
+ self.assertEqual(apex_infos[0].package_name, "com.android.apex.compressed")
+ self.assertEqual(apex_infos[0].version, 1)
+ self.assertEqual(apex_infos[0].is_compressed, True)
+ # Compare the decompressed APEX size with the original uncompressed APEX
+ original_apex_name = 'com.android.apex.compressed.v1_original.apex'
+ original_apex_filepath = os.path.join(test_utils.get_current_dir(), original_apex_name)
+ uncompressed_apex_size = os.path.getsize(original_apex_filepath)
+ self.assertEqual(apex_infos[0].decompressed_size, uncompressed_apex_size)
+
+
def test_GetPackageMetadata_retrofitDynamicPartitions(self):
target_info = common.BuildInfo(self.TEST_TARGET_INFO_DICT, None)
common.OPTIONS.retrofit_dynamic_partitions = True
@@ -417,7 +440,7 @@
'super_google_dynamic_partitions_partition_list=system vendor product',
])
- with zipfile.ZipFile(input_file, 'a') as append_zip:
+ with zipfile.ZipFile(input_file, 'a', allowZip64=True) as append_zip:
common.ZipWriteStr(append_zip, 'META/misc_info.txt', misc_info)
common.ZipWriteStr(append_zip, 'META/dynamic_partitions_info.txt',
dynamic_partitions_info)
@@ -450,6 +473,86 @@
updated_dynamic_partitions_info)
@test_utils.SkipIfExternalToolsUnavailable()
+ def test_GetTargetFilesZipForPartialUpdates_singlePartition(self):
+ input_file = construct_target_files()
+ with zipfile.ZipFile(input_file, 'a', allowZip64=True) as append_zip:
+ common.ZipWriteStr(append_zip, 'IMAGES/system.map', 'fake map')
+
+ target_file = GetTargetFilesZipForPartialUpdates(input_file, ['system'])
+ with zipfile.ZipFile(target_file) as verify_zip:
+ namelist = verify_zip.namelist()
+ ab_partitions = verify_zip.read('META/ab_partitions.txt').decode()
+
+ self.assertIn('META/ab_partitions.txt', namelist)
+ self.assertIn('META/update_engine_config.txt', namelist)
+ self.assertIn('IMAGES/system.img', namelist)
+ self.assertIn('IMAGES/system.map', namelist)
+
+ self.assertNotIn('IMAGES/boot.img', namelist)
+ self.assertNotIn('IMAGES/system_other.img', namelist)
+ self.assertNotIn('RADIO/bootloader.img', namelist)
+ self.assertNotIn('RADIO/modem.img', namelist)
+
+ self.assertEqual('system', ab_partitions)
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_GetTargetFilesZipForPartialUpdates_unrecognizedPartition(self):
+ input_file = construct_target_files()
+ self.assertRaises(ValueError, GetTargetFilesZipForPartialUpdates,
+ input_file, ['product'])
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_GetTargetFilesZipForPartialUpdates_dynamicPartitions(self):
+ input_file = construct_target_files(secondary=True)
+ misc_info = '\n'.join([
+ 'use_dynamic_partition_size=true',
+ 'use_dynamic_partitions=true',
+ 'dynamic_partition_list=system vendor product',
+ 'super_partition_groups=google_dynamic_partitions',
+ 'super_google_dynamic_partitions_group_size=4873781248',
+ 'super_google_dynamic_partitions_partition_list=system vendor product',
+ ])
+ dynamic_partitions_info = '\n'.join([
+ 'super_partition_groups=google_dynamic_partitions',
+ 'super_google_dynamic_partitions_group_size=4873781248',
+ 'super_google_dynamic_partitions_partition_list=system vendor product',
+ ])
+
+ with zipfile.ZipFile(input_file, 'a', allowZip64=True) as append_zip:
+ common.ZipWriteStr(append_zip, 'META/misc_info.txt', misc_info)
+ common.ZipWriteStr(append_zip, 'META/dynamic_partitions_info.txt',
+ dynamic_partitions_info)
+
+ target_file = GetTargetFilesZipForPartialUpdates(input_file,
+ ['boot', 'system'])
+ with zipfile.ZipFile(target_file) as verify_zip:
+ namelist = verify_zip.namelist()
+ ab_partitions = verify_zip.read('META/ab_partitions.txt').decode()
+ updated_misc_info = verify_zip.read('META/misc_info.txt').decode()
+ updated_dynamic_partitions_info = verify_zip.read(
+ 'META/dynamic_partitions_info.txt').decode()
+
+ self.assertIn('META/ab_partitions.txt', namelist)
+ self.assertIn('IMAGES/boot.img', namelist)
+ self.assertIn('IMAGES/system.img', namelist)
+ self.assertIn('META/misc_info.txt', namelist)
+ self.assertIn('META/dynamic_partitions_info.txt', namelist)
+
+ self.assertNotIn('IMAGES/system_other.img', namelist)
+ self.assertNotIn('RADIO/bootloader.img', namelist)
+ self.assertNotIn('RADIO/modem.img', namelist)
+
+ # Check the vendor & product are removed from the partitions list.
+ expected_misc_info = misc_info.replace('system vendor product',
+ 'system')
+ expected_dynamic_partitions_info = dynamic_partitions_info.replace(
+ 'system vendor product', 'system')
+ self.assertEqual(expected_misc_info, updated_misc_info)
+ self.assertEqual(expected_dynamic_partitions_info,
+ updated_dynamic_partitions_info)
+ self.assertEqual('boot\nsystem', ab_partitions)
+
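
The three tests above pin down GetTargetFilesZipForPartialUpdates: only the requested partitions survive in META/ab_partitions.txt, and the partition lists in misc_info.txt and dynamic_partitions_info.txt are trimmed to match. A hedged sketch of that trimming; filter_partition_line is an assumed helper name, not the releasetools implementation:

```python
# Hedged sketch of the partition-list trimming these tests assert on.
# filter_partition_line is an assumed helper, not the releasetools code.
def filter_partition_line(line, wanted):
    key, _, value = line.partition('=')
    if key.endswith('partition_list'):
        kept = [p for p in value.split() if p in wanted]
        return '{}={}'.format(key, ' '.join(kept))
    return line

lines = [
    'dynamic_partition_list=system vendor product',
    'super_google_dynamic_partitions_partition_list=system vendor product',
]
for line in lines:
    print(filter_partition_line(line, {'system'}))
# dynamic_partition_list=system
# super_google_dynamic_partitions_partition_list=system
```
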
+ @test_utils.SkipIfExternalToolsUnavailable()
def test_GetTargetFilesZipWithoutPostinstallConfig(self):
input_file = construct_target_files()
target_file = GetTargetFilesZipWithoutPostinstallConfig(input_file)
@@ -464,6 +567,46 @@
with zipfile.ZipFile(target_file) as verify_zip:
self.assertNotIn(POSTINSTALL_CONFIG, verify_zip.namelist())
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_GetTargetFilesZipForCustomImagesUpdates_oemDefaultImage(self):
+ input_file = construct_target_files()
+ with zipfile.ZipFile(input_file, 'a', allowZip64=True) as append_zip:
+ common.ZipWriteStr(append_zip, 'IMAGES/oem.img', 'oem')
+ common.ZipWriteStr(append_zip, 'IMAGES/oem_test.img', 'oem_test')
+
+ target_file = GetTargetFilesZipForCustomImagesUpdates(
+ input_file, {'oem': 'oem.img'})
+
+ with zipfile.ZipFile(target_file) as verify_zip:
+ namelist = verify_zip.namelist()
+ ab_partitions = verify_zip.read('META/ab_partitions.txt').decode()
+ oem_image = verify_zip.read('IMAGES/oem.img').decode()
+
+ self.assertIn('META/ab_partitions.txt', namelist)
+ self.assertEqual('boot\nsystem\nvendor\nbootloader\nmodem', ab_partitions)
+ self.assertIn('IMAGES/oem.img', namelist)
+ self.assertEqual('oem', oem_image)
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_GetTargetFilesZipForCustomImagesUpdates_oemTestImage(self):
+ input_file = construct_target_files()
+ with zipfile.ZipFile(input_file, 'a', allowZip64=True) as append_zip:
+ common.ZipWriteStr(append_zip, 'IMAGES/oem.img', 'oem')
+ common.ZipWriteStr(append_zip, 'IMAGES/oem_test.img', 'oem_test')
+
+ target_file = GetTargetFilesZipForCustomImagesUpdates(
+ input_file, {'oem': 'oem_test.img'})
+
+ with zipfile.ZipFile(target_file) as verify_zip:
+ namelist = verify_zip.namelist()
+ ab_partitions = verify_zip.read('META/ab_partitions.txt').decode()
+ oem_image = verify_zip.read('IMAGES/oem.img').decode()
+
+ self.assertIn('META/ab_partitions.txt', namelist)
+ self.assertEqual('boot\nsystem\nvendor\nbootloader\nmodem', ab_partitions)
+ self.assertIn('IMAGES/oem.img', namelist)
+ self.assertEqual('oem_test', oem_image)
+
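
The two oem tests above exercise the mapping argument of GetTargetFilesZipForCustomImagesUpdates: the value chosen for each partition decides which file under IMAGES/ supplies the bytes written to IMAGES/<partition>.img. A small sketch of that lookup; custom_image_source is an assumed helper name:

```python
# Small sketch of the custom-image lookup asserted above; custom_image_source
# is an assumed helper name, not the releasetools implementation.
def custom_image_source(partition, custom_images):
    # custom_images maps a partition to the image file under IMAGES/ whose
    # content should end up in IMAGES/<partition>.img.
    return 'IMAGES/' + custom_images.get(partition, partition + '.img')

assert custom_image_source('oem', {'oem': 'oem.img'}) == 'IMAGES/oem.img'
assert custom_image_source('oem', {'oem': 'oem_test.img'}) == 'IMAGES/oem_test.img'
```
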
def _test_FinalizeMetadata(self, large_entry=False):
entries = [
'required-entry1',
@@ -472,7 +615,7 @@
zip_file = PropertyFilesTest.construct_zip_package(entries)
# Add a large entry of 1 GiB if requested.
if large_entry:
- with zipfile.ZipFile(zip_file, 'a') as zip_fp:
+ with zipfile.ZipFile(zip_file, 'a', allowZip64=True) as zip_fp:
zip_fp.writestr(
# Using 'zoo' so that the entry stays behind others after signing.
'zoo',
@@ -514,7 +657,7 @@
'optional-entry2',
]
zip_file = PropertyFilesTest.construct_zip_package(entries)
- with zipfile.ZipFile(zip_file, 'a') as zip_fp:
+ with zipfile.ZipFile(zip_file, 'a', allowZip64=True) as zip_fp:
zip_fp.writestr(
# 'foo-entry1' will appear ahead of all other entries (in alphabetical
# order) after the signing, which will in turn trigger the
@@ -558,7 +701,7 @@
)
zip_file = self.construct_zip_package(entries)
property_files = TestPropertyFiles()
- with zipfile.ZipFile(zip_file, 'r') as zip_fp:
+ with zipfile.ZipFile(zip_file, 'r', allowZip64=True) as zip_fp:
property_files_string = property_files.Compute(zip_fp)
tokens = self._parse_property_files_string(property_files_string)
@@ -574,7 +717,7 @@
)
zip_file = self.construct_zip_package(entries)
property_files = TestPropertyFiles()
- with zipfile.ZipFile(zip_file, 'r') as zip_fp:
+ with zipfile.ZipFile(zip_file, 'r', allowZip64=True) as zip_fp:
property_files_string = property_files.Compute(zip_fp)
tokens = self._parse_property_files_string(property_files_string)
@@ -587,7 +730,7 @@
)
zip_file = self.construct_zip_package(entries)
property_files = TestPropertyFiles()
- with zipfile.ZipFile(zip_file, 'r') as zip_fp:
+ with zipfile.ZipFile(zip_file, 'r', allowZip64=True) as zip_fp:
self.assertRaises(KeyError, property_files.Compute, zip_fp)
@test_utils.SkipIfExternalToolsUnavailable()
@@ -600,7 +743,7 @@
]
zip_file = self.construct_zip_package(entries)
property_files = TestPropertyFiles()
- with zipfile.ZipFile(zip_file, 'r') as zip_fp:
+ with zipfile.ZipFile(zip_file, 'r', allowZip64=True) as zip_fp:
raw_metadata = property_files.GetPropertyFilesString(
zip_fp, reserve_space=False)
streaming_metadata = property_files.Finalize(zip_fp, len(raw_metadata))
@@ -625,7 +768,7 @@
)
zip_file = self.construct_zip_package(entries)
property_files = TestPropertyFiles()
- with zipfile.ZipFile(zip_file, 'r') as zip_fp:
+ with zipfile.ZipFile(zip_file, 'r', allowZip64=True) as zip_fp:
# First get the raw metadata string (i.e. without padding space).
raw_metadata = property_files.GetPropertyFilesString(
zip_fp, reserve_space=False)
@@ -660,7 +803,7 @@
)
zip_file = self.construct_zip_package(entries)
property_files = TestPropertyFiles()
- with zipfile.ZipFile(zip_file, 'r') as zip_fp:
+ with zipfile.ZipFile(zip_file, 'r', allowZip64=True) as zip_fp:
# First get the raw metadata string (i.e. without padding space).
raw_metadata = property_files.GetPropertyFilesString(
zip_fp, reserve_space=False)
@@ -702,7 +845,7 @@
)
zip_file = self.construct_zip_package(entries)
property_files = StreamingPropertyFiles()
- with zipfile.ZipFile(zip_file, 'r') as zip_fp:
+ with zipfile.ZipFile(zip_file, 'r', allowZip64=True) as zip_fp:
property_files_string = property_files.Compute(zip_fp)
tokens = self._parse_property_files_string(property_files_string)
@@ -720,7 +863,7 @@
]
zip_file = self.construct_zip_package(entries)
property_files = StreamingPropertyFiles()
- with zipfile.ZipFile(zip_file, 'r') as zip_fp:
+ with zipfile.ZipFile(zip_file, 'r', allowZip64=True) as zip_fp:
raw_metadata = property_files.GetPropertyFilesString(
zip_fp, reserve_space=False)
streaming_metadata = property_files.Finalize(zip_fp, len(raw_metadata))
@@ -744,7 +887,7 @@
)
zip_file = self.construct_zip_package(entries)
property_files = StreamingPropertyFiles()
- with zipfile.ZipFile(zip_file, 'r') as zip_fp:
+ with zipfile.ZipFile(zip_file, 'r', allowZip64=True) as zip_fp:
# First get the raw metadata string (i.e. without padding space).
raw_metadata = property_files.GetPropertyFilesString(
zip_fp, reserve_space=False)
@@ -802,7 +945,7 @@
payload.Sign(payload_signer)
output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(output_file, 'w') as output_zip:
+ with zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
payload.WriteToZip(output_zip)
# Find out the payload metadata offset and size.
@@ -867,7 +1010,7 @@
payload.Sign(payload_signer)
zip_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(zip_file, 'w') as zip_fp:
+ with zipfile.ZipFile(zip_file, 'w', allowZip64=True) as zip_fp:
# 'payload.bin',
payload.WriteToZip(zip_fp)
@@ -889,7 +1032,7 @@
def test_Compute(self):
zip_file = self.construct_zip_package_withValidPayload()
property_files = AbOtaPropertyFiles()
- with zipfile.ZipFile(zip_file, 'r') as zip_fp:
+ with zipfile.ZipFile(zip_file, 'r', allowZip64=True) as zip_fp:
property_files_string = property_files.Compute(zip_fp)
tokens = self._parse_property_files_string(property_files_string)
@@ -903,7 +1046,7 @@
def test_Finalize(self):
zip_file = self.construct_zip_package_withValidPayload(with_metadata=True)
property_files = AbOtaPropertyFiles()
- with zipfile.ZipFile(zip_file, 'r') as zip_fp:
+ with zipfile.ZipFile(zip_file, 'r', allowZip64=True) as zip_fp:
raw_metadata = property_files.GetPropertyFilesString(
zip_fp, reserve_space=False)
property_files_string = property_files.Finalize(
@@ -920,7 +1063,7 @@
def test_Verify(self):
zip_file = self.construct_zip_package_withValidPayload(with_metadata=True)
property_files = AbOtaPropertyFiles()
- with zipfile.ZipFile(zip_file, 'r') as zip_fp:
+ with zipfile.ZipFile(zip_file, 'r', allowZip64=True) as zip_fp:
raw_metadata = property_files.GetPropertyFilesString(
zip_fp, reserve_space=False)
@@ -1087,7 +1230,7 @@
payload.Sign(PayloadSigner())
output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(output_file, 'w') as output_zip:
+ with zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
payload.WriteToZip(output_zip)
import check_ota_package_signature
@@ -1101,7 +1244,7 @@
payload.Sign(PayloadSigner())
output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(output_file, 'w') as output_zip:
+ with zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
payload.WriteToZip(output_zip)
import check_ota_package_signature
@@ -1140,7 +1283,7 @@
payload.Sign(PayloadSigner())
output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(output_file, 'w') as output_zip:
+ with zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
payload.WriteToZip(output_zip)
with zipfile.ZipFile(output_file) as verify_zip:
@@ -1162,14 +1305,14 @@
payload = self._create_payload_full()
output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(output_file, 'w') as output_zip:
+ with zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
self.assertRaises(AssertionError, payload.WriteToZip, output_zip)
# Also test with incremental payload.
payload = self._create_payload_incremental()
output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(output_file, 'w') as output_zip:
+ with zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
self.assertRaises(AssertionError, payload.WriteToZip, output_zip)
@test_utils.SkipIfExternalToolsUnavailable()
@@ -1178,7 +1321,7 @@
payload.Sign(PayloadSigner())
output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(output_file, 'w') as output_zip:
+ with zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
payload.WriteToZip(output_zip)
with zipfile.ZipFile(output_file) as verify_zip:
@@ -1205,11 +1348,12 @@
]
BUILD_PROP = [
- 'ro.build.version.release=version-release',
'ro.build.id=build-id',
'ro.build.version.incremental=version-incremental',
'ro.build.type=build-type',
'ro.build.tags=build-tags',
+ 'ro.build.version.release=version-release',
+ 'ro.build.version.release_or_codename=version-release',
'ro.build.version.sdk=30',
'ro.build.version.security_patch=2020',
'ro.build.date.utc=12345678',
diff --git a/tools/releasetools/test_sign_apex.py b/tools/releasetools/test_sign_apex.py
index 82f5938..646b04d 100644
--- a/tools/releasetools/test_sign_apex.py
+++ b/tools/releasetools/test_sign_apex.py
@@ -57,3 +57,17 @@
False,
apk_keys)
self.assertTrue(os.path.exists(signed_test_apex))
+
+ @test_utils.SkipIfExternalToolsUnavailable()
+ def test_SignCompressedApexFile(self):
+ apex = os.path.join(test_utils.get_current_dir(), 'com.android.apex.compressed.v1.capex')
+ payload_key = os.path.join(self.testdata_dir, 'testkey_RSA4096.key')
+ container_key = os.path.join(self.testdata_dir, 'testkey')
+ signed_apex = sign_apex.SignApexFile(
+ 'avbtool',
+ apex,
+ payload_key,
+ container_key,
+ False,
+ codename_to_api_level_map={'S': 31})
+ self.assertTrue(os.path.exists(signed_apex))
diff --git a/tools/releasetools/test_sign_target_files_apks.py b/tools/releasetools/test_sign_target_files_apks.py
index 308172f..18e4858 100644
--- a/tools/releasetools/test_sign_target_files_apks.py
+++ b/tools/releasetools/test_sign_target_files_apks.py
@@ -164,15 +164,15 @@
"veritykeyid=id:d24f2590e9abab5cff5f59da4c4f0366e3f43e94\n")
input_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(input_file, 'w') as input_zip:
+ with zipfile.ZipFile(input_file, 'w', allowZip64=True) as input_zip:
input_zip.writestr('BOOT/cmdline', BOOT_CMDLINE1)
# Test with the first certificate.
cert_file = os.path.join(self.testdata_dir, 'verity.x509.pem')
output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(input_file, 'r') as input_zip, \
- zipfile.ZipFile(output_file, 'w') as output_zip:
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip, \
+ zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
ReplaceVerityKeyId(input_zip, output_zip, cert_file)
with zipfile.ZipFile(output_file) as output_zip:
@@ -181,8 +181,8 @@
# Test with the second certificate.
cert_file = os.path.join(self.testdata_dir, 'testkey.x509.pem')
- with zipfile.ZipFile(input_file, 'r') as input_zip, \
- zipfile.ZipFile(output_file, 'w') as output_zip:
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip, \
+ zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
ReplaceVerityKeyId(input_zip, output_zip, cert_file)
with zipfile.ZipFile(output_file) as output_zip:
@@ -195,12 +195,12 @@
"loop.max_part=7\n")
input_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(input_file, 'w') as input_zip:
+ with zipfile.ZipFile(input_file, 'w', allowZip64=True) as input_zip:
input_zip.writestr('BOOT/cmdline', BOOT_CMDLINE)
output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(input_file, 'r') as input_zip, \
- zipfile.ZipFile(output_file, 'w') as output_zip:
+ with zipfile.ZipFile(input_file, 'r', allowZip64=True) as input_zip, \
+ zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
ReplaceVerityKeyId(input_zip, output_zip, None)
with zipfile.ZipFile(output_file) as output_zip:
@@ -284,7 +284,7 @@
]
entry_name = 'SYSTEM/etc/security/otacerts.zip'
output_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(output_file, 'w') as output_zip:
+ with zipfile.ZipFile(output_file, 'w', allowZip64=True) as output_zip:
WriteOtacerts(output_zip, entry_name, certs)
with zipfile.ZipFile(output_file) as input_zip:
self.assertIn(entry_name, input_zip.namelist())
@@ -294,7 +294,7 @@
def test_CheckApkAndApexKeysAvailable(self):
input_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(input_file, 'w') as input_zip:
+ with zipfile.ZipFile(input_file, 'w', allowZip64=True) as input_zip:
input_zip.writestr('SYSTEM/app/App1.apk', "App1-content")
input_zip.writestr('SYSTEM/app/App2.apk.gz', "App2-content")
@@ -318,7 +318,7 @@
def test_CheckApkAndApexKeysAvailable_invalidApexKeys(self):
input_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(input_file, 'w') as input_zip:
+ with zipfile.ZipFile(input_file, 'w', allowZip64=True) as input_zip:
input_zip.writestr('SYSTEM/apex/Apex1.apex', "Apex1-content")
input_zip.writestr('SYSTEM/apex/Apex2.apex', "Apex2-content")
@@ -466,10 +466,10 @@
def test_ReadApexKeysInfo(self):
target_files = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
target_files_zip.writestr('META/apexkeys.txt', self.APEX_KEYS_TXT)
- with zipfile.ZipFile(target_files) as target_files_zip:
+ with zipfile.ZipFile(target_files, allowZip64=True) as target_files_zip:
keys_info = ReadApexKeysInfo(target_files_zip)
self.assertEqual({
@@ -491,10 +491,10 @@
'container_private_key="build/make/target/product/security/testkey2.pk8" '
'partition="system"')
target_files = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
target_files_zip.writestr('META/apexkeys.txt', apex_keys)
- with zipfile.ZipFile(target_files) as target_files_zip:
+ with zipfile.ZipFile(target_files, allowZip64=True) as target_files_zip:
self.assertRaises(ValueError, ReadApexKeysInfo, target_files_zip)
def test_ReadApexKeysInfo_missingPayloadPrivateKey(self):
@@ -505,10 +505,10 @@
'container_certificate="build/make/target/product/security/testkey.x509.pem" '
'container_private_key="build/make/target/product/security/testkey.pk8"')
target_files = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
target_files_zip.writestr('META/apexkeys.txt', apex_keys)
- with zipfile.ZipFile(target_files) as target_files_zip:
+ with zipfile.ZipFile(target_files, allowZip64=True) as target_files_zip:
keys_info = ReadApexKeysInfo(target_files_zip)
self.assertEqual({
@@ -528,10 +528,10 @@
'container_certificate="build/make/target/product/security/testkey.x509.pem" '
'container_private_key="build/make/target/product/security/testkey.pk8"')
target_files = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
target_files_zip.writestr('META/apexkeys.txt', apex_keys)
- with zipfile.ZipFile(target_files) as target_files_zip:
+ with zipfile.ZipFile(target_files, allowZip64=True) as target_files_zip:
keys_info = ReadApexKeysInfo(target_files_zip)
self.assertEqual({
@@ -551,10 +551,10 @@
'container_certificate="PRESIGNED" '
'container_private_key="PRESIGNED"')
target_files = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
target_files_zip.writestr('META/apexkeys.txt', apex_keys)
- with zipfile.ZipFile(target_files) as target_files_zip:
+ with zipfile.ZipFile(target_files, allowZip64=True) as target_files_zip:
keys_info = ReadApexKeysInfo(target_files_zip)
self.assertEqual({
@@ -574,10 +574,10 @@
'container_certificate="PRESIGNED" '
'container_private_key="PRESIGNED"')
target_files = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(target_files, 'w') as target_files_zip:
+ with zipfile.ZipFile(target_files, 'w', allowZip64=True) as target_files_zip:
target_files_zip.writestr('META/apexkeys.txt', apex_keys)
- with zipfile.ZipFile(target_files) as target_files_zip:
+ with zipfile.ZipFile(target_files, allowZip64=True) as target_files_zip:
keys_info = ReadApexKeysInfo(target_files_zip)
self.assertEqual({
diff --git a/tools/releasetools/test_utils.py b/tools/releasetools/test_utils.py
index 7b7f22a..808b392 100755
--- a/tools/releasetools/test_utils.py
+++ b/tools/releasetools/test_utils.py
@@ -55,6 +55,11 @@
current_dir = os.path.dirname(os.path.realpath(__file__))
return os.path.join(current_dir, 'testdata')
+def get_current_dir():
+ """Returns the current dir, relative to the script dir."""
+ # The script dir is the one we want, which could be different from pwd.
+ current_dir = os.path.dirname(os.path.realpath(__file__))
+ return current_dir
def get_search_path():
"""Returns the search path that has 'framework/signapk.jar' under."""
@@ -199,7 +204,7 @@
@staticmethod
def construct_zip_package(entries):
zip_file = common.MakeTempFile(suffix='.zip')
- with zipfile.ZipFile(zip_file, 'w') as zip_fp:
+ with zipfile.ZipFile(zip_file, 'w', allowZip64=True) as zip_fp:
for entry in entries:
zip_fp.writestr(
entry,
diff --git a/tools/releasetools/test_validate_target_files.py b/tools/releasetools/test_validate_target_files.py
index ca70ca8..48b563d 100644
--- a/tools/releasetools/test_validate_target_files.py
+++ b/tools/releasetools/test_validate_target_files.py
@@ -272,7 +272,7 @@
input_file = common.MakeTempFile()
all_entries = ['SYSTEM/', 'SYSTEM/b', 'SYSTEM/a', 'IMAGES/',
'IMAGES/system.map', 'IMAGES/system.img']
- with zipfile.ZipFile(input_file, 'w') as input_zip:
+ with zipfile.ZipFile(input_file, 'w', allowZip64=True) as input_zip:
for name in all_entries:
input_zip.write(os.path.join(input_tmp, name), arcname=name)
@@ -321,7 +321,7 @@
input_file = common.MakeTempFile()
all_entries = ['SYSTEM/', 'SYSTEM/abc', 'IMAGES/',
'IMAGES/system.map', 'IMAGES/system.img']
- with zipfile.ZipFile(input_file, 'w') as input_zip:
+ with zipfile.ZipFile(input_file, 'w', allowZip64=True) as input_zip:
for name in all_entries:
input_zip.write(os.path.join(input_tmp, name), arcname=name)
@@ -357,9 +357,6 @@
'google/coral/coral:10/RP1A.200325.001/6337676:user/dev-keys',
'ro.product.odm.device=coral',
]
- input_tmp = ValidateTargetFilesTest.make_build_prop({
- 'ODM/etc/build.prop': '\n'.join(build_prop),
- })
+ input_tmp = ValidateTargetFilesTest.make_build_prop(build_prop)
- self.assertRaises(ValueError, CheckBuildPropDuplicity,
- input_tmp)
+ self.assertRaises(ValueError, CheckBuildPropDuplicity, input_tmp)
diff --git a/tools/releasetools/validate_target_files.py b/tools/releasetools/validate_target_files.py
index 2e3aa74..401857f 100755
--- a/tools/releasetools/validate_target_files.py
+++ b/tools/releasetools/validate_target_files.py
@@ -236,6 +236,7 @@
logging.info('Done checking %s', script_path)
+
# Symlink files in `src` to `dst`, if the files do not
# already exists in `dst` directory.
def symlinkIfNotExists(src, dst):
@@ -246,6 +247,7 @@
continue
os.symlink(os.path.join(src, filename), os.path.join(dst, filename))
+
def ValidateVerifiedBootImages(input_tmp, info_dict, options):
"""Validates the Verified Boot related images.
@@ -423,16 +425,25 @@
'Verified %s with avbtool (key: %s):\n%s', image, key,
stdoutdata.rstrip())
-def CheckDataDuplicity(lines):
+
+def CheckDataInconsistency(lines):
build_prop = {}
for line in lines:
if line.startswith("import") or line.startswith("#"):
continue
- key, value = line.split("=", 1)
+ if "=" not in line:
+ continue
+
+ key, value = line.rstrip().split("=", 1)
if key in build_prop:
- return key
+ logging.info("Duplicated key found for {}".format(key))
+ if value != build_prop[key]:
+ logging.error("Key {} is defined twice with different values {} vs {}"
+ .format(key, value, build_prop[key]))
+ return key
build_prop[key] = value
+
def CheckBuildPropDuplicity(input_tmp):
"""Check all buld.prop files inside directory input_tmp, raise error
if they contain duplicates"""
@@ -448,9 +459,11 @@
continue
logging.info("Checking {}".format(path))
with open(path, 'r') as fp:
- dupKey = CheckDataDuplicity(fp.readlines())
+ dupKey = CheckDataInconsistency(fp.readlines())
if dupKey:
- raise ValueError("{} contains duplicate keys for {}", path, dupKey)
+ raise ValueError("{} contains duplicate keys for {}".format(
+ path, dupKey))
+
def main():
parser = argparse.ArgumentParser(
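
The renamed CheckDataInconsistency above treats a repeated key as fatal only when the two occurrences carry different values; an identical duplicate is just logged. A quick illustration of that contract, assuming the function can be imported from validate_target_files:

```python
# Quick illustration of CheckDataInconsistency; the import path is assumed.
from validate_target_files import CheckDataInconsistency

# Repeated key with the same value: logged, nothing returned.
assert CheckDataInconsistency([
    'ro.product.device=coral',
    'ro.product.device=coral',
]) is None

# Repeated key with conflicting values: the key is returned, and
# CheckBuildPropDuplicity turns that into a ValueError for the build.prop.
assert CheckDataInconsistency([
    'ro.product.device=coral',
    'ro.product.device=flame',
]) == 'ro.product.device'
```
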
@@ -487,7 +500,7 @@
input_tmp = common.UnzipTemp(args.target_files)
info_dict = common.LoadInfoDict(input_tmp)
- with zipfile.ZipFile(args.target_files, 'r') as input_zip:
+ with zipfile.ZipFile(args.target_files, 'r', allowZip64=True) as input_zip:
ValidateFileConsistency(input_zip, input_tmp, info_dict)
CheckBuildPropDuplicity(input_tmp)
diff --git a/tools/signapk/Android.bp b/tools/signapk/Android.bp
index c799dbf..b90f010 100644
--- a/tools/signapk/Android.bp
+++ b/tools/signapk/Android.bp
@@ -16,7 +16,7 @@
// the signapk tool (a .jar application used to sign packages)
// ============================================================
-java_library_host {
+java_binary_host {
name: "signapk",
srcs: ["src/**/*.java"],
manifest: "SignApk.mf",
@@ -27,11 +27,12 @@
"conscrypt-unbundled",
],
- required: ["libconscrypt_openjdk_jni"],
+ jni_libs: ["libconscrypt_openjdk_jni"],
// The post-build signing tools need signapk.jar (and its shared libraries,
// handled in their own Android.bp files)
dist: {
+ tag: ".jar",
targets: ["droidcore"],
},
}
diff --git a/tools/warn/android_project_list.py b/tools/warn/android_project_list.py
index 1010b24..82c0fbd 100644
--- a/tools/warn/android_project_list.py
+++ b/tools/warn/android_project_list.py
@@ -109,7 +109,7 @@
create_pattern('system/bt'),
create_pattern('system/connectivity'),
create_pattern('system/core/adb'),
- create_pattern('system/core/base'),
+ create_pattern('system/libbase'),
create_pattern('system/core/debuggerd'),
create_pattern('system/core/fastboot'),
create_pattern('system/core/fingerprintd'),
@@ -118,8 +118,8 @@
create_pattern('system/core/healthd'),
create_pattern('system/core/include'),
create_pattern('system/core/init'),
- create_pattern('system/core/libbacktrace'),
- create_pattern('system/core/liblog'),
+ create_pattern('system/unwinding/libbacktrace'),
+ create_pattern('system/logging/liblog'),
create_pattern('system/core/libpixelflinger'),
create_pattern('system/core/libprocessgroup'),
create_pattern('system/core/libsysutils'),
diff --git a/tools/warn/html_writer.py b/tools/warn/html_writer.py
index b8d3fe6..026a6d0 100644
--- a/tools/warn/html_writer.py
+++ b/tools/warn/html_writer.py
@@ -359,6 +359,25 @@
csvwriter.writerow([total, '', 'All warnings'])
+def dump_csv_with_description(csvwriter, warning_records, warning_messages,
+ warn_patterns, project_names):
+ """Outputs all the warning messages by project."""
+ csv_output = []
+ for record in warning_records:
+ project_name = project_names[record[1]]
+ pattern = warn_patterns[record[0]]
+ severity = pattern['severity'].header
+ category = pattern['category']
+ description = pattern['description']
+ warning = warning_messages[record[2]]
+ csv_output.append([project_name, severity,
+ category, description,
+ warning])
+ csv_output = sorted(csv_output)
+ for output in csv_output:
+ csvwriter.writerow(output)
+
+
# Return s with escaped backslash and quotation characters.
def escape_string(s):
return s.replace('\\', '\\\\').replace('"', '\\"')
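
dump_csv_with_description above emits one sorted row per warning record with the columns project, severity, category, pattern description, and the raw warning message. An illustrative row, with made-up values:

```python
import csv
import sys

# Illustrative only: the row layout emitted by dump_csv_with_description,
# using made-up values for each column.
writer = csv.writer(sys.stdout, lineterminator='\n')
writer.writerow([
    'system/libbase',                       # project name
    'High',                                 # severity header
    'C/C++',                                # category
    'Implicit conversion loses precision',  # pattern description
    'foo.cpp:42:10: warning: implicit conversion ...',  # warning message
])
```
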
@@ -666,6 +685,12 @@
with open(flags.csvpath, 'w') as f:
dump_csv(csv.writer(f, lineterminator='\n'), warn_patterns)
+ if flags.csvwithdescription:
+ with open(flags.csvwithdescription, 'w') as f:
+ dump_csv_with_description(csv.writer(f, lineterminator='\n'),
+ warning_records, warning_messages,
+ warn_patterns, project_names)
+
if flags.gencsv:
dump_csv(csv.writer(sys.stdout, lineterminator='\n'), warn_patterns)
else:
diff --git a/tools/warn/warn_common.py b/tools/warn/warn_common.py
index 68ed995..b2dd8ab 100755
--- a/tools/warn/warn_common.py
+++ b/tools/warn/warn_common.py
@@ -77,6 +77,9 @@
help='Save CSV warning file to the passed path')
parser.add_argument('--gencsv', action='store_true',
help='Generate CSV file with number of various warnings')
+ parser.add_argument('--csvwithdescription', default='',
+                      help="""Save CSV warning file to the passed path; this CSV
+                      will contain all the warning descriptions""")
parser.add_argument('--byproject', action='store_true',
help='Separate warnings in HTML output by project names')
parser.add_argument('--url', default='',
diff --git a/tools/zipalign/Android.bp b/tools/zipalign/Android.bp
index 8e6196d..1ebf4eb 100644
--- a/tools/zipalign/Android.bp
+++ b/tools/zipalign/Android.bp
@@ -4,20 +4,31 @@
// Zip alignment tool
//
-cc_binary_host {
- name: "zipalign",
+cc_defaults {
+ name: "zipalign_defaults",
+ target: {
+ windows: {
+ host_ldlibs: ["-lpthread"],
+ enabled: true,
+ },
+ },
+}
+cc_library_host_static {
+ name: "libzipalign",
srcs: [
"ZipAlign.cpp",
"ZipEntry.cpp",
"ZipFile.cpp",
],
-
+ export_include_dirs: [
+ "include",
+ ],
cflags: ["-Wall", "-Werror"],
// NOTE: Do not add any shared_libs dependencies because they will break the
// static_sdk_tools target.
- static_libs: [
+ whole_static_libs: [
"libutils",
"libcutils",
"liblog",
@@ -26,11 +37,39 @@
"libbase",
"libzopfli",
],
+ defaults: ["zipalign_defaults"],
+}
- target: {
- windows: {
- host_ldlibs: ["-lpthread"],
- enabled: true,
- },
+cc_binary_host {
+ name: "zipalign",
+ srcs: [
+ "ZipAlignMain.cpp",
+ ],
+ cflags: ["-Wall", "-Werror"],
+ static_libs: [
+ "libzipalign",
+ ],
+ defaults: ["zipalign_defaults"],
+}
+
+cc_test_host {
+ name: "zipalign_tests",
+ srcs: [
+ "tests/src/*_test.cpp",
+ ],
+ test_options: {
+ unit_test: true,
},
+ static_libs: [
+ "libbase",
+ "libzipalign",
+ "libgmock",
+ ],
+ data: [
+ "tests/data/diffOrders.zip",
+ "tests/data/holes.zip",
+ "tests/data/unaligned.zip",
+ ],
+ defaults: ["zipalign_defaults"],
+ test_suites: ["general-tests"],
}
diff --git a/tools/zipalign/OWNERS b/tools/zipalign/OWNERS
new file mode 100644
index 0000000..d701e4a
--- /dev/null
+++ b/tools/zipalign/OWNERS
@@ -0,0 +1,2 @@
+include platform/system/core:/janitors/OWNERS
+sanglardf@google.com
diff --git a/tools/zipalign/ZipAlign.cpp b/tools/zipalign/ZipAlign.cpp
index eea1749..08f67ff 100644
--- a/tools/zipalign/ZipAlign.cpp
+++ b/tools/zipalign/ZipAlign.cpp
@@ -14,35 +14,13 @@
* limitations under the License.
*/
-/*
- * Zip alignment tool
- */
#include "ZipFile.h"
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
-using namespace android;
-
-/*
- * Show program usage.
- */
-void usage(void)
-{
- fprintf(stderr, "Zip alignment utility\n");
- fprintf(stderr, "Copyright (C) 2009 The Android Open Source Project\n\n");
- fprintf(stderr,
- "Usage: zipalign [-f] [-p] [-v] [-z] <align> infile.zip outfile.zip\n"
- " zipalign -c [-p] [-v] <align> infile.zip\n\n" );
- fprintf(stderr,
- " <align>: alignment in bytes, e.g. '4' provides 32-bit alignment\n");
- fprintf(stderr, " -c: check alignment only (does not modify file)\n");
- fprintf(stderr, " -f: overwrite existing outfile.zip\n");
- fprintf(stderr, " -p: memory page alignment for stored shared object files\n");
- fprintf(stderr, " -v: verbose output\n");
- fprintf(stderr, " -z: recompress using Zopfli\n");
-}
+namespace android {
static int getAlignment(bool pageAlignSharedLibs, int defaultAlignment,
ZipEntry* pEntry) {
@@ -69,7 +47,6 @@
{
int numEntries = pZin->getNumEntries();
ZipEntry* pEntry;
- int bias = 0;
status_t status;
for (int i = 0; i < numEntries; i++) {
@@ -90,30 +67,20 @@
if (zopfli) {
status = pZout->addRecompress(pZin, pEntry, &pNewEntry);
- bias += pNewEntry->getCompressedLen() - pEntry->getCompressedLen();
} else {
status = pZout->add(pZin, pEntry, padding, &pNewEntry);
}
} else {
const int alignTo = getAlignment(pageAlignSharedLibs, alignment, pEntry);
- /*
- * Copy the entry, adjusting as required. We assume that the
- * file position in the new file will be equal to the file
- * position in the original.
- */
- off_t newOffset = pEntry->getFileOffset() + bias;
- padding = (alignTo - (newOffset % alignTo)) % alignTo;
-
//printf("--- %s: orig at %ld(+%d) len=%ld, adding pad=%d\n",
// pEntry->getFileName(), (long) pEntry->getFileOffset(),
// bias, (long) pEntry->getUncompressedLen(), padding);
- status = pZout->add(pZin, pEntry, padding, &pNewEntry);
+ status = pZout->add(pZin, pEntry, alignTo, &pNewEntry);
}
if (status != OK)
return 1;
- bias += padding;
//printf(" added '%s' at %ld (pad=%d)\n",
// pNewEntry->getFileName(), (long) pNewEntry->getFileOffset(),
// padding);
@@ -126,7 +93,7 @@
* Process a file. We open the input and output files, failing if the
* output file exists and "force" wasn't specified.
*/
-static int process(const char* inFileName, const char* outFileName,
+int process(const char* inFileName, const char* outFileName,
int alignment, bool force, bool zopfli, bool pageAlignSharedLibs)
{
ZipFile zin, zout;
@@ -169,7 +136,7 @@
/*
* Verify the alignment of a zip archive.
*/
-static int verify(const char* fileName, int alignment, bool verbose,
+int verify(const char* fileName, int alignment, bool verbose,
bool pageAlignSharedLibs)
{
ZipFile zipFile;
@@ -218,92 +185,4 @@
return foundBad ? 1 : 0;
}
-/*
- * Parse args.
- */
-int main(int argc, char* const argv[])
-{
- bool wantUsage = false;
- bool check = false;
- bool force = false;
- bool verbose = false;
- bool zopfli = false;
- bool pageAlignSharedLibs = false;
- int result = 1;
- int alignment;
- char* endp;
-
- if (argc < 4) {
- wantUsage = true;
- goto bail;
- }
-
- argc--;
- argv++;
-
- while (argc && argv[0][0] == '-') {
- const char* cp = argv[0] +1;
-
- while (*cp != '\0') {
- switch (*cp) {
- case 'c':
- check = true;
- break;
- case 'f':
- force = true;
- break;
- case 'v':
- verbose = true;
- break;
- case 'z':
- zopfli = true;
- break;
- case 'p':
- pageAlignSharedLibs = true;
- break;
- default:
- fprintf(stderr, "ERROR: unknown flag -%c\n", *cp);
- wantUsage = true;
- goto bail;
- }
-
- cp++;
- }
-
- argc--;
- argv++;
- }
-
- if (!((check && argc == 2) || (!check && argc == 3))) {
- wantUsage = true;
- goto bail;
- }
-
- alignment = strtol(argv[0], &endp, 10);
- if (*endp != '\0' || alignment <= 0) {
- fprintf(stderr, "Invalid value for alignment: %s\n", argv[0]);
- wantUsage = true;
- goto bail;
- }
-
- if (check) {
- /* check existing archive for correct alignment */
- result = verify(argv[1], alignment, verbose, pageAlignSharedLibs);
- } else {
- /* create the new archive */
- result = process(argv[1], argv[2], alignment, force, zopfli, pageAlignSharedLibs);
-
- /* trust, but verify */
- if (result == 0) {
- result = verify(argv[2], alignment, verbose, pageAlignSharedLibs);
- }
- }
-
-bail:
- if (wantUsage) {
- usage();
- result = 2;
- }
-
- return result;
-}
+} // namespace android
diff --git a/tools/zipalign/ZipAlignMain.cpp b/tools/zipalign/ZipAlignMain.cpp
new file mode 100644
index 0000000..49be916
--- /dev/null
+++ b/tools/zipalign/ZipAlignMain.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2008 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/*
+ * Zip alignment tool
+ */
+
+#include "ZipAlign.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+using namespace android;
+
+/*
+ * Show program usage.
+ */
+void usage(void)
+{
+ fprintf(stderr, "Zip alignment utility\n");
+ fprintf(stderr, "Copyright (C) 2009 The Android Open Source Project\n\n");
+ fprintf(stderr,
+ "Usage: zipalign [-f] [-p] [-v] [-z] <align> infile.zip outfile.zip\n"
+ " zipalign -c [-p] [-v] <align> infile.zip\n\n" );
+ fprintf(stderr,
+ " <align>: alignment in bytes, e.g. '4' provides 32-bit alignment\n");
+ fprintf(stderr, " -c: check alignment only (does not modify file)\n");
+ fprintf(stderr, " -f: overwrite existing outfile.zip\n");
+ fprintf(stderr, " -p: memory page alignment for stored shared object files\n");
+ fprintf(stderr, " -v: verbose output\n");
+ fprintf(stderr, " -z: recompress using Zopfli\n");
+}
+
+
+/*
+ * Parse args.
+ */
+int main(int argc, char* const argv[])
+{
+ bool wantUsage = false;
+ bool check = false;
+ bool force = false;
+ bool verbose = false;
+ bool zopfli = false;
+ bool pageAlignSharedLibs = false;
+ int result = 1;
+ int alignment;
+ char* endp;
+
+ if (argc < 4) {
+ wantUsage = true;
+ goto bail;
+ }
+
+ argc--;
+ argv++;
+
+ while (argc && argv[0][0] == '-') {
+ const char* cp = argv[0] +1;
+
+ while (*cp != '\0') {
+ switch (*cp) {
+ case 'c':
+ check = true;
+ break;
+ case 'f':
+ force = true;
+ break;
+ case 'v':
+ verbose = true;
+ break;
+ case 'z':
+ zopfli = true;
+ break;
+ case 'p':
+ pageAlignSharedLibs = true;
+ break;
+ default:
+ fprintf(stderr, "ERROR: unknown flag -%c\n", *cp);
+ wantUsage = true;
+ goto bail;
+ }
+
+ cp++;
+ }
+
+ argc--;
+ argv++;
+ }
+
+ if (!((check && argc == 2) || (!check && argc == 3))) {
+ wantUsage = true;
+ goto bail;
+ }
+
+ alignment = strtol(argv[0], &endp, 10);
+ if (*endp != '\0' || alignment <= 0) {
+ fprintf(stderr, "Invalid value for alignment: %s\n", argv[0]);
+ wantUsage = true;
+ goto bail;
+ }
+
+ if (check) {
+ /* check existing archive for correct alignment */
+ result = verify(argv[1], alignment, verbose, pageAlignSharedLibs);
+ } else {
+ /* create the new archive */
+ result = process(argv[1], argv[2], alignment, force, zopfli, pageAlignSharedLibs);
+
+ /* trust, but verify */
+ if (result == 0) {
+ result = verify(argv[2], alignment, verbose, pageAlignSharedLibs);
+ }
+ }
+
+bail:
+ if (wantUsage) {
+ usage();
+ result = 2;
+ }
+
+ return result;
+}
diff --git a/tools/zipalign/ZipEntry.cpp b/tools/zipalign/ZipEntry.cpp
index 810d74a..5233f0a 100644
--- a/tools/zipalign/ZipEntry.cpp
+++ b/tools/zipalign/ZipEntry.cpp
@@ -29,7 +29,7 @@
#include <string.h>
#include <time.h>
-using namespace android;
+namespace android {
/*
* Initialize a new ZipEntry structure from a FILE* positioned at a
@@ -696,3 +696,5 @@
ALOGD(" comment: '%s'\n", mFileComment);
}
+} // namespace android
+
diff --git a/tools/zipalign/ZipFile.cpp b/tools/zipalign/ZipFile.cpp
index 88505b7..1e3c413 100644
--- a/tools/zipalign/ZipFile.cpp
+++ b/tools/zipalign/ZipFile.cpp
@@ -35,7 +35,7 @@
#include <assert.h>
#include <inttypes.h>
-using namespace android;
+namespace android {
/*
* Some environments require the "b", some choke on it.
@@ -134,7 +134,7 @@
/*
* Return the Nth entry in the archive.
*/
-android::ZipEntry* ZipFile::getEntryByIndex(int idx) const
+ZipEntry* ZipFile::getEntryByIndex(int idx) const
{
if (idx < 0 || idx >= (int) mEntries.size())
return NULL;
@@ -145,7 +145,7 @@
/*
* Find an entry by name.
*/
-android::ZipEntry* ZipFile::getEntryByName(const char* fileName) const
+ZipEntry* ZipFile::getEntryByName(const char* fileName) const
{
/*
* Do a stupid linear string-compare search.
@@ -245,7 +245,11 @@
/* read the last part of the file into the buffer */
if (fread(buf, 1, readAmount, mZipFp) != (size_t) readAmount) {
- ALOGD("short file? wanted %ld\n", readAmount);
+ if (feof(mZipFp)) {
+ ALOGW("fread %ld bytes failed, unexpected EOF", readAmount);
+ } else {
+ ALOGW("fread %ld bytes failed, %s", readAmount, strerror(errno));
+ }
result = UNKNOWN_ERROR;
goto bail;
}
@@ -327,7 +331,11 @@
{
uint8_t checkBuf[4];
if (fread(checkBuf, 1, 4, mZipFp) != 4) {
- ALOGD("EOCD check read failed\n");
+ if (feof(mZipFp)) {
+ ALOGW("fread EOCD failed, unexpected EOF");
+ } else {
+ ALOGW("fread EOCD failed, %s", strerror(errno));
+ }
result = INVALID_OPERATION;
goto bail;
}
@@ -503,6 +511,32 @@
}
/*
+ * Based on the current position in the output zip, assess where the entry
+ * payload will end up if written as-is. If alignment is not satisfactory,
+ * add some padding in the extra field.
+ *
+ */
+status_t ZipFile::alignEntry(android::ZipEntry* pEntry, uint32_t alignTo){
+ if (alignTo == 0 || alignTo == 1)
+ return OK;
+
+ // Calculate where the entry payload offset will end up if we were to write
+ // it as-is.
+ uint64_t expectedPayloadOffset = ftell(mZipFp) +
+ android::ZipEntry::LocalFileHeader::kLFHLen +
+ pEntry->mLFH.mFileNameLength +
+ pEntry->mLFH.mExtraFieldLength;
+
+ // If the alignment is not what was requested, add some padding in the extra
+  // field so the payload ends up where requested.
+  uint64_t alignDiff = (alignTo - (expectedPayloadOffset % alignTo)) % alignTo;
+ if (alignDiff == 0)
+ return OK;
+
+ return pEntry->addPadding(alignDiff);
+}
+
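
The gap computed by alignEntry above is plain modular arithmetic: take the offset at which the entry payload would land (current output position plus the fixed local file header plus the name and extra-field lengths) and pad up to the next multiple of alignTo. A small sketch of the computation with illustrative numbers; LFH_LEN stands in for ZipEntry::LocalFileHeader::kLFHLen:

```python
# Small sketch of the padding computed by alignEntry; LFH_LEN stands in for
# ZipEntry::LocalFileHeader::kLFHLen (30 bytes in the ZIP format).
LFH_LEN = 30

def padding_needed(current_offset, name_len, extra_len, align_to):
    if align_to <= 1:
        return 0
    payload_offset = current_offset + LFH_LEN + name_len + extra_len
    # Distance to the next multiple of align_to; zero when already aligned.
    return (align_to - payload_offset % align_to) % align_to

print(padding_needed(1000, 20, 0, 4))  # payload at 1050 -> pad 2 bytes
print(padding_needed(1002, 20, 0, 4))  # payload at 1052 -> pad 0 bytes
```
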
+/*
* Add an entry by copying it from another zip file. If "padding" is
* nonzero, the specified number of bytes will be added to the "extra"
* field in the header.
@@ -510,7 +544,7 @@
* If "ppEntry" is non-NULL, a pointer to the new entry will be returned.
*/
status_t ZipFile::add(const ZipFile* pSourceZip, const ZipEntry* pSourceEntry,
- int padding, ZipEntry** ppEntry)
+ int alignTo, ZipEntry** ppEntry)
{
ZipEntry* pEntry = NULL;
status_t result;
@@ -537,11 +571,10 @@
result = pEntry->initFromExternal(pSourceEntry);
if (result != OK)
goto bail;
- if (padding != 0) {
- result = pEntry->addPadding(padding);
- if (result != OK)
- goto bail;
- }
+
+ result = alignEntry(pEntry, alignTo);
+ if (result != OK)
+ goto bail;
/*
* From here on out, failures are more interesting.
@@ -760,15 +793,18 @@
while (1) {
count = fread(tmpBuf, 1, sizeof(tmpBuf), srcFp);
- if (ferror(srcFp) || ferror(dstFp))
- return errnoToStatus(errno);
+ if (ferror(srcFp) || ferror(dstFp)) {
+ status_t status = errnoToStatus(errno);
+ ALOGW("fread %zu bytes failed, %s", count, strerror(errno));
+ return status;
+ }
if (count == 0)
break;
*pCRC32 = crc32(*pCRC32, tmpBuf, count);
if (fwrite(tmpBuf, 1, count, dstFp) != count) {
- ALOGD("fwrite %d bytes failed\n", (int) count);
+ ALOGW("fwrite %zu bytes failed, %s", count, strerror(errno));
return UNKNOWN_ERROR;
}
}
@@ -788,7 +824,7 @@
if (size > 0) {
*pCRC32 = crc32(*pCRC32, (const unsigned char*)data, size);
if (fwrite(data, 1, size, dstFp) != size) {
- ALOGD("fwrite %d bytes failed\n", (int) size);
+ ALOGW("fwrite %zu bytes failed, %s", size, strerror(errno));
return UNKNOWN_ERROR;
}
}
@@ -822,7 +858,11 @@
count = fread(tmpBuf, 1, readSize, srcFp);
if (count != readSize) { // error or unexpected EOF
- ALOGD("fread %d bytes failed\n", (int) readSize);
+ if (feof(srcFp)) {
+ ALOGW("fread %zu bytes failed, unexpected EOF", readSize);
+ } else {
+ ALOGW("fread %zu bytes failed, %s", readSize, strerror(errno));
+ }
return UNKNOWN_ERROR;
}
@@ -830,7 +870,7 @@
*pCRC32 = crc32(*pCRC32, tmpBuf, count);
if (fwrite(tmpBuf, 1, count, dstFp) != count) {
- ALOGD("fwrite %d bytes failed\n", (int) count);
+ ALOGW("fwrite %zu bytes failed, %s", count, strerror(errno));
return UNKNOWN_ERROR;
}
@@ -890,8 +930,7 @@
goto bail;
}
if (getSize < kBufSize) {
- ALOGV("+++ got %d bytes, EOF reached\n",
- (int)getSize);
+ ALOGV("+++ got %zu bytes, EOF reached\n", getSize);
atEof = true;
}
@@ -901,9 +940,9 @@
delete[] inBuf;
}
- ALOGV("+++ writing %d bytes\n", (int)outSize);
+ ALOGV("+++ writing %zu bytes\n", outSize);
if (fwrite(outBuf, 1, outSize, dstFp) != outSize) {
- ALOGD("write %d failed in deflate\n", (int)outSize);
+ ALOGW("fwrite %zu bytes failed, %s", outSize, strerror(errno));
result = UNKNOWN_ERROR;
goto bail;
}
@@ -1109,24 +1148,31 @@
getSize = n;
if (fseek(fp, (long) src, SEEK_SET) != 0) {
- ALOGD("filemove src seek %ld failed\n", (long) src);
+ ALOGW("filemove src seek %ld failed, %s",
+ (long) src, strerror(errno));
return UNKNOWN_ERROR;
}
if (fread(readBuf, 1, getSize, fp) != getSize) {
- ALOGD("filemove read %ld off=%ld failed\n",
- (long) getSize, (long) src);
+ if (feof(fp)) {
+ ALOGW("fread %zu bytes off=%ld failed, unexpected EOF",
+ getSize, (long) src);
+ } else {
+ ALOGW("fread %zu bytes off=%ld failed, %s",
+ getSize, (long) src, strerror(errno));
+ }
return UNKNOWN_ERROR;
}
if (fseek(fp, (long) dst, SEEK_SET) != 0) {
- ALOGD("filemove dst seek %ld failed\n", (long) dst);
+ ALOGW("filemove dst seek %ld failed, %s",
+ (long) dst, strerror(errno));
return UNKNOWN_ERROR;
}
if (fwrite(readBuf, 1, getSize, fp) != getSize) {
- ALOGD("filemove write %ld off=%ld failed\n",
- (long) getSize, (long) dst);
+ ALOGW("filemove write %zu off=%ld failed, %s",
+ getSize, (long) dst, strerror(errno));
return UNKNOWN_ERROR;
}
@@ -1374,12 +1420,17 @@
ZipEntry::putLongLE(&buf[0x10], mCentralDirOffset);
ZipEntry::putShortLE(&buf[0x14], mCommentLen);
- if (fwrite(buf, 1, kEOCDLen, fp) != kEOCDLen)
+ if (fwrite(buf, 1, kEOCDLen, fp) != kEOCDLen) {
+ ALOGW("fwrite EOCD failed, %s", strerror(errno));
return UNKNOWN_ERROR;
+ }
if (mCommentLen > 0) {
assert(mComment != NULL);
- if (fwrite(mComment, mCommentLen, 1, fp) != mCommentLen)
+ if (fwrite(mComment, mCommentLen, 1, fp) != mCommentLen) {
+ ALOGW("fwrite %d bytes failed, %s",
+ (int) mCommentLen, strerror(errno));
return UNKNOWN_ERROR;
+ }
}
return OK;
@@ -1397,3 +1448,4 @@
mCentralDirSize, mCentralDirOffset, mCommentLen);
}
+} // namespace android
diff --git a/tools/zipalign/ZipFile.h b/tools/zipalign/ZipFile.h
index 11d20c5..854f981 100644
--- a/tools/zipalign/ZipFile.h
+++ b/tools/zipalign/ZipFile.h
@@ -102,14 +102,14 @@
}
/*
- * Add an entry by copying it from another zip file. If "padding" is
- * nonzero, the specified number of bytes will be added to the "extra"
- * field in the header.
+ * Add an entry by copying it from another zip file. If "alignment" is
+ * nonzero, an appropriate number of bytes will be added to the "extra"
+ * field in the header so the entry payload is aligned.
*
* If "ppEntry" is non-NULL, a pointer to the new entry will be returned.
*/
status_t add(const ZipFile* pSourceZip, const ZipEntry* pSourceEntry,
- int padding, ZipEntry** ppEntry);
+ int alignment, ZipEntry** ppEntry);
/*
* Add an entry by copying it from another zip file, recompressing with
@@ -163,6 +163,8 @@
ZipFile(const ZipFile& src);
ZipFile& operator=(const ZipFile& src);
+ status_t alignEntry(android::ZipEntry* pEntry, uint32_t alignTo);
+
class EndOfCentralDir {
public:
EndOfCentralDir(void) :
diff --git a/tools/zipalign/include/ZipAlign.h b/tools/zipalign/include/ZipAlign.h
new file mode 100644
index 0000000..ab36086
--- /dev/null
+++ b/tools/zipalign/include/ZipAlign.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ZIPALIGN_H
+#define ZIPALIGN_H
+
+namespace android {
+
+/*
+ * Generate a new, aligned, zip "output" from an "input" zip.
+ * - alignTo: Alignment (in bytes) for uncompressed entries.
+ * - force : Overwrite output if it exists, fail otherwise.
+ * - zopfli : Recompress compressed entries with a more efficient algorithm.
+ *            Otherwise, copy compressed entries as-is (they stay unaligned).
+ * - pageAlignSharedLibs: Align .so files to 4096 and other files to
+ *                        alignTo, or all files to alignTo if false.
+ *
+ * Returns 0 on success.
+ */
+int process(const char* input, const char* output, int alignTo, bool force,
+ bool zopfli, bool pageAlignSharedLibs);
+
+/*
+ * Verify the alignment of a zip archive.
+ * - alignTo: Alignment (in bytes) for uncompressed entries.
+ * - pageAlignSharedLibs: Align .so files to 4096 and other files to
+ *                        alignTo, or all files to alignTo if false.
+ *
+ * Returns 0 on success.
+ */
+int verify(const char* fileName, int alignTo, bool verbose,
+ bool pageAlignSharedLibs);
+
+} // namespace android
+
+#endif // ZIPALIGN_H
diff --git a/tools/zipalign/tests/data/diffOrders.zip b/tools/zipalign/tests/data/diffOrders.zip
new file mode 100644
index 0000000..8f512ed
--- /dev/null
+++ b/tools/zipalign/tests/data/diffOrders.zip
Binary files differ
diff --git a/tools/zipalign/tests/data/holes.zip b/tools/zipalign/tests/data/holes.zip
new file mode 100644
index 0000000..c88f891
--- /dev/null
+++ b/tools/zipalign/tests/data/holes.zip
Binary files differ
diff --git a/tools/zipalign/tests/data/unaligned.zip b/tools/zipalign/tests/data/unaligned.zip
new file mode 100644
index 0000000..d572b1a
--- /dev/null
+++ b/tools/zipalign/tests/data/unaligned.zip
Binary files differ
diff --git a/tools/zipalign/tests/src/align_test.cpp b/tools/zipalign/tests/src/align_test.cpp
new file mode 100644
index 0000000..c79e791
--- /dev/null
+++ b/tools/zipalign/tests/src/align_test.cpp
@@ -0,0 +1,53 @@
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+#include "ZipAlign.h"
+
+#include <stdio.h>
+#include <string>
+
+#include <android-base/file.h>
+
+using namespace android;
+
+static std::string GetTestPath(const std::string& filename) {
+ static std::string test_data_dir = android::base::GetExecutableDirectory() + "/tests/data/";
+ return test_data_dir + filename;
+}
+
+TEST(Align, Unaligned) {
+ const std::string src = GetTestPath("unaligned.zip");
+ const std::string dst = GetTestPath("unaligned_out.zip");
+
+ int processed = process(src.c_str(), dst.c_str(), 4, true, false, 4096);
+ ASSERT_EQ(0, processed);
+
+ int verified = verify(dst.c_str(), 4, true, false);
+ ASSERT_EQ(0, verified);
+}
+
+// Align a zip featuring a hole at the beginning. The
+// hole in the archive is a deleted entry in the Central
+// Directory.
+TEST(Align, Holes) {
+ const std::string src = GetTestPath("holes.zip");
+ const std::string dst = GetTestPath("holes_out.zip");
+
+ int processed = process(src.c_str(), dst.c_str(), 4, true, false, 4096);
+ ASSERT_EQ(0, processed);
+
+ int verified = verify(dst.c_str(), 4, false, true);
+ ASSERT_EQ(0, verified);
+}
+
+// Align a zip where LFH order and CD entries differ.
+TEST(Align, DifferenteOrders) {
+ const std::string src = GetTestPath("diffOrders.zip");
+ const std::string dst = GetTestPath("diffOrders_out.zip");
+
+ int processed = process(src.c_str(), dst.c_str(), 4, true, false, 4096);
+ ASSERT_EQ(0, processed);
+
+ int verified = verify(dst.c_str(), 4, false, true);
+ ASSERT_EQ(0, verified);
+}