Merge cb36eabcaf28 ("Merge tag 'perf-urgent-2026-03-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip") into android-mainline
Steps on the way to v7.0-rc2
Change-Id: If90b62d1bc20b7a052cc25c0b72363d31c15002a
Signed-off-by: Carlos Llamas <cmllamas@google.com>
diff --git a/BUILD.bazel b/BUILD.bazel
new file mode 100644
index 0000000..3e9ab00
--- /dev/null
+++ b/BUILD.bazel
@@ -0,0 +1,3385 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2021 The Android Open Source Project
+
+load("@bazel_skylib//lib:paths.bzl", "paths")
+load("@bazel_skylib//rules:common_settings.bzl", "string_flag")
+load("@bazel_skylib//rules:write_file.bzl", "write_file")
+load("@rules_cc//cc:defs.bzl", "cc_library")
+load("@rules_devicetree//devicetree:devicetree_library.bzl", "devicetree_library")
+load("@rules_pkg//pkg:install.bzl", "pkg_install")
+load("@rules_pkg//pkg:mappings.bzl", "pkg_filegroup", "pkg_files", "strip_prefix")
+load("@rules_pkg//pkg:pkg.bzl", "pkg_zip")
+load("@rules_python//python:defs.bzl", "py_library")
+load(
+ "//build/kernel/kleaf:common_kernels.bzl",
+ "common_kernel",
+ "common_kernel_protected_module_names",
+)
+load("//build/kernel/kleaf:constants.bzl", "COMMON_KCFLAGS", "DEFAULT_GKI_OUTS", "X86_64_OUTS")
+load("//build/kernel/kleaf:dwarves.bzl", "pahole")
+load("//build/kernel/kleaf:fail.bzl", "fail_rule")
+load(
+ "//build/kernel/kleaf:kernel.bzl",
+ "checkpatch",
+ "ddk_headers",
+ "ddk_headers_archive",
+ "initramfs",
+ "kernel_build",
+ "kernel_compile_commands",
+ "kernel_kythe",
+ "kernel_modules_install",
+ "merge_kzip",
+ "merged_kernel_uapi_headers",
+ "modinfo_summary_report",
+)
+load(":bazel/abi.bzl", "cc_binary_with_abi")
+load(
+ ":bazel/modules_private.bzl",
+ "get_gki_kunit_modules",
+ "get_gki_modules_list",
+ "get_gki_modules_superset",
+ "get_gki_unprotected_modules_list",
+ "get_kunit_modules_list",
+ "get_kunit_modules_superset",
+)
+
+package(
+ default_visibility = [
+ "//visibility:public",
+ ],
+)
+
+_GKI_AARCH64_MAKE_GOALS = [
+ "Image",
+ "Image.lz4",
+ "Image.gz",
+ "Image.lzma",
+ "Image.zst",
+ "modules",
+]
+
+_GKI_X86_64_MAKE_GOALS = [
+ "bzImage",
+ "modules",
+]
+
+# Extra generated headers below $OUT_DIR for external modules.
+_GENERATED_HEADERS_FOR_MODULE = [
+ "security/selinux/flask.h",
+ "security/selinux/av_permissions.h",
+]
+
+checkpatch(
+ name = "checkpatch",
+ checkpatch_pl = "scripts/checkpatch.pl",
+)
+
+# Deprecated - Use arch specific files from below.
+fail_rule(
+ name = "gki_system_dlkm_modules",
+ message = """
+ Common list for all architectures is deprecated.
+ Instead use the file corresponding to the architecture used:
+ i.e. `gki_system_dlkm_modules_{arch}`
+ """,
+)
+
+fail_rule(
+ name = "android/gki_system_dlkm_modules",
+ message = """
+ Common list for all architectures is deprecated.
+ Instead use the file corresponding to the architecture used:
+ i.e. `gki_system_dlkm_modules_{arch}`
+ """,
+)
+
+write_file(
+ name = "gki_system_dlkm_modules_arm64",
+ out = "gki/aarch64/system_dlkm_modules",
+ # Do not built kunit modules into system_dlkm
+ content = get_gki_modules_list("arm64") + [
+ # Ensure new line at the end.
+ "",
+ ],
+)
+
+common_kernel_protected_module_names(
+ name = "gki_aarch64_protected_module_names",
+ out = "gki/aarch64/protected_module_names",
+ exclude = get_gki_unprotected_modules_list("arm64"),
+ module_names = get_gki_modules_list("arm64") + get_kunit_modules_list("arm64"),
+)
+
+write_file(
+ name = "gki_system_dlkm_modules_x86_64",
+ out = "gki/x86_64/system_dlkm_modules",
+ # Do not built kunit modules into system_dlkm
+ content = get_gki_modules_list("x86_64") + [
+ # Ensure new line at the end.
+ "",
+ ],
+)
+
+common_kernel_protected_module_names(
+ name = "gki_x86_64_protected_module_names",
+ out = "gki/x86_64/protected_module_names",
+ exclude = get_gki_unprotected_modules_list("x86_64"),
+ module_names = get_gki_modules_list("x86_64") + get_kunit_modules_list("x86_64"),
+)
+
+_SET_KERNEL_DIR_CMD = "KERNEL_DIR=\"{kernel_dir}\"".format(
+ kernel_dir = paths.join(
+ package_relative_label(":x").workspace_root,
+ package_relative_label(":x").package,
+ ),
+)
+
+write_file(
+ name = "set_kernel_dir_build_config",
+ out = "set_kernel_dir_build_config/build.config",
+ content = [
+ _SET_KERNEL_DIR_CMD,
+ "",
+ ],
+ visibility = ["//visibility:public"], # TODO: This should be private
+)
+
+filegroup(
+ name = "common_kernel_sources",
+ srcs = glob(
+ ["**"],
+ exclude = [
+ "BUILD.bazel",
+ "**/*.bzl",
+ ".git/**",
+
+ # ctag files
+ "tags",
+ "TAGS",
+
+ # temporary ctag files
+ "tags.temp",
+ "tags.lock",
+
+ # cscope files
+ "cscope.*",
+ "ncscope.*",
+
+ # ABI and symbol list files
+ "gki/**",
+ ],
+ ),
+ visibility = ["//visibility:public"],
+)
+
+_KUNIT_COMMON_GUIDE = [
+ "Modules for KUnit testing on Android devices only:",
+ "(KUnit: Linux Kernel Unit Testing https://www.kernel.org/doc/html/latest/dev-tools/kunit/)",
+ "(https://android.googlesource.com/kernel/common/+/refs/heads/android-mainline/tools/testing/kunit/android/README)",
+]
+
+write_file(
+ name = "generate_gki_module_info_arm64",
+ out = "generate_gki_module_info_arm64/README",
+ content = _KUNIT_COMMON_GUIDE + get_kunit_modules_list("arm64") + [
+ "", # keep new line at the end
+ ],
+ visibility = ["//visibility:private"],
+)
+
+write_file(
+ name = "generate_gki_module_info_x86_64",
+ out = "generate_gki_module_info_x86_64/README",
+ content = _KUNIT_COMMON_GUIDE + get_kunit_modules_list("x86_64") + [
+ "", # keep new line at the end
+ ],
+ visibility = ["//visibility:private"],
+)
+
+common_kernel(
+ name = "kernel_aarch64",
+ outs = DEFAULT_GKI_OUTS,
+ arch = "arm64",
+ build_gki_artifacts = True,
+ ddk_headers_archive = ":kernel_aarch64_ddk_headers_archive",
+ ddk_module_headers = [":all_headers_aarch64"],
+ defconfig = "arch/arm64/configs/gki_defconfig",
+ extra_dist = [
+ ":test_mappings_zip",
+ ":tests_zip_arm64",
+ ],
+ generated_headers_for_module = _GENERATED_HEADERS_FOR_MODULE,
+ gki_system_dlkm_modules = ":gki_system_dlkm_modules_arm64",
+ kcflags = COMMON_KCFLAGS,
+ make_goals = _GKI_AARCH64_MAKE_GOALS,
+ makefile = ":Makefile",
+ module_implicit_outs = get_gki_modules_list("arm64") + get_kunit_modules_list("arm64"),
+ modules_superset = get_gki_modules_superset("arm64") + get_kunit_modules_superset("arm64"),
+ system_dlkm_extra_archive_files = [":generate_gki_module_info_arm64"],
+ visibility = ["//visibility:public"],
+
+ # Symbol lists and module lists
+ # kmi_symbol_list = None,
+ # additional_kmi_symbol_lists = [],
+ # kmi_symbol_list_strict_mode = False,
+ # protected_modules_list = None,
+ # trim_nonlisted_kmi = False,
+
+ # ABI
+ # abi_definition_stg = None,
+ # kmi_enforced = False,
+)
+
+common_kernel(
+ name = "kernel_aarch64_16k",
+ outs = DEFAULT_GKI_OUTS,
+ arch = "arm64",
+ build_gki_artifacts = True,
+ ddk_headers_archive = ":kernel_aarch64_ddk_headers_archive",
+ ddk_module_headers = [":all_headers_aarch64"],
+ defconfig = "arch/arm64/configs/gki_defconfig",
+ extra_dist = [
+ ":test_mappings_zip",
+ ":tests_zip_arm64_16k",
+ ],
+ generated_headers_for_module = _GENERATED_HEADERS_FOR_MODULE,
+ gki_system_dlkm_modules = ":gki_system_dlkm_modules_arm64",
+ kcflags = COMMON_KCFLAGS,
+ make_goals = _GKI_AARCH64_MAKE_GOALS,
+ makefile = ":Makefile",
+ module_implicit_outs = get_gki_modules_list("arm64") + get_kunit_modules_list("arm64"),
+ modules_superset = get_gki_modules_superset("arm64") + get_kunit_modules_superset("arm64"),
+ page_size = "16k",
+ system_dlkm_extra_archive_files = [":generate_gki_module_info_arm64"],
+ visibility = ["//visibility:public"],
+)
+
+fail_rule(
+ name = "kernel_aarch64_debug",
+ message = """\
+Consider building @//common:kernel_aarch64 with:
+ * --notrim to disable trimming, or
+ * --debug to enable additional debug options.""",
+ visibility = ["//visibility:public"],
+)
+
+fail_rule(
+ name = "kernel_x86_64_debug",
+ message = """\
+Consider building @//common:kernel_x86_64 with:
+ * --notrim to disable trimming, or
+ * --debug to enable additional debug options.""",
+ visibility = ["//visibility:public"],
+)
+
+common_kernel(
+ name = "kernel_x86_64",
+ outs = X86_64_OUTS,
+ arch = "x86_64",
+ build_gki_artifacts = True,
+ ddk_module_headers = [":all_headers_x86_64"],
+ defconfig = "arch/x86/configs/gki_defconfig",
+ extra_dist = [
+ ":test_mappings_zip",
+ ":tests_zip_x86_64",
+ ],
+ generated_headers_for_module = _GENERATED_HEADERS_FOR_MODULE,
+ gki_system_dlkm_modules = ":gki_system_dlkm_modules_x86_64",
+ kcflags = COMMON_KCFLAGS,
+ make_goals = _GKI_X86_64_MAKE_GOALS,
+ makefile = ":Makefile",
+ module_implicit_outs = get_gki_modules_list("x86_64") + get_kunit_modules_list("x86_64"),
+ modules_superset = get_gki_modules_superset("x86_64") + get_kunit_modules_superset("x86_64"),
+ system_dlkm_extra_archive_files = [":generate_gki_module_info_x86_64"],
+ visibility = ["//visibility:public"],
+)
+
+alias(
+ name = "kernel",
+ actual = ":kernel_aarch64",
+ visibility = ["//visibility:public"],
+)
+
+alias(
+ name = "kernel_dist",
+ actual = ":kernel_aarch64_dist",
+ visibility = ["//visibility:public"],
+)
+
+kernel_compile_commands(
+ name = "kernel_aarch64_compile_commands",
+ visibility = ["//visibility:public"],
+ deps = [":kernel_aarch64"],
+)
+
+kernel_compile_commands(
+ name = "kernel_x86_64_compile_commands",
+ visibility = ["//visibility:public"],
+ deps = [":kernel_x86_64"],
+)
+
+string_flag(
+ name = "kernel_kythe_corpus",
+ build_setting_default = "",
+ visibility = ["//visibility:public"],
+)
+
+kernel_kythe(
+ name = "kernel_aarch64_kythe",
+ corpus = ":kernel_kythe_corpus",
+ kernel_build = ":kernel_aarch64",
+ visibility = ["//visibility:public"],
+)
+
+pkg_files(
+ name = "kernel_aarch64_kythe_files",
+ srcs = [
+ ":kernel_aarch64_kythe",
+ ],
+ strip_prefix = strip_prefix.files_only(),
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "kernel_aarch64_kythe_dist",
+ srcs = [":kernel_aarch64_kythe_files"],
+ visibility = ["//visibility:public"],
+)
+
+kernel_kythe(
+ name = "kernel_x86_64_kythe",
+ corpus = ":kernel_kythe_corpus",
+ kernel_build = ":kernel_x86_64",
+ visibility = ["//visibility:public"],
+)
+
+pkg_files(
+ name = "kernel_x86_64_kythe_files",
+ srcs = [
+ ":kernel_aarch64_kythe",
+ ],
+ strip_prefix = strip_prefix.files_only(),
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "kernel_x86_64_kythe_dist",
+ srcs = [":kernel_x86_64_kythe_files"],
+ visibility = ["//visibility:public"],
+)
+
+merge_kzip(
+ name = "kernel_kythe",
+ srcs = [
+ ":kernel_aarch64_kythe",
+ ":kernel_x86_64_kythe",
+ ],
+ visibility = ["//visibility:public"],
+)
+
+pkg_files(
+ name = "kernel_kythe_files",
+ srcs = [
+ ":kernel_kythe",
+ ],
+ strip_prefix = strip_prefix.files_only(),
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "kernel_kythe_dist",
+ srcs = [":kernel_kythe_files"],
+ visibility = ["//visibility:public"],
+)
+
+# Microdroid is not a real device. The kernel image is built with special
+# configs to reduce the size. Hence, not using mixed build.
+kernel_build(
+ name = "kernel_aarch64_microdroid",
+ srcs = [":kernel_aarch64_sources"],
+ outs = [
+ "Image",
+ "System.map",
+ "modules.builtin",
+ "modules.builtin.modinfo",
+ "vmlinux",
+ "vmlinux.symvers",
+ ],
+ arch = "arm64",
+ defconfig = "arch/arm64/configs/microdroid_defconfig",
+ make_goals = [
+ "Image",
+ ],
+ makefile = ":Makefile",
+)
+
+pkg_files(
+ name = "kernel_aarch64_microdroid_dist_files",
+ srcs = [
+ ":kernel_aarch64_microdroid",
+ ],
+ strip_prefix = strip_prefix.files_only(),
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "kernel_aarch64_microdroid_dist",
+ srcs = ["kernel_aarch64_microdroid_dist_files"],
+ destdir = "out/kernel_aarch64_microdroid/dist",
+)
+
+kernel_build(
+ name = "kernel_aarch64_microdroid_16k",
+ srcs = [":kernel_aarch64_sources"],
+ outs = [
+ "Image",
+ "System.map",
+ "modules.builtin",
+ "modules.builtin.modinfo",
+ "vmlinux",
+ "vmlinux.symvers",
+ ],
+ arch = "arm64",
+ defconfig = "arch/arm64/configs/microdroid_defconfig",
+ make_goals = [
+ "Image",
+ ],
+ makefile = ":Makefile",
+ page_size = "16k",
+)
+
+pkg_files(
+ name = "kernel_aarch64_microdroid_16k_dist_files",
+ srcs = [
+ ":kernel_aarch64_microdroid_16k",
+ ],
+ strip_prefix = strip_prefix.files_only(),
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "kernel_aarch64_microdroid_16k_dist",
+ srcs = ["kernel_aarch64_microdroid_16k_dist_files"],
+ destdir = "out/kernel_aarch64_microdroid_16k/dist",
+)
+
+# Microdroid is not a real device. The kernel image is built with special
+# configs to reduce the size. Hence, not using mixed build.
+kernel_build(
+ name = "kernel_x86_64_microdroid",
+ srcs = [":kernel_x86_64_sources"],
+ outs = X86_64_OUTS,
+ arch = "x86_64",
+ defconfig = "arch/x86/configs/microdroid_defconfig",
+ make_goals = [
+ "bzImage",
+ ],
+ makefile = ":Makefile",
+)
+
+pkg_files(
+ name = "kernel_x86_64_microdroid_dist_files",
+ srcs = [
+ ":kernel_x86_64_microdroid",
+ ],
+ strip_prefix = strip_prefix.files_only(),
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "kernel_x86_64_microdroid_dist",
+ srcs = ["kernel_x86_64_microdroid_dist_files"],
+ destdir = "out/kernel_x86_64_microdroid/dist",
+)
+
+kernel_build(
+ name = "kernel_aarch64_crashdump",
+ srcs = [":kernel_aarch64_sources"],
+ outs = [
+ "Image",
+ ],
+ arch = "arm64",
+ defconfig = "arch/arm64/configs/crashdump_defconfig",
+ make_goals = [
+ "Image",
+ ],
+ makefile = ":Makefile",
+)
+
+pkg_files(
+ name = "kernel_aarch64_crashdump_dist_files",
+ srcs = [
+ ":kernel_aarch64_crashdump",
+ ],
+ strip_prefix = strip_prefix.files_only(),
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "kernel_aarch64_crashdump_dist",
+ srcs = ["kernel_aarch64_crashdump_dist_files"],
+ destdir = "out/kernel_aarch64_crashdump/dist",
+)
+
+kernel_build(
+ name = "kernel_x86_64_crashdump",
+ srcs = [":kernel_x86_64_sources"],
+ outs = X86_64_OUTS,
+ arch = "x86_64",
+ defconfig = "arch/x86/configs/crashdump_defconfig",
+ make_goals = [
+ "bzImage",
+ ],
+ makefile = ":Makefile",
+)
+
+pkg_files(
+ name = "kernel_x86_64_crashdump_dist_files",
+ srcs = [
+ ":kernel_x86_64_crashdump",
+ ],
+ strip_prefix = strip_prefix.files_only(),
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "kernel_x86_64_crashdump_dist",
+ srcs = ["kernel_x86_64_crashdump_dist_files"],
+ destdir = "out/kernel_x86_64_crashdump/dist",
+)
+
+_DB845C_MODULE_OUTS = [
+ # keep sorted
+ "crypto/michael_mic.ko",
+ "drivers/base/regmap/regmap-sdw.ko",
+ "drivers/base/regmap/regmap-slimbus.ko",
+ "drivers/bus/mhi/host/mhi.ko",
+ "drivers/clk/qcom/camcc-sc7280.ko",
+ "drivers/clk/qcom/camcc-sm8550.ko",
+ "drivers/clk/qcom/clk-qcom.ko",
+ "drivers/clk/qcom/clk-rpmh.ko",
+ "drivers/clk/qcom/clk-spmi-pmic-div.ko",
+ "drivers/clk/qcom/dispcc-sc7280.ko",
+ "drivers/clk/qcom/dispcc-sdm845.ko",
+ "drivers/clk/qcom/dispcc-sm8250.ko",
+ "drivers/clk/qcom/dispcc-sm8550.ko",
+ "drivers/clk/qcom/gcc-sc7280.ko",
+ "drivers/clk/qcom/gcc-sdm845.ko",
+ "drivers/clk/qcom/gcc-sm8250.ko",
+ "drivers/clk/qcom/gcc-sm8450.ko",
+ "drivers/clk/qcom/gcc-sm8550.ko",
+ "drivers/clk/qcom/gcc-sm8650.ko",
+ "drivers/clk/qcom/gpucc-sc7280.ko",
+ "drivers/clk/qcom/gpucc-sdm845.ko",
+ "drivers/clk/qcom/gpucc-sm8250.ko",
+ "drivers/clk/qcom/gpucc-sm8550.ko",
+ "drivers/clk/qcom/gpucc-sm8650.ko",
+ "drivers/clk/qcom/lpass-gfm-sm8250.ko",
+ "drivers/clk/qcom/lpassaudiocc-sc7280.ko",
+ "drivers/clk/qcom/lpasscorecc-sc7280.ko",
+ "drivers/clk/qcom/tcsrcc-sm8550.ko",
+ "drivers/clk/qcom/tcsrcc-sm8650.ko",
+ "drivers/clk/qcom/videocc-sc7280.ko",
+ "drivers/clk/qcom/videocc-sdm845.ko",
+ "drivers/clk/qcom/videocc-sm8250.ko",
+ "drivers/clk/qcom/videocc-sm8550.ko",
+ "drivers/cpufreq/qcom-cpufreq-hw.ko",
+ "drivers/dma-buf/heaps/system_heap.ko",
+ "drivers/dma/qcom/bam_dma.ko",
+ "drivers/dma/qcom/gpi.ko",
+ "drivers/extcon/extcon-usb-gpio.ko",
+ "drivers/firmware/qcom/qcom-scm.ko",
+ "drivers/firmware/qcom/qcom_tzmem.ko",
+ "drivers/gpio/gpio-wcd934x.ko",
+ "drivers/gpu/drm/bridge/aux-bridge.ko",
+ "drivers/gpu/drm/bridge/aux-hpd-bridge.ko",
+ "drivers/gpu/drm/bridge/display-connector.ko",
+ "drivers/gpu/drm/bridge/lontium-lt9611.ko",
+ "drivers/gpu/drm/bridge/lontium-lt9611uxc.ko",
+ "drivers/gpu/drm/display/drm_display_helper.ko",
+ "drivers/gpu/drm/display/drm_dp_aux_bus.ko",
+ "drivers/gpu/drm/drm_exec.ko",
+ "drivers/gpu/drm/drm_gpuvm.ko",
+ "drivers/gpu/drm/msm/msm.ko",
+ "drivers/gpu/drm/panel/panel-novatek-nt36672e.ko",
+ "drivers/gpu/drm/panel/panel-visionox-vtdr6130.ko",
+ "drivers/gpu/drm/scheduler/gpu-sched.ko",
+ "drivers/hwspinlock/qcom_hwspinlock.ko",
+ "drivers/i2c/busses/i2c-designware-core.ko",
+ "drivers/i2c/busses/i2c-designware-platform.ko",
+ "drivers/i2c/busses/i2c-qcom-geni.ko",
+ "drivers/i2c/busses/i2c-qup.ko",
+ "drivers/i2c/busses/i2c-rk3x.ko",
+ "drivers/i2c/i2c-dev.ko",
+ "drivers/i2c/i2c-mux.ko",
+ "drivers/i2c/muxes/i2c-mux-pca954x.ko",
+ "drivers/iio/adc/qcom-spmi-adc5.ko",
+ "drivers/iio/adc/qcom-vadc-common.ko",
+ "drivers/input/misc/pm8941-pwrkey.ko",
+ "drivers/interconnect/icc-clk.ko",
+ "drivers/interconnect/qcom/icc-bcm-voter.ko",
+ "drivers/interconnect/qcom/icc-osm-l3.ko",
+ "drivers/interconnect/qcom/icc-rpmh.ko",
+ "drivers/interconnect/qcom/qnoc-sc7280.ko",
+ "drivers/interconnect/qcom/qnoc-sdm845.ko",
+ "drivers/interconnect/qcom/qnoc-sm8250.ko",
+ "drivers/interconnect/qcom/qnoc-sm8450.ko",
+ "drivers/interconnect/qcom/qnoc-sm8550.ko",
+ "drivers/interconnect/qcom/qnoc-sm8650.ko",
+ "drivers/iommu/arm/arm-smmu/arm_smmu.ko",
+ "drivers/irqchip/qcom-pdc.ko",
+ "drivers/leds/rgb/leds-qcom-lpg.ko",
+ "drivers/mailbox/qcom-apcs-ipc-mailbox.ko",
+ "drivers/mailbox/qcom-ipcc.ko",
+ "drivers/mfd/qcom-spmi-pmic.ko",
+ "drivers/mfd/wcd934x.ko",
+ "drivers/misc/fastrpc.ko",
+ "drivers/mmc/host/cqhci.ko",
+ "drivers/mmc/host/sdhci-msm.ko",
+ "drivers/mux/mux-core.ko",
+ "drivers/net/can/spi/mcp251xfd/mcp251xfd.ko",
+ "drivers/net/wireless/ath/ath.ko",
+ "drivers/net/wireless/ath/ath10k/ath10k_core.ko",
+ "drivers/net/wireless/ath/ath10k/ath10k_pci.ko",
+ "drivers/net/wireless/ath/ath10k/ath10k_snoc.ko",
+ "drivers/net/wireless/ath/ath11k/ath11k.ko",
+ "drivers/net/wireless/ath/ath11k/ath11k_ahb.ko",
+ "drivers/net/wireless/ath/ath11k/ath11k_pci.ko",
+ "drivers/net/wireless/ath/ath12k/ath12k.ko",
+ "drivers/net/wireless/ath/ath12k/wifi7/ath12k_wifi7.ko",
+ "drivers/nvmem/nvmem_qfprom.ko",
+ "drivers/pci/pwrctrl/pci-pwrctrl-pwrseq.ko",
+ "drivers/phy/phy-snps-eusb2.ko",
+ "drivers/phy/qualcomm/phy-qcom-edp.ko",
+ "drivers/phy/qualcomm/phy-qcom-eusb2-repeater.ko",
+ "drivers/phy/qualcomm/phy-qcom-qmp-combo.ko",
+ "drivers/phy/qualcomm/phy-qcom-qmp-pcie.ko",
+ "drivers/phy/qualcomm/phy-qcom-qmp-pcie-msm8996.ko",
+ "drivers/phy/qualcomm/phy-qcom-qmp-ufs.ko",
+ "drivers/phy/qualcomm/phy-qcom-qmp-usb.ko",
+ "drivers/phy/qualcomm/phy-qcom-qmp-usbc.ko",
+ "drivers/phy/qualcomm/phy-qcom-qusb2.ko",
+ "drivers/phy/qualcomm/phy-qcom-snps-femto-v2.ko",
+ "drivers/phy/qualcomm/phy-qcom-usb-hs.ko",
+ "drivers/pinctrl/qcom/pinctrl-lpass-lpi.ko",
+ "drivers/pinctrl/qcom/pinctrl-msm.ko",
+ "drivers/pinctrl/qcom/pinctrl-sc7280.ko",
+ "drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.ko",
+ "drivers/pinctrl/qcom/pinctrl-sdm845.ko",
+ "drivers/pinctrl/qcom/pinctrl-sm8250.ko",
+ "drivers/pinctrl/qcom/pinctrl-sm8250-lpass-lpi.ko",
+ "drivers/pinctrl/qcom/pinctrl-sm8450.ko",
+ "drivers/pinctrl/qcom/pinctrl-sm8550.ko",
+ "drivers/pinctrl/qcom/pinctrl-sm8550-lpass-lpi.ko",
+ "drivers/pinctrl/qcom/pinctrl-sm8650.ko",
+ "drivers/pinctrl/qcom/pinctrl-sm8650-lpass-lpi.ko",
+ "drivers/pinctrl/qcom/pinctrl-spmi-gpio.ko",
+ "drivers/pinctrl/qcom/pinctrl-spmi-mpp.ko",
+ "drivers/pmdomain/qcom/cpr.ko",
+ "drivers/pmdomain/qcom/rpmhpd.ko",
+ "drivers/power/reset/qcom-pon.ko",
+ "drivers/power/reset/reboot-mode.ko",
+ "drivers/power/reset/syscon-reboot-mode.ko",
+ "drivers/power/sequencing/pwrseq-qcom-wcn.ko",
+ "drivers/power/supply/qcom_battmgr.ko",
+ "drivers/regulator/gpio-regulator.ko",
+ "drivers/regulator/qcom-rpmh-regulator.ko",
+ "drivers/regulator/qcom_spmi-regulator.ko",
+ "drivers/regulator/qcom_usb_vbus-regulator.ko",
+ "drivers/remoteproc/qcom_common.ko",
+ "drivers/remoteproc/qcom_pil_info.ko",
+ "drivers/remoteproc/qcom_q6v5.ko",
+ "drivers/remoteproc/qcom_q6v5_adsp.ko",
+ "drivers/remoteproc/qcom_q6v5_mss.ko",
+ "drivers/remoteproc/qcom_q6v5_pas.ko",
+ "drivers/remoteproc/qcom_q6v5_wcss.ko",
+ "drivers/remoteproc/qcom_sysmon.ko",
+ "drivers/reset/reset-qcom-aoss.ko",
+ "drivers/reset/reset-qcom-pdc.ko",
+ "drivers/rpmsg/qcom_glink.ko",
+ "drivers/rpmsg/qcom_glink_rpm.ko",
+ "drivers/rpmsg/qcom_glink_smem.ko",
+ "drivers/rpmsg/qcom_smd.ko",
+ "drivers/rpmsg/rpmsg_ns.ko",
+ "drivers/rtc/rtc-pm8xxx.ko",
+ "drivers/slimbus/slim-qcom-ngd-ctrl.ko",
+ "drivers/slimbus/slimbus.ko",
+ "drivers/soc/qcom/apr.ko",
+ "drivers/soc/qcom/cmd-db.ko",
+ "drivers/soc/qcom/llcc-qcom.ko",
+ "drivers/soc/qcom/mdt_loader.ko",
+ "drivers/soc/qcom/pdr_interface.ko",
+ "drivers/soc/qcom/pmic_glink.ko",
+ "drivers/soc/qcom/pmic_glink_altmode.ko",
+ "drivers/soc/qcom/qcom_aoss.ko",
+ "drivers/soc/qcom/qcom_ice.ko",
+ "drivers/soc/qcom/qcom_pd_mapper.ko",
+ "drivers/soc/qcom/qcom_pdr_msg.ko",
+ "drivers/soc/qcom/qcom_rpmh.ko",
+ "drivers/soc/qcom/qmi_helpers.ko",
+ "drivers/soc/qcom/rmtfs_mem.ko",
+ "drivers/soc/qcom/smem.ko",
+ "drivers/soc/qcom/smp2p.ko",
+ "drivers/soc/qcom/smsm.ko",
+ "drivers/soc/qcom/socinfo.ko",
+ "drivers/soc/qcom/spm.ko",
+ "drivers/soc/qcom/ubwc_config.ko",
+ "drivers/soundwire/soundwire-bus.ko",
+ "drivers/soundwire/soundwire-qcom.ko",
+ "drivers/spi/spi-geni-qcom.ko",
+ "drivers/spi/spi-pl022.ko",
+ "drivers/spi/spi-qcom-qspi.ko",
+ "drivers/spi/spi-qup.ko",
+ "drivers/spmi/spmi-pmic-arb.ko",
+ "drivers/thermal/qcom/lmh.ko",
+ "drivers/thermal/qcom/qcom-spmi-adc-tm5.ko",
+ "drivers/thermal/qcom/qcom-spmi-temp-alarm.ko",
+ "drivers/thermal/qcom/qcom_tsens.ko",
+ "drivers/tty/serial/msm_serial.ko",
+ "drivers/ufs/host/ufs-qcom.ko",
+ "drivers/usb/common/ulpi.ko",
+ "drivers/usb/host/ohci-hcd.ko",
+ "drivers/usb/host/ohci-pci.ko",
+ "drivers/usb/host/ohci-platform.ko",
+ "drivers/usb/host/xhci-pci-renesas.ko",
+ "drivers/usb/typec/mux/fsa4480.ko",
+ "drivers/usb/typec/mux/nb7vpq904m.ko",
+ "drivers/usb/typec/mux/wcd939x-usbss.ko",
+ "drivers/usb/typec/tcpm/qcom/qcom_pmic_tcpm.ko",
+ "drivers/usb/typec/ucsi/ucsi_glink.ko",
+ "net/mac80211/mac80211.ko",
+ "net/qrtr/qrtr.ko",
+ "net/qrtr/qrtr-mhi.ko",
+ "net/qrtr/qrtr-smd.ko",
+ "net/qrtr/qrtr-tun.ko",
+ "net/wireless/cfg80211.ko",
+ "sound/soc/codecs/snd-soc-dmic.ko",
+ "sound/soc/codecs/snd-soc-hdmi-codec.ko",
+ "sound/soc/codecs/snd-soc-lpass-macro-common.ko",
+ "sound/soc/codecs/snd-soc-lpass-rx-macro.ko",
+ "sound/soc/codecs/snd-soc-lpass-tx-macro.ko",
+ "sound/soc/codecs/snd-soc-lpass-va-macro.ko",
+ "sound/soc/codecs/snd-soc-lpass-wsa-macro.ko",
+ "sound/soc/codecs/snd-soc-max98357a.ko",
+ "sound/soc/codecs/snd-soc-max98927.ko",
+ "sound/soc/codecs/snd-soc-rl6231.ko",
+ "sound/soc/codecs/snd-soc-rt5663.ko",
+ "sound/soc/codecs/snd-soc-rt5682.ko",
+ "sound/soc/codecs/snd-soc-rt5682-i2c.ko",
+ "sound/soc/codecs/snd-soc-rt5682s.ko",
+ "sound/soc/codecs/snd-soc-wcd-classh.ko",
+ "sound/soc/codecs/snd-soc-wcd-common.ko",
+ "sound/soc/codecs/snd-soc-wcd-mbhc.ko",
+ "sound/soc/codecs/snd-soc-wcd9335.ko",
+ "sound/soc/codecs/snd-soc-wcd934x.ko",
+ "sound/soc/codecs/snd-soc-wcd938x.ko",
+ "sound/soc/codecs/snd-soc-wcd938x-sdw.ko",
+ "sound/soc/codecs/snd-soc-wcd939x.ko",
+ "sound/soc/codecs/snd-soc-wcd939x-sdw.ko",
+ "sound/soc/codecs/snd-soc-wsa881x.ko",
+ "sound/soc/codecs/snd-soc-wsa884x.ko",
+ "sound/soc/qcom/qdsp6/q6adm.ko",
+ "sound/soc/qcom/qdsp6/q6afe.ko",
+ "sound/soc/qcom/qdsp6/q6afe-clocks.ko",
+ "sound/soc/qcom/qdsp6/q6afe-dai.ko",
+ "sound/soc/qcom/qdsp6/q6apm-dai.ko",
+ "sound/soc/qcom/qdsp6/q6apm-lpass-dais.ko",
+ "sound/soc/qcom/qdsp6/q6asm.ko",
+ "sound/soc/qcom/qdsp6/q6asm-dai.ko",
+ "sound/soc/qcom/qdsp6/q6core.ko",
+ "sound/soc/qcom/qdsp6/q6prm.ko",
+ "sound/soc/qcom/qdsp6/q6prm-clocks.ko",
+ "sound/soc/qcom/qdsp6/q6routing.ko",
+ "sound/soc/qcom/qdsp6/snd-q6apm.ko",
+ "sound/soc/qcom/qdsp6/snd-q6dsp-common.ko",
+ "sound/soc/qcom/snd-soc-lpass-cdc-dma.ko",
+ "sound/soc/qcom/snd-soc-lpass-cpu.ko",
+ "sound/soc/qcom/snd-soc-lpass-hdmi.ko",
+ "sound/soc/qcom/snd-soc-lpass-platform.ko",
+ "sound/soc/qcom/snd-soc-lpass-sc7280.ko",
+ "sound/soc/qcom/snd-soc-qcom-common.ko",
+ "sound/soc/qcom/snd-soc-qcom-sdw.ko",
+ "sound/soc/qcom/snd-soc-sc7280.ko",
+ "sound/soc/qcom/snd-soc-sc8280xp.ko",
+ "sound/soc/qcom/snd-soc-sdm845.ko",
+ "sound/soc/qcom/snd-soc-sm8250.ko",
+]
+
+_DB845C_WATCHDOG_MODULE_OUTS = [
+ "drivers/watchdog/pm8916_wdt.ko",
+ "drivers/watchdog/qcom-wdt.ko",
+]
+
+kernel_build(
+ name = "db845c",
+ srcs = [":kernel_aarch64_sources"],
+ outs = [
+ "arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dtb",
+ "arch/arm64/boot/dts/qcom/qrb5165-rb5.dtb",
+ "arch/arm64/boot/dts/qcom/sdm845-db845c.dtb",
+ "arch/arm64/boot/dts/qcom/sm8450-hdk.dtb",
+ "arch/arm64/boot/dts/qcom/sm8450-qrd.dtb",
+ "arch/arm64/boot/dts/qcom/sm8550-hdk.dtb",
+ "arch/arm64/boot/dts/qcom/sm8550-qrd.dtb",
+ "arch/arm64/boot/dts/qcom/sm8650-hdk.dtb",
+ "arch/arm64/boot/dts/qcom/sm8650-qrd.dtb",
+ ],
+ arch = "arm64",
+ # Enable mixed build.
+ base_kernel = ":kernel_aarch64",
+ defconfig = "arch/arm64/configs/gki_defconfig",
+ generate_out_targets = False,
+ make_goals = [
+ "modules",
+ "qcom/qrb5165-rb5.dtb",
+ "qcom/qcs6490-rb3gen2.dtb",
+ "qcom/sdm845-db845c.dtb",
+ "qcom/sm8450-hdk.dtb",
+ "qcom/sm8450-qrd.dtb",
+ "qcom/sm8550-hdk.dtb",
+ "qcom/sm8550-qrd.dtb",
+ "qcom/sm8650-hdk.dtb",
+ "qcom/sm8650-qrd.dtb",
+ ],
+ makefile = ":Makefile",
+ module_outs = _DB845C_MODULE_OUTS + select({
+ "//build/kernel/kleaf:kgdb_is_true": [],
+ "//conditions:default": _DB845C_WATCHDOG_MODULE_OUTS,
+ }),
+ pre_defconfig_fragments = ["arch/arm64/configs/db845c_gki.fragment"],
+ strip_modules = select({
+ "//build/kernel/kleaf:debug_is_true": False,
+ "//conditions:default": True,
+ }),
+)
+
+kernel_modules_install(
+ name = "db845c_modules_install",
+ kernel_build = ":db845c",
+)
+
+merged_kernel_uapi_headers(
+ name = "db845c_merged_kernel_uapi_headers",
+ kernel_build = ":db845c",
+)
+
+initramfs(
+ name = "db845c_initramfs",
+ kernel_modules_install = ":db845c_modules_install",
+ visibility = ["//visibility:private"],
+)
+
+pkg_files(
+ name = "db845c_dist_files",
+ srcs = [
+ ":db845c",
+ ":db845c_initramfs",
+ ":db845c_modules_install",
+ ":db845c_merged_kernel_uapi_headers",
+ # Mixed build: Additional GKI artifacts.
+ ":kernel_aarch64",
+ ":kernel_aarch64_modules",
+ ":kernel_aarch64_additional_artifacts",
+ ":tests_zip_arm64",
+ ],
+ strip_prefix = strip_prefix.files_only(),
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "db845c_dist",
+ srcs = [":db845c_dist_files"],
+ destdir = "out/db845/dist",
+)
+
+_ROCKPI4_MODULE_OUTS = [
+ # keep sorted
+ "drivers/char/hw_random/virtio-rng.ko",
+ "drivers/clk/clk-rk808.ko",
+ "drivers/cpufreq/cpufreq-dt.ko",
+ "drivers/dma/pl330.ko",
+ "drivers/gpu/drm/bridge/analogix/analogix_dp.ko",
+ "drivers/gpu/drm/bridge/synopsys/dw-hdmi.ko",
+ "drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.ko",
+ "drivers/gpu/drm/display/drm_display_helper.ko",
+ "drivers/gpu/drm/display/drm_dp_aux_bus.ko",
+ "drivers/gpu/drm/drm_dma_helper.ko",
+ "drivers/gpu/drm/rockchip/rockchipdrm.ko",
+ "drivers/i2c/busses/i2c-rk3x.ko",
+ "drivers/iio/adc/rockchip_saradc.ko",
+ "drivers/iio/buffer/industrialio-triggered-buffer.ko",
+ "drivers/iio/buffer/kfifo_buf.ko",
+ "drivers/mfd/rk8xx-core.ko",
+ "drivers/mfd/rk8xx-i2c.ko",
+ "drivers/mfd/rk8xx-spi.ko",
+ "drivers/mmc/core/pwrseq_simple.ko",
+ "drivers/mmc/host/cqhci.ko",
+ "drivers/mmc/host/dw_mmc.ko",
+ "drivers/mmc/host/dw_mmc-pltfm.ko",
+ "drivers/mmc/host/dw_mmc-rockchip.ko",
+ "drivers/mmc/host/sdhci-of-arasan.ko",
+ "drivers/net/ethernet/stmicro/stmmac/dwmac-rk.ko",
+ "drivers/net/ethernet/stmicro/stmmac/dwmac-sun55i.ko",
+ "drivers/net/ethernet/stmicro/stmmac/stmmac.ko",
+ "drivers/net/ethernet/stmicro/stmmac/stmmac-platform.ko",
+ "drivers/net/mdio/mdio-mux.ko",
+ "drivers/net/net_failover.ko",
+ "drivers/net/pcs/pcs_xpcs.ko",
+ "drivers/net/virtio_net.ko",
+ "drivers/pci/controller/pcie-rockchip-host.ko",
+ "drivers/phy/rockchip/phy-rockchip-emmc.ko",
+ "drivers/phy/rockchip/phy-rockchip-inno-usb2.ko",
+ "drivers/phy/rockchip/phy-rockchip-pcie.ko",
+ "drivers/phy/rockchip/phy-rockchip-typec.ko",
+ "drivers/pwm/pwm-rockchip.ko",
+ "drivers/regulator/fan53555.ko",
+ "drivers/regulator/pwm-regulator.ko",
+ "drivers/regulator/rk808-regulator.ko",
+ "drivers/rtc/rtc-rk808.ko",
+ "drivers/soc/rockchip/io-domain.ko",
+ "drivers/thermal/rockchip_thermal.ko",
+ "drivers/usb/host/ohci-hcd.ko",
+ "drivers/usb/host/ohci-platform.ko",
+ "net/core/failover.ko",
+]
+
+_ROCKPI4_WATCHDOG_MODULE_OUTS = [
+ # keep sorted
+ "drivers/watchdog/dw_wdt.ko",
+]
+
+# TODO(b/258259749): Convert rockpi4 to mixed build
+kernel_build(
+ name = "rockpi4",
+ srcs = [":kernel_aarch64_sources"],
+ outs = [
+ "Image",
+ "System.map",
+ "modules.builtin",
+ "modules.builtin.modinfo",
+ "vmlinux",
+ "vmlinux.symvers",
+ ],
+ arch = "arm64",
+ defconfig = "arch/arm64/configs/gki_defconfig",
+ generate_out_targets = False,
+ kmi_symbol_list_strict_mode = False,
+ make_goals = [
+ "Image",
+ "modules",
+ ],
+ makefile = ":Makefile",
+ module_outs = get_gki_modules_list("arm64") + get_kunit_modules_list("arm64") + _ROCKPI4_MODULE_OUTS + select({
+ "//build/kernel/kleaf:kgdb_is_true": [],
+ "//conditions:default": _ROCKPI4_WATCHDOG_MODULE_OUTS,
+ }),
+ pre_defconfig_fragments = ["arch/arm64/configs/rockpi4_gki.fragment"],
+ trim_nonlisted_kmi = False,
+ visibility = ["//visibility:private"],
+)
+
+kernel_modules_install(
+ name = "rockpi4_modules_install",
+ kernel_build = ":rockpi4",
+)
+
+initramfs(
+ name = "rockpi4_initramfs",
+ kernel_modules_install = ":rockpi4_modules_install",
+ ramdisk_compression = "lz4",
+ visibility = ["//visibility:private"],
+)
+
+pkg_files(
+ name = "rockpi4_dist_files",
+ srcs = [
+ ":rockpi4",
+ ":rockpi4_initramfs",
+ ":rockpi4_modules_install",
+ "//common-modules/virtual-device:rk3399-rock-pi-4b",
+ ],
+ strip_prefix = strip_prefix.files_only(),
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "rockpi4_dist",
+ srcs = [":rockpi4_dist_files"],
+ destdir = "out/rockpi4/dist",
+)
+
+_AMLOGIC_MODULE_OUTS = [
+ # keep sorted
+ "drivers/char/hw_random/meson-rng.ko",
+ "drivers/clk/clk-pwm.ko",
+ "drivers/clk/meson/axg.ko",
+ "drivers/clk/meson/axg-aoclk.ko",
+ "drivers/clk/meson/axg-audio.ko",
+ "drivers/clk/meson/clk-cpu-dyndiv.ko",
+ "drivers/clk/meson/clk-phase.ko",
+ "drivers/clk/meson/g12a.ko",
+ "drivers/clk/meson/g12a-aoclk.ko",
+ "drivers/clk/meson/gxbb.ko",
+ "drivers/clk/meson/gxbb-aoclk.ko",
+ "drivers/clk/meson/meson-aoclk.ko",
+ "drivers/clk/meson/meson-eeclk.ko",
+ "drivers/clk/meson/sclk-div.ko",
+ "drivers/clk/meson/vclk.ko",
+ "drivers/crypto/amlogic/amlogic-gxl-crypto.ko",
+ "drivers/firmware/meson/meson_sm.ko",
+ "drivers/gpu/drm/bridge/display-connector.ko",
+ "drivers/gpu/drm/bridge/synopsys/dw-hdmi.ko",
+ "drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.ko",
+ "drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.ko",
+ "drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.ko",
+ "drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.ko",
+ "drivers/gpu/drm/display/drm_display_helper.ko",
+ "drivers/gpu/drm/drm_dma_helper.ko",
+ "drivers/gpu/drm/meson/meson-drm.ko",
+ "drivers/gpu/drm/meson/meson_dw_hdmi.ko",
+ "drivers/gpu/drm/meson/meson_dw_mipi_dsi.ko",
+ "drivers/i2c/busses/i2c-meson.ko",
+ "drivers/iio/adc/meson_saradc.ko",
+ "drivers/irqchip/irq-meson-gpio.ko",
+ "drivers/leds/leds-gpio.ko",
+ "drivers/media/cec/platform/meson/ao-cec.ko",
+ "drivers/media/cec/platform/meson/ao-cec-g12a.ko",
+ "drivers/media/platform/amlogic/meson-ge2d/ge2d.ko",
+ "drivers/media/rc/meson-ir.ko",
+ "drivers/mfd/khadas-mcu.ko",
+ "drivers/mmc/core/pwrseq_emmc.ko",
+ "drivers/mmc/core/pwrseq_simple.ko",
+ "drivers/mmc/host/meson-gx-mmc.ko",
+ "drivers/net/ethernet/stmicro/stmmac/dwmac-generic.ko",
+ "drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.ko",
+ "drivers/net/ethernet/stmicro/stmmac/dwmac-meson.ko",
+ "drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.ko",
+ "drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.ko",
+ "drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.ko",
+ "drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.ko",
+ "drivers/net/ethernet/stmicro/stmmac/stmmac.ko",
+ "drivers/net/ethernet/stmicro/stmmac/stmmac-platform.ko",
+ "drivers/net/mdio/mdio-mux.ko",
+ "drivers/net/mdio/mdio-mux-meson-g12a.ko",
+ "drivers/net/mdio/mdio-mux-meson-gxl.ko",
+ "drivers/net/pcs/pcs_xpcs.ko",
+ "drivers/net/phy/meson-gxl.ko",
+ "drivers/net/phy/realtek.ko",
+ "drivers/net/phy/smsc.ko",
+ "drivers/pci/controller/dwc/pci-meson.ko",
+ "drivers/phy/amlogic/phy-meson-axg-mipi-pcie-analog.ko",
+ "drivers/phy/amlogic/phy-meson-axg-pcie.ko",
+ "drivers/phy/amlogic/phy-meson-g12a-usb2.ko",
+ "drivers/phy/amlogic/phy-meson-g12a-usb3-pcie.ko",
+ "drivers/phy/amlogic/phy-meson-gxl-usb2.ko",
+ "drivers/phy/amlogic/phy-meson8b-usb2.ko",
+ "drivers/pinctrl/meson/pinctrl-amlogic-c3.ko",
+ "drivers/pinctrl/meson/pinctrl-amlogic-t7.ko",
+ "drivers/pinctrl/meson/pinctrl-meson.ko",
+ "drivers/pinctrl/meson/pinctrl-meson-a1.ko",
+ "drivers/pinctrl/meson/pinctrl-meson-axg.ko",
+ "drivers/pinctrl/meson/pinctrl-meson-axg-pmx.ko",
+ "drivers/pinctrl/meson/pinctrl-meson-g12a.ko",
+ "drivers/pinctrl/meson/pinctrl-meson-gxbb.ko",
+ "drivers/pinctrl/meson/pinctrl-meson-gxl.ko",
+ "drivers/pinctrl/meson/pinctrl-meson-s4.ko",
+ "drivers/pinctrl/meson/pinctrl-meson8-pmx.ko",
+ "drivers/pmdomain/amlogic/meson-ee-pwrc.ko",
+ "drivers/pmdomain/amlogic/meson-secure-pwrc.ko",
+ "drivers/pwm/pwm-meson.ko",
+ "drivers/regulator/pwm-regulator.ko",
+ "drivers/reset/reset-meson.ko",
+ "drivers/reset/reset-meson-audio-arb.ko",
+ "drivers/rtc/rtc-meson-vrtc.ko",
+ "drivers/soc/amlogic/meson-canvas.ko",
+ "drivers/soc/amlogic/meson-clk-measure.ko",
+ "drivers/spi/spi-meson-spicc.ko",
+ "drivers/spi/spi-meson-spifc.ko",
+ "drivers/thermal/amlogic_thermal.ko",
+ "drivers/thermal/khadas_mcu_fan.ko",
+ "drivers/tty/serial/meson_uart.ko",
+ "drivers/usb/dwc2/dwc2.ko",
+ "drivers/usb/dwc3/dwc3-meson-g12a.ko",
+ "sound/soc/codecs/snd-soc-dmic.ko",
+ "sound/soc/codecs/snd-soc-hdmi-codec.ko",
+ "sound/soc/codecs/snd-soc-spdif-rx.ko",
+ "sound/soc/codecs/snd-soc-spdif-tx.ko",
+ "sound/soc/meson/snd-soc-meson-aiu.ko",
+ "sound/soc/meson/snd-soc-meson-axg-fifo.ko",
+ "sound/soc/meson/snd-soc-meson-axg-frddr.ko",
+ "sound/soc/meson/snd-soc-meson-axg-pdm.ko",
+ "sound/soc/meson/snd-soc-meson-axg-sound-card.ko",
+ "sound/soc/meson/snd-soc-meson-axg-spdifin.ko",
+ "sound/soc/meson/snd-soc-meson-axg-spdifout.ko",
+ "sound/soc/meson/snd-soc-meson-axg-tdm-formatter.ko",
+ "sound/soc/meson/snd-soc-meson-axg-tdm-interface.ko",
+ "sound/soc/meson/snd-soc-meson-axg-tdmin.ko",
+ "sound/soc/meson/snd-soc-meson-axg-tdmout.ko",
+ "sound/soc/meson/snd-soc-meson-axg-toddr.ko",
+ "sound/soc/meson/snd-soc-meson-card-utils.ko",
+ "sound/soc/meson/snd-soc-meson-codec-glue.ko",
+ "sound/soc/meson/snd-soc-meson-g12a-toacodec.ko",
+ "sound/soc/meson/snd-soc-meson-g12a-tohdmitx.ko",
+ "sound/soc/meson/snd-soc-meson-gx-sound-card.ko",
+ "sound/soc/meson/snd-soc-meson-t9015.ko",
+]
+
+_AMLOGIC_WATCHDOG_MODULE_OUTS = [
+ # keep sorted
+ "drivers/watchdog/meson_gxbb_wdt.ko",
+ "drivers/watchdog/meson_wdt.ko",
+]
+
+kernel_build(
+ name = "yukawa",
+ srcs = [":kernel_aarch64_sources"],
+ outs = [
+ "Image",
+ "System.map",
+ "arch/arm64/boot/dts/amlogic/meson-g12a-sei510.dtb",
+ "arch/arm64/boot/dts/amlogic/meson-g12b-a311d-khadas-vim3.dtb",
+ "arch/arm64/boot/dts/amlogic/meson-sm1-khadas-vim3l.dtb",
+ "arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dtb",
+ "modules.builtin",
+ "modules.builtin.modinfo",
+ "vmlinux",
+ "vmlinux.symvers",
+ ],
+ arch = "arm64",
+ defconfig = "arch/arm64/configs/gki_defconfig",
+ generate_out_targets = False,
+ make_goals = [
+ "Image",
+ "modules",
+ "amlogic/meson-g12a-sei510.dtb",
+ "amlogic/meson-sm1-sei610.dtb",
+ "amlogic/meson-g12b-a311d-khadas-vim3.dtb",
+ "amlogic/meson-sm1-khadas-vim3l.dtb",
+ ],
+ makefile = ":Makefile",
+ module_outs = get_gki_modules_list("arm64") + get_kunit_modules_list("arm64") + _AMLOGIC_MODULE_OUTS + select({
+ "//build/kernel/kleaf:kgdb_is_true": [],
+ "//conditions:default": _AMLOGIC_WATCHDOG_MODULE_OUTS,
+ }),
+ pre_defconfig_fragments = ["arch/arm64/configs/amlogic_gki.fragment"],
+ visibility = ["//visibility:private"],
+)
+
+kernel_modules_install(
+ name = "yukawa_modules_install",
+ kernel_build = ":yukawa",
+ visibility = ["//visibility:private"],
+)
+
+initramfs(
+ name = "yukawa_initramfs",
+ kernel_modules_install = ":yukawa_modules_install",
+ visibility = ["//visibility:private"],
+)
+
+pkg_files(
+ name = "yukawa_dist_files",
+ srcs = [
+ ":yukawa",
+ ":yukawa_initramfs",
+ ":yukawa_modules_install",
+ ],
+ strip_prefix = strip_prefix.files_only(),
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "yukawa_dist",
+ srcs = [":yukawa_dist_files"],
+ visibility = ["//visibility:private"],
+ destdir = "out/yukawa/dist",
+)
+
+# allmodconfig build tests.
+# These are build tests only, so:
+# - outs are intentionally set to empty to not copy anything to DIST_DIR
+# - --allow-undeclared-modules must be used so modules are not declared or copied.
+# - No dist target because these are build tests. We don't care about the artifacts.
+
+# tools/bazel build --allow_undeclared_modules //common:kernel_aarch64_allmodconfig
+kernel_build(
+ name = "kernel_aarch64_allmodconfig",
+ srcs = [":kernel_aarch64_sources"],
+ # Hack to actually check the build.
+ # Otherwise, Bazel thinks that there are no output files, and skip building.
+ outs = [".config"],
+ arch = "arm64",
+ defconfig = "//build/kernel/kleaf:allmodconfig",
+ make_goals = [
+ "Image",
+ "modules",
+ ],
+ makefile = ":Makefile",
+ post_defconfig_fragments = ["arch/arm64/configs/allmodconfig.fragment"],
+ visibility = ["//visibility:private"],
+)
+
+# tools/bazel build --allow_undeclared_modules //common:kernel_x86_64_allmodconfig
+kernel_build(
+ name = "kernel_x86_64_allmodconfig",
+ srcs = [":kernel_x86_64_sources"],
+ # Hack to actually check the build.
+ # Otherwise, Bazel thinks that there are no output files, and skip building.
+ outs = [".config"],
+ arch = "x86_64",
+ defconfig = "//build/kernel/kleaf:allmodconfig",
+ make_goals = [
+ "bzImage",
+ "modules",
+ ],
+ makefile = ":Makefile",
+ post_defconfig_fragments = ["arch/x86/configs/allmodconfig.fragment"],
+ visibility = ["//visibility:private"],
+)
+
+# tools/bazel build //common:allmodconfig_modinfo_summaries --allow_undeclared_modules
+modinfo_summary_report(
+ name = "allmodconfig_modinfo_summaries",
+ deps = [
+ ":kernel_aarch64_allmodconfig",
+ ":kernel_x86_64_allmodconfig",
+ ],
+)
+
+# tools/bazel build --allow_undeclared_modules //common:kernel_arm_allmodconfig
+kernel_build(
+ name = "kernel_arm_allmodconfig",
+ # We don't have an arm-specific source list, so use the common one.
+ srcs = [":common_kernel_sources"],
+ # Hack to actually check the build.
+ # Otherwise, Bazel thinks that there are no output files, and skip building.
+ outs = [".config"],
+ arch = "arm",
+ defconfig = "//build/kernel/kleaf:allmodconfig",
+ make_goals = [
+ "zImage",
+ "modules",
+ ],
+ makefile = ":Makefile",
+ post_defconfig_fragments = ["arch/arm/configs/allmodconfig.fragment"],
+ visibility = ["//visibility:private"],
+)
+
+# libbpf
+cc_library(
+ name = "tools_includes",
+ hdrs = glob([
+ "tools/include/**/*.h",
+ ]),
+ includes = [
+ "tools/include",
+ "tools/include/uapi",
+ ],
+)
+
+cc_library(
+ name = "bpf_x86",
+ srcs = glob([
+ "tools/arch/x86/include/**/*.h",
+ "tools/lib/bpf/*.c",
+ ]),
+ hdrs = glob([
+ "tools/lib/bpf/*.h",
+ ]),
+ implementation_deps = [":tools_includes"],
+ strip_include_prefix = "tools/lib",
+ visibility = ["//visibility:public"],
+ deps = ["//prebuilts/kernel-build-tools:imported_libs"],
+)
+
+pahole(
+ name = "pahole",
+ visibility = ["//visibility:private"],
+ deps = [":bpf_x86"],
+)
+
+# KUnit test targets
+_KUNIT_DIR = "testcases/kunit"
+
+pkg_files(
+ name = "kunit_tests_config_arm64",
+ srcs = [
+ "tools/testing/kunit/android/tradefed_configs/config_arm64.xml",
+ ],
+ renames = {
+ "tools/testing/kunit/android/tradefed_configs/config_arm64.xml": _KUNIT_DIR + "/kunit.config",
+ },
+ visibility = ["//visibility:private"],
+)
+
+pkg_files(
+ name = "kunit_tests_config_x86_64",
+ srcs = [
+ "tools/testing/kunit/android/tradefed_configs/config_x86_64.xml",
+ ],
+ renames = {
+ "tools/testing/kunit/android/tradefed_configs/config_x86_64.xml": _KUNIT_DIR + "/kunit.config",
+ },
+ visibility = ["//visibility:private"],
+)
+
+pkg_files(
+ name = "kunit_modules_arm64",
+ srcs = get_gki_kunit_modules("arm64", "4k"),
+ prefix = _KUNIT_DIR + "/arm64",
+ visibility = ["//visibility:private"],
+)
+
+pkg_files(
+ name = "kunit_modules_arm64_16k",
+ srcs = get_gki_kunit_modules("arm64", "16k"),
+ prefix = _KUNIT_DIR + "/arm64",
+ visibility = ["//visibility:private"],
+)
+
+pkg_files(
+ name = "kunit_modules_x86_64",
+ srcs = get_gki_kunit_modules("x86_64"),
+ prefix = _KUNIT_DIR + "/x86_64",
+ visibility = ["//visibility:private"],
+)
+
+pkg_filegroup(
+ name = "kunit_tests_arm64_pkg_files",
+ srcs = [
+ ":kunit_modules_arm64",
+ ":kunit_tests_config_arm64",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+pkg_filegroup(
+ name = "kunit_tests_arm64_16k_pkg_files",
+ srcs = [
+ ":kunit_modules_arm64_16k",
+ ":kunit_tests_config_arm64",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+pkg_filegroup(
+ name = "kunit_tests_x86_64_pkg_files",
+ srcs = [
+ ":kunit_modules_x86_64",
+ ":kunit_tests_config_x86_64",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+# KUnit build rules for local execution workflow
+# Run by bazel run //common:kunit_tests_arm64 -- -v --destdir /tmp/kernel_tests/
+pkg_install(
+ name = "kunit_tests_arm64",
+ srcs = [
+ ":kunit_tests_arm64_pkg_files",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "kunit_tests_arm64_16k",
+ srcs = [
+ ":kunit_tests_arm64_16k_pkg_files",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "kunit_tests_x86_64",
+ srcs = [
+ ":kunit_tests_x86_64_pkg_files",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+py_library(
+ name = "kunit_parser",
+ srcs = [
+ "tools/testing/kunit/kunit_parser.py",
+ "tools/testing/kunit/kunit_printer.py",
+ ],
+ imports = ["tools/testing/kunit"],
+ visibility = ["//visibility:public"],
+)
+
+# DDK Headers
+# All headers. These are the public targets for DDK modules to use.
+alias(
+ name = "all_headers",
+ actual = "all_headers_aarch64",
+ visibility = ["//visibility:public"],
+)
+
+ddk_headers(
+ name = "all_headers_aarch64",
+ hdrs = [":all_headers_allowlist_aarch64"] + select({
+ "//build/kernel/kleaf:allow_ddk_unsafe_headers_set": [":all_headers_unsafe"],
+ "//conditions:default": [],
+ }),
+ visibility = ["//visibility:public"],
+)
+
+ddk_headers_archive(
+ name = "kernel_aarch64_ddk_headers_archive",
+ srcs = [
+ "all_headers_aarch64",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+ddk_headers(
+ name = "all_headers_arm",
+ hdrs = [":all_headers_allowlist_arm"] + select({
+ "//build/kernel/kleaf:allow_ddk_unsafe_headers_set": [":all_headers_unsafe"],
+ "//conditions:default": [],
+ }),
+ visibility = ["//visibility:public"],
+)
+
+ddk_headers_archive(
+ name = "kernel_x86_64_ddk_headers_archive",
+ srcs = [
+ "all_headers_x86_64",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+ddk_headers(
+ name = "all_headers_x86_64",
+ hdrs = [":all_headers_allowlist_x86_64"] + select({
+ "//build/kernel/kleaf:allow_ddk_unsafe_headers_set": [":all_headers_unsafe"],
+ "//conditions:default": [],
+ }),
+ visibility = ["//visibility:public"],
+)
+
+# Implementation details for DDK headers. The targets below cannot be directly
+# depended on by DDK modules.
+
+# Headers needed to include drivers/usb/host/xhci.h.
+ddk_headers(
+ name = "xhci_headers",
+ hdrs = [
+ "drivers/usb/core/hub.h",
+ "drivers/usb/core/usb.h",
+ "drivers/usb/host/pci-quirks.h",
+ "drivers/usb/host/xhci.h",
+ "drivers/usb/host/xhci-caps.h",
+ "drivers/usb/host/xhci-ext-caps.h",
+ "drivers/usb/host/xhci-plat.h",
+ "drivers/usb/host/xhci-port.h",
+ ],
+ linux_includes = [
+ "drivers/usb",
+ "drivers/usb/host",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+# Headers needed by ipu6-psys
+ddk_headers(
+ name = "ipu6_headers",
+ hdrs = [
+ "drivers/media/pci/intel/ipu6/ipu6.h",
+ "drivers/media/pci/intel/ipu6/ipu6-bus.h",
+ "drivers/media/pci/intel/ipu6/ipu6-buttress.h",
+ "drivers/media/pci/intel/ipu6/ipu6-cpd.h",
+ "drivers/media/pci/intel/ipu6/ipu6-dma.h",
+ "drivers/media/pci/intel/ipu6/ipu6-fw-com.h",
+ "drivers/media/pci/intel/ipu6/ipu6-mmu.h",
+ "drivers/media/pci/intel/ipu6/ipu6-platform-buttress-regs.h",
+ "drivers/media/pci/intel/ipu6/ipu6-platform-regs.h",
+ ],
+ linux_includes = [
+ "drivers/media/pci/intel/ipu6",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+# DDK headers allowlist. This is the list of all headers and include
+# directories that are safe to use in DDK modules.
+ddk_headers(
+ name = "all_headers_allowlist_aarch64",
+ hdrs = [
+ "drivers/dma/dmaengine.h",
+ "drivers/extcon/extcon.h",
+ "drivers/opp/opp.h",
+ "drivers/pci/controller/dwc/pcie-designware.h",
+ "drivers/pci/pci.h",
+ "drivers/thermal/thermal_core.h",
+ "drivers/thermal/thermal_debugfs.h",
+ "drivers/thermal/thermal_netlink.h",
+ "drivers/thermal/thermal_thresholds.h",
+ "drivers/ufs/core/ufshcd-crypto.h",
+ "drivers/ufs/core/ufshcd-priv.h",
+ "drivers/ufs/host/ufshcd-pltfrm.h",
+ "drivers/usb/dwc3/core.h",
+ "sound/usb/card.h",
+ "sound/usb/usbaudio.h",
+ ":all_headers_allowlist_aarch64_globs",
+ ":all_headers_allowlist_common_globs",
+ ":all_headers_allowlist_exynos",
+ ":xhci_headers",
+ ],
+ # The list of include directories where source files can #include headers
+ # from. In other words, these are the `-I` option to the C compiler.
+ # These are prepended to LINUXINCLUDE.
+ linux_includes = [
+ "arch/arm64/include",
+ "arch/arm64/include/uapi",
+ "drivers/dma",
+ "drivers/extcon",
+ "drivers/opp",
+ "drivers/pci",
+ "drivers/pci/controller/dwc",
+ "drivers/thermal",
+ "drivers/ufs",
+ "drivers/usb",
+ "sound/usb",
+ "include",
+ "include/uapi",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+ddk_headers(
+ name = "all_headers_allowlist_arm",
+ hdrs = [
+ ":all_headers_allowlist_arm_globs",
+ ":all_headers_allowlist_common_globs",
+ ],
+ # The list of include directories where source files can #include headers
+ # from. In other words, these are the `-I` option to the C compiler.
+ # These are prepended to LINUXINCLUDE.
+ linux_includes = [
+ "arch/arm/include",
+ "arch/arm/include/uapi",
+ "include",
+ "include/uapi",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+ddk_headers(
+ name = "all_headers_allowlist_x86_64",
+ hdrs = [
+ ":all_headers_allowlist_common_globs",
+ ":all_headers_allowlist_x86_64_globs",
+ ":ipu6_headers",
+ ],
+ # The list of include directories where source files can #include headers
+ # from. In other words, these are the `-I` option to the C compiler.
+ # These are prepended to LINUXINCLUDE.
+ linux_includes = [
+ "arch/x86/include",
+ "arch/x86/include/uapi",
+ "include",
+ "include/uapi",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+# List of DDK headers allowlist that are glob()-ed to avoid changes of BUILD
+# file when the list of files changes. All headers in these directories
+# are safe to use.
+# These are separate filegroup targets so the all_headers_allowlist_* are
+# more friendly to batch BUILD file update tools like buildozer.
+
+# globs() for arm64 only
+filegroup(
+ name = "all_headers_allowlist_aarch64_globs",
+ srcs = glob(["arch/arm64/include/**/*.h"]),
+ visibility = ["//visibility:private"],
+)
+
+# globs() for arm only
+filegroup(
+ name = "all_headers_allowlist_arm_globs",
+ srcs = glob(["arch/arm/include/**/*.h"]),
+ visibility = ["//visibility:private"],
+)
+
+# globs() for x86 only
+filegroup(
+ name = "all_headers_allowlist_x86_64_globs",
+ srcs = glob(["arch/x86/include/**/*.h"]),
+ visibility = ["//visibility:private"],
+)
+
+# globs() for all architectures
+filegroup(
+ name = "all_headers_allowlist_common_globs",
+ srcs = glob(["include/**/*.h"]),
+ visibility = ["//visibility:private"],
+)
+
+# DDK headers unsafe list. This is the list of all headers and include
+# directories that may be used during migration from kernel_module's, but
+# should be avoided in general.
+# Use with caution; items may:
+# - be removed without notice
+# - be moved into all_headers
+ddk_headers(
+ name = "all_headers_unsafe",
+ hdrs = [
+ "//build/kernel/kleaf:user_ddk_unsafe_headers",
+ ],
+ # The list of include directories where source files can #include headers
+ # from. In other words, these are the `-I` option to the C compiler.
+ # Unsafe include directories are appended to ccflags-y.
+ includes = [
+ ".",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+ddk_headers(
+ name = "all_headers_allowlist_exynos",
+ hdrs = [
+ "drivers/android/binder_alloc.h",
+ "drivers/android/binder_internal.h",
+ "drivers/android/binder_trace.h",
+ "drivers/android/dbitmap.h",
+ "kernel/sched/cpudeadline.h",
+ "kernel/sched/cpupri.h",
+ "kernel/sched/ext.h",
+ "kernel/sched/features.h",
+ "kernel/sched/sched.h",
+ "kernel/sched/stats.h",
+ "kernel/workqueue_internal.h",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+devicetree_library(
+ name = "dt-bindings",
+ hdrs = glob([
+ "scripts/dtc/include-prefixes/dt-bindings/**/*.h",
+ ]),
+ includes = ["scripts/dtc/include-prefixes"],
+ visibility = ["//visibility:private"],
+)
+
+devicetree_library(
+ name = "dtc_includes_aarch64",
+ hdrs = glob([
+ "scripts/dtc/include-prefixes/arm64/**/*.h",
+ "scripts/dtc/include-prefixes/arm64/**/*.dtsi",
+ ]),
+ includes = ["scripts/dtc/include-prefixes"],
+ visibility = ["//visibility:public"],
+ deps = [":dt-bindings"],
+)
+
+alias(
+ name = "dtc_includes_x86_64",
+ actual = ":dt-bindings",
+ visibility = ["//visibility:public"],
+)
+
+devicetree_library(
+ name = "all_exynos_google_dtsi",
+ hdrs = glob([
+ "arch/arm64/boot/dts/exynos/google/**/*.dtsi",
+ "arch/arm64/boot/dts/exynos/google/**/*.h",
+ ]),
+ includes = ["arch/arm64/boot/dts/exynos/google"],
+ visibility = ["//devices/google:__subpackages__"],
+ deps = ["//common:dtc_includes_aarch64"],
+)
+
+_KSELFTEST_DIR = "testcases/selftests"
+
+_KSELFTEST_COPTS = [
+ "-O3",
+ "-pthread",
+ "-std=gnu99",
+ "-include",
+ paths.join(
+ package_relative_label(":x").workspace_root,
+ package_relative_label(":x").package,
+ "tools/testing/selftests/android/include/bionic-compat.h",
+ ),
+] + select({
+ "//build/kernel/kleaf/platforms/config_settings:android_arm": ["-mcpu=cortex-a8"],
+ "//conditions:default": [],
+})
+
+cc_library(
+ name = "kselftest_headers_lib",
+ hdrs = glob(["tools/testing/selftests/*.h"]) + [
+ "tools/testing/selftests/android/include/bionic-compat.h",
+ ],
+ copts = _KSELFTEST_COPTS,
+ defines = [
+ "_GNU_SOURCE=",
+ ],
+ includes = [
+ "tools/testing/selftests",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_binderfs_binderfs_test",
+ srcs = ["tools/testing/selftests/filesystems/binderfs/binderfs_test.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_headers_lib"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_breakpoints_breakpoint_test",
+ srcs = select({
+ "//build/kernel/kleaf/platforms/config_settings:android_x86_64": ["tools/testing/selftests/breakpoints/breakpoint_test.c"],
+ "//build/kernel/kleaf/platforms/config_settings:android_i386": ["tools/testing/selftests/breakpoints/breakpoint_test.c"],
+ "//build/kernel/kleaf/platforms/config_settings:android_arm64": ["tools/testing/selftests/breakpoints/breakpoint_test_arm64.c"],
+ "//conditions:default": [],
+ }),
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_headers_lib"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_kcmp_kcmp_test",
+ srcs = ["tools/testing/selftests/kcmp/kcmp_test.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_headers_lib"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_net_tests_socket",
+ srcs = ["tools/testing/selftests/net/socket.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_headers_lib"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_net_tests_psock_tpacket",
+ srcs = ["tools/testing/selftests/net/psock_tpacket.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_psock_lib",
+ ],
+)
+
+cc_library(
+ name = "kselftest_psock_lib",
+ hdrs = ["tools/testing/selftests/net/psock_lib.h"],
+ includes = [
+ "tools/testing/selftests",
+ "tools/testing/selftests/net",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_net_tests_reuseport_dualstack",
+ srcs = ["tools/testing/selftests/net/reuseport_dualstack.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_headers_lib"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_net_tests_reuseaddr_conflict",
+ srcs = ["tools/testing/selftests/net/reuseaddr_conflict.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_headers_lib"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_ptrace_peeksiginfo",
+ srcs = ["tools/testing/selftests/ptrace/peeksiginfo.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_headers_lib"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_rtc_rtctest",
+ srcs = ["tools/testing/selftests/rtc/rtctest.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_headers_lib"],
+)
+
+cc_library(
+ name = "kselftest_vdso",
+ srcs = ["tools/testing/selftests/vDSO/parse_vdso.c"],
+ hdrs = [
+ "include/uapi/linux/auxvec.h",
+ "include/uapi/linux/elf.h",
+ "tools/testing/selftests/vDSO/parse_vdso.h",
+ "tools/testing/selftests/vDSO/vdso_call.h",
+ "tools/testing/selftests/vDSO/vdso_config.h",
+ ],
+ copts = _KSELFTEST_COPTS + [
+ "-I",
+ paths.join(
+ package_relative_label(":x").workspace_root,
+ package_relative_label(":x").package,
+ "include/uapi/",
+ ),
+ ],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_vdso_vdso_test_abi",
+ srcs = ["tools/testing/selftests/vDSO/vdso_test_abi.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_vdso",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_vdso_vdso_test_getcpu",
+ srcs = ["tools/testing/selftests/vDSO/vdso_test_getcpu.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_vdso",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_vdso_vdso_test_gettimeofday",
+ srcs = ["tools/testing/selftests/vDSO/vdso_test_gettimeofday.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_vdso",
+ ],
+)
+
+cc_library(
+ name = "kselftest_futex_headers_lib",
+ hdrs = glob(["tools/testing/selftests/futex/include/*.h"]),
+ copts = _KSELFTEST_COPTS,
+ visibility = ["//visibility:private"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_futex_futex_requeue_pi_mismatched_ops",
+ srcs = ["tools/testing/selftests/futex/functional/futex_requeue_pi_mismatched_ops.c"],
+ out = "futex_requeue_pi_mismatched_ops",
+ copts = _KSELFTEST_COPTS,
+ includes = [
+ "tools/testing/selftests",
+ "tools/testing/selftests/futex/include",
+ ],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_futex_headers_lib",
+ ":kselftest_headers_lib",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_futex_futex_requeue_pi_signal_restart",
+ srcs = ["tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c"],
+ out = "futex_requeue_pi_signal_restart",
+ copts = _KSELFTEST_COPTS,
+ includes = [
+ "tools/testing/selftests",
+ "tools/testing/selftests/futex/include",
+ ],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_futex_headers_lib",
+ ":kselftest_headers_lib",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_futex_futex_requeue_pi",
+ srcs = ["tools/testing/selftests/futex/functional/futex_requeue_pi.c"],
+ out = "futex_requeue_pi",
+ copts = _KSELFTEST_COPTS,
+ includes = [
+ "tools/testing/selftests",
+ "tools/testing/selftests/futex/include",
+ ],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_futex_headers_lib",
+ ":kselftest_headers_lib",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_futex_futex_requeue",
+ srcs = ["tools/testing/selftests/futex/functional/futex_requeue.c"],
+ out = "futex_requeue",
+ copts = _KSELFTEST_COPTS,
+ includes = [
+ "tools/testing/selftests",
+ "tools/testing/selftests/futex/include",
+ ],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_futex_headers_lib",
+ ":kselftest_headers_lib",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_futex_futex_wait_private_mapped_file",
+ srcs = ["tools/testing/selftests/futex/functional/futex_wait_private_mapped_file.c"],
+ out = "futex_wait_private_mapped_file",
+ copts = _KSELFTEST_COPTS,
+ includes = [
+ "tools/testing/selftests",
+ "tools/testing/selftests/futex/include",
+ ],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_futex_headers_lib",
+ ":kselftest_headers_lib",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_futex_futex_wait_timeout",
+ srcs = ["tools/testing/selftests/futex/functional/futex_wait_timeout.c"],
+ out = "futex_wait_timeout",
+ copts = _KSELFTEST_COPTS,
+ includes = [
+ "tools/testing/selftests",
+ "tools/testing/selftests/futex/include",
+ ],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_futex_headers_lib",
+ ":kselftest_headers_lib",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_futex_futex_wait_uninitialized_heap",
+ srcs = ["tools/testing/selftests/futex/functional/futex_wait_uninitialized_heap.c"],
+ out = "futex_wait_uninitialized_heap",
+ copts = _KSELFTEST_COPTS,
+ includes = [
+ "tools/testing/selftests",
+ "tools/testing/selftests/futex/include",
+ ],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_futex_headers_lib",
+ ":kselftest_headers_lib",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_futex_futex_wait_wouldblock",
+ srcs = ["tools/testing/selftests/futex/functional/futex_wait_wouldblock.c"],
+ out = "futex_wait_wouldblock",
+ copts = _KSELFTEST_COPTS,
+ includes = [
+ "tools/testing/selftests",
+ "tools/testing/selftests/futex/include",
+ ],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_futex_headers_lib",
+ ":kselftest_headers_lib",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_futex_futex_wait",
+ srcs = ["tools/testing/selftests/futex/functional/futex_wait.c"],
+ out = "futex_wait",
+ copts = _KSELFTEST_COPTS,
+ includes = [
+ "tools/testing/selftests",
+ "tools/testing/selftests/futex/include",
+ ],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_futex_headers_lib",
+ ":kselftest_headers_lib",
+ ],
+)
+
+cc_library(
+ name = "kselftest_memfd",
+ srcs = ["tools/testing/selftests/memfd/common.c"],
+ hdrs = ["tools/testing/selftests/memfd/common.h"],
+ copts = _KSELFTEST_COPTS,
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_memfd_test",
+ srcs = ["tools/testing/selftests/memfd/memfd_test.c"],
+ copts = _KSELFTEST_COPTS,
+ includes = ["tools/testing/selftests"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_memfd"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_mm_compaction_test",
+ srcs = ["tools/testing/selftests/mm/compaction_test.c"],
+ copts = _KSELFTEST_COPTS,
+ includes = ["tools/testing/selftests"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ "@libcap",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_mm_hugepage_mmap",
+ srcs = ["tools/testing/selftests/mm/hugepage-mmap.c"],
+ copts = _KSELFTEST_COPTS,
+ includes = ["tools/testing/selftests"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ "@libcap",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_mm_hugepage_shm",
+ srcs = ["tools/testing/selftests/mm/hugepage-shm.c"],
+ copts = _KSELFTEST_COPTS,
+ includes = ["tools/testing/selftests"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ "@libcap",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_mm_map_hugetlb",
+ srcs = ["tools/testing/selftests/mm/map_hugetlb.c"],
+ copts = _KSELFTEST_COPTS,
+ includes = ["tools/testing/selftests"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_mm_vm_util",
+ "@libcap",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_mm_mlock_random_test",
+ srcs = [
+ "tools/testing/selftests/mm/mlock-random-test.c",
+ "tools/testing/selftests/mm/mlock2.h",
+ ],
+ copts = _KSELFTEST_COPTS,
+ includes = [
+ "tools/testing/selftests",
+ "tools/testing/selftests/mm",
+ ],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ "@libcap",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_mm_mlock2_tests",
+ srcs = [
+ "tools/testing/selftests/mm/mlock2.h",
+ "tools/testing/selftests/mm/mlock2-tests.c",
+ ],
+ copts = _KSELFTEST_COPTS,
+ includes = [
+ "tools/testing/selftests",
+ "tools/testing/selftests/mm",
+ ],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ "@libcap",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_mm_on_fault_limit",
+ srcs = ["tools/testing/selftests/mm/on-fault-limit.c"],
+ copts = _KSELFTEST_COPTS,
+ includes = ["tools/testing/selftests"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ "@libcap",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_mm_mremap_dontunmap",
+ srcs = ["tools/testing/selftests/mm/mremap_dontunmap.c"],
+ copts = _KSELFTEST_COPTS,
+ includes = ["tools/testing/selftests"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ "@libcap",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_mm_mremap_test",
+ srcs = ["tools/testing/selftests/mm/mremap_test.c"],
+ copts = _KSELFTEST_COPTS,
+ includes = ["tools/testing/selftests"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ "@libcap",
+ ],
+)
+
+cc_library(
+ name = "kselftest_mm_vm_util",
+ srcs = ["tools/testing/selftests/mm/vm_util.c"],
+ hdrs = [
+ "include/uapi/linux/fs.h",
+ "tools/testing/selftests/mm/vm_util.h",
+ ],
+ copts = _KSELFTEST_COPTS,
+ includes = [
+ "include/uapi/",
+ "tools/testing/selftests",
+ ],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_mm_thuge_gen",
+ srcs = [
+ "tools/testing/selftests/mm/thuge-gen.c",
+ ],
+ copts = _KSELFTEST_COPTS + [
+ "-Wno-macro-redefined",
+ ],
+ includes = ["tools/testing/selftests"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_mm_vm_util",
+ "@libcap",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_mm_transhuge_stress",
+ srcs = [
+ "tools/testing/selftests/mm/transhuge-stress.c",
+ ],
+ copts = _KSELFTEST_COPTS,
+ includes = [
+ "tools/testing/selftests",
+ "tools/testing/selftests/mm/",
+ ],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_mm_vm_util",
+ "@libcap",
+ ],
+)
+
+cc_library(
+ name = "kselftest_mm_uffd_common",
+ srcs = ["tools/testing/selftests/mm/uffd-common.c"],
+ hdrs = [
+ "include/uapi/linux/userfaultfd.h",
+ "mm/gup_test.h",
+ "tools/testing/selftests/kselftest.h",
+ "tools/testing/selftests/mm/uffd-common.h",
+ ],
+ copts = _KSELFTEST_COPTS,
+ includes = [
+ "include/uapi/",
+ "tools/testing/selftests/mm/",
+ ],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_mm_vm_util",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_mm_uffd_unit_tests",
+ srcs = [
+ "tools/testing/selftests/mm/uffd-unit-tests.c",
+ ],
+ copts = _KSELFTEST_COPTS,
+ includes = [
+ "tools/testing/selftests",
+ "tools/testing/selftests/mm/",
+ ],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_mm_uffd_common",
+ ":kselftest_mm_vm_util",
+ "@libcap",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_size_test_get_size",
+ srcs = ["tools/testing/selftests/size/get_size.c"],
+ copts = _KSELFTEST_COPTS + select({
+ "//build/kernel/kleaf/platforms/config_settings:android_x86_64": ["-mstackrealign"],
+ "//conditions:default": [],
+ }),
+ includes = [
+ "tools/testing/selftests",
+ ],
+ linkopts = ["-nostartfiles"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_headers_lib"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_adjtick",
+ srcs = ["tools/testing/selftests/timers/adjtick.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_timers_common_hdrs",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_alarmtimer_suspend",
+ srcs = ["tools/testing/selftests/timers/alarmtimer-suspend.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_timers_common_hdrs",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_change_skew",
+ srcs = ["tools/testing/selftests/timers/change_skew.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_timers_inconsistency_check",
+ ":kselftest_timers_nanosleep",
+ ":kselftest_timers_tests_raw_skew",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_clocksource_switch",
+ srcs = ["tools/testing/selftests/timers/clocksource-switch.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_timers_inconsistency_check",
+ ":kselftest_timers_nanosleep",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_freq_step",
+ srcs = ["tools/testing/selftests/timers/freq-step.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_headers_lib"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_inconsistency_check",
+ srcs = ["tools/testing/selftests/timers/inconsistency-check.c"],
+ out = "inconsistency-check",
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_timers_common_hdrs",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_leap_a_day",
+ srcs = ["tools/testing/selftests/timers/leap-a-day.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_timers_common_hdrs",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_leapcrash",
+ srcs = ["tools/testing/selftests/timers/leapcrash.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_headers_lib"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_nanosleep",
+ srcs = ["tools/testing/selftests/timers/nanosleep.c"],
+ out = "nanosleep",
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_timers_common_hdrs",
+ ],
+)
+
+cc_library(
+ name = "kselftest_timers_common_hdrs",
+ hdrs = [
+ "include/vdso/time64.h",
+ ],
+ copts = _KSELFTEST_COPTS,
+ includes = ["."],
+ visibility = ["//visibility:private"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_nsleep_lat",
+ srcs = ["tools/testing/selftests/timers/nsleep-lat.c"],
+ out = "nsleep-lat",
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_timers_common_hdrs",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_posix_timers",
+ srcs = ["tools/testing/selftests/timers/posix_timers.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_timers_common_hdrs",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_tests_raw_skew",
+ srcs = ["tools/testing/selftests/timers/raw_skew.c"],
+ out = "raw_skew",
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_timers_common_hdrs",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_set_2038",
+ srcs = ["tools/testing/selftests/timers/set-2038.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_timers_common_hdrs",
+ ":kselftest_timers_inconsistency_check",
+ ":kselftest_timers_nanosleep",
+ ":kselftest_timers_nsleep_lat",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_set_tai",
+ srcs = ["tools/testing/selftests/timers/set-tai.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_headers_lib"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_set_timer_lat",
+ srcs = ["tools/testing/selftests/timers/set-timer-lat.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_timers_common_hdrs",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_set_tz",
+ srcs = ["tools/testing/selftests/timers/set-tz.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_headers_lib"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_skew_consistency",
+ srcs = ["tools/testing/selftests/timers/skew_consistency.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_timers_inconsistency_check",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_threadtest",
+ srcs = ["tools/testing/selftests/timers/threadtest.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_headers_lib"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_timers_valid_adjtimex",
+ srcs = ["tools/testing/selftests/timers/valid-adjtimex.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ":kselftest_timers_common_hdrs",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_net_socket",
+ srcs = ["tools/testing/selftests/net/socket.c"],
+ copts = _KSELFTEST_COPTS + ["-Wno-gnu-variable-sized-type-not-at-end"],
+ includes = ["tools/testing/selftests"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_headers_lib"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_net_reuseaddr_conflict",
+ srcs = ["tools/testing/selftests/net/reuseaddr_conflict.c"],
+ copts = _KSELFTEST_COPTS,
+ includes = ["tools/testing/selftests"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_headers_lib"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_net_psock_tpacket",
+ srcs = [
+ "tools/testing/selftests/net/psock_lib.h",
+ "tools/testing/selftests/net/psock_tpacket.c",
+ ],
+ copts = _KSELFTEST_COPTS + ["-Wno-gnu-variable-sized-type-not-at-end"],
+ includes = ["tools/testing/selftests"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [":kselftest_headers_lib"],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_capabilities_test_execve",
+ srcs = ["tools/testing/selftests/capabilities/test_execve.c"],
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_capabilities_validate_cap",
+ ":kselftest_headers_lib",
+ "@libcap_ng//:libcap-ng",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_capabilities_validate_cap",
+ srcs = ["tools/testing/selftests/capabilities/validate_cap.c"],
+ out = "validate_cap",
+ copts = _KSELFTEST_COPTS,
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ "@libcap_ng//:libcap-ng",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_seccomp_seccomp_bpf",
+ srcs = [
+ "tools/testing/selftests/clone3/clone3_selftests.h",
+ "tools/testing/selftests/seccomp/seccomp_bpf.c",
+ ],
+ copts = _KSELFTEST_COPTS + [
+ "-Wno-unused-function",
+ "-D__GLIBC_PREREQ(a,b)",
+ ],
+ includes = ["tools/testing/selftests"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ "@libcap",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_x86_single_step_syscall",
+ srcs = [
+ "tools/testing/selftests/x86/helpers.h",
+ "tools/testing/selftests/x86/single_step_syscall.c",
+ ],
+ abis = [
+ "x86_64",
+ "x86",
+ ],
+ copts = _KSELFTEST_COPTS,
+ includes = ["tools/testing/selftests"],
+ linkopts = ["-static"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_x86_syscall_nt",
+ srcs = [
+ "tools/testing/selftests/x86/helpers.h",
+ "tools/testing/selftests/x86/syscall_nt.c",
+ ],
+ abis = [
+ "x86_64",
+ "x86",
+ ],
+ copts = _KSELFTEST_COPTS,
+ includes = ["tools/testing/selftests"],
+ linkopts = ["-static"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_x86_ptrace_syscall",
+ srcs = [
+ "tools/testing/selftests/x86/helpers.h",
+ "tools/testing/selftests/x86/ptrace_syscall.c",
+ ],
+ abis = [
+ "x86_64",
+ "x86",
+ ],
+ copts = _KSELFTEST_COPTS + ["-fomit-frame-pointer"],
+ includes = ["tools/testing/selftests"],
+ linkopts = ["-static"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_x86_test_mremap_vdso",
+ srcs = [
+ "tools/testing/selftests/x86/helpers.h",
+ "tools/testing/selftests/x86/test_mremap_vdso.c",
+ ],
+ abis = [
+ "x86_64",
+ "x86",
+ ],
+ copts = _KSELFTEST_COPTS,
+ includes = ["tools/testing/selftests"],
+ linkopts = ["-static"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_x86_check_initial_reg_state",
+ srcs = [
+ "tools/testing/selftests/x86/check_initial_reg_state.c",
+ "tools/testing/selftests/x86/helpers.h",
+ ],
+ abis = [
+ "x86_64",
+ "x86",
+ ],
+ copts = _KSELFTEST_COPTS,
+ includes = ["tools/testing/selftests"],
+ linkopts = [
+ "-static",
+ "-Wl,-ereal_start",
+ ],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ],
+)
+
+cc_binary_with_abi(
+ name = "kselftest_x86_ldt_gdt",
+ srcs = [
+ "tools/testing/selftests/x86/helpers.h",
+ "tools/testing/selftests/x86/ldt_gdt.c",
+ ],
+ abis = [
+ "x86_64",
+ "x86",
+ ],
+ copts = _KSELFTEST_COPTS,
+ includes = ["tools/testing/selftests"],
+ linkopts = ["-static"],
+ path_prefix = _KSELFTEST_DIR,
+ target_compatible_with = ["@platforms//os:android"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":kselftest_headers_lib",
+ ],
+)
+
+pkg_files(
+ name = "kselftest_config_x86_64",
+ srcs = ["tools/testing/selftests/android/config_x86_64.xml"],
+ renames = {"tools/testing/selftests/android/config_x86_64.xml": _KSELFTEST_DIR + "/selftests.config"},
+ visibility = ["//visibility:private"],
+)
+
+pkg_files(
+ name = "vts_kselftest_config_x86_64",
+ srcs = ["tools/testing/selftests/android/vts_config_x86_64.xml"],
+ renames = {"tools/testing/selftests/android/vts_config_x86_64.xml": _KSELFTEST_DIR + "/vts_kselftests.config"},
+ visibility = ["//visibility:private"],
+)
+
+pkg_files(
+ name = "kselftest_config_arm64",
+ srcs = ["tools/testing/selftests/android/config_arm64.xml"],
+ renames = {"tools/testing/selftests/android/config_arm64.xml": _KSELFTEST_DIR + "/selftests.config"},
+ visibility = ["//visibility:private"],
+)
+
+pkg_files(
+ name = "vts_kselftest_config_arm64",
+ srcs = ["tools/testing/selftests/android/vts_config_arm64.xml"],
+ renames = {"tools/testing/selftests/android/vts_config_arm64.xml": _KSELFTEST_DIR + "/vts_kselftests.config"},
+ visibility = ["//visibility:private"],
+)
+
+pkg_filegroup(
+ name = "kselftest_tests_x86_64",
+ srcs = [
+ ":kselftest_binderfs_binderfs_test_x86_64",
+ ":kselftest_breakpoints_breakpoint_test_x86_64",
+ ":kselftest_capabilities_test_execve_x86_64",
+ ":kselftest_capabilities_validate_cap_x86_64",
+ ":kselftest_futex_futex_requeue_pi_mismatched_ops_x86_64",
+ ":kselftest_futex_futex_requeue_pi_signal_restart_x86_64",
+ ":kselftest_futex_futex_requeue_pi_x86_64",
+ ":kselftest_futex_futex_requeue_x86_64",
+ ":kselftest_futex_futex_wait_private_mapped_file_x86_64",
+ ":kselftest_futex_futex_wait_timeout_x86_64",
+ ":kselftest_futex_futex_wait_uninitialized_heap_x86_64",
+ ":kselftest_futex_futex_wait_wouldblock_x86_64",
+ ":kselftest_futex_futex_wait_x86_64",
+ ":kselftest_kcmp_kcmp_test_x86_64",
+ ":kselftest_memfd_test_x86_64",
+ ":kselftest_mm_compaction_test_x86_64",
+ ":kselftest_mm_hugepage_mmap_x86_64",
+ ":kselftest_mm_hugepage_shm_x86_64",
+ ":kselftest_mm_map_hugetlb_x86_64",
+ ":kselftest_mm_mlock2_tests_x86_64",
+ ":kselftest_mm_mlock_random_test_x86_64",
+ ":kselftest_mm_mremap_dontunmap_x86_64",
+ ":kselftest_mm_mremap_test_x86_64",
+ ":kselftest_mm_on_fault_limit_x86_64",
+ ":kselftest_mm_thuge_gen_x86_64",
+ ":kselftest_mm_transhuge_stress_x86_64",
+ ":kselftest_mm_uffd_unit_tests_x86_64",
+ ":kselftest_net_psock_tpacket_x86_64",
+ ":kselftest_net_reuseaddr_conflict_x86_64",
+ ":kselftest_net_socket_x86_64",
+ ":kselftest_ptrace_peeksiginfo_x86_64",
+ ":kselftest_rtc_rtctest_x86_64",
+ ":kselftest_seccomp_seccomp_bpf_x86_64",
+ ":kselftest_size_test_get_size_x86_64",
+ ":kselftest_timers_adjtick_x86_64",
+ ":kselftest_timers_alarmtimer_suspend_x86_64",
+ ":kselftest_timers_change_skew_x86_64",
+ ":kselftest_timers_clocksource_switch_x86_64",
+ ":kselftest_timers_freq_step_x86_64",
+ ":kselftest_timers_inconsistency_check_x86_64",
+ ":kselftest_timers_leap_a_day_x86_64",
+ ":kselftest_timers_leapcrash_x86_64",
+ ":kselftest_timers_nanosleep_x86_64",
+ ":kselftest_timers_nsleep_lat_x86_64",
+ ":kselftest_timers_posix_timers_x86_64",
+ ":kselftest_timers_set_2038_x86_64",
+ ":kselftest_timers_set_tai_x86_64",
+ ":kselftest_timers_set_timer_lat_x86_64",
+ ":kselftest_timers_set_tz_x86_64",
+ ":kselftest_timers_skew_consistency_x86_64",
+ ":kselftest_timers_tests_raw_skew_x86_64",
+ ":kselftest_timers_threadtest_x86_64",
+ ":kselftest_timers_valid_adjtimex_x86_64",
+ ":kselftest_vdso_vdso_test_abi_x86_64",
+ ":kselftest_vdso_vdso_test_getcpu_x86_64",
+ ":kselftest_vdso_vdso_test_gettimeofday_x86_64",
+ ":kselftest_x86_check_initial_reg_state_x86_64",
+ ":kselftest_x86_ldt_gdt_x86_64",
+ ":kselftest_x86_ptrace_syscall_x86_64",
+ ":kselftest_x86_single_step_syscall_x86_64",
+ ":kselftest_x86_syscall_nt_x86_64",
+ ":kselftest_x86_test_mremap_vdso_x86_64",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+pkg_filegroup(
+ name = "vts_kselftest_tests_x86_64",
+ srcs = [
+ ":kselftest_binderfs_binderfs_test_x86_64",
+ ":kselftest_breakpoints_breakpoint_test_x86_64",
+ ":kselftest_capabilities_test_execve_x86_64",
+ ":kselftest_capabilities_validate_cap_x86_64",
+ ":kselftest_futex_futex_requeue_pi_mismatched_ops_x86_64",
+ ":kselftest_futex_futex_requeue_pi_signal_restart_x86_64",
+ ":kselftest_futex_futex_requeue_pi_x86_64",
+ ":kselftest_futex_futex_requeue_x86_64",
+ ":kselftest_futex_futex_wait_private_mapped_file_x86_64",
+ ":kselftest_futex_futex_wait_timeout_x86_64",
+ ":kselftest_futex_futex_wait_uninitialized_heap_x86_64",
+ ":kselftest_futex_futex_wait_wouldblock_x86_64",
+ ":kselftest_futex_futex_wait_x86_64",
+ ":kselftest_kcmp_kcmp_test_x86_64",
+ ":kselftest_rtc_rtctest_x86_64",
+ # ":kselftest_net_tests_bpf_x86_64", # Disabled due to test failures
+ # ":kselftest_net_tests_psock_fanout_x86_64", # Disabled due to test failures
+ ":kselftest_net_tests_psock_tpacket_x86_64",
+ ":kselftest_net_tests_reuseaddr_conflict_x86_64",
+ ":kselftest_net_tests_reuseport_dualstack_x86_64",
+ ":kselftest_net_tests_socket_x86_64",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+pkg_filegroup(
+ name = "kselftest_tests_x86",
+ srcs = [
+ ":kselftest_binderfs_binderfs_test_x86",
+ ":kselftest_breakpoints_breakpoint_test_x86",
+ ":kselftest_capabilities_test_execve_x86",
+ ":kselftest_capabilities_validate_cap_x86",
+ ":kselftest_futex_futex_requeue_pi_mismatched_ops_x86",
+ ":kselftest_futex_futex_requeue_pi_signal_restart_x86",
+ ":kselftest_futex_futex_requeue_pi_x86",
+ ":kselftest_futex_futex_requeue_x86",
+ ":kselftest_futex_futex_wait_private_mapped_file_x86",
+ ":kselftest_futex_futex_wait_timeout_x86",
+ ":kselftest_futex_futex_wait_uninitialized_heap_x86",
+ ":kselftest_futex_futex_wait_wouldblock_x86",
+ ":kselftest_futex_futex_wait_x86",
+ ":kselftest_kcmp_kcmp_test_x86",
+ ":kselftest_mm_compaction_test_x86",
+ ":kselftest_mm_hugepage_mmap_x86",
+ ":kselftest_mm_hugepage_shm_x86",
+ ":kselftest_mm_map_hugetlb_x86",
+ ":kselftest_mm_mlock2_tests_x86",
+ ":kselftest_mm_mlock_random_test_x86",
+ ":kselftest_mm_mremap_dontunmap_x86",
+ ":kselftest_mm_mremap_test_x86",
+ ":kselftest_mm_on_fault_limit_x86",
+ ":kselftest_mm_thuge_gen_x86",
+ ":kselftest_mm_transhuge_stress_x86",
+ ":kselftest_mm_uffd_unit_tests_x86",
+ ":kselftest_net_psock_tpacket_x86",
+ ":kselftest_net_reuseaddr_conflict_x86",
+ ":kselftest_net_socket_x86",
+ ":kselftest_ptrace_peeksiginfo_x86",
+ ":kselftest_rtc_rtctest_x86",
+ ":kselftest_seccomp_seccomp_bpf_x86",
+ ":kselftest_size_test_get_size_x86",
+ ":kselftest_timers_adjtick_x86",
+ ":kselftest_timers_alarmtimer_suspend_x86",
+ ":kselftest_timers_change_skew_x86",
+ ":kselftest_timers_clocksource_switch_x86",
+ ":kselftest_timers_freq_step_x86",
+ ":kselftest_timers_inconsistency_check_x86",
+ ":kselftest_timers_leap_a_day_x86",
+ ":kselftest_timers_leapcrash_x86",
+ ":kselftest_timers_nanosleep_x86",
+ ":kselftest_timers_nsleep_lat_x86",
+ ":kselftest_timers_posix_timers_x86",
+ ":kselftest_timers_set_2038_x86",
+ ":kselftest_timers_set_tai_x86",
+ ":kselftest_timers_set_timer_lat_x86",
+ ":kselftest_timers_set_tz_x86",
+ ":kselftest_timers_skew_consistency_x86",
+ ":kselftest_timers_tests_raw_skew_x86",
+ ":kselftest_timers_threadtest_x86",
+ ":kselftest_timers_valid_adjtimex_x86",
+ ":kselftest_vdso_vdso_test_abi_x86",
+ ":kselftest_vdso_vdso_test_getcpu_x86",
+ ":kselftest_vdso_vdso_test_gettimeofday_x86",
+ ":kselftest_x86_check_initial_reg_state_x86",
+ ":kselftest_x86_ldt_gdt_x86",
+ ":kselftest_x86_ptrace_syscall_x86",
+ ":kselftest_x86_single_step_syscall_x86",
+ ":kselftest_x86_syscall_nt_x86",
+ ":kselftest_x86_test_mremap_vdso_x86",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+pkg_filegroup(
+ name = "vts_kselftest_tests_x86",
+ srcs = [
+ ":kselftest_binderfs_binderfs_test_x86",
+ ":kselftest_breakpoints_breakpoint_test_x86",
+ ":kselftest_capabilities_test_execve_x86",
+ ":kselftest_capabilities_validate_cap_x86",
+ ":kselftest_futex_futex_requeue_pi_mismatched_ops_x86",
+ ":kselftest_futex_futex_requeue_pi_signal_restart_x86",
+ ":kselftest_futex_futex_requeue_pi_x86",
+ ":kselftest_futex_futex_requeue_x86",
+ ":kselftest_futex_futex_wait_private_mapped_file_x86",
+ ":kselftest_futex_futex_wait_timeout_x86",
+ ":kselftest_futex_futex_wait_uninitialized_heap_x86",
+ ":kselftest_futex_futex_wait_wouldblock_x86",
+ ":kselftest_futex_futex_wait_x86",
+ ":kselftest_kcmp_kcmp_test_x86",
+ ":kselftest_rtc_rtctest_x86",
+ # ":kselftest_net_tests_bpf_x86", Disabled due to test failures
+ # ":kselftest_net_tests_psock_fanout_x86", Disabled due to test failures
+ ":kselftest_net_tests_psock_tpacket_x86",
+ ":kselftest_net_tests_reuseaddr_conflict_x86",
+ ":kselftest_net_tests_reuseport_dualstack_x86",
+ ":kselftest_net_tests_socket_x86",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+pkg_filegroup(
+ name = "kselftest_tests_arm",
+ srcs = [
+ ":kselftest_binderfs_binderfs_test_arm",
+ ":kselftest_capabilities_test_execve_arm",
+ ":kselftest_capabilities_validate_cap_arm",
+ ":kselftest_futex_futex_requeue_arm",
+ ":kselftest_futex_futex_requeue_pi_arm",
+ ":kselftest_futex_futex_requeue_pi_mismatched_ops_arm",
+ ":kselftest_futex_futex_requeue_pi_signal_restart_arm",
+ ":kselftest_futex_futex_wait_arm",
+ ":kselftest_futex_futex_wait_private_mapped_file_arm",
+ ":kselftest_futex_futex_wait_timeout_arm",
+ ":kselftest_futex_futex_wait_uninitialized_heap_arm",
+ ":kselftest_futex_futex_wait_wouldblock_arm",
+ ":kselftest_kcmp_kcmp_test_arm",
+ ":kselftest_mm_compaction_test_arm",
+ ":kselftest_mm_hugepage_mmap_arm",
+ ":kselftest_mm_hugepage_shm_arm",
+ ":kselftest_mm_map_hugetlb_arm",
+ ":kselftest_mm_mlock2_tests_arm",
+ #":kselftest_mm_mlock_random_test_arm",
+ ":kselftest_mm_mremap_dontunmap_arm",
+ ":kselftest_mm_mremap_test_arm",
+ ":kselftest_mm_on_fault_limit_arm",
+ ":kselftest_mm_thuge_gen_arm",
+ ":kselftest_mm_transhuge_stress_arm",
+ ":kselftest_mm_uffd_unit_tests_arm",
+ #":kselftest_net_psock_tpacket_arm",
+ ":kselftest_net_reuseaddr_conflict_arm",
+ ":kselftest_net_socket_arm",
+ ":kselftest_ptrace_peeksiginfo_arm",
+ ":kselftest_rtc_rtctest_arm",
+ #":kselftest_seccomp_seccomp_bpf_arm",
+ ":kselftest_size_test_get_size_arm",
+ ":kselftest_timers_adjtick_arm",
+ ":kselftest_timers_alarmtimer_suspend_arm",
+ ":kselftest_timers_change_skew_arm",
+ ":kselftest_timers_clocksource_switch_arm",
+ #":kselftest_timers_freq_step_arm",
+ ":kselftest_timers_inconsistency_check_arm",
+ ":kselftest_timers_leap_a_day_arm",
+ ":kselftest_timers_leapcrash_arm",
+ ":kselftest_timers_nanosleep_arm",
+ ":kselftest_timers_nsleep_lat_arm",
+ ":kselftest_timers_posix_timers_arm",
+ ":kselftest_timers_set_2038_arm",
+ ":kselftest_timers_set_tai_arm",
+ ":kselftest_timers_set_timer_lat_arm",
+ ":kselftest_timers_set_tz_arm",
+ ":kselftest_timers_skew_consistency_arm",
+ ":kselftest_timers_tests_raw_skew_arm",
+ ":kselftest_timers_threadtest_arm",
+ ":kselftest_timers_valid_adjtimex_arm",
+ ":kselftest_vdso_vdso_test_abi_arm",
+ ":kselftest_vdso_vdso_test_getcpu_arm",
+ ":kselftest_vdso_vdso_test_gettimeofday_arm",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+pkg_filegroup(
+ name = "vts_kselftest_tests_arm",
+ srcs = [
+ ":kselftest_binderfs_binderfs_test_arm",
+ # ":kselftest_breakpoints_breakpoint_test_arm", Disabled due to not supported for this architecture
+ ":kselftest_capabilities_test_execve_arm",
+ ":kselftest_capabilities_validate_cap_arm",
+ ":kselftest_futex_futex_requeue_arm",
+ ":kselftest_futex_futex_requeue_pi_arm",
+ ":kselftest_futex_futex_requeue_pi_mismatched_ops_arm",
+ ":kselftest_futex_futex_requeue_pi_signal_restart_arm",
+ ":kselftest_futex_futex_wait_arm",
+ ":kselftest_futex_futex_wait_private_mapped_file_arm",
+ ":kselftest_futex_futex_wait_timeout_arm",
+ ":kselftest_futex_futex_wait_uninitialized_heap_arm",
+ ":kselftest_futex_futex_wait_wouldblock_arm",
+ ":kselftest_kcmp_kcmp_test_arm",
+ ":kselftest_rtc_rtctest_arm",
+ # ":kselftest_net_tests_bpf_arm", Disabled due to test failures
+ # ":kselftest_net_tests_psock_fanout_arm", Disabled due to test failures
+ ":kselftest_net_tests_psock_tpacket_arm",
+ ":kselftest_net_tests_reuseaddr_conflict_arm",
+ ":kselftest_net_tests_reuseport_dualstack_arm",
+ ":kselftest_net_tests_socket_arm",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+pkg_filegroup(
+ name = "kselftest_tests_arm64",
+ srcs = [
+ ":kselftest_binderfs_binderfs_test_arm64",
+ ":kselftest_breakpoints_breakpoint_test_arm64",
+ ":kselftest_capabilities_test_execve_arm64",
+ ":kselftest_capabilities_validate_cap_arm64",
+ ":kselftest_futex_futex_requeue_arm64",
+ ":kselftest_futex_futex_requeue_pi_arm64",
+ ":kselftest_futex_futex_requeue_pi_mismatched_ops_arm64",
+ ":kselftest_futex_futex_requeue_pi_signal_restart_arm64",
+ ":kselftest_futex_futex_wait_arm64",
+ ":kselftest_futex_futex_wait_private_mapped_file_arm64",
+ ":kselftest_futex_futex_wait_timeout_arm64",
+ ":kselftest_futex_futex_wait_uninitialized_heap_arm64",
+ ":kselftest_futex_futex_wait_wouldblock_arm64",
+ ":kselftest_kcmp_kcmp_test_arm64",
+ ":kselftest_memfd_test_arm64",
+ ":kselftest_mm_compaction_test_arm64",
+ ":kselftest_mm_hugepage_mmap_arm64",
+ ":kselftest_mm_hugepage_shm_arm64",
+ ":kselftest_mm_map_hugetlb_arm64",
+ ":kselftest_mm_mlock2_tests_arm64",
+ ":kselftest_mm_mlock_random_test_arm64",
+ ":kselftest_mm_mremap_dontunmap_arm64",
+ ":kselftest_mm_mremap_test_arm64",
+ ":kselftest_mm_on_fault_limit_arm64",
+ ":kselftest_mm_thuge_gen_arm64",
+ ":kselftest_mm_transhuge_stress_arm64",
+ ":kselftest_mm_uffd_unit_tests_arm64",
+ ":kselftest_net_psock_tpacket_arm64",
+ ":kselftest_net_reuseaddr_conflict_arm64",
+ ":kselftest_net_socket_arm64",
+ ":kselftest_ptrace_peeksiginfo_arm64",
+ ":kselftest_rtc_rtctest_arm64",
+ ":kselftest_seccomp_seccomp_bpf_arm64",
+ ":kselftest_size_test_get_size_arm64",
+ ":kselftest_timers_adjtick_arm64",
+ ":kselftest_timers_alarmtimer_suspend_arm64",
+ ":kselftest_timers_change_skew_arm64",
+ ":kselftest_timers_clocksource_switch_arm64",
+ ":kselftest_timers_freq_step_arm64",
+ ":kselftest_timers_inconsistency_check_arm64",
+ ":kselftest_timers_leap_a_day_arm64",
+ ":kselftest_timers_leapcrash_arm64",
+ ":kselftest_timers_nanosleep_arm64",
+ ":kselftest_timers_nsleep_lat_arm64",
+ ":kselftest_timers_posix_timers_arm64",
+ ":kselftest_timers_set_2038_arm64",
+ ":kselftest_timers_set_tai_arm64",
+ ":kselftest_timers_set_timer_lat_arm64",
+ ":kselftest_timers_set_tz_arm64",
+ ":kselftest_timers_skew_consistency_arm64",
+ ":kselftest_timers_tests_raw_skew_arm64",
+ ":kselftest_timers_threadtest_arm64",
+ ":kselftest_timers_valid_adjtimex_arm64",
+ ":kselftest_vdso_vdso_test_abi_arm64",
+ ":kselftest_vdso_vdso_test_getcpu_arm64",
+ ":kselftest_vdso_vdso_test_gettimeofday_arm64",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+pkg_filegroup(
+ name = "vts_kselftest_tests_arm64",
+ srcs = [
+ ":kselftest_binderfs_binderfs_test_arm64",
+ ":kselftest_breakpoints_breakpoint_test_arm64",
+ ":kselftest_capabilities_test_execve_arm64",
+ ":kselftest_capabilities_validate_cap_arm64",
+ ":kselftest_futex_futex_requeue_arm64",
+ ":kselftest_futex_futex_requeue_pi_arm64",
+ ":kselftest_futex_futex_requeue_pi_mismatched_ops_arm64",
+ ":kselftest_futex_futex_requeue_pi_signal_restart_arm64",
+ ":kselftest_futex_futex_wait_arm64",
+ ":kselftest_futex_futex_wait_private_mapped_file_arm64",
+ ":kselftest_futex_futex_wait_timeout_arm64",
+ ":kselftest_futex_futex_wait_uninitialized_heap_arm64",
+ ":kselftest_futex_futex_wait_wouldblock_arm64",
+ ":kselftest_kcmp_kcmp_test_arm64",
+ ":kselftest_rtc_rtctest_arm64",
+ # ":kselftest_net_tests_bpf_arm64", Disabled due to test failures
+ # ":kselftest_net_tests_psock_fanout_arm64", Disabled due to test failures
+ ":kselftest_net_tests_psock_tpacket_arm64",
+ ":kselftest_net_tests_reuseaddr_conflict_arm64",
+ ":kselftest_net_tests_reuseport_dualstack_arm64",
+ ":kselftest_net_tests_socket_arm64",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+pkg_filegroup(
+ name = "kselftest_tests_x86_64_pkg_filegroup",
+ srcs = [
+ ":kselftest_config_x86_64",
+ ":kselftest_tests_x86",
+ ":kselftest_tests_x86_64",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+pkg_filegroup(
+ name = "vts_kselftest_tests_x86_64_pkg_filegroup",
+ srcs = [
+ ":vts_kselftest_config_x86_64",
+ ":vts_kselftest_tests_x86",
+ ":vts_kselftest_tests_x86_64",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+pkg_filegroup(
+ name = "kselftest_tests_arm64_pkg_filegroup",
+ srcs = [
+ ":kselftest_config_arm64",
+ ":kselftest_tests_arm",
+ ":kselftest_tests_arm64",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+pkg_filegroup(
+ name = "vts_kselftest_tests_arm64_pkg_filegroup",
+ srcs = [
+ ":vts_kselftest_config_arm64",
+ ":vts_kselftest_tests_arm",
+ ":vts_kselftest_tests_arm64",
+ ],
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "kselftest_tests_x86_64_install",
+ srcs = [":kselftest_tests_x86_64_pkg_filegroup"],
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "vts_kselftest_tests_x86_64_install",
+ srcs = [":vts_kselftest_tests_x86_64_pkg_filegroup"],
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "kselftest_tests_arm64_install",
+ srcs = [":kselftest_tests_arm64_pkg_filegroup"],
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "vts_kselftest_tests_arm64_install",
+ srcs = [":vts_kselftest_tests_arm64_pkg_filegroup"],
+ visibility = ["//visibility:private"],
+)
+
+pkg_zip(
+ name = "tests_zip_x86_64",
+ srcs = [
+ ":kselftest_tests_x86_64_pkg_filegroup",
+ ":kunit_tests_x86_64_pkg_files",
+ ],
+ out = "x86_64/tests.zip",
+ visibility = ["//visibility:public"],
+)
+
+pkg_zip(
+ name = "vts_tests_zip_x86_64",
+ srcs = [
+ ":vts_kselftest_tests_x86_64_pkg_filegroup",
+ ],
+ out = "x86_64/vts_tests.zip",
+ visibility = ["//visibility:private"],
+)
+
+pkg_zip(
+ name = "tests_zip_arm64",
+ srcs = [
+ ":kselftest_tests_arm64_pkg_filegroup",
+ ":kunit_tests_arm64_pkg_files",
+ ],
+ out = "arm64/tests.zip",
+ visibility = ["//visibility:public"],
+)
+
+pkg_zip(
+ name = "vts_tests_zip_arm64",
+ srcs = [
+ ":vts_kselftest_tests_arm64_pkg_filegroup",
+ ],
+ out = "arm64/vts_tests.zip",
+ visibility = ["//visibility:private"],
+)
+
+pkg_zip(
+ name = "tests_zip_arm64_16k",
+ srcs = [
+ ":kselftest_tests_arm64_pkg_filegroup",
+ ":kunit_tests_arm64_16k_pkg_files",
+ ],
+ out = "arm64_16k/tests.zip",
+ visibility = ["//visibility:public"],
+)
+
+pkg_files(
+ name = "tests_zip_x86_64_files",
+ srcs = [":tests_zip_x86_64"],
+ strip_prefix = strip_prefix.files_only(),
+ visibility = ["//visibility:private"],
+)
+
+pkg_files(
+ name = "vts_tests_zip_x86_64_files",
+ srcs = [":vts_tests_zip_x86_64"],
+ strip_prefix = strip_prefix.files_only(),
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "tests_zip_x86_64_dist",
+ srcs = [
+ ":vts_tests_zip_x86_64_files",
+ ],
+ destdir = "out/tests_x86_64/dist",
+)
+
+pkg_files(
+ name = "tests_zip_arm64_files",
+ srcs = [":tests_zip_arm64"],
+ strip_prefix = strip_prefix.files_only(),
+ visibility = ["//visibility:private"],
+)
+
+pkg_files(
+ name = "vts_tests_zip_arm64_files",
+ srcs = [":vts_tests_zip_arm64"],
+ strip_prefix = strip_prefix.files_only(),
+ visibility = ["//visibility:private"],
+)
+
+pkg_install(
+ name = "tests_zip_arm64_dist",
+ srcs = [
+ ":vts_tests_zip_arm64_files",
+ ],
+ destdir = "out/tests_arm64/dist",
+)
+
+_TEST_MAPPINGS = glob(["**/TEST_MAPPING"])
+
+pkg_files(
+ name = "test_mappings",
+ srcs = _TEST_MAPPINGS,
+ prefix = package_name(),
+ renames = {file: file for file in _TEST_MAPPINGS},
+ visibility = ["//visibility:private"],
+)
+
+pkg_zip(
+ name = "test_mappings_zip",
+ srcs = [
+ ":test_mappings",
+ ],
+ out = "test_mappings.zip",
+ visibility = ["//visibility:public"],
+)
+
+exports_files([
+ "Makefile",
+])
diff --git a/Documentation/ABI/stable/sysfs-module b/Documentation/ABI/stable/sysfs-module
index 41b1f16..47b8fd0 100644
--- a/Documentation/ABI/stable/sysfs-module
+++ b/Documentation/ABI/stable/sysfs-module
@@ -45,3 +45,21 @@
Description:
If the module source has MODULE_VERSION, this file will contain
the version of the source code.
+
+What: /sys/module/MODULENAME/scmversion
+Date: November 2020
+KernelVersion: 5.12
+Contact: Will McVicker <willmcvicker@google.com>
+Description: This read-only file will appear if modpost was supplied with an
+ SCM version for the module. It can be enabled with the config
+ MODULE_SCMVERSION. The SCM version is retrieved by
+ scripts/setlocalversion, which means that the presence of this
+ file depends on CONFIG_LOCALVERSION_AUTO=y. When read, the SCM
+ version that the module was compiled with is returned. The SCM
+ version is returned in the following format::
+
+ ===
+ Git: g[a-f0-9]\+(-dirty)\?
+ Mercurial: hg[a-f0-9]\+(-dirty)\?
+ Subversion: svn[0-9]\+
+ ===
diff --git a/Documentation/ABI/testing/OWNERS b/Documentation/ABI/testing/OWNERS
new file mode 100644
index 0000000..3ab5dca
--- /dev/null
+++ b/Documentation/ABI/testing/OWNERS
@@ -0,0 +1 @@
+per-file sysfs-fs-f2fs=file:/fs/f2fs/OWNERS
diff --git a/Documentation/ABI/testing/sysfs-class-android_usb b/Documentation/ABI/testing/sysfs-class-android_usb
new file mode 100644
index 0000000..3f8131e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-android_usb
@@ -0,0 +1,16 @@
+Android USB devices (eg. /sys/class/android_usb/android0/)
+
+What: /sys/class/android_usb/<android_device>/state
+Date: Feb 2024
+Contact: Neill Kapron <nkapron@google.com>
+Description:
+ The state of the USB connection. This attribute is likely
+ redundant with the /sys/class/UDC/state attribute, and should
+ be deprecated/removed when userspace can be refactored.
+ Change on the state will also generate uevent KOBJ_CHANGE on
+ the port with the new state included in the message as
+ "USB_STATE=<STATE>". Note this is not the correct usage of
+ uevents, but necessary due to the requirement to maintaine
+ userspace API compatibility.
+
+ Valid values: CONNECTED, DISCONNECTED, CONFIGURED
diff --git a/Documentation/ABI/testing/sysfs-fs-fuse b/Documentation/ABI/testing/sysfs-fs-fuse
new file mode 100644
index 0000000..2260af3
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-fs-fuse
@@ -0,0 +1,7 @@
+What: /sys/fs/fuse/features/fuse_passthrough
+Date: February 2025
+Contact: Daniel Rosenberg <drosen@google.com>
+Description:
+ Read-only file that contains the word 'supported' if fuse
+ passthrough with Android modifications is supported, does not
+ exist otherwise
diff --git a/Documentation/ABI/testing/sysfs-fs-incfs b/Documentation/ABI/testing/sysfs-fs-incfs
new file mode 100644
index 0000000..e4e05f9
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-fs-incfs
@@ -0,0 +1,70 @@
+What: /sys/fs/incremental-fs/features/corefs
+Date: 2019
+Contact: Paul Lawrence <paullawrence@google.com>
+Description: Reads 'supported'. Always present.
+
+What: /sys/fs/incremental-fs/features/v2
+Date: April 2021
+Contact: Paul Lawrence <paullawrence@google.com>
+Description: Reads 'supported'. Present if all v2 features of incfs are
+ supported.
+
+What: /sys/fs/incremental-fs/features/zstd
+Date: April 2021
+Contact: Paul Lawrence <paullawrence@google.com>
+Description: Reads 'supported'. Present if zstd compression is supported
+ for data blocks.
+
+What: /sys/fs/incremental-fs/features/bugfix_throttling
+Date: January 2023
+Contact: Paul Lawrence <paullawrence@google.com>
+Description: Reads 'supported'. Present if the throttling lock bug is fixed
+ https://android-review.git.corp.google.com/c/kernel/common/+/2381827
+
+What: /sys/fs/incremental-fs/instances/[name]
+Date: April 2021
+Contact: Paul Lawrence <paullawrence@google.com>
+Description: Folder created when incfs is mounted with the sysfs_name=[name]
+ option. If this option is used, the following values are created
+ in this folder.
+
+What: /sys/fs/incremental-fs/instances/[name]/reads_delayed_min
+Date: April 2021
+Contact: Paul Lawrence <paullawrence@google.com>
+Description: Returns a count of the number of reads that were delayed as a
+ result of the per UID read timeouts min time setting.
+
+What: /sys/fs/incremental-fs/instances/[name]/reads_delayed_min_us
+Date: April 2021
+Contact: Paul Lawrence <paullawrence@google.com>
+Description: Returns total delay time for all files since first mount as a
+ result of the per UID read timeouts min time setting.
+
+What: /sys/fs/incremental-fs/instances/[name]/reads_delayed_pending
+Date: April 2021
+Contact: Paul Lawrence <paullawrence@google.com>
+Description: Returns a count of the number of reads that were delayed as a
+ result of waiting for a pending read.
+
+What: /sys/fs/incremental-fs/instances/[name]/reads_delayed_pending_us
+Date: April 2021
+Contact: Paul Lawrence <paullawrence@google.com>
+Description: Returns total delay time for all files since first mount as a
+ result of waiting for a pending read.
+
+What: /sys/fs/incremental-fs/instances/[name]/reads_failed_hash_verification
+Date: April 2021
+Contact: Paul Lawrence <paullawrence@google.com>
+Description: Returns number of reads that failed because of hash verification
+ failures.
+
+What: /sys/fs/incremental-fs/instances/[name]/reads_failed_other
+Date: April 2021
+Contact: Paul Lawrence <paullawrence@google.com>
+Description: Returns number of reads that failed for reasons other than
+ timing out or hash failures.
+
+What: /sys/fs/incremental-fs/instances/[name]/reads_failed_timed_out
+Date: April 2021
+Contact: Paul Lawrence <paullawrence@google.com>
+Description: Returns number of reads that timed out.
diff --git a/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons b/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons
new file mode 100644
index 0000000..acb19b9
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-wakeup_reasons
@@ -0,0 +1,16 @@
+What: /sys/kernel/wakeup_reasons/last_resume_reason
+Date: February 2014
+Contact: Ruchi Kandoi <kandoiruchi@google.com>
+Description:
+ The /sys/kernel/wakeup_reasons/last_resume_reason is
+ used to report wakeup reasons after system exited suspend.
+
+What: /sys/kernel/wakeup_reasons/last_suspend_time
+Date: March 2015
+Contact: jinqian <jinqian@google.com>
+Description:
+ The /sys/kernel/wakeup_reasons/last_suspend_time is
+ used to report time spent in last suspend cycle. It contains
+ two numbers (in seconds) separated by space. First number is
+ the time spent in suspend and resume processes. Second number
+ is the time spent in sleep state.
\ No newline at end of file
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index cb850e5..ea177d68 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -2155,6 +2155,10 @@
If specified, z/VM IUCV HVC accepts connections
from listed z/VM user IDs only.
+ hvc_dcc.enable= [ARM,ARM64] Enable DCC driver at runtime. For GKI,
+ disabled at runtime by default to prevent
+ crashes in devices which do not support DCC.
+
hv_nopvspin [X86,HYPER_V,EARLY]
Disables the paravirt spinlock optimizations
which allow the hypervisor to 'idle' the guest
@@ -3070,7 +3074,7 @@
CONFIG_KUNIT to be set to be fully enabled. The
default value can be overridden via
KUNIT_DEFAULT_ENABLED.
- Default is 1 (enabled)
+ Default is 0 (disabled)
kvm.ignore_msrs=[KVM] Ignore guest accesses to unhandled MSRs.
Default is 0 (don't ignore, but inject #GP)
@@ -6279,6 +6283,21 @@
rcutorture.verbose= [KNL]
Enable additional printk() statements.
+ rcupdate.rcu_boot_end_delay= [KNL]
+ Minimum time in milliseconds from the start of boot
+ that must elapse before the boot sequence can be marked
+ complete from RCU's perspective, after which RCU's
+ behavior becomes more relaxed. The default value is also
+ configurable via CONFIG_RCU_BOOT_END_DELAY.
+ Userspace can also mark the boot as completed
+ sooner by writing the time in milliseconds, say once
+ userspace considers the system as booted, to:
+ /sys/module/rcupdate/parameters/rcu_boot_end_delay
+ Or even just writing a value of 0 to this sysfs node.
+ The sysfs node can also be used to extend the delay
+ to be larger than the default, assuming the marking
+ of boot complete has not yet occurred.
+
rcupdate.rcu_cpu_stall_ftrace_dump= [KNL]
Dump ftrace buffer after reporting RCU CPU
stall warning.
diff --git a/Documentation/dev-tools/kunit/index.rst b/Documentation/dev-tools/kunit/index.rst
index b3593ae..bb38fa8 100644
--- a/Documentation/dev-tools/kunit/index.rst
+++ b/Documentation/dev-tools/kunit/index.rst
@@ -18,6 +18,15 @@
faq
running_tips
+.. warning::
+ AOSP only supports running tests loaded with modules. Built-in
+ test execution support has been disabled. In addition, in order
+ to fully enable running module loaded tests both CONFIG_KUNIT
+ needs to be enabled and kernel command line argument
+ `kunit.enable` needs to be set to 1.
+
+ The remaining KUnit documentation has been left as-is.
+
This section details the kernel unit testing framework.
Introduction
diff --git a/Documentation/device-mapper/dm-bow.txt b/Documentation/device-mapper/dm-bow.txt
new file mode 100644
index 0000000..e3fc4d2
--- /dev/null
+++ b/Documentation/device-mapper/dm-bow.txt
@@ -0,0 +1,99 @@
+dm_bow (backup on write)
+========================
+
+dm_bow is a device mapper driver that uses the free space on a device to back up
+data that is overwritten. The changes can then be committed by a simple state
+change, or rolled back by removing the dm_bow device and running a command line
+utility over the underlying device.
+
+dm_bow has three states, set by writing ‘1’ or ‘2’ to /sys/block/dm-?/bow/state.
+It is only possible to go from state 0 (initial state) to state 1, and then from
+state 1 to state 2.
+
+State 0: dm_bow collects all trims to the device and assumes that these mark
+free space on the overlying file system that can be safely used. Typically the
+mount code would create the dm_bow device, mount the file system, call the
+FITRIM ioctl on the file system then switch to state 1. These trims are not
+propagated to the underlying device.
+
+State 1: All writes to the device cause the underlying data to be backed up to
+the free (trimmed) area as needed in such a way as they can be restored.
+However, the writes, with one exception, then happen exactly as they would
+without dm_bow, so the device is always in a good final state. The exception is
+that sector 0 is used to keep a log of the latest changes, both to indicate that
+we are in this state and to allow rollback. See below for all details. If there
+isn't enough free space, writes are failed with -ENOSPC.
+
+State 2: The transition to state 2 triggers replacing the special sector 0 with
+the normal sector 0, and the freeing of all state information. dm_bow then
+becomes a pass-through driver, allowing the device to continue to be used with
+minimal performance impact.
+
+Usage
+=====
+dm-bow takes one command line parameter, the name of the underlying device.
+
+dm-bow will typically be used in the following way. dm-bow will be loaded with a
+suitable underlying device and the resultant device will be mounted. A file
+system trim will be issued via the FITRIM ioctl, then the device will be
+switched to state 1. The file system will now be used as normal. At some point,
+the changes can either be committed by switching to state 2, or rolled back by
+unmounting the file system, removing the dm-bow device and running the command
+line utility. Note that rebooting the device will be equivalent to unmounting
+and removing, but the command line utility must still be run
+
+Details of operation in state 1
+===============================
+
+dm_bow maintains a type for all sectors. A sector can be any of:
+
+SECTOR0
+SECTOR0_CURRENT
+UNCHANGED
+FREE
+CHANGED
+BACKUP
+
+SECTOR0 is the first sector on the device, and is used to hold the log of
+changes. This is the one exception.
+
+SECTOR0_CURRENT is a sector picked from the FREE sectors, and is where reads and
+writes from the true sector zero are redirected to. Note that like any backup
+sector, if the sector is written to directly, it must be moved again.
+
+UNCHANGED means that the sector has not been changed since we entered state 1.
+Thus if it is written to or trimmed, the contents must first be backed up.
+
+FREE means that the sector was trimmed in state 0 and has not yet been written
+to or used for backup. On being written to, a FREE sector is changed to CHANGED.
+
+CHANGED means that the sector has been modified, and can be further modified
+without further backup.
+
+BACKUP means that this is a free sector being used as a backup. On being written
+to, the contents must first be backed up again.
+
+All backup operations are logged to the first sector. The log sector has the
+format:
+--------------------------------------------------------
+| Magic | Count | Sequence | Log entry | Log entry | …
+--------------------------------------------------------
+
+Magic is a magic number. Count is the number of log entries. Sequence is 0
+initially. A log entry is
+
+-----------------------------------
+| Source | Dest | Size | Checksum |
+-----------------------------------
+
+When SECTOR0 is full, the log sector is backed up and another empty log sector
+created with sequence number one higher. The first entry in any log entry with
+sequence > 0 therefore must be the log of the backing up of the previous log
+sector. Note that sequence is not strictly needed, but is a useful sanity check
+and potentially limits the time spent trying to restore a corrupted snapshot.
+
+On entering state 1, dm_bow has a list of free sectors. All other sectors are
+unchanged. Sector0_current is selected from the free sectors and the contents of
+sector 0 are copied there. The sector 0 is backed up, which triggers the first
+log entry to be written.
+
diff --git a/Documentation/filesystems/OWNERS b/Documentation/filesystems/OWNERS
new file mode 100644
index 0000000..a63dbf4
--- /dev/null
+++ b/Documentation/filesystems/OWNERS
@@ -0,0 +1 @@
+per-file f2fs**=file:/fs/f2fs/OWNERS
diff --git a/Documentation/filesystems/incfs.rst b/Documentation/filesystems/incfs.rst
new file mode 100644
index 0000000..f0fb1d0
--- /dev/null
+++ b/Documentation/filesystems/incfs.rst
@@ -0,0 +1,85 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================================================
+incfs: A stacked incremental filesystem for Linux
+=================================================
+
+/sys/fs interface
+=================
+
+Please update Documentation/ABI/testing/sysfs-fs-incfs if you update this
+section.
+
+incfs creates the following files in /sys/fs.
+
+Features
+--------
+
+/sys/fs/incremental-fs/features/corefs
+ Reads 'supported'. Always present.
+
+/sys/fs/incremental-fs/features/v2
+ Reads 'supported'. Present if all v2 features of incfs are supported. These
+ are:
+ fs-verity support
+ inotify support
+ ioclts:
+ INCFS_IOC_SET_READ_TIMEOUTS
+ INCFS_IOC_GET_READ_TIMEOUTS
+ INCFS_IOC_GET_BLOCK_COUNT
+ INCFS_IOC_CREATE_MAPPED_FILE
+ .incomplete folder
+ .blocks_written pseudo file
+ report_uid mount option
+
+/sys/fs/incremental-fs/features/zstd
+ Reads 'supported'. Present if zstd compression is supported for data blocks.
+
+/sys/fs/incremental-fs/features/bugfix_throttling
+ Reads 'supported'. Present if the throttling lock bug is fixed
+
+Optional per mount
+------------------
+
+For each incfs mount, the mount option sysfs_name=[name] creates a /sys/fs
+node called:
+
+/sys/fs/incremental-fs/instances/[name]
+
+This will contain the following files:
+
+/sys/fs/incremental-fs/instances/[name]/reads_delayed_min
+ Returns a count of the number of reads that were delayed as a result of the
+ per UID read timeouts min time setting.
+
+/sys/fs/incremental-fs/instances/[name]/reads_delayed_min_us
+ Returns total delay time for all files since first mount as a result of the
+ per UID read timeouts min time setting.
+
+/sys/fs/incremental-fs/instances/[name]/reads_delayed_pending
+ Returns a count of the number of reads that were delayed as a result of
+ waiting for a pending read.
+
+/sys/fs/incremental-fs/instances/[name]/reads_delayed_pending_us
+ Returns total delay time for all files since first mount as a result of
+ waiting for a pending read.
+
+/sys/fs/incremental-fs/instances/[name]/reads_failed_hash_verification
+ Returns number of reads that failed because of hash verification failures.
+
+/sys/fs/incremental-fs/instances/[name]/reads_failed_other
+ Returns number of reads that failed for reasons other than timing out or
+ hash failures.
+
+/sys/fs/incremental-fs/instances/[name]/reads_failed_timed_out
+ Returns number of reads that timed out.
+
+For reads_delayed_*** settings, note that a file can count for both
+reads_delayed_min and reads_delayed_pending if incfs first waits for a pending
+read then has to wait further for the min time. In that case, the time spent
+waiting is split between reads_delayed_pending_us, which is increased by the
+time spent waiting for the pending read, and reads_delayed_min_us, which is
+increased by the remainder of the time spent waiting.
+
+Reads that timed out are not added to the reads_delayed_pending or the
+reads_delayed_pending_us counters.
diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst
index af5a69f..33fa170 100644
--- a/Documentation/filesystems/overlayfs.rst
+++ b/Documentation/filesystems/overlayfs.rst
@@ -204,7 +204,7 @@
1. return EXDEV error: this error is returned by rename(2) when trying to
move a file or directory across filesystem boundaries. Hence
- applications are usually prepared to handle this error (mv(1) for example
+ applications are usually prepared to hande this error (mv(1) for example
recursively copies the directory tree). This is the default behavior.
2. If the "redirect_dir" feature is enabled, then the directory will be
diff --git a/Documentation/kbuild/kbuild.rst b/Documentation/kbuild/kbuild.rst
index 5a9013b..39ca5b03 100644
--- a/Documentation/kbuild/kbuild.rst
+++ b/Documentation/kbuild/kbuild.rst
@@ -37,6 +37,11 @@
will be used in all cases where kbuild does preprocessing including
building C files and assembler files.
+KCPPFLAGS_COMPAT
+----------------
+Additional options to pass to $(CC_COMPAT) when preprocessing C and assembler
+files.
+
KAFLAGS
-------
Additional options to the assembler (for built-in and modules).
diff --git a/Documentation/scheduler/sched-energy.rst b/Documentation/scheduler/sched-energy.rst
index 4e47aaf..557052f 100644
--- a/Documentation/scheduler/sched-energy.rst
+++ b/Documentation/scheduler/sched-energy.rst
@@ -379,7 +379,7 @@
because it is the only one providing some degree of consistency between
frequency requests and energy predictions.
-Using EAS with any other governor than schedutil is not supported.
+Using EAS with any other governor than schedutil is not recommended.
6.5 Scale-invariant utilization signals
diff --git a/Kconfig b/Kconfig
index 307e5811..d051645 100644
--- a/Kconfig
+++ b/Kconfig
@@ -32,3 +32,6 @@
source "Documentation/Kconfig"
source "io_uring/Kconfig"
+
+# ANDROID: Set KCONFIG_EXT_PREFIX to decend into an external project.
+source "$(KCONFIG_EXT_PREFIX)Kconfig.ext"
diff --git a/Kconfig.ext b/Kconfig.ext
new file mode 100644
index 0000000..48d805f
--- /dev/null
+++ b/Kconfig.ext
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+# This file is intentionally empty. It's used as a placeholder for when
+# KCONFIG_EXT_PREFIX isn't defined.
diff --git a/MAINTAINERS b/MAINTAINERS
index 61bf550..33f4bdc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -12499,6 +12499,13 @@
F: Documentation/hwmon/ina233.rst
F: drivers/hwmon/pmbus/ina233.c
+INCREMENTAL FILE SYSTEM
+M: Paul Lawrence <paullawrence@google.com>
+L: linux-unionfs@vger.kernel.org
+S: Supported
+F: fs/incfs/
+F: tools/testing/selftests/filesystems/incfs/
+
INDEX OF FURTHER KERNEL DOCUMENTATION
M: Carlos Bilbao <carlos.bilbao@kernel.org>
S: Maintained
diff --git a/Makefile b/Makefile
index e944c6e..adda796 100644
--- a/Makefile
+++ b/Makefile
@@ -151,6 +151,24 @@
export KBUILD_EXTMOD
+# ANDROID: set up mixed-build support. mixed-build allows device kernel modules
+# to be compiled against a GKI kernel. This approach still uses the headers and
+# Kbuild from device kernel, so care must be taken to ensure that those headers match.
+ifdef KBUILD_MIXED_TREE
+# Need vmlinux.symvers for modpost and System.map for depmod, check whether they exist in KBUILD_MIXED_TREE
+required_mixed_files=vmlinux.symvers System.map
+$(if $(filter-out $(words $(required_mixed_files)), \
+ $(words $(wildcard $(add-prefix $(KBUILD_MIXED_TREE)/,$(required_mixed_files))))),,\
+ $(error KBUILD_MIXED_TREE=$(KBUILD_MIXED_TREE) doesn't contain $(required_mixed_files)))
+endif
+
+mixed-build-prefix = $(if $(KBUILD_MIXED_TREE),$(KBUILD_MIXED_TREE)/)
+export KBUILD_MIXED_TREE
+# This is a hack for kleaf to set mixed-build-prefix within the execution of a make rule, e.g.
+# within __modinst_pre.
+# TODO(b/205893923): Revert this hack once it is properly handled.
+export mixed-build-prefix
+
ifeq ("$(origin W)", "command line")
KBUILD_EXTRA_WARN := $(W)
endif
@@ -797,11 +815,13 @@
libs-y := lib/
endif # KBUILD_EXTMOD
+ifndef KBUILD_MIXED_TREE
# The all: target is the default when no target is given on the
# command line.
# This allow a user to issue only 'make' to build a kernel including modules
# Defaults to vmlinux, but the arch makefile usually adds further targets
all: vmlinux
+endif
CFLAGS_GCOV := -fprofile-arcs -ftest-coverage
ifdef CONFIG_CC_IS_GCC
@@ -1037,7 +1057,13 @@
else
CC_FLAGS_LTO := -flto
endif
+
+ifeq ($(SRCARCH),x86)
+# Workaround for compiler / linker bug
CC_FLAGS_LTO += -fvisibility=hidden
+else
+CC_FLAGS_LTO += -fvisibility=default
+endif
# Limit inlining across translation units to reduce binary size
KBUILD_LDFLAGS += -mllvm -import-instr-limit=5
@@ -1271,6 +1297,7 @@
vmlinux.a: $(KBUILD_VMLINUX_OBJS) scripts/head-object-list.txt FORCE
$(call if_changed,ar_vmlinux.a)
+ifndef KBUILD_MIXED_TREE
PHONY += vmlinux_o
vmlinux_o: vmlinux.a $(KBUILD_VMLINUX_LIBS)
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.vmlinux_o
@@ -1293,6 +1320,7 @@
vmlinux: export LDFLAGS_vmlinux = $(_LDFLAGS_vmlinux)
vmlinux: vmlinux.o $(KBUILD_LDS) modpost
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.vmlinux
+endif
# The actual objects are generated when descending,
# make sure no implicit rule kicks in
@@ -1535,7 +1563,9 @@
# Devicetree files
ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/boot/dts/),)
-dtstree := arch/$(SRCARCH)/boot/dts
+# ANDROID: allow this to be overridden by the build environment. This allows
+# one to compile a device tree that is located out-of-tree.
+dtstree ?= arch/$(SRCARCH)/boot/dts
endif
dtbindingtree := Documentation/devicetree/bindings
@@ -1996,6 +2026,29 @@
@false
endif
+# ---------------------------------------------------------------------------
+# Kernel headers from External Modules
+
+#Default location for installed headers
+export INSTALL_HDR_PATH = $(objtree)/usr
+
+quiet_cmd_headers_install = INSTALL $(INSTALL_HDR_PATH)/include
+ cmd_headers_install = \
+ mkdir -p $(INSTALL_HDR_PATH); \
+ rsync -mrl --include='*/' --include='*\.h' --exclude='*' \
+ usr/include $(INSTALL_HDR_PATH);
+
+PHONY += headers_install
+headers_install: headers
+ $(call cmd,headers_install)
+
+hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj
+
+PHONY += headers
+headers:
+ $(Q)$(MAKE) $(hdr-inst)=include/uapi
+ $(Q)$(MAKE) $(hdr-inst)=arch/$(SRCARCH)/include/uapi
+
endif # KBUILD_EXTMOD
# ---------------------------------------------------------------------------
@@ -2046,7 +2099,7 @@
endif # CONFIG_MODULES
PHONY += modpost
-modpost: $(if $(single-build),, $(if $(KBUILD_BUILTIN), vmlinux.o)) \
+modpost: $(if $(single-build),, $(if $(KBUILD_MIXED_TREE), vmlinux.symvers, $(if $(KBUILD_BUILTIN), vmlinux.o))) \
$(if $(KBUILD_MODULES), modules_check)
$(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
@@ -2098,7 +2151,7 @@
# Error messages still appears in the original language
PHONY += $(build-dir)
$(build-dir): prepare
- $(Q)$(MAKE) $(build)=$@ need-builtin=1 need-modorder=1 $(single-goals)
+ $(Q)$(MAKE) $(build)=$@ $(if $(KBUILD_MIXED_TREE),,need-builtin=1) need-modorder=1 $(single-goals)
clean-dirs := $(addprefix _clean_, $(clean-dirs))
PHONY += $(clean-dirs) clean
@@ -2107,7 +2160,8 @@
clean: $(clean-dirs)
$(call cmd,rmfiles)
- @find . $(RCS_FIND_IGNORE) \
+ @find . $(if $(filter-out arch/$(SRCARCH)/boot/dts, $(dtstree)), $(dtstree)) \
+ $(RCS_FIND_IGNORE) \
\( -name '*.[aios]' -o -name '*.rsi' -o -name '*.ko' -o -name '.*.cmd' \
-o -name '*.ko.*' \
-o -name '*.dtb' -o -name '*.dtbo' \
@@ -2162,7 +2216,7 @@
cmd_gen_compile_commands = $(PYTHON3) $< -a $(AR) -o $@ $(filter-out $<, $(real-prereqs))
compile_commands.json: $(srctree)/scripts/clang-tools/gen_compile_commands.py \
- $(if $(KBUILD_EXTMOD),, vmlinux.a $(KBUILD_VMLINUX_LIBS)) \
+ $(if $(KBUILD_EXTMOD)$(KBUILD_MIXED_TREE),, vmlinux.a $(KBUILD_VMLINUX_LIBS)) \
$(if $(CONFIG_MODULES), modules.order) FORCE
$(call if_changed,gen_compile_commands)
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..d73e98e
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1,20 @@
+# The full list of approvers is defined in
+# https://android.googlesource.com/kernel/common/+/refs/meta/config/OWNERS
+
+# The following OWNERS are defined at the top level to improve the OWNERS
+# suggestions through any user interface. Consider those people the ones that
+# can help with finding the best person to review.
+adelva@google.com
+gprocida@google.com
+gregkh@google.com
+joneslee@google.com
+maennich@google.com
+surenb@google.com
+tkjos@google.com
+willdeacon@google.com
+
+# Test mapping changes can be made by anyone
+per-file */TEST_MAPPING = *
+
+# Test config xml can be made by anyone
+per-file */*.xml = *
diff --git a/OWNERS_DrNo b/OWNERS_DrNo
new file mode 100644
index 0000000..ff82a86
--- /dev/null
+++ b/OWNERS_DrNo
@@ -0,0 +1,32 @@
+# Authoritative list of Dr. No reviewers to approve changes on GKI release
+# branches, such as android12-5.10.
+#
+# This file has no effect in this branch, but is referred to from release
+# branches. So, please do not move or rename.
+#
+# See the GKI release documentation (go/gki-dr-no) for further details.
+
+# Main reviewers
+adelva@google.com
+cmllamas@google.com
+gprocida@google.com
+isaacmanjarres@google.com
+joneslee@google.com
+jstultz@google.com
+maennich@google.com
+surenb@google.com
+tkjos@google.com
+willdeacon@google.com
+willmcvicker@google.com
+
+# GKI Release Team
+howardsoc@google.com #{LAST_RESORT_SUGGESTION}
+szuweilin@google.com #{LAST_RESORT_SUGGESTION}
+
+# Backup
+kiyoungkim@google.com #{LAST_RESORT_SUGGESTION}
+sspatil@google.com #{LAST_RESORT_SUGGESTION}
+
+# Give DrNo Exceptions to TEST_MAPPING or test xml configs
+per-file */TEST_MAPPING = *
+per-file */*.xml = *
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..91cb840
--- /dev/null
+++ b/README.md
@@ -0,0 +1,183 @@
+# How do I submit patches to Android Common Kernels
+
+1. BEST: Make all of your changes to upstream Linux. If appropriate, backport to the stable releases.
+ These patches will be merged automatically in the corresponding common kernels. If the patch is already
+ in upstream Linux, post a backport of the patch that conforms to the patch requirements below.
+ - Do not send patches upstream that contain only symbol exports. To be considered for upstream Linux,
+additions of `EXPORT_SYMBOL_GPL()` require an in-tree modular driver that uses the symbol -- so include
+the new driver or changes to an existing driver in the same patchset as the export.
+ - When sending patches upstream, the commit message must contain a clear case for why the patch
+is needed and beneficial to the community. Enabling out-of-tree drivers or functionality is not
+a persuasive case.
+
+2. LESS GOOD: Develop your patches out-of-tree (from an upstream Linux point-of-view). Unless these are
+ fixing an Android-specific bug, these are very unlikely to be accepted unless they have been
+ coordinated with kernel-team@android.com. If you want to proceed, post a patch that conforms to the
+ patch requirements below.
+
+# Common Kernel patch requirements
+
+- All patches must conform to the Linux kernel coding standards and pass `scripts/checkpatch.pl`
+- Patches shall not break gki_defconfig or allmodconfig builds for arm, arm64, x86, x86_64 architectures
+(see https://source.android.com/setup/build/building-kernels)
+- If the patch is not merged from an upstream branch, the subject must be tagged with the type of patch:
+`UPSTREAM:`, `BACKPORT:`, `FROMGIT:`, `FROMLIST:`, or `ANDROID:`.
+- All patches must have a `Change-Id:` tag (see https://gerrit-review.googlesource.com/Documentation/user-changeid.html)
+- If an Android bug has been assigned, there must be a `Bug:` tag.
+- All patches must have a `Signed-off-by:` tag by the author and the submitter
+
+Additional requirements are listed below based on patch type
+
+## Requirements for backports from mainline Linux: `UPSTREAM:`, `BACKPORT:`
+
+- If the patch is a cherry-pick from Linux mainline with no changes at all
+ - tag the patch subject with `UPSTREAM:`.
+ - add upstream commit information with a `(cherry picked from commit ...)` line
+ - if applicable, prefer to cherry-pick the commit from the corresponding LTS branch.
+ - append new signature tags (e.g. `Bug:`, `Change-Id:`, etc.) at the end to keep the
+ chronological order.
+ - Example:
+ - if the upstream commit message is
+```
+ important patch from upstream
+
+ This is the detailed description of the important patch
+
+ Signed-off-by: Fred Jones <fred.jones@foo.org>
+```
+>- then Joe Smith would upload the patch for the common kernel as
+```
+ UPSTREAM: important patch from upstream
+
+ This is the detailed description of the important patch
+
+ Signed-off-by: Fred Jones <fred.jones@foo.org>
+
+ Bug: 135791357
+ Change-Id: I4caaaa566ea080fa148c5e768bb1a0b6f7201c01
+ (cherry picked from commit c31e73121f4c1ec41143423ac6ce3ce6dafdcec1)
+ Signed-off-by: Joe Smith <joe.smith@foo.org>
+```
+
+- If the patch requires any changes from the upstream version, tag the patch with `BACKPORT:`
+instead of `UPSTREAM:`.
+ - use the same tags as `UPSTREAM:`
+ - add comments about the changes under the `(cherry picked from commit ...)` line
+ - Example:
+```
+ BACKPORT: important patch from upstream
+
+ This is the detailed description of the important patch
+
+ Signed-off-by: Fred Jones <fred.jones@foo.org>
+
+ Bug: 135791357
+ Change-Id: I4caaaa566ea080fa148c5e768bb1a0b6f7201c01
+ (cherry picked from commit c31e73121f4c1ec41143423ac6ce3ce6dafdcec1)
+ [joe: Resolved minor conflict in drivers/foo/bar.c ]
+ Signed-off-by: Joe Smith <joe.smith@foo.org>
+```
+
+## Requirements for other backports: `FROMGIT:`, `FROMLIST:`,
+
+- If the patch has been merged into an upstream maintainer tree, but has not yet
+been merged into Linux mainline
+ - tag the patch subject with `FROMGIT:`
+ - add info on where the patch came from as `(cherry picked from commit <sha1> <repo> <branch>)`.
+This must be a branch on a tree which is normally merged into Linus's tree and is not rebased. For
+example, don't use `linux-next` which is rebased and never directly merged into Linus's tree, but
+you *can* use SHAs from `net` *or* `net-next`, which are merged into Linus's tree at various points
+in the release.
+ - if changes were required, use `BACKPORT: FROMGIT:`
+ - Example:
+ - if the commit message in the maintainer tree is
+```
+ important patch from upstream
+
+ This is the detailed description of the important patch
+
+ Signed-off-by: Fred Jones <fred.jones@foo.org>
+```
+>- then Joe Smith would upload the patch for the common kernel as
+```
+ FROMGIT: important patch from upstream
+
+ This is the detailed description of the important patch
+
+ Signed-off-by: Fred Jones <fred.jones@foo.org>
+
+ Bug: 135791357
+ (cherry picked from commit 878a2fd9de10b03d11d2f622250285c7e63deace
+ https://git.kernel.org/pub/scm/linux/kernel/git/foo/bar.git test-branch)
+ Change-Id: I4caaaa566ea080fa148c5e768bb1a0b6f7201c01
+ Signed-off-by: Joe Smith <joe.smith@foo.org>
+```
+
+
+- If the patch has been submitted to LKML, but not accepted into any maintainer tree
+ - tag the patch subject with `FROMLIST:`
+ - add a `Link:` tag with a link to the submittal on lore.kernel.org
+ - add a `Bug:` tag with the Android bug (required for patches not accepted into
+a maintainer tree)
+ - if changes were required, use `BACKPORT: FROMLIST:`
+ - Example:
+```
+ FROMLIST: important patch from upstream
+
+ This is the detailed description of the important patch
+
+ Signed-off-by: Fred Jones <fred.jones@foo.org>
+
+ Bug: 135791357
+ Link: https://lore.kernel.org/lkml/20190619171517.GA17557@someone.com/
+ Change-Id: I4caaaa566ea080fa148c5e768bb1a0b6f7201c01
+ Signed-off-by: Joe Smith <joe.smith@foo.org>
+```
+
+- If a patch has been submitted to the community, but rejected, do NOT use the
+ `FROMLIST:` tag to try to hide this fact. Use the `ANDROID:` tag as
+ described below as this must be considered as an Android-specific submission,
+ not an upstream submission as the community will not accept these changes
+ as-is.
+
+## Requirements for Android-specific patches: `ANDROID:`
+
+- If the patch is fixing a bug to Android-specific code
+ - tag the patch subject with `ANDROID:`
+ - add a `Fixes:` tag that cites the patch with the bug
+ - Example:
+```
+ ANDROID: fix android-specific bug in foobar.c
+
+ This is the detailed description of the important fix
+
+ Fixes: 1234abcd2468 ("foobar: add cool feature")
+ Change-Id: I4caaaa566ea080fa148c5e768bb1a0b6f7201c01
+ Signed-off-by: Joe Smith <joe.smith@foo.org>
+```
+
+- If the patch is a new feature
+ - tag the patch subject with `ANDROID:`
+ - add a `Bug:` tag with the Android bug (required for android-specific features)
+
+## Requirements for revert patches:
+
+- Add a reason for the revert
+- Do not delete or modify the revert information that is generated when using
+`git revert`
+- If modifications have been made after creating the revert, include a list of
+these in the commit message
+- Example:
+```
+ Revert "ANDROID: fix android-specific bug in foobar.c"
+
+ This reverts commit a57a7913f53e34c8a8d905444b126b3316146e69.
+
+ Reason for revert: Breaks a lot of internal tests
+
+ Additional modifications: Resolved merge conflicts
+
+ Bug: 135791357
+ Change-Id: I4caaaa566ea080fa148c5e768bb1a0b6f7201c01
+ Signed-off-by: Joe Smith <joe.smith@foo.org>
+```
diff --git a/arch/arm/OWNERS b/arch/arm/OWNERS
new file mode 100644
index 0000000..54f66d6
--- /dev/null
+++ b/arch/arm/OWNERS
@@ -0,0 +1 @@
+include ../arm64/OWNERS
diff --git a/arch/arm/configs/allmodconfig.fragment b/arch/arm/configs/allmodconfig.fragment
new file mode 100644
index 0000000..a3592d0
--- /dev/null
+++ b/arch/arm/configs/allmodconfig.fragment
@@ -0,0 +1,13 @@
+CONFIG_UNWINDER_FRAME_POINTER=y
+# CONFIG_AFS_FS is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_BPFILTER is not set
+# CONFIG_BUILTIN_MODULE_RANGES is not set
+# CONFIG_RANDSTRUCT is not set
+# CONFIG_RANDSTRUCT_FULL is not set
+CONFIG_RANDSTRUCT_NONE=y
+# CONFIG_SAMPLES is not set
+# CONFIG_WERROR is not set
+# CONFIG_MODULE_COMPRESS is not set
+CONFIG_MODULE_SIG_SHA256=y
+# CONFIG_UAPI_HEADER_TEST is not set
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 4e8e89a..5f3e135 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -50,6 +50,10 @@
#include <trace/events/ipi.h>
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_raise);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_entry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_exit);
+
/*
* as from 2.5, kernels no longer have an init_tasks structure
* so we need some other way of telling a new secondary core
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 38dba5f..160c279 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -25,7 +25,7 @@
select ARCH_HAS_CURRENT_STACK_POINTER
select ARCH_HAS_DEBUG_VIRTUAL
select ARCH_HAS_DEBUG_VM_PGTABLE
- select ARCH_HAS_DMA_OPS if XEN
+ select ARCH_HAS_DMA_OPS if (XEN || GKI_HACKS_TO_FIX)
select ARCH_HAS_DMA_PREP_COHERENT
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
select ARCH_HAS_FAST_MULTIPLIER
@@ -252,6 +252,7 @@
select HAVE_RSEQ
select HAVE_RUST if RUSTC_SUPPORTS_ARM64
select HAVE_STACKPROTECTOR
+ select HAVE_STATIC_CALL if CFI
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES
select HAVE_KRETPROBES
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 54eb1d7..e41a932 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -325,7 +325,6 @@
select GPIOLIB
select PINCTRL
select HAVE_PWRCTRL if PCI
- select HAVE_SHARED_GPIOS
help
This enables support for the ARMv8 based Qualcomm chipsets.
diff --git a/arch/arm64/OWNERS b/arch/arm64/OWNERS
new file mode 100644
index 0000000..2aaef67
--- /dev/null
+++ b/arch/arm64/OWNERS
@@ -0,0 +1,4 @@
+per-file crypto/**=file:/crypto/OWNERS
+per-file {include,kernel,kvm,lib}/**=willdeacon@google.com
+per-file mm/**=file:/mm/OWNERS
+per-file net/**=file:/net/OWNERS
diff --git a/arch/arm64/configs/allmodconfig.fragment b/arch/arm64/configs/allmodconfig.fragment
new file mode 100644
index 0000000..3c7beea
--- /dev/null
+++ b/arch/arm64/configs/allmodconfig.fragment
@@ -0,0 +1,12 @@
+# CONFIG_AFS_FS is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_BPFILTER is not set
+# CONFIG_BUILTIN_MODULE_RANGES is not set
+# CONFIG_RANDSTRUCT is not set
+# CONFIG_RANDSTRUCT_FULL is not set
+CONFIG_RANDSTRUCT_NONE=y
+# CONFIG_SAMPLES is not set
+# CONFIG_WERROR is not set
+# CONFIG_MODULE_COMPRESS is not set
+CONFIG_MODULE_SIG_SHA256=y
+# CONFIG_UAPI_HEADER_TEST is not set
diff --git a/arch/arm64/configs/amlogic_gki.fragment b/arch/arm64/configs/amlogic_gki.fragment
new file mode 100644
index 0000000..4aca7f4
--- /dev/null
+++ b/arch/arm64/configs/amlogic_gki.fragment
@@ -0,0 +1,135 @@
+#
+# Generic drivers/frameworks
+#
+CONFIG_COMMON_CLK_PWM=m
+CONFIG_REGULATOR_PWM=m
+CONFIG_PWRSEQ_EMMC=m
+CONFIG_PWRSEQ_SIMPLE=m
+CONFIG_USB_DWC2=m
+CONFIG_LEDS_GPIO=m
+
+#
+# Networking
+#
+CONFIG_REALTEK_PHY=m
+CONFIG_STMMAC_ETH=m
+CONFIG_STMMAC_PLATFORM=m
+
+#
+# WLAN
+#
+CONFIG_WLAN_VENDOR_BROADCOM=y
+
+#
+# Amlogic
+#
+CONFIG_ARCH_MESON=y
+CONFIG_SERIAL_MESON=m
+CONFIG_SERIAL_MESON_CONSOLE=y
+
+#
+# Amlogic drivers as modules
+#
+
+# core
+CONFIG_MESON_SM=m
+CONFIG_RESET_MESON=m
+CONFIG_MESON_IRQ_GPIO=m
+
+# clocks
+CONFIG_COMMON_CLK_MESON_REGMAP=y
+CONFIG_COMMON_CLK_MESON_DUALDIV=y
+CONFIG_COMMON_CLK_MESON_MPLL=y
+CONFIG_COMMON_CLK_MESON_PHASE=m
+CONFIG_COMMON_CLK_MESON_PLL=y
+CONFIG_COMMON_CLK_MESON_SCLK_DIV=m
+CONFIG_COMMON_CLK_MESON_VID_PLL_DIV=y
+CONFIG_COMMON_CLK_MESON_AO_CLKC=m
+CONFIG_COMMON_CLK_MESON_EE_CLKC=m
+CONFIG_COMMON_CLK_MESON_CPU_DYNDIV=m
+CONFIG_COMMON_CLK_GXBB=m
+CONFIG_COMMON_CLK_AXG=m
+CONFIG_COMMON_CLK_G12A=m
+
+# PHY
+CONFIG_PHY_MESON8B_USB2=m
+CONFIG_PHY_MESON_GXL_USB2=m
+CONFIG_PHY_MESON_G12A_USB2=m
+CONFIG_PHY_MESON_G12A_USB3_PCIE=m
+CONFIG_PHY_MESON_AXG_PCIE=m
+CONFIG_PHY_MESON_AXG_MIPI_PCIE_ANALOG=m
+
+# peripherals
+CONFIG_I2C_MESON=m
+CONFIG_MMC_MESON_GX=m
+CONFIG_HW_RANDOM_MESON=m
+CONFIG_USB_DWC3_MESON_G12A=m
+CONFIG_MESON_SARADC=m
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
+CONFIG_PCI_MESON=m
+CONFIG_DWMAC_MESON=m
+CONFIG_MDIO_BUS_MUX_MESON_G12A=m
+CONFIG_MESON_GXL_PHY=m
+CONFIG_PINCTRL_MESON=m
+CONFIG_PINCTRL_MESON_GXBB=m
+CONFIG_PINCTRL_MESON_GXL=m
+CONFIG_PINCTRL_MESON_AXG=m
+CONFIG_PINCTRL_MESON_AXG_PMX=m
+CONFIG_PINCTRL_MESON_G12A=m
+CONFIG_MESON_GXBB_WATCHDOG=m
+CONFIG_MESON_WATCHDOG=m
+CONFIG_PWM_MESON=m
+CONFIG_IR_MESON=m
+CONFIG_MFD_KHADAS_MCU=m
+CONFIG_KHADAS_MCU_FAN_THERMAL=m
+CONFIG_AMLOGIC_THERMAL=m
+
+# sound
+CONFIG_SND_MESON_AXG_SOUND_CARD=m
+CONFIG_SND_MESON_GX_SOUND_CARD=m
+CONFIG_SND_MESON_G12A_TOHDMITX=m
+
+# display / video
+CONFIG_DRM_MESON=m
+CONFIG_DRM_MESON_DW_HDMI=m
+CONFIG_DRM_DW_HDMI=m
+CONFIG_DRM_DW_HDMI_AHB_AUDIO=m
+CONFIG_DRM_DW_HDMI_I2S_AUDIO=m
+CONFIG_DRM_DW_HDMI_CEC=m
+CONFIG_CEC_MESON_AO=m
+CONFIG_CEC_MESON_G12A_AO=m
+CONFIG_VIDEO_MESON_GE2D=m
+
+# SoC drivers
+CONFIG_MESON_CANVAS=m
+CONFIG_MESON_CLK_MEASURE=m
+CONFIG_MESON_EE_PM_DOMAINS=m
+CONFIG_MESON_SECURE_PM_DOMAINS=m
+
+#
+# Amlogic drivers disable
+#
+
+# 32-bit SoC drivers
+CONFIG_MESON6_TIMER=n
+CONFIG_MESON_MX_SOCINFO=n
+
+# only needed by DRM on S805X
+CONFIG_MESON_GX_SOCINFO=n
+
+#
+# Debug / Testing
+#
+
+# devtmpfs needed for buildroot/udev module loading, serial console
+#CONFIG_DEVTMPFS=y
+#CONFIG_DEVTMPFS_MOUNT=y
+
+# debug/testing with FB console
+#CONFIG_DRM_KMS_FB_HELPER=y
+#CONFIG_DRM_FBDEV_EMULATION=y
+#CONFIG_FB=y
+#CONFIG_VT=y
+#CONFIG_FRAMEBUFFER_CONSOLE=y
+#CONFIG_LOGO=y
diff --git a/arch/arm64/configs/crashdump_defconfig b/arch/arm64/configs/crashdump_defconfig
new file mode 100644
index 0000000..04238b05
--- /dev/null
+++ b/arch/arm64/configs/crashdump_defconfig
@@ -0,0 +1,78 @@
+# CONFIG_WERROR is not set
+# CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_PREEMPT=y
+# CONFIG_CPU_ISOLATION is not set
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=10
+# CONFIG_UTS_NS is not set
+# CONFIG_TIME_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZ4 is not set
+# CONFIG_RD_ZSTD is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KEXEC=y
+CONFIG_ARM64_VA_BITS_48=y
+CONFIG_NR_CPUS=2
+# CONFIG_RODATA_FULL_DEFAULT_ENABLED is not set
+# CONFIG_ARM64_TAGGED_ADDR_ABI is not set
+# CONFIG_ARM64_SVE is not set
+# CONFIG_EFI is not set
+# CONFIG_SECCOMP is not set
+# CONFIG_STACKPROTECTOR is not set
+# CONFIG_MQ_IOSCHED_DEADLINE is not set
+# CONFIG_MQ_IOSCHED_KYBER is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_BINFMT_SCRIPT is not set
+# CONFIG_SWAP is not set
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+# CONFIG_SLUB_CPU_PARTIAL is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_PCI=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCI_ENDPOINT=y
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+CONFIG_ARM_SCMI_PROTOCOL=y
+# CONFIG_ARM_SMCCC_SOC_ID is not set
+# CONFIG_BLK_DEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_HWMON is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_VIRTIO_PCI=y
+# CONFIG_VIRTIO_PCI_LEGACY is not set
+# CONFIG_VHOST_MENU is not set
+# CONFIG_ARM_ARCH_TIMER_EVTSTREAM is not set
+# CONFIG_FSL_ERRATUM_A008585 is not set
+# CONFIG_HISILICON_ERRATUM_161010101 is not set
+# CONFIG_ARM64_ERRATUM_858921 is not set
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_XZ_DEC=y
+CONFIG_DMA_RESTRICTED_POOL=y
+# CONFIG_SYMBOLIC_ERRNAME is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/arm64/configs/db845c_gki.fragment b/arch/arm64/configs/db845c_gki.fragment
new file mode 100644
index 0000000..092da33
--- /dev/null
+++ b/arch/arm64/configs/db845c_gki.fragment
@@ -0,0 +1,367 @@
+# CONFIG_MODULE_SIG_ALL is not set
+CONFIG_QRTR=m
+CONFIG_QRTR_TUN=m
+CONFIG_SCSI_UFS_QCOM=m
+CONFIG_INPUT_PM8941_PWRKEY=m
+CONFIG_SERIAL_MSM=m
+CONFIG_I2C_QCOM_GENI=m
+CONFIG_I2C_QUP=m
+CONFIG_PINCTRL_MSM=m
+CONFIG_PINCTRL_QCOM_SPMI_PMIC=m
+CONFIG_PINCTRL_SDM845=m
+CONFIG_POWER_RESET_QCOM_PON=m
+CONFIG_SYSCON_REBOOT_MODE=m
+CONFIG_QCOM_TSENS=m
+CONFIG_QCOM_WDT=m
+CONFIG_PM8916_WATCHDOG=m
+CONFIG_MFD_SPMI_PMIC=m
+CONFIG_SPMI_MSM_PMIC_ARB=m
+CONFIG_REGULATOR_QCOM_RPMH=m
+CONFIG_REGULATOR_QCOM_SPMI=m
+CONFIG_DRM_MSM=m
+# CONFIG_DRM_MSM_DSI_28NM_PHY is not set
+# CONFIG_DRM_MSM_DSI_20NM_PHY is not set
+# CONFIG_DRM_MSM_DSI_28NM_8960_PHY is not set
+CONFIG_DRM_LONTIUM_LT9611=m
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_OHCI_HCD_PLATFORM=m
+# CONFIG_USB_DWC3_HAPS is not set
+# CONFIG_USB_DWC3_OF_SIMPLE is not set
+CONFIG_USB_GADGET_VBUS_DRAW=500
+# CONFIG_USB_DUMMY_HCD is not set
+CONFIG_USB_ROLE_SWITCH=y
+CONFIG_USB_ULPI_BUS=m
+CONFIG_MMC_SDHCI_MSM=m
+CONFIG_RTC_DRV_PM8XXX=m
+CONFIG_COMMON_CLK_QCOM=m
+CONFIG_SDM_GPUCC_845=m
+CONFIG_QCOM_CLK_RPMH=m
+CONFIG_SDM_DISPCC_845=m
+CONFIG_HWSPINLOCK_QCOM=m
+CONFIG_QCOM_LLCC=m
+CONFIG_QCOM_RMTFS_MEM=m
+CONFIG_QCOM_SMEM=m
+CONFIG_QCOM_SMSM=m
+CONFIG_EXTCON_USB_GPIO=m
+CONFIG_RESET_QCOM_AOSS=m
+CONFIG_RESET_QCOM_PDC=m
+CONFIG_PHY_QCOM_QMP=m
+CONFIG_PHY_QCOM_QUSB2=m
+CONFIG_PHY_QCOM_USB_HS=m
+CONFIG_NVMEM_QCOM_QFPROM=m
+CONFIG_INTERCONNECT_QCOM=y
+CONFIG_INTERCONNECT_QCOM_OSM_L3=m
+CONFIG_INTERCONNECT_QCOM_SDM845=m
+CONFIG_QCOM_RPMH=m
+CONFIG_QCOM_RPMHPD=m
+CONFIG_WLAN_VENDOR_ATH=y
+CONFIG_ATH10K_AHB=y
+CONFIG_ATH10K=m
+CONFIG_ATH10K_PCI=m
+CONFIG_ATH10K_SNOC=m
+CONFIG_QRTR_SMD=m
+CONFIG_QCOM_FASTRPC=m
+CONFIG_QCOM_APCS_IPC=m
+CONFIG_QCOM_Q6V5_COMMON=m
+CONFIG_QCOM_RPROC_COMMON=m
+CONFIG_QCOM_Q6V5_ADSP=m
+CONFIG_QCOM_Q6V5_MSS=m
+CONFIG_QCOM_Q6V5_PAS=m
+CONFIG_QCOM_Q6V5_WCSS=m
+CONFIG_QCOM_SYSMON=m
+CONFIG_RPMSG_QCOM_GLINK_SMEM=m
+CONFIG_RPMSG_QCOM_SMD=m
+CONFIG_QCOM_AOSS_QMP=m
+CONFIG_QCOM_SMP2P=m
+CONFIG_QCOM_SOCINFO=m
+CONFIG_QCOM_APR=m
+CONFIG_RPMSG_QCOM_GLINK_RPM=m
+CONFIG_QCOM_PDC=m
+CONFIG_QCOM_SCM=m
+CONFIG_ARM_SMMU=m
+CONFIG_ARM_QCOM_CPUFREQ_HW=m
+# XXX Audio bits start here
+CONFIG_I2C_CHARDEV=m
+CONFIG_I2C_MUX=m
+CONFIG_I2C_MUX_PCA954x=m
+CONFIG_I2C_DESIGNWARE_CORE=m
+CONFIG_I2C_DESIGNWARE_PLATFORM=m
+CONFIG_I2C_RK3X=m
+CONFIG_SPI_PL022=m
+CONFIG_SPI_QCOM_QSPI=m
+CONFIG_SPI_QUP=m
+CONFIG_SPI_QCOM_GENI=m
+CONFIG_GPIO_WCD934X=m
+CONFIG_MFD_WCD934X=m
+CONFIG_REGULATOR_GPIO=m
+CONFIG_SND_SOC_QCOM=m
+CONFIG_SND_SOC_QCOM_COMMON=m
+CONFIG_SND_SOC_QDSP6_COMMON=m
+CONFIG_SND_SOC_QDSP6_CORE=m
+CONFIG_SND_SOC_QDSP6_AFE=m
+CONFIG_SND_SOC_QDSP6_AFE_DAI=m
+CONFIG_SND_SOC_QDSP6_ADM=m
+CONFIG_SND_SOC_QDSP6_ROUTING=m
+CONFIG_SND_SOC_QDSP6_ASM=m
+CONFIG_SND_SOC_QDSP6_ASM_DAI=m
+CONFIG_SND_SOC_QDSP6=m
+CONFIG_SND_SOC_SDM845=m
+CONFIG_SND_SOC_DMIC=m
+CONFIG_SND_SOC_WCD9335=m
+CONFIG_SND_SOC_WCD934X=m
+CONFIG_SND_SOC_WSA881X=m
+CONFIG_QCOM_BAM_DMA=m
+CONFIG_QCOM_GPI_DMA=m
+CONFIG_SPMI_PMIC_CLKDIV=m
+CONFIG_SOUNDWIRE=m
+CONFIG_SOUNDWIRE_QCOM=m
+CONFIG_SLIMBUS=m
+CONFIG_SLIM_QCOM_NGD_CTRL=m
+CONFIG_DMABUF_HEAPS_SYSTEM=m
+CONFIG_SDM_VIDEOCC_845=m
+# CONFIG_CXD2880_SPI_DRV is not set
+# CONFIG_MEDIA_TUNER_SIMPLE is not set
+# CONFIG_MEDIA_TUNER_TDA18250 is not set
+# CONFIG_MEDIA_TUNER_TDA8290 is not set
+# CONFIG_MEDIA_TUNER_TDA827X is not set
+# CONFIG_MEDIA_TUNER_TDA18271 is not set
+# CONFIG_MEDIA_TUNER_TDA9887 is not set
+# CONFIG_MEDIA_TUNER_TEA5761 is not set
+# CONFIG_MEDIA_TUNER_TEA5767 is not set
+# CONFIG_MEDIA_TUNER_MSI001 is not set
+# CONFIG_MEDIA_TUNER_MT20XX is not set
+# CONFIG_MEDIA_TUNER_MT2060 is not set
+# CONFIG_MEDIA_TUNER_MT2063 is not set
+# CONFIG_MEDIA_TUNER_MT2266 is not set
+# CONFIG_MEDIA_TUNER_MT2131 is not set
+# CONFIG_MEDIA_TUNER_QT1010 is not set
+# CONFIG_MEDIA_TUNER_XC2028 is not set
+# CONFIG_MEDIA_TUNER_XC5000 is not set
+# CONFIG_MEDIA_TUNER_XC4000 is not set
+# CONFIG_MEDIA_TUNER_MXL5005S is not set
+# CONFIG_MEDIA_TUNER_MXL5007T is not set
+# CONFIG_MEDIA_TUNER_MC44S803 is not set
+# CONFIG_MEDIA_TUNER_MAX2165 is not set
+# CONFIG_MEDIA_TUNER_TDA18218 is not set
+# CONFIG_MEDIA_TUNER_FC0011 is not set
+# CONFIG_MEDIA_TUNER_FC0012 is not set
+# CONFIG_MEDIA_TUNER_FC0013 is not set
+# CONFIG_MEDIA_TUNER_TDA18212 is not set
+# CONFIG_MEDIA_TUNER_E4000 is not set
+# CONFIG_MEDIA_TUNER_FC2580 is not set
+# CONFIG_MEDIA_TUNER_M88RS6000T is not set
+# CONFIG_MEDIA_TUNER_TUA9001 is not set
+# CONFIG_MEDIA_TUNER_SI2157 is not set
+# CONFIG_MEDIA_TUNER_IT913X is not set
+# CONFIG_MEDIA_TUNER_R820T is not set
+# CONFIG_MEDIA_TUNER_MXL301RF is not set
+# CONFIG_MEDIA_TUNER_QM1D1C0042 is not set
+# CONFIG_MEDIA_TUNER_QM1D1B0004 is not set
+# CONFIG_DVB_STB0899 is not set
+# CONFIG_DVB_STB6100 is not set
+# CONFIG_DVB_STV090x is not set
+# CONFIG_DVB_STV0910 is not set
+# CONFIG_DVB_STV6110x is not set
+# CONFIG_DVB_STV6111 is not set
+# CONFIG_DVB_MXL5XX is not set
+# CONFIG_DVB_M88DS3103 is not set
+# CONFIG_DVB_DRXK is not set
+# CONFIG_DVB_TDA18271C2DD is not set
+# CONFIG_DVB_SI2165 is not set
+# CONFIG_DVB_MN88472 is not set
+# CONFIG_DVB_MN88473 is not set
+# CONFIG_DVB_CX24110 is not set
+# CONFIG_DVB_CX24123 is not set
+# CONFIG_DVB_MT312 is not set
+# CONFIG_DVB_ZL10036 is not set
+# CONFIG_DVB_ZL10039 is not set
+# CONFIG_DVB_S5H1420 is not set
+# CONFIG_DVB_STV0288 is not set
+# CONFIG_DVB_STB6000 is not set
+# CONFIG_DVB_STV0299 is not set
+# CONFIG_DVB_STV6110 is not set
+# CONFIG_DVB_STV0900 is not set
+# CONFIG_DVB_TDA8083 is not set
+# CONFIG_DVB_TDA10086 is not set
+# CONFIG_DVB_TDA8261 is not set
+# CONFIG_DVB_VES1X93 is not set
+# CONFIG_DVB_TUNER_ITD1000 is not set
+# CONFIG_DVB_TUNER_CX24113 is not set
+# CONFIG_DVB_TDA826X is not set
+# CONFIG_DVB_TUA6100 is not set
+# CONFIG_DVB_CX24116 is not set
+# CONFIG_DVB_CX24117 is not set
+# CONFIG_DVB_CX24120 is not set
+# CONFIG_DVB_SI21XX is not set
+# CONFIG_DVB_TS2020 is not set
+# CONFIG_DVB_DS3000 is not set
+# CONFIG_DVB_MB86A16 is not set
+# CONFIG_DVB_TDA10071 is not set
+# CONFIG_DVB_SP8870 is not set
+# CONFIG_DVB_SP887X is not set
+# CONFIG_DVB_CX22700 is not set
+# CONFIG_DVB_CX22702 is not set
+# CONFIG_DVB_S5H1432 is not set
+# CONFIG_DVB_DRXD is not set
+# CONFIG_DVB_L64781 is not set
+# CONFIG_DVB_TDA1004X is not set
+# CONFIG_DVB_NXT6000 is not set
+# CONFIG_DVB_MT352 is not set
+# CONFIG_DVB_ZL10353 is not set
+# CONFIG_DVB_DIB3000MB is not set
+# CONFIG_DVB_DIB3000MC is not set
+# CONFIG_DVB_DIB7000M is not set
+# CONFIG_DVB_DIB7000P is not set
+# CONFIG_DVB_DIB9000 is not set
+# CONFIG_DVB_TDA10048 is not set
+# CONFIG_DVB_AF9013 is not set
+# CONFIG_DVB_EC100 is not set
+# CONFIG_DVB_STV0367 is not set
+# CONFIG_DVB_CXD2820R is not set
+# CONFIG_DVB_CXD2841ER is not set
+# CONFIG_DVB_RTL2830 is not set
+# CONFIG_DVB_RTL2832 is not set
+# CONFIG_DVB_RTL2832_SDR is not set
+# CONFIG_DVB_SI2168 is not set
+# CONFIG_DVB_ZD1301_DEMOD is not set
+# CONFIG_DVB_CXD2880 is not set
+# CONFIG_DVB_VES1820 is not set
+# CONFIG_DVB_TDA10021 is not set
+# CONFIG_DVB_TDA10023 is not set
+# CONFIG_DVB_STV0297 is not set
+# CONFIG_DVB_NXT200X is not set
+# CONFIG_DVB_OR51211 is not set
+# CONFIG_DVB_OR51132 is not set
+# CONFIG_DVB_BCM3510 is not set
+# CONFIG_DVB_LGDT330X is not set
+# CONFIG_DVB_LGDT3305 is not set
+# CONFIG_DVB_LGDT3306A is not set
+# CONFIG_DVB_LG2160 is not set
+# CONFIG_DVB_S5H1409 is not set
+# CONFIG_DVB_AU8522_DTV is not set
+# CONFIG_DVB_AU8522_V4L is not set
+# CONFIG_DVB_S5H1411 is not set
+# CONFIG_DVB_S921 is not set
+# CONFIG_DVB_DIB8000 is not set
+# CONFIG_DVB_MB86A20S is not set
+# CONFIG_DVB_TC90522 is not set
+# CONFIG_DVB_MN88443X is not set
+# CONFIG_DVB_PLL is not set
+# CONFIG_DVB_TUNER_DIB0070 is not set
+# CONFIG_DVB_TUNER_DIB0090 is not set
+# CONFIG_DVB_DRX39XYJ is not set
+# CONFIG_DVB_LNBH25 is not set
+# CONFIG_DVB_LNBH29 is not set
+# CONFIG_DVB_LNBP21 is not set
+# CONFIG_DVB_LNBP22 is not set
+# CONFIG_DVB_ISL6405 is not set
+# CONFIG_DVB_ISL6421 is not set
+# CONFIG_DVB_ISL6423 is not set
+# CONFIG_DVB_A8293 is not set
+# CONFIG_DVB_LGS8GL5 is not set
+# CONFIG_DVB_LGS8GXX is not set
+# CONFIG_DVB_ATBM8830 is not set
+# CONFIG_DVB_TDA665x is not set
+# CONFIG_DVB_IX2505V is not set
+# CONFIG_DVB_M88RS2000 is not set
+# CONFIG_DVB_AF9033 is not set
+# CONFIG_DVB_HORUS3A is not set
+# CONFIG_DVB_ASCOT2E is not set
+# CONFIG_DVB_HELENE is not set
+# CONFIG_DVB_CXD2099 is not set
+# CONFIG_DVB_SP2 is not set
+CONFIG_QCOM_COMMAND_DB=m
+CONFIG_QCOM_LMH=m
+# XXX RB5 bits start here
+CONFIG_QCOM_IPCC=m
+CONFIG_QCOM_SPMI_ADC5=m
+CONFIG_QCOM_SPMI_TEMP_ALARM=m
+CONFIG_RPMSG_NS=m
+CONFIG_CAN_MCP251XFD=m
+CONFIG_ATH11K=m
+CONFIG_ATH11K_AHB=m
+CONFIG_ATH11K_PCI=m
+CONFIG_PINCTRL_SM8250=m
+CONFIG_PINCTRL_SM8250_LPASS_LPI=m
+CONFIG_PINCTRL_LPASS_LPI=m
+CONFIG_QCOM_SPMI_ADC_TM5=m
+CONFIG_REGULATOR_QCOM_USB_VBUS=m
+CONFIG_DRM_DISPLAY_CONNECTOR=m
+CONFIG_DRM_LONTIUM_LT9611UXC=m
+CONFIG_SND_SOC_SM8250=m
+CONFIG_SND_SOC_LPASS_WSA_MACRO=m
+CONFIG_SND_SOC_LPASS_VA_MACRO=m
+CONFIG_TYPEC_QCOM_PMIC=m
+CONFIG_LEDS_QCOM_LPG=m
+CONFIG_SM_GPUCC_8250=m
+CONFIG_SM_DISPCC_8250=m
+CONFIG_SM_VIDEOCC_8250=m
+CONFIG_CLK_GFM_LPASS_SM8250=m
+CONFIG_PHY_QCOM_USB_SNPS_FEMTO_V2=m
+CONFIG_INTERCONNECT_QCOM_SM8250=m
+CONFIG_CRYPTO_MICHAEL_MIC=m
+CONFIG_QCOM_CPR=m
+CONFIG_QCOM_SPM=m
+CONFIG_TYPEC_MUX_NB7VPQ904M=m
+# XXX SM8450 bits start here
+CONFIG_PINCTRL_SM8450=m
+CONFIG_SM_GCC_8450=m
+CONFIG_INTERCONNECT_QCOM_SM8450=m
+# XXX SM8550 and SM8650 common bits start here
+CONFIG_ATH12K=m
+CONFIG_BATTERY_QCOM_BATTMGR=m
+CONFIG_PHY_QCOM_EUSB2_REPEATER=m
+CONFIG_PHY_SNPS_EUSB2=m
+CONFIG_QCOM_PMIC_GLINK=m
+CONFIG_SND_SOC_LPASS_RX_MACRO=m
+CONFIG_SND_SOC_LPASS_TX_MACRO=m
+CONFIG_SND_SOC_SC8280XP=m
+CONFIG_SND_SOC_WCD938X=m
+CONFIG_SND_SOC_WCD938X_SDW=m
+CONFIG_SND_SOC_WCD939X=m
+CONFIG_SND_SOC_WCD939X_SDW=m
+CONFIG_SND_SOC_WSA884X=m
+CONFIG_TYPEC_MUX_FSA4480=m
+CONFIG_TYPEC_MUX_WCD939X_USBSS=m
+CONFIG_UCSI_PMIC_GLINK=m
+# XXX SM8550 bits start here
+CONFIG_DRM_PANEL_VISIONOX_VTDR6130=m
+CONFIG_INTERCONNECT_QCOM_SM8550=m
+CONFIG_PINCTRL_SM8550=m
+CONFIG_PINCTRL_SM8550_LPASS_LPI=m
+CONFIG_SM_CAMCC_8550=m
+CONFIG_SM_DISPCC_8550=m
+CONFIG_SM_GCC_8550=m
+CONFIG_SM_GPUCC_8550=m
+CONFIG_SM_TCSRCC_8550=m
+CONFIG_SM_VIDEOCC_8550=m
+# XXX SM8650 bits start here
+CONFIG_INTERCONNECT_QCOM_SM8650=m
+CONFIG_PINCTRL_SM8650=m
+CONFIG_PINCTRL_SM8650_LPASS_LPI=m
+CONFIG_SM_GCC_8650=m
+CONFIG_SM_GPUCC_8650=m
+CONFIG_SM_TCSRCC_8650=m
+# CFG80211 & MAC80211 as modules
+CONFIG_CFG80211=m
+CONFIG_NL80211_TESTMODE=y
+CONFIG_CFG80211_CERTIFICATION_ONUS=y
+CONFIG_CFG80211_REG_CELLULAR_HINTS=y
+CONFIG_MAC80211=m
+# PWRSEQ driver for WCN BT-WLAN chipsets on sm8250 and newer SoCs
+# Depends on CONFIG_POWER_SEQUENCING in gki_defconfig
+CONFIG_POWER_SEQUENCING_QCOM_WCN=m
+CONFIG_USB_XHCI_PCI_RENESAS=m
+# XXX RB3Gen2 bits start here
+CONFIG_DRM_PANEL_NOVATEK_NT36672E=m
+CONFIG_INTERCONNECT_QCOM_SC7280=m
+CONFIG_PHY_QCOM_EDP=m
+CONFIG_PINCTRL_SC7280=m
+CONFIG_PINCTRL_SC7280_LPASS_LPI=m
+CONFIG_SC_CAMCC_7280=m
+CONFIG_SC_DISPCC_7280=m
+CONFIG_SC_GCC_7280=m
+CONFIG_SC_GPUCC_7280=m
+CONFIG_SC_LPASS_CORECC_7280=m
+CONFIG_SC_VIDEOCC_7280=m
+CONFIG_SND_SOC_SC7280=m
diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig
new file mode 100644
index 0000000..b416a91
--- /dev/null
+++ b/arch/arm64/configs/gki_defconfig
@@ -0,0 +1,794 @@
+CONFIG_LOCALVERSION="-4k"
+CONFIG_AUDIT=y
+CONFIG_TIME_KUNIT_TEST=m
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set
+CONFIG_BPF_LSM=y
+CONFIG_PREEMPT=y
+CONFIG_SCHED_CLASS_EXT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_LAZY=y
+CONFIG_RCU_LAZY_DEFAULT_OFF=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IKHEADERS=m
+CONFIG_UCLAMP_TASK=y
+CONFIG_UCLAMP_BUCKETS_COUNT=20
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_V1=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_UCLAMP_TASK_GROUP=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CPUSETS_V1=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+# CONFIG_PID_NS is not set
+CONFIG_RT_SOFTIRQ_AWARE_SCHED=y
+CONFIG_RELAY=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+CONFIG_BOOT_CONFIG=y
+CONFIG_EXPERT=y
+# CONFIG_FHANDLE is not set
+# CONFIG_RSEQ is not set
+CONFIG_PROFILING=y
+CONFIG_RUST=y
+CONFIG_ARCH_SUNXI=y
+CONFIG_ARCH_HISI=y
+CONFIG_ARCH_QCOM=y
+CONFIG_ARCH_TEGRA=y
+CONFIG_ARM64_VA_BITS_39=y
+CONFIG_NR_CPUS=32
+CONFIG_PARAVIRT_TIME_ACCOUNTING=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+CONFIG_COMPAT=y
+CONFIG_ARMV8_DEPRECATED=y
+CONFIG_SWP_EMULATION=y
+CONFIG_CP15_BARRIER_EMULATION=y
+CONFIG_SETEND_EMULATION=y
+CONFIG_ARM64_PMEM=y
+CONFIG_ARM64_PSEUDO_NMI=y
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set
+CONFIG_UNWIND_PATCH_PAC_INTO_SCS=y
+CONFIG_CMDLINE="console=ttynull stack_depot_disable=on cgroup_disable=pressure kasan.stacktrace=off kvm-arm.mode=protected bootconfig ioremap_guard"
+# CONFIG_DMI is not set
+CONFIG_HIBERNATION=y
+CONFIG_PM_USERSPACE_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_PM_ADVANCED_DEBUG=y
+CONFIG_ENERGY_MODEL=y
+CONFIG_CPU_IDLE=y
+CONFIG_CPU_IDLE_GOV_MENU=y
+CONFIG_CPU_IDLE_GOV_TEO=y
+CONFIG_ARM_PSCI_CPUIDLE=y
+CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_TIMES=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_ARM_SCPI_CPUFREQ=y
+CONFIG_ARM_SCMI_CPUFREQ=y
+# CONFIG_ARM_TEGRA194_CPUFREQ is not set
+CONFIG_VIRTUALIZATION=y
+CONFIG_KVM=y
+CONFIG_KPROBES=y
+CONFIG_SHADOW_CALL_STACK=y
+CONFIG_CFI=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_GENDWARFKSYMS=y
+CONFIG_MODULE_SCMVERSION=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_PROTECT=y
+CONFIG_MODULE_SIG_SHA256=y
+CONFIG_MODPROBE_PATH=""
+CONFIG_BLK_DEV_ZONED=y
+CONFIG_BLK_CGROUP_IOPRIO=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
+CONFIG_GKI_HACKS_TO_FIX=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+CONFIG_READ_ONLY_THP_FOR_FS=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_SYSFS=y
+CONFIG_CMA_AREAS=16
+# CONFIG_ZONE_DMA is not set
+CONFIG_ANON_VMA_NAME=y
+CONFIG_USERFAULTFD=y
+CONFIG_LRU_GEN=y
+CONFIG_LRU_GEN_ENABLED=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+# CONFIG_AF_UNIX_OOB is not set
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_NET_IPIP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_ESP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_GRE=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_PROCFS=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XTABLES_COMPAT=y
+CONFIG_NETFILTER_XTABLES_LEGACY=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_DSCP=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_L2TP=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES_LEGACY=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES_LEGACY=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_TIPC=m
+CONFIG_L2TP=m
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=m
+CONFIG_6LOWPAN=m
+CONFIG_IEEE802154=m
+CONFIG_IEEE802154_6LOWPAN=m
+CONFIG_MAC802154=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_SFQ=y
+CONFIG_NET_SCH_TBF=y
+CONFIG_NET_SCH_NETEM=y
+CONFIG_NET_SCH_CODEL=y
+CONFIG_NET_SCH_FQ_CODEL=y
+CONFIG_NET_SCH_FQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_BASIC=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_CLS_MATCHALL=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS=m
+CONFIG_CGROUP_NET_PRIO=y
+CONFIG_CAN=m
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_HIDP=m
+CONFIG_BT_HCIBTSDIO=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_BT_HCIUART_QCA=y
+CONFIG_RFKILL=m
+CONFIG_NET_9P=m
+CONFIG_NFC=m
+CONFIG_NETDEV_ADDR_LIST_TEST=m
+CONFIG_PCI=y
+CONFIG_PCIEAER=y
+CONFIG_PCI_IOV=y
+# CONFIG_VGA_ARB is not set
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_DW_PLAT_EP=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCI_ENDPOINT=y
+# CONFIG_PCI_PWRCTRL_TC9563 is not set
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_FW_CACHE is not set
+CONFIG_REGMAP_KUNIT=m
+# CONFIG_SUN50I_DE2_BUS is not set
+# CONFIG_SUNXI_RSB is not set
+CONFIG_ARM_SCMI_PROTOCOL=y
+CONFIG_ARM_SCPI_PROTOCOL=y
+CONFIG_ARM_SDE_INTERFACE=y
+# CONFIG_EFI_ARMSTUB_DTB_LOADER is not set
+CONFIG_TEGRA_BPMP=y
+CONFIG_GNSS=m
+CONFIG_ZRAM=m
+CONFIG_ZRAM_BACKEND_LZ4=y
+CONFIG_ZRAM_BACKEND_ZSTD=y
+CONFIG_ZRAM_BACKEND_LZO=y
+CONFIG_ZRAM_WRITEBACK=y
+CONFIG_ZRAM_MULTI_COMP=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=m
+CONFIG_BLK_DEV_UBLK=y
+CONFIG_BLK_DEV_NVME=y
+CONFIG_NVME_MULTIPATH=y
+CONFIG_SRAM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_OPEN_DICE=m
+CONFIG_VCPU_STALL_DETECTOR=m
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_SNAPSHOT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_BOW=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_WIREGUARD=y
+CONFIG_IFB=y
+CONFIG_MACSEC=m
+CONFIG_TUN=y
+CONFIG_VETH=y
+CONFIG_LED_TRIGGER_PHY=y
+CONFIG_AX88796B_PHY=y
+CONFIG_CAN_VCAN=m
+CONFIG_CAN_SLCAN=m
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_USB_NET_DRIVERS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_CDC_EEM=m
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_USB_NET_AQC111=m
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+# CONFIG_WLAN_VENDOR_ATH is not set
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+# CONFIG_WLAN_VENDOR_BROADCOM is not set
+# CONFIG_WLAN_VENDOR_INTEL is not set
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+# CONFIG_WLAN_VENDOR_MARVELL is not set
+# CONFIG_WLAN_VENDOR_MEDIATEK is not set
+# CONFIG_WLAN_VENDOR_RALINK is not set
+# CONFIG_WLAN_VENDOR_REALTEK is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+# CONFIG_WLAN_VENDOR_QUANTENNA is not set
+CONFIG_WWAN=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KUNIT_TEST=m
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_EXAR is not set
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=0
+CONFIG_SERIAL_8250_DW=y
+# CONFIG_SERIAL_8250_TEGRA is not set
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_AMBA_PL011=y
+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_SERIAL_TEGRA_TCU=y
+CONFIG_SERIAL_QCOM_GENI=y
+CONFIG_SERIAL_QCOM_GENI_CONSOLE=y
+CONFIG_SERIAL_SPRD=y
+CONFIG_SERIAL_SPRD_CONSOLE=y
+CONFIG_NULL_TTY=y
+CONFIG_HVC_DCC=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_VIRTIO_CONSOLE=m
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_HISI is not set
+# CONFIG_HW_RANDOM_HISTB is not set
+CONFIG_HW_RANDOM_CCTRNG=m
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVPORT is not set
+# CONFIG_I2C_HELPER_AUTO is not set
+# CONFIG_I2C_TEGRA_BPMP is not set
+CONFIG_I3C=y
+CONFIG_SPI=y
+CONFIG_SPI_MEM=y
+CONFIG_SPI_SLAVE=y
+# CONFIG_SPMI_MSM_PMIC_ARB is not set
+CONFIG_PTP_1588_CLOCK=m
+# CONFIG_PTP_1588_CLOCK_VMCLOCK is not set
+# CONFIG_PINCTRL_SUN8I_H3_R is not set
+# CONFIG_PINCTRL_SUN50I_A64 is not set
+# CONFIG_PINCTRL_SUN50I_A64_R is not set
+# CONFIG_PINCTRL_SUN50I_A100 is not set
+# CONFIG_PINCTRL_SUN50I_A100_R is not set
+# CONFIG_PINCTRL_SUN50I_H5 is not set
+# CONFIG_PINCTRL_SUN50I_H6 is not set
+# CONFIG_PINCTRL_SUN50I_H6_R is not set
+# CONFIG_PINCTRL_SUN50I_H616 is not set
+# CONFIG_PINCTRL_SUN50I_H616_R is not set
+# CONFIG_PINCTRL_SUN55I_A523 is not set
+# CONFIG_PINCTRL_SUN55I_A523_R is not set
+CONFIG_GPIO_GENERIC_PLATFORM=y
+# CONFIG_GPIO_TEGRA is not set
+# CONFIG_GPIO_TEGRA186 is not set
+CONFIG_POWER_RESET_HISI=y
+CONFIG_POWER_RESET_SYSCON=y
+# CONFIG_POWER_SEQUENCING_QCOM_WCN is not set
+# CONFIG_HWMON is not set
+CONFIG_THERMAL=y
+CONFIG_THERMAL_NETLINK=y
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=100
+CONFIG_THERMAL_GOV_BANG_BANG=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
+CONFIG_CPU_THERMAL=y
+CONFIG_CPU_IDLE_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_MFD_ACT8945A=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_RC_CORE=y
+CONFIG_BPF_LIRC_MODE2=y
+CONFIG_LIRC=y
+# CONFIG_RC_MAP is not set
+CONFIG_RC_DECODERS=y
+CONFIG_RC_DEVICES=y
+CONFIG_MEDIA_CEC_RC=y
+# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
+# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
+# CONFIG_MEDIA_SDR_SUPPORT is not set
+# CONFIG_MEDIA_TEST_SUPPORT is not set
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_GSPCA=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_DRM=y
+# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_DRM_ACCEL=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_TOPOLOGY_KUNIT_TEST=m
+CONFIG_SND_SOC_UTILS_KUNIT_TEST=m
+CONFIG_HID_BATTERY_STRENGTH=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NINTENDO=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_PLAYSTATION=y
+CONFIG_PLAYSTATION_FF=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SONY=y
+CONFIG_SONY_FF=y
+CONFIG_HID_STEAM=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_KUNIT_TEST=m
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_OTG=y
+CONFIG_USB_MON=m
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_DBGCAP=y
+CONFIG_USB_XHCI_SIDEBAND=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_ACM=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_GADGET=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_ANDROID_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_ECM=y
+CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_UAC2=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_UVC=y
+CONFIG_TYPEC=y
+CONFIG_TYPEC_TCPM=y
+CONFIG_TYPEC_TCPCI=y
+CONFIG_TYPEC_UCSI=y
+CONFIG_TYPEC_DP_ALTMODE=y
+CONFIG_MMC=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
+CONFIG_MMC_CRYPTO=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFS_BSG=y
+CONFIG_SCSI_UFS_CRYPTO=y
+CONFIG_SCSI_UFSHCD_PCI=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_DWC_TC_PLATFORM=y
+CONFIG_SCSI_UFS_HISI=y
+CONFIG_LEDS_CLASS_FLASH=y
+CONFIG_LEDS_CLASS_MULTICOLOR=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_LIB_KUNIT_TEST=m
+CONFIG_RTC_DRV_PL030=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_UDMABUF=y
+CONFIG_DMABUF_HEAPS=y
+CONFIG_UIO=y
+CONFIG_VIRTIO_PCI=m
+CONFIG_VIRTIO_BALLOON=m
+CONFIG_VHOST_VSOCK=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ASHMEM_RUST=y
+CONFIG_COMMON_CLK_SCPI=y
+# CONFIG_SUNXI_CCU is not set
+CONFIG_CLK_KUNIT_TEST=m
+CONFIG_CLK_GATE_KUNIT_TEST=m
+CONFIG_HWSPINLOCK=y
+# CONFIG_SUN50I_ERRATUM_UNKNOWN1 is not set
+CONFIG_TEGRA_HSP_MBOX=y
+CONFIG_IOMMU_IO_PGTABLE_ARMV7S=y
+CONFIG_REMOTEPROC=y
+CONFIG_REMOTEPROC_CDEV=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_QCOM_GENI_SE=y
+CONFIG_ARCH_TEGRA_234_SOC=y
+# CONFIG_ARM_SCMI_POWER_DOMAIN is not set
+# CONFIG_ARM_SCPI_POWER_DOMAIN is not set
+CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+CONFIG_DEVFREQ_GOV_POWERSAVE=y
+CONFIG_DEVFREQ_GOV_USERSPACE=y
+CONFIG_DEVFREQ_GOV_PASSIVE=y
+CONFIG_PM_DEVFREQ_EVENT=y
+CONFIG_MEMORY=y
+CONFIG_IIO=y
+CONFIG_IIO_BUFFER=y
+CONFIG_IIO_TRIGGER=y
+CONFIG_IIO_FORMAT_KUNIT_TEST=m
+CONFIG_PWM=y
+CONFIG_POWERCAP=y
+CONFIG_IDLE_INJECT=y
+CONFIG_USB4=m
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDERFS=y
+CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST=m
+CONFIG_ANDROID_VENDOR_HOOKS=y
+CONFIG_ANDROID_DEBUG_KINFO=y
+CONFIG_LIBNVDIMM=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_KUNIT_TESTS=m
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_COMPRESSION=y
+CONFIG_F2FS_UNFAIR_RWSEM=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
+CONFIG_FS_VERITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_VIRTIO_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_INCREMENTAL_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_KUNIT_TEST=m
+CONFIG_EXFAT_FS=y
+CONFIG_TMPFS=y
+# CONFIG_EFIVAR_FS is not set
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_PMSG=y
+CONFIG_PSTORE_RAM=y
+CONFIG_EROFS_FS=y
+CONFIG_EROFS_FS_PCPU_KTHREAD=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_MAC_ROMAN=y
+CONFIG_NLS_MAC_CELTIC=y
+CONFIG_NLS_MAC_CENTEURO=y
+CONFIG_NLS_MAC_CROATIAN=y
+CONFIG_NLS_MAC_CYRILLIC=y
+CONFIG_NLS_MAC_GAELIC=y
+CONFIG_NLS_MAC_GREEK=y
+CONFIG_NLS_MAC_ICELAND=y
+CONFIG_NLS_MAC_INUIT=y
+CONFIG_NLS_MAC_ROMANIAN=y
+CONFIG_NLS_MAC_TURKISH=y
+CONFIG_NLS_UTF8=y
+CONFIG_UNICODE=y
+CONFIG_PROC_MEM_FORCE_PTRACE=y
+CONFIG_MSEAL_SYSTEM_MAPPINGS=y
+CONFIG_SECURITY=y
+CONFIG_STATIC_USERMODEHELPER=y
+CONFIG_STATIC_USERMODEHELPER_PATH=""
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SAFESETID=y
+CONFIG_SECURITY_LANDLOCK=y
+CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_ECDH=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_ADIANTUM=y
+CONFIG_CRYPTO_HCTR2=y
+CONFIG_CRYPTO_CHACHA20POLY1305=y
+CONFIG_CRYPTO_CCM=y
+CONFIG_CRYPTO_BLAKE2B=y
+CONFIG_CRYPTO_CMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_LZ4=y
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_CRYPTO_GHASH_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_TRACE_MMIO_ACCESS=y
+CONFIG_CRYPTO_LIB_BLAKE2B_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_BLAKE2S_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_CURVE25519_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_MD5_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_POLY1305_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_POLYVAL_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_SHA1_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_SHA256_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_SHA512_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_SHA3_KUNIT_TEST=m
+CONFIG_SWIOTLB_DYNAMIC=y
+CONFIG_DMA_RESTRICTED_POOL=y
+CONFIG_DMA_CMA=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PRINTK_CALLER=y
+CONFIG_STACKTRACE_BUILD_ID=y
+CONFIG_DYNAMIC_DEBUG_CORE=y
+CONFIG_DEBUG_INFO_DWARF5=y
+CONFIG_DEBUG_INFO_COMPRESSED_ZSTD=y
+CONFIG_DEBUG_INFO_BTF=y
+CONFIG_MODULE_ALLOW_BTF_MISMATCH=y
+CONFIG_HEADERS_INSTALL=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_UBSAN=y
+CONFIG_UBSAN_TRAP=y
+# CONFIG_UBSAN_BOOL is not set
+# CONFIG_UBSAN_ENUM is not set
+CONFIG_PAGE_OWNER=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_MEM_ALLOC_PROFILING=y
+# CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT is not set
+CONFIG_KASAN=y
+CONFIG_KASAN_HW_TAGS=y
+CONFIG_KFENCE=y
+CONFIG_KFENCE_SAMPLE_INTERVAL=500
+CONFIG_KFENCE_NUM_OBJECTS=63
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_PROVE_LOCKING=y
+# CONFIG_PROVE_RAW_LOCK_NESTING is not set
+CONFIG_HIST_TRIGGERS=y
+CONFIG_PID_IN_CONTEXTIDR=y
+CONFIG_KUNIT=m
+CONFIG_KUNIT_DEBUGFS=y
+CONFIG_KUNIT_TEST=m
+CONFIG_KUNIT_EXAMPLE_TEST=m
+# CONFIG_KUNIT_DEFAULT_ENABLED is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/arm64/configs/microdroid_defconfig b/arch/arm64/configs/microdroid_defconfig
new file mode 100644
index 0000000..39b1ce1
--- /dev/null
+++ b/arch/arm64/configs/microdroid_defconfig
@@ -0,0 +1,181 @@
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+# CONFIG_UTS_NS is not set
+# CONFIG_TIME_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_ZSTD is not set
+CONFIG_BOOT_CONFIG=y
+CONFIG_PROFILING=y
+CONFIG_ARM64_VA_BITS_39=y
+CONFIG_NR_CPUS=32
+CONFIG_PARAVIRT_TIME_ACCOUNTING=y
+CONFIG_KEXEC_FILE=y
+CONFIG_ARM64_SW_TTBR0_PAN=y
+CONFIG_RANDOMIZE_BASE=y
+# CONFIG_RANDOMIZE_MODULE_REGION_FULL is not set
+CONFIG_CMDLINE="stack_depot_disable=on kasan.stacktrace=off cgroup_disable=pressure ioremap_guard panic=-1 bootconfig"
+# CONFIG_EFI is not set
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_VIRTUALIZATION=y
+CONFIG_JUMP_LABEL=y
+CONFIG_SHADOW_CALL_STACK=y
+CONFIG_CFI=y
+CONFIG_BLK_DEV_ZONED=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
+CONFIG_IOSCHED_BFQ=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+CONFIG_ANON_VMA_NAME=y
+CONFIG_USERFAULTFD=y
+CONFIG_LRU_GEN=y
+CONFIG_NET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS=y
+# CONFIG_WIRELESS is not set
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIEAER=y
+CONFIG_PCI_IOV=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCIE_DW_PLAT_EP=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCI_ENDPOINT=y
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_FW_CACHE is not set
+CONFIG_ARM_SCMI_PROTOCOL=y
+# CONFIG_ARM_SCMI_POWER_DOMAIN is not set
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
+CONFIG_OPEN_DICE=y
+CONFIG_VCPU_STALL_DETECTOR=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_SERIAL_8250_RUNTIME_UARTS=0
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_NULL_TTY=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_CCTRNG=y
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVPORT is not set
+CONFIG_POWER_RESET_SYSCON=y
+# CONFIG_HWMON is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+# CONFIG_RTC_NVMEM is not set
+CONFIG_RTC_DRV_PL030=y
+CONFIG_RTC_DRV_PL031=y
+CONFIG_DMABUF_HEAPS=y
+CONFIG_DMABUF_SYSFS_STATS=y
+CONFIG_UIO=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_STAGING=y
+CONFIG_HWSPINLOCK=y
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_USE_FOR_EXT2 is not set
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_TMPFS=y
+CONFIG_EROFS_FS=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_UNICODE=y
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_STATIC_USERMODEHELPER=y
+CONFIG_STATIC_USERMODEHELPER_PATH=""
+CONFIG_SECURITY_SELINUX=y
+CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
+CONFIG_CRYPTO_HCTR2=y
+CONFIG_CRYPTO_XTS=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
+CONFIG_TRACE_MMIO_ACCESS=y
+CONFIG_DMA_RESTRICTED_POOL=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PRINTK_CALLER=y
+CONFIG_DYNAMIC_DEBUG_CORE=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_INFO_DWARF5=y
+CONFIG_DEBUG_INFO_REDUCED=y
+CONFIG_HEADERS_INSTALL=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_UBSAN=y
+CONFIG_UBSAN_TRAP=y
+CONFIG_UBSAN_LOCAL_BOUNDS=y
+# CONFIG_UBSAN_SHIFT is not set
+# CONFIG_UBSAN_BOOL is not set
+# CONFIG_UBSAN_ENUM is not set
+CONFIG_PAGE_OWNER=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_KASAN=y
+CONFIG_KASAN_HW_TAGS=y
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_SCHEDSTATS=y
+# CONFIG_DEBUG_PREEMPT is not set
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_HIST_TRIGGERS=y
+CONFIG_PID_IN_CONTEXTIDR=y
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/arm64/configs/rockpi4_gki.fragment b/arch/arm64/configs/rockpi4_gki.fragment
new file mode 100644
index 0000000..b35b904
--- /dev/null
+++ b/arch/arm64/configs/rockpi4_gki.fragment
@@ -0,0 +1,80 @@
+# Core features
+CONFIG_ARCH_ROCKCHIP=y
+# CONFIG_CLK_PX30 is not set
+# CONFIG_CLK_RV110X is not set
+# CONFIG_CLK_RK3036 is not set
+# CONFIG_CLK_RK312X is not set
+# CONFIG_CLK_RK3188 is not set
+# CONFIG_CLK_RK322X is not set
+# CONFIG_CLK_RK3288 is not set
+# CONFIG_CLK_RK3308 is not set
+# CONFIG_CLK_RK3328 is not set
+# CONFIG_CLK_RK3368 is not set
+CONFIG_COMMON_CLK_RK808=m
+CONFIG_CPUFREQ_DT=m
+CONFIG_MFD_RK8XX_I2C=m
+CONFIG_MFD_RK8XX_SPI=m
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PHY_ROCKCHIP_PCIE=m
+CONFIG_PL330_DMA=m
+CONFIG_PWM_ROCKCHIP=m
+CONFIG_PWRSEQ_SIMPLE=m
+CONFIG_REGULATOR_FAN53555=m
+CONFIG_REGULATOR_PWM=m
+CONFIG_REGULATOR_RK808=m
+CONFIG_ROCKCHIP_IOMMU=y
+CONFIG_ROCKCHIP_IODOMAIN=m
+CONFIG_ROCKCHIP_MBOX=y
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ROCKCHIP_THERMAL=m
+
+# Ethernet
+CONFIG_STMMAC_ETH=m
+# CONFIG_DWMAC_GENERIC is not set
+# CONFIG_DWMAC_IPQ806X is not set
+# CONFIG_DWMAC_QCOM_ETHQOS is not set
+# CONFIG_DWMAC_SUNXI is not set
+# CONFIG_DWMAC_SUN8I is not set
+
+# I2C
+CONFIG_I2C_RK3X=m
+
+# Watchdog
+CONFIG_DW_WATCHDOG=m
+
+# Display
+CONFIG_DRM_ROCKCHIP=m
+CONFIG_ROCKCHIP_ANALOGIX_DP=y
+CONFIG_ROCKCHIP_DW_HDMI=y
+CONFIG_ROCKCHIP_DW_MIPI_DSI=y
+
+# USB 2.x
+CONFIG_PHY_ROCKCHIP_INNO_USB2=m
+CONFIG_USB_OHCI_HCD=m
+# CONFIG_USB_OHCI_HCD_PCI is not set
+CONFIG_USB_OHCI_HCD_PLATFORM=m
+
+# eMMC / SD-Card
+CONFIG_MMC_SDHCI_OF_ARASAN=m
+CONFIG_MMC_DW=m
+CONFIG_MMC_DW_ROCKCHIP=m
+CONFIG_PHY_ROCKCHIP_EMMC=m
+
+# Real-time clock
+CONFIG_RTC_DRV_RK808=m
+
+# Type-C
+CONFIG_PHY_ROCKCHIP_TYPEC=m
+
+# SAR ADC
+CONFIG_ROCKCHIP_SARADC=m
+
+# To boot Linux distributions like Debian
+CONFIG_DEVTMPFS=y
+
+# To bootstrap rootfs with QEMU
+CONFIG_HW_RANDOM_VIRTIO=m
+CONFIG_VIRTIO_PCI=m
+CONFIG_VIRTIO_BLK=m
+CONFIG_VIRTIO_NET=m
+CONFIG_VIRTIO_PCI_LEGACY=y
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index e30c4c8..7de8d6b 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -33,6 +33,7 @@
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/thread_info.h>
+#include <linux/android_vendor.h>
#include <vdso/processor.h>
@@ -199,6 +200,7 @@ struct thread_struct {
u64 gcs_base;
u64 gcs_size;
#endif
+ ANDROID_VENDOR_DATA(1);
};
static inline unsigned int thread_get_vl(struct thread_struct *thread,
diff --git a/arch/arm64/include/asm/static_call.h b/arch/arm64/include/asm/static_call.h
new file mode 100644
index 0000000..3315805
--- /dev/null
+++ b/arch/arm64/include/asm/static_call.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_STATIC_CALL_H
+#define _ASM_STATIC_CALL_H
+
+#define __ARCH_DEFINE_STATIC_CALL_TRAMP(name, target) \
+ asm(" .pushsection .static_call.text, \"ax\" \n" \
+ " .align 3 \n" \
+ " .globl " STATIC_CALL_TRAMP_STR(name) " \n" \
+ STATIC_CALL_TRAMP_STR(name) ": \n" \
+ " hint 34 /* BTI C */ \n" \
+ " adrp x16, 1f \n" \
+ " ldr x16, [x16, :lo12:1f] \n" \
+ " cbz x16, 0f \n" \
+ " br x16 \n" \
+ "0: ret \n" \
+ " .type " STATIC_CALL_TRAMP_STR(name) ", %function \n" \
+ " .size " STATIC_CALL_TRAMP_STR(name) ", . - " STATIC_CALL_TRAMP_STR(name) " \n" \
+ " .popsection \n" \
+ " .pushsection .rodata, \"a\" \n" \
+ " .align 3 \n" \
+ "1: .quad " target " \n" \
+ " .popsection \n")
+
+#define ARCH_DEFINE_STATIC_CALL_TRAMP(name, func) \
+ __ARCH_DEFINE_STATIC_CALL_TRAMP(name, #func)
+
+#define ARCH_DEFINE_STATIC_CALL_NULL_TRAMP(name) \
+ __ARCH_DEFINE_STATIC_CALL_TRAMP(name, "0x0")
+
+#define ARCH_DEFINE_STATIC_CALL_RET0_TRAMP(name) \
+ ARCH_DEFINE_STATIC_CALL_TRAMP(name, __static_call_return0)
+
+#endif /* _ASM_STATIC_CALL_H */
diff --git a/arch/arm64/kernel/Makefile b/arch/arm64/kernel/Makefile
index 76f32e4..fe62710 100644
--- a/arch/arm64/kernel/Makefile
+++ b/arch/arm64/kernel/Makefile
@@ -46,6 +46,7 @@
obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
obj-$(CONFIG_HARDLOCKUP_DETECTOR_PERF) += watchdog_hld.o
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+obj-$(CONFIG_HAVE_STATIC_CALL) += static_call.o
obj-$(CONFIG_CPU_PM) += sleep.o suspend.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_EFI) += efi.o efi-rt-wrapper.o
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c31f8e1..da3caf0 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1667,6 +1667,7 @@ const struct cpumask *system_32bit_el0_cpumask(void)
return cpu_possible_mask;
}
+EXPORT_SYMBOL_GPL(system_32bit_el0_cpumask);
const struct cpumask *task_cpu_fallback_mask(struct task_struct *p)
{
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index 4895549..8f7b1aa 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -41,6 +41,8 @@
#include <linux/thread_info.h>
#include <linux/prctl.h>
#include <linux/stacktrace.h>
+#include <trace/hooks/mpam.h>
+#include <trace/hooks/fpsimd.h>
#include <asm/alternative.h>
#include <asm/arch_timer.h>
@@ -247,6 +249,7 @@ void show_regs(struct pt_regs *regs)
__show_regs(regs);
dump_backtrace(regs, NULL, KERN_DEFAULT);
}
+EXPORT_SYMBOL_GPL(show_regs);
static void tls_thread_flush(void)
{
@@ -720,6 +723,12 @@ struct task_struct *__switch_to(struct task_struct *prev,
gcs_thread_switch(next);
/*
+ * vendor hook is needed before the dsb(),
+ * because MPAM is related to cache maintenance.
+ */
+ trace_android_vh_mpam_set(prev, next);
+
+ /*
* Complete any pending TLB or cache maintenance on this CPU in case the
* thread migrates to a different CPU. This full barrier is also
* required by the membarrier system call. Additionally it makes any
@@ -738,6 +747,8 @@ struct task_struct *__switch_to(struct task_struct *prev,
if (prev->thread.sctlr_user != next->thread.sctlr_user)
update_sctlr_el1(next->thread.sctlr_user);
+ trace_android_vh_is_fpsimd_save(prev, next);
+
/* the actual thread switch */
last = cpu_switch_to(prev, next);
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 23c05dc..56e69b2 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -277,6 +277,7 @@ u64 cpu_logical_map(unsigned int cpu)
{
return __cpu_logical_map[cpu];
}
+EXPORT_SYMBOL_GPL(cpu_logical_map);
void __init __no_sanitize_address setup_arch(char **cmdline_p)
{
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 1aa3241..ff60108 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -54,6 +54,12 @@
#include <asm/virt.h>
#include <trace/events/ipi.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/debug.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_raise);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_entry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_exit);
/*
* as from 2.5, kernels no longer have an init_tasks structure
@@ -977,6 +983,7 @@ static void do_handle_IPI(int ipinr)
ipi_cpu_crash_stop(cpu, get_irq_regs());
unreachable();
} else {
+ trace_android_vh_ipi_stop(get_irq_regs());
local_cpu_stop(cpu);
}
break;
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 3ebcf8c53..fd83c2e 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -444,6 +444,7 @@ noinline noinstr void arch_bpf_stack_walk(bool (*consume_entry)(void *cookie, u6
kunwind_stack_walk(arch_bpf_unwind_consume_entry, &data, current, NULL);
}
+EXPORT_SYMBOL_GPL(arch_stack_walk);
static const char *state_source_string(const struct kunwind_state *state)
{
@@ -493,6 +494,7 @@ void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
put_task_stack(tsk);
}
+EXPORT_SYMBOL_GPL(dump_backtrace);
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
diff --git a/arch/arm64/kernel/static_call.c b/arch/arm64/kernel/static_call.c
new file mode 100644
index 0000000..944ecab
--- /dev/null
+++ b/arch/arm64/kernel/static_call.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/static_call.h>
+#include <linux/memory.h>
+#include <asm/text-patching.h>
+
+void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
+{
+ u64 literal;
+ int ret;
+
+ /* decode the instructions to discover the literal address */
+ literal = ALIGN_DOWN((u64)tramp + 4, SZ_4K) +
+ aarch64_insn_adrp_get_offset(le32_to_cpup(tramp + 4)) +
+ 8 * aarch64_insn_decode_immediate(AARCH64_INSN_IMM_12,
+ le32_to_cpup(tramp + 8));
+
+ ret = aarch64_insn_write_literal_u64((void *)literal, (u64)func);
+ WARN_ON_ONCE(ret);
+}
+EXPORT_SYMBOL_GPL(arch_static_call_transform);
diff --git a/arch/arm64/kernel/vdso32/Makefile b/arch/arm64/kernel/vdso32/Makefile
index 9d0efed..03c9878 100644
--- a/arch/arm64/kernel/vdso32/Makefile
+++ b/arch/arm64/kernel/vdso32/Makefile
@@ -90,6 +90,10 @@
VDSO_LDFLAGS += -shared --build-id=sha1
VDSO_LDFLAGS += --orphan-handling=$(CONFIG_LD_ORPHAN_WARN_LEVEL)
+# Add user-supplied KCPPFLAGS_COMPAT as the last assignments
+VDSO_CFLAGS += $(KCPPFLAGS_COMPAT)
+VDSO_AFLAGS += $(KCPPFLAGS_COMPAT)
+
# Borrow vdsomunge.c from the arm vDSO
# We have to use a relative path because scripts/Makefile.host prefixes
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index ad6133b..af9e81f 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -191,6 +191,7 @@
LOCK_TEXT
KPROBES_TEXT
HYPERVISOR_TEXT
+ STATIC_CALL_TEXT
*(.gnu.warning)
}
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index b2b5792..a645416 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -8,6 +8,7 @@
#include <linux/cache.h>
#include <linux/dma-map-ops.h>
#include <xen/xen.h>
+#include <trace/hooks/iommu.h>
#include <asm/cacheflush.h>
#include <asm/xen/xen-ops.h>
diff --git a/arch/s390/include/asm/hardirq.h b/arch/s390/include/asm/hardirq.h
index a5b4538..da3d530 100644
--- a/arch/s390/include/asm/hardirq.h
+++ b/arch/s390/include/asm/hardirq.h
@@ -16,6 +16,12 @@
#define local_softirq_pending() (get_lowcore()->softirq_pending)
#define set_softirq_pending(x) (get_lowcore()->softirq_pending = (x))
#define or_softirq_pending(x) (get_lowcore()->softirq_pending |= (x))
+/*
+ * Not sure what the right thing is here for s390,
+ * but returning 0 will result in no logical change
+ * from what happens now
+ */
+#define __cpu_softirq_pending(x) (0)
#define __ARCH_IRQ_STAT
#define __ARCH_IRQ_EXIT_IRQS_DISABLED
diff --git a/arch/x86/OWNERS b/arch/x86/OWNERS
new file mode 100644
index 0000000..f59fa99
--- /dev/null
+++ b/arch/x86/OWNERS
@@ -0,0 +1,3 @@
+per-file crypto/**=file:/crypto/OWNERS
+per-file mm/**=file:/mm/OWNERS
+per-file net/**=file:/net/OWNERS
diff --git a/arch/x86/configs/allmodconfig.fragment b/arch/x86/configs/allmodconfig.fragment
new file mode 100644
index 0000000..a3592d0
--- /dev/null
+++ b/arch/x86/configs/allmodconfig.fragment
@@ -0,0 +1,13 @@
+CONFIG_UNWINDER_FRAME_POINTER=y
+# CONFIG_AFS_FS is not set
+# CONFIG_AF_RXRPC is not set
+# CONFIG_BPFILTER is not set
+# CONFIG_BUILTIN_MODULE_RANGES is not set
+# CONFIG_RANDSTRUCT is not set
+# CONFIG_RANDSTRUCT_FULL is not set
+CONFIG_RANDSTRUCT_NONE=y
+# CONFIG_SAMPLES is not set
+# CONFIG_WERROR is not set
+# CONFIG_MODULE_COMPRESS is not set
+CONFIG_MODULE_SIG_SHA256=y
+# CONFIG_UAPI_HEADER_TEST is not set
diff --git a/arch/x86/configs/crashdump_defconfig b/arch/x86/configs/crashdump_defconfig
new file mode 100644
index 0000000..aeb1d91
--- /dev/null
+++ b/arch/x86/configs/crashdump_defconfig
@@ -0,0 +1,81 @@
+# CONFIG_WERROR is not set
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_LZ4=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+CONFIG_NO_HZ_IDLE=y
+CONFIG_PREEMPT=y
+CONFIG_LOG_BUF_SHIFT=12
+# CONFIG_UTS_NS is not set
+# CONFIG_TIME_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+# CONFIG_RD_ZSTD is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_KEXEC=y
+# CONFIG_X86_EXTENDED_PLATFORM is not set
+# CONFIG_SCHED_OMIT_FRAME_POINTER is not set
+# CONFIG_X86_MCE is not set
+# CONFIG_PERF_EVENTS_AMD_UNCORE is not set
+# CONFIG_X86_IOPL_IOPERM is not set
+# CONFIG_MTRR_SANITIZER is not set
+# CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS is not set
+CONFIG_PHYSICAL_START=0x100000
+# CONFIG_RANDOMIZE_BASE is not set
+CONFIG_LEGACY_VSYSCALL_NONE=y
+# CONFIG_SUSPEND is not set
+# CONFIG_ACPI is not set
+# CONFIG_VIRTUALIZATION is not set
+CONFIG_JUMP_LABEL=y
+# CONFIG_SECCOMP is not set
+# CONFIG_STACKPROTECTOR is not set
+# CONFIG_VMAP_STACK is not set
+# CONFIG_MQ_IOSCHED_DEADLINE is not set
+# CONFIG_MQ_IOSCHED_KYBER is not set
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+# CONFIG_BINFMT_SCRIPT is not set
+# CONFIG_SWAP is not set
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+# CONFIG_COMPAT_BRK is not set
+# CONFIG_COMPACTION is not set
+CONFIG_PCI=y
+CONFIG_PCI_ENDPOINT=y
+CONFIG_DEVTMPFS=y
+# CONFIG_STANDALONE is not set
+# CONFIG_PREVENT_FIRMWARE_BUILD is not set
+# CONFIG_DMIID is not set
+# CONFIG_BLK_DEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO_I8042 is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_DEV_BUS=y
+# CONFIG_SERIAL_DEV_CTRL_TTYPORT is not set
+CONFIG_VIRTIO_CONSOLE=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_HWMON is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_VIRTIO_PCI=y
+# CONFIG_VIRTIO_PCI_LEGACY is not set
+# CONFIG_VHOST_MENU is not set
+# CONFIG_X86_PLATFORM_DEVICES is not set
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+CONFIG_TMPFS=y
+# CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_SYMBOLIC_ERRNAME is not set
+# CONFIG_X86_VERBOSE_BOOTUP is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/x86/configs/gki_defconfig b/arch/x86/configs/gki_defconfig
new file mode 100644
index 0000000..8c96e3d
--- /dev/null
+++ b/arch/x86/configs/gki_defconfig
@@ -0,0 +1,730 @@
+CONFIG_KERNEL_LZ4=y
+CONFIG_AUDIT=y
+CONFIG_TIME_KUNIT_TEST=m
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_JIT=y
+CONFIG_BPF_JIT_ALWAYS_ON=y
+# CONFIG_BPF_UNPRIV_DEFAULT_OFF is not set
+CONFIG_BPF_LSM=y
+CONFIG_PREEMPT=y
+# CONFIG_PREEMPT_DYNAMIC is not set
+CONFIG_SCHED_CLASS_EXT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_RCU_LAZY=y
+CONFIG_RCU_LAZY_DEFAULT_OFF=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_IKHEADERS=m
+CONFIG_UCLAMP_TASK=y
+CONFIG_UCLAMP_BUCKETS_COUNT=20
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_V1=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_UCLAMP_TASK_GROUP=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CPUSETS_V1=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+# CONFIG_TIME_NS is not set
+# CONFIG_PID_NS is not set
+CONFIG_RT_SOFTIRQ_AWARE_SCHED=y
+CONFIG_RELAY=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+CONFIG_BOOT_CONFIG=y
+CONFIG_EXPERT=y
+# CONFIG_UID16 is not set
+# CONFIG_FHANDLE is not set
+# CONFIG_PCSPKR_PLATFORM is not set
+# CONFIG_RSEQ is not set
+CONFIG_PROFILING=y
+CONFIG_RUST=y
+CONFIG_SMP=y
+CONFIG_X86_INTEL_LPSS=y
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
+CONFIG_PARAVIRT_TIME_ACCOUNTING=y
+CONFIG_NR_CPUS=32
+# CONFIG_X86_MCE is not set
+# CONFIG_X86_VSYSCALL_EMULATION is not set
+# CONFIG_MTRR_SANITIZER is not set
+CONFIG_X86_USER_SHADOW_STACK=y
+CONFIG_EFI=y
+CONFIG_EFI_STUB=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="console=ttynull stack_depot_disable=on cgroup_disable=pressure bootconfig"
+# CONFIG_CFI_AUTO_DEFAULT is not set
+CONFIG_HIBERNATION=y
+CONFIG_PM_USERSPACE_AUTOSLEEP=y
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_PM_DEBUG=y
+CONFIG_PM_ADVANCED_DEBUG=y
+# CONFIG_ACPI_AC is not set
+# CONFIG_ACPI_BATTERY is not set
+# CONFIG_ACPI_FAN is not set
+# CONFIG_ACPI_THERMAL is not set
+# CONFIG_X86_PM_TIMER is not set
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_TIMES=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_INTEL_IDLE=y
+CONFIG_IA32_EMULATION=y
+CONFIG_KVM=y
+CONFIG_KVM_INTEL=y
+CONFIG_KVM_AMD=y
+CONFIG_KPROBES=y
+CONFIG_JUMP_LABEL=y
+CONFIG_CFI=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
+CONFIG_GENDWARFKSYMS=y
+CONFIG_MODULE_SCMVERSION=y
+CONFIG_MODULE_SIG=y
+CONFIG_MODULE_SIG_PROTECT=y
+CONFIG_MODULE_SIG_SHA256=y
+CONFIG_MODPROBE_PATH=""
+CONFIG_BLK_DEV_ZONED=y
+CONFIG_BLK_CGROUP_IOPRIO=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
+CONFIG_GKI_HACKS_TO_FIX=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+CONFIG_READ_ONLY_THP_FOR_FS=y
+CONFIG_CMA=y
+CONFIG_CMA_DEBUGFS=y
+CONFIG_CMA_SYSFS=y
+CONFIG_CMA_AREAS=16
+# CONFIG_ZONE_DMA is not set
+CONFIG_ANON_VMA_NAME=y
+CONFIG_USERFAULTFD=y
+CONFIG_LRU_GEN=y
+CONFIG_LRU_GEN_ENABLED=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+# CONFIG_AF_UNIX_OOB is not set
+CONFIG_XFRM_USER=y
+CONFIG_XFRM_INTERFACE=y
+CONFIG_XFRM_MIGRATE=y
+CONFIG_XFRM_STATISTICS=y
+CONFIG_NET_KEY=y
+CONFIG_XDP_SOCKETS=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_MULTIPLE_TABLES=y
+CONFIG_NET_IPIP=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
+CONFIG_NET_IPVTI=y
+CONFIG_INET_ESP=y
+CONFIG_INET_UDP_DIAG=y
+CONFIG_INET_DIAG_DESTROY=y
+CONFIG_IPV6_ROUTER_PREF=y
+CONFIG_IPV6_ROUTE_INFO=y
+CONFIG_IPV6_OPTIMISTIC_DAD=y
+CONFIG_INET6_ESP=y
+CONFIG_INET6_IPCOMP=y
+CONFIG_IPV6_MIP6=y
+CONFIG_IPV6_VTI=y
+CONFIG_IPV6_GRE=y
+CONFIG_IPV6_MULTIPLE_TABLES=y
+CONFIG_IPV6_SUBTREES=y
+CONFIG_IPV6_MROUTE=y
+CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y
+CONFIG_NETFILTER=y
+CONFIG_NF_CONNTRACK=y
+CONFIG_NF_CONNTRACK_SECMARK=y
+CONFIG_NF_CONNTRACK_PROCFS=y
+CONFIG_NF_CONNTRACK_EVENTS=y
+CONFIG_NF_CONNTRACK_AMANDA=y
+CONFIG_NF_CONNTRACK_FTP=y
+CONFIG_NF_CONNTRACK_H323=y
+CONFIG_NF_CONNTRACK_IRC=y
+CONFIG_NF_CONNTRACK_NETBIOS_NS=y
+CONFIG_NF_CONNTRACK_PPTP=y
+CONFIG_NF_CONNTRACK_SANE=y
+CONFIG_NF_CONNTRACK_TFTP=y
+CONFIG_NF_CT_NETLINK=y
+CONFIG_NETFILTER_XTABLES_COMPAT=y
+CONFIG_NETFILTER_XTABLES_LEGACY=y
+CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
+CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
+CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_DSCP=y
+CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
+CONFIG_NETFILTER_XT_TARGET_LOG=y
+CONFIG_NETFILTER_XT_TARGET_MARK=y
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
+CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
+CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
+CONFIG_NETFILTER_XT_TARGET_TEE=y
+CONFIG_NETFILTER_XT_TARGET_TPROXY=y
+CONFIG_NETFILTER_XT_TARGET_TRACE=y
+CONFIG_NETFILTER_XT_TARGET_SECMARK=y
+CONFIG_NETFILTER_XT_TARGET_TCPMSS=y
+CONFIG_NETFILTER_XT_MATCH_BPF=y
+CONFIG_NETFILTER_XT_MATCH_COMMENT=y
+CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y
+CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
+CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
+CONFIG_NETFILTER_XT_MATCH_DSCP=y
+CONFIG_NETFILTER_XT_MATCH_ESP=y
+CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
+CONFIG_NETFILTER_XT_MATCH_HELPER=y
+CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+CONFIG_NETFILTER_XT_MATCH_L2TP=y
+CONFIG_NETFILTER_XT_MATCH_LENGTH=y
+CONFIG_NETFILTER_XT_MATCH_LIMIT=y
+CONFIG_NETFILTER_XT_MATCH_MAC=y
+CONFIG_NETFILTER_XT_MATCH_MARK=y
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
+CONFIG_NETFILTER_XT_MATCH_OWNER=y
+CONFIG_NETFILTER_XT_MATCH_POLICY=y
+CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2=y
+CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG=y
+CONFIG_NETFILTER_XT_MATCH_SOCKET=y
+CONFIG_NETFILTER_XT_MATCH_STATE=y
+CONFIG_NETFILTER_XT_MATCH_STATISTIC=y
+CONFIG_NETFILTER_XT_MATCH_STRING=y
+CONFIG_NETFILTER_XT_MATCH_TIME=y
+CONFIG_NETFILTER_XT_MATCH_U32=y
+CONFIG_IP_NF_IPTABLES_LEGACY=y
+CONFIG_IP_NF_IPTABLES=y
+CONFIG_IP_NF_MATCH_ECN=y
+CONFIG_IP_NF_MATCH_RPFILTER=y
+CONFIG_IP_NF_MATCH_TTL=y
+CONFIG_IP_NF_FILTER=y
+CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_IP_NF_NAT=y
+CONFIG_IP_NF_TARGET_MASQUERADE=y
+CONFIG_IP_NF_TARGET_NETMAP=y
+CONFIG_IP_NF_TARGET_REDIRECT=y
+CONFIG_IP_NF_MANGLE=y
+CONFIG_IP_NF_RAW=y
+CONFIG_IP_NF_SECURITY=y
+CONFIG_IP_NF_ARPFILTER=y
+CONFIG_IP_NF_ARP_MANGLE=y
+CONFIG_IP6_NF_IPTABLES_LEGACY=y
+CONFIG_IP6_NF_IPTABLES=y
+CONFIG_IP6_NF_MATCH_RPFILTER=y
+CONFIG_IP6_NF_FILTER=y
+CONFIG_IP6_NF_TARGET_REJECT=y
+CONFIG_IP6_NF_MANGLE=y
+CONFIG_IP6_NF_RAW=y
+CONFIG_TIPC=m
+CONFIG_L2TP=m
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=m
+CONFIG_6LOWPAN=m
+CONFIG_IEEE802154=m
+CONFIG_IEEE802154_6LOWPAN=m
+CONFIG_MAC802154=m
+CONFIG_NET_SCHED=y
+CONFIG_NET_SCH_HTB=y
+CONFIG_NET_SCH_PRIO=y
+CONFIG_NET_SCH_MULTIQ=y
+CONFIG_NET_SCH_SFQ=y
+CONFIG_NET_SCH_TBF=y
+CONFIG_NET_SCH_NETEM=y
+CONFIG_NET_SCH_CODEL=y
+CONFIG_NET_SCH_FQ_CODEL=y
+CONFIG_NET_SCH_FQ=y
+CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_CLS_BASIC=y
+CONFIG_NET_CLS_FW=y
+CONFIG_NET_CLS_U32=y
+CONFIG_CLS_U32_MARK=y
+CONFIG_NET_CLS_FLOW=y
+CONFIG_NET_CLS_BPF=y
+CONFIG_NET_CLS_MATCHALL=y
+CONFIG_NET_EMATCH=y
+CONFIG_NET_EMATCH_CMP=y
+CONFIG_NET_EMATCH_NBYTE=y
+CONFIG_NET_EMATCH_U32=y
+CONFIG_NET_EMATCH_META=y
+CONFIG_NET_EMATCH_TEXT=y
+CONFIG_NET_CLS_ACT=y
+CONFIG_NET_ACT_POLICE=y
+CONFIG_NET_ACT_GACT=y
+CONFIG_NET_ACT_MIRRED=y
+CONFIG_NET_ACT_SKBEDIT=y
+CONFIG_NET_ACT_BPF=y
+CONFIG_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS=m
+CONFIG_CGROUP_NET_PRIO=y
+CONFIG_CAN=m
+CONFIG_BT=m
+CONFIG_BT_RFCOMM=m
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_HIDP=m
+CONFIG_BT_HCIBTSDIO=m
+CONFIG_BT_HCIUART=m
+CONFIG_BT_HCIUART_LL=y
+CONFIG_BT_HCIUART_BCM=y
+CONFIG_BT_HCIUART_QCA=y
+CONFIG_RFKILL=m
+CONFIG_NET_9P=m
+CONFIG_NFC=m
+CONFIG_NETDEV_ADDR_LIST_TEST=m
+CONFIG_PCI=y
+CONFIG_PCIEAER=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_IOV=y
+CONFIG_HOTPLUG_PCI_ACPI=y
+CONFIG_PCIE_DW_PLAT_EP=y
+CONFIG_PCI_ENDPOINT=y
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_FW_CACHE is not set
+CONFIG_REGMAP_KUNIT=m
+CONFIG_GNSS=m
+CONFIG_OF=y
+CONFIG_ZRAM=m
+CONFIG_ZRAM_BACKEND_LZ4=y
+CONFIG_ZRAM_BACKEND_ZSTD=y
+CONFIG_ZRAM_BACKEND_LZO=y
+CONFIG_ZRAM_WRITEBACK=y
+CONFIG_ZRAM_MULTI_COMP=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=m
+CONFIG_BLK_DEV_UBLK=y
+CONFIG_BLK_DEV_NVME=y
+CONFIG_NVME_MULTIPATH=y
+CONFIG_SRAM=y
+CONFIG_UID_SYS_STATS=y
+CONFIG_VCPU_STALL_DETECTOR=m
+CONFIG_SCSI=y
+# CONFIG_SCSI_PROC_FS is not set
+CONFIG_BLK_DEV_SD=y
+CONFIG_SCSI_SCAN_ASYNC=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_SNAPSHOT=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+CONFIG_DM_BOW=y
+CONFIG_NETDEVICES=y
+CONFIG_DUMMY=y
+CONFIG_WIREGUARD=y
+CONFIG_IFB=y
+CONFIG_MACSEC=m
+CONFIG_TUN=y
+CONFIG_VETH=y
+CONFIG_LED_TRIGGER_PHY=y
+CONFIG_AX88796B_PHY=y
+CONFIG_CAN_VCAN=m
+CONFIG_CAN_SLCAN=m
+CONFIG_PPP=m
+CONFIG_PPP_BSDCOMP=m
+CONFIG_PPP_DEFLATE=m
+CONFIG_PPP_MPPE=m
+CONFIG_PPTP=m
+CONFIG_PPPOL2TP=m
+CONFIG_USB_NET_DRIVERS=m
+CONFIG_USB_RTL8150=m
+CONFIG_USB_RTL8152=m
+CONFIG_USB_USBNET=m
+CONFIG_USB_NET_CDC_EEM=m
+# CONFIG_USB_NET_NET1080 is not set
+# CONFIG_USB_NET_CDC_SUBSET is not set
+# CONFIG_USB_NET_ZAURUS is not set
+CONFIG_USB_NET_AQC111=m
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+# CONFIG_WLAN_VENDOR_ATH is not set
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+# CONFIG_WLAN_VENDOR_BROADCOM is not set
+# CONFIG_WLAN_VENDOR_INTEL is not set
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+# CONFIG_WLAN_VENDOR_MARVELL is not set
+# CONFIG_WLAN_VENDOR_MEDIATEK is not set
+# CONFIG_WLAN_VENDOR_RALINK is not set
+# CONFIG_WLAN_VENDOR_REALTEK is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+# CONFIG_WLAN_VENDOR_QUANTENNA is not set
+CONFIG_WWAN=m
+CONFIG_INPUT_EVDEV=y
+CONFIG_INPUT_KUNIT_TEST=m
+CONFIG_KEYBOARD_GPIO=y
+# CONFIG_MOUSE_PS2 is not set
+CONFIG_INPUT_JOYSTICK=y
+CONFIG_JOYSTICK_XPAD=y
+CONFIG_INPUT_TOUCHSCREEN=y
+CONFIG_INPUT_MISC=y
+CONFIG_INPUT_UINPUT=y
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=32
+CONFIG_SERIAL_8250_RUNTIME_UARTS=0
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_SAMSUNG=y
+CONFIG_SERIAL_SAMSUNG_CONSOLE=y
+CONFIG_NULL_TTY=y
+CONFIG_SERIAL_DEV_BUS=y
+CONFIG_VIRTIO_CONSOLE=m
+CONFIG_HW_RANDOM=y
+# CONFIG_HW_RANDOM_INTEL is not set
+# CONFIG_HW_RANDOM_AMD is not set
+# CONFIG_HW_RANDOM_VIA is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVPORT is not set
+CONFIG_HPET=y
+# CONFIG_I2C_HELPER_AUTO is not set
+CONFIG_I3C=y
+CONFIG_SPI=y
+CONFIG_SPI_MEM=y
+CONFIG_SPI_SLAVE=y
+CONFIG_PTP_1588_CLOCK=m
+# CONFIG_PTP_1588_CLOCK_VMCLOCK is not set
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
+CONFIG_POWER_SEQUENCING=m
+# CONFIG_HWMON is not set
+CONFIG_THERMAL_NETLINK=y
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=100
+CONFIG_THERMAL_GOV_BANG_BANG=y
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_CPU_IDLE_THERMAL=y
+CONFIG_DEVFREQ_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+# CONFIG_X86_PKG_TEMP_THERMAL is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_MFD_SYSCON=y
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
+CONFIG_RC_CORE=y
+CONFIG_BPF_LIRC_MODE2=y
+CONFIG_LIRC=y
+# CONFIG_RC_MAP is not set
+CONFIG_RC_DECODERS=y
+CONFIG_RC_DEVICES=y
+CONFIG_MEDIA_CEC_RC=y
+# CONFIG_MEDIA_ANALOG_TV_SUPPORT is not set
+# CONFIG_MEDIA_DIGITAL_TV_SUPPORT is not set
+# CONFIG_MEDIA_RADIO_SUPPORT is not set
+# CONFIG_MEDIA_SDR_SUPPORT is not set
+# CONFIG_MEDIA_TEST_SUPPORT is not set
+CONFIG_MEDIA_USB_SUPPORT=y
+CONFIG_USB_GSPCA=y
+CONFIG_USB_VIDEO_CLASS=y
+CONFIG_V4L_PLATFORM_DRIVERS=y
+CONFIG_V4L_MEM2MEM_DRIVERS=y
+CONFIG_DRM=y
+# CONFIG_DRM_DEBUG_MODESET_LOCK is not set
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_DRM_ACCEL=y
+CONFIG_SOUND=y
+CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_USB_AUDIO=y
+CONFIG_SND_SOC=y
+CONFIG_SND_SOC_TOPOLOGY_KUNIT_TEST=m
+CONFIG_SND_SOC_UTILS_KUNIT_TEST=m
+CONFIG_SND_SOC_INTEL_USER_FRIENDLY_LONG_NAMES=y
+CONFIG_HID_BATTERY_STRENGTH=y
+CONFIG_HIDRAW=y
+CONFIG_UHID=y
+CONFIG_HID_APPLE=y
+CONFIG_HID_PRODIKEYS=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_UCLOGIC=y
+CONFIG_HID_LOGITECH=y
+CONFIG_HID_LOGITECH_DJ=y
+CONFIG_HID_MAGICMOUSE=y
+CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
+CONFIG_HID_NINTENDO=y
+CONFIG_HID_PICOLCD=y
+CONFIG_HID_PLANTRONICS=y
+CONFIG_HID_PLAYSTATION=y
+CONFIG_PLAYSTATION_FF=y
+CONFIG_HID_ROCCAT=y
+CONFIG_HID_SONY=y
+CONFIG_SONY_FF=y
+CONFIG_HID_STEAM=y
+CONFIG_HID_WACOM=y
+CONFIG_HID_WIIMOTE=y
+CONFIG_HID_KUNIT_TEST=m
+CONFIG_USB_HIDDEV=y
+CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
+CONFIG_USB_MON=m
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_DBGCAP=y
+CONFIG_USB_XHCI_SIDEBAND=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_ROOT_HUB_TT=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_ACM=m
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_USB_DWC3=y
+CONFIG_USB_SERIAL=m
+CONFIG_USB_SERIAL_FTDI_SIO=m
+CONFIG_USB_GADGET=y
+CONFIG_USB_CONFIGFS=y
+CONFIG_ANDROID_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_SERIAL=y
+CONFIG_USB_CONFIGFS_ACM=y
+CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_ECM=y
+CONFIG_USB_CONFIGFS_EEM=y
+CONFIG_USB_CONFIGFS_MASS_STORAGE=y
+CONFIG_USB_CONFIGFS_F_FS=y
+CONFIG_USB_CONFIGFS_F_UAC2=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_HID=y
+CONFIG_USB_CONFIGFS_F_UVC=y
+CONFIG_TYPEC=y
+CONFIG_TYPEC_TCPM=y
+CONFIG_TYPEC_TCPCI=y
+CONFIG_TYPEC_UCSI=y
+CONFIG_TYPEC_DP_ALTMODE=y
+CONFIG_MMC=y
+# CONFIG_PWRSEQ_EMMC is not set
+# CONFIG_PWRSEQ_SIMPLE is not set
+CONFIG_MMC_CRYPTO=y
+CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
+CONFIG_SCSI_UFSHCD=y
+CONFIG_SCSI_UFS_BSG=y
+CONFIG_SCSI_UFS_CRYPTO=y
+CONFIG_SCSI_UFSHCD_PCI=y
+CONFIG_SCSI_UFSHCD_PLATFORM=y
+CONFIG_SCSI_UFS_DWC_TC_PLATFORM=y
+CONFIG_LEDS_CLASS_FLASH=y
+CONFIG_LEDS_CLASS_MULTICOLOR=y
+CONFIG_LEDS_TRIGGER_TIMER=y
+CONFIG_LEDS_TRIGGER_TRANSIENT=y
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_RTC_LIB_KUNIT_TEST=m
+CONFIG_UDMABUF=y
+CONFIG_DMABUF_HEAPS=y
+CONFIG_UIO=y
+CONFIG_VIRTIO_PCI=m
+CONFIG_VIRTIO_BALLOON=m
+CONFIG_VHOST_VSOCK=y
+CONFIG_STAGING=y
+CONFIG_ASHMEM=y
+CONFIG_ASHMEM_RUST=y
+CONFIG_INTEL_IOMMU=y
+CONFIG_IRQ_REMAP=y
+CONFIG_REMOTEPROC=y
+CONFIG_REMOTEPROC_CDEV=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_PM_DEVFREQ_EVENT=y
+CONFIG_IIO=y
+CONFIG_IIO_BUFFER=y
+CONFIG_IIO_TRIGGER=y
+CONFIG_IIO_FORMAT_KUNIT_TEST=m
+CONFIG_POWERCAP=y
+CONFIG_IDLE_INJECT=y
+CONFIG_USB4=m
+CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_ANDROID_BINDERFS=y
+CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST=m
+CONFIG_ANDROID_VENDOR_HOOKS=y
+CONFIG_LIBNVDIMM=y
+CONFIG_INTERCONNECT=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_KUNIT_TESTS=m
+CONFIG_F2FS_FS=y
+CONFIG_F2FS_FS_SECURITY=y
+CONFIG_F2FS_FS_COMPRESSION=y
+CONFIG_F2FS_UNFAIR_RWSEM=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
+CONFIG_FS_VERITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_QUOTA=y
+CONFIG_QFMT_V2=y
+CONFIG_FUSE_FS=y
+CONFIG_VIRTIO_FS=y
+CONFIG_OVERLAY_FS=y
+CONFIG_INCREMENTAL_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_FAT_KUNIT_TEST=m
+CONFIG_EXFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_EFIVAR_FS is not set
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_PMSG=y
+CONFIG_PSTORE_RAM=y
+CONFIG_EROFS_FS=y
+CONFIG_EROFS_FS_PCPU_KTHREAD=y
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_MAC_ROMAN=y
+CONFIG_NLS_MAC_CELTIC=y
+CONFIG_NLS_MAC_CENTEURO=y
+CONFIG_NLS_MAC_CROATIAN=y
+CONFIG_NLS_MAC_CYRILLIC=y
+CONFIG_NLS_MAC_GAELIC=y
+CONFIG_NLS_MAC_GREEK=y
+CONFIG_NLS_MAC_ICELAND=y
+CONFIG_NLS_MAC_INUIT=y
+CONFIG_NLS_MAC_ROMANIAN=y
+CONFIG_NLS_MAC_TURKISH=y
+CONFIG_NLS_UTF8=y
+CONFIG_UNICODE=y
+CONFIG_PROC_MEM_FORCE_PTRACE=y
+CONFIG_MSEAL_SYSTEM_MAPPINGS=y
+CONFIG_SECURITY=y
+CONFIG_STATIC_USERMODEHELPER=y
+CONFIG_STATIC_USERMODEHELPER_PATH=""
+CONFIG_SECURITY_SELINUX=y
+CONFIG_SECURITY_SAFESETID=y
+CONFIG_SECURITY_LANDLOCK=y
+CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
+CONFIG_ZERO_CALL_USED_REGS=y
+CONFIG_FORTIFY_SOURCE=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_CRYPTO_NULL=y
+CONFIG_CRYPTO_ECDH=y
+CONFIG_CRYPTO_DES=y
+CONFIG_CRYPTO_ADIANTUM=y
+CONFIG_CRYPTO_HCTR2=y
+CONFIG_CRYPTO_CHACHA20POLY1305=y
+CONFIG_CRYPTO_CCM=y
+CONFIG_CRYPTO_BLAKE2B=y
+CONFIG_CRYPTO_CMAC=y
+CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_XCBC=y
+CONFIG_CRYPTO_LZ4=y
+CONFIG_CRYPTO_ZSTD=y
+CONFIG_CRYPTO_AES_NI_INTEL=y
+CONFIG_CRYPTO_LIB_BLAKE2B_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_BLAKE2S_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_CURVE25519_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_MD5_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_POLY1305_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_POLYVAL_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_SHA1_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_SHA256_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_SHA512_KUNIT_TEST=m
+CONFIG_CRYPTO_LIB_SHA3_KUNIT_TEST=m
+CONFIG_SWIOTLB_DYNAMIC=y
+CONFIG_DMA_CMA=y
+CONFIG_PRINTK_TIME=y
+CONFIG_PRINTK_CALLER=y
+CONFIG_STACKTRACE_BUILD_ID=y
+CONFIG_DYNAMIC_DEBUG_CORE=y
+CONFIG_DEBUG_INFO_DWARF5=y
+CONFIG_DEBUG_INFO_COMPRESSED_ZSTD=y
+CONFIG_DEBUG_INFO_BTF=y
+CONFIG_MODULE_ALLOW_BTF_MISMATCH=y
+CONFIG_HEADERS_INSTALL=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_UBSAN=y
+CONFIG_UBSAN_TRAP=y
+# CONFIG_UBSAN_BOOL is not set
+# CONFIG_UBSAN_ENUM is not set
+CONFIG_PAGE_OWNER=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_MEM_ALLOC_PROFILING=y
+# CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT is not set
+CONFIG_KFENCE=y
+CONFIG_KFENCE_SAMPLE_INTERVAL=500
+CONFIG_KFENCE_NUM_OBJECTS=63
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_PROVE_LOCKING=y
+# CONFIG_PROVE_RAW_LOCK_NESTING is not set
+CONFIG_HIST_TRIGGERS=y
+CONFIG_KUNIT=m
+CONFIG_KUNIT_DEBUGFS=y
+CONFIG_KUNIT_TEST=m
+CONFIG_KUNIT_EXAMPLE_TEST=m
+# CONFIG_KUNIT_DEFAULT_ENABLED is not set
diff --git a/arch/x86/configs/microdroid_defconfig b/arch/x86/configs/microdroid_defconfig
new file mode 100644
index 0000000..fcd4ad9
--- /dev/null
+++ b/arch/x86/configs/microdroid_defconfig
@@ -0,0 +1,249 @@
+CONFIG_KERNEL_LZ4=y
+CONFIG_AUDIT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_PREEMPT=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_PSI=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_BOOST=y
+CONFIG_RCU_NOCB_CPU=y
+CONFIG_UCLAMP_TASK=y
+CONFIG_UCLAMP_BUCKETS_COUNT=20
+CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_BLK_CGROUP=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_UCLAMP_TASK_GROUP=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+# CONFIG_UTS_NS is not set
+# CONFIG_TIME_NS is not set
+# CONFIG_PID_NS is not set
+# CONFIG_NET_NS is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+CONFIG_BOOT_CONFIG=y
+CONFIG_PROFILING=y
+CONFIG_SMP=y
+CONFIG_X86_X2APIC=y
+CONFIG_HYPERVISOR_GUEST=y
+CONFIG_PARAVIRT=y
+CONFIG_PARAVIRT_TIME_ACCOUNTING=y
+CONFIG_NR_CPUS=32
+# CONFIG_X86_MCE is not set
+CONFIG_EFI=y
+CONFIG_KEXEC_FILE=y
+CONFIG_CMDLINE_BOOL=y
+CONFIG_CMDLINE="stack_depot_disable=on cgroup_disable=pressure ioremap_guard panic=-1 bootconfig acpi=noirq"
+CONFIG_PM_WAKELOCKS=y
+CONFIG_PM_WAKELOCKS_LIMIT=0
+# CONFIG_PM_WAKELOCKS_GC is not set
+CONFIG_CPU_FREQ_STAT=y
+CONFIG_CPU_FREQ_TIMES=y
+CONFIG_CPU_FREQ_GOV_POWERSAVE=y
+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_JUMP_LABEL=y
+CONFIG_BLK_DEV_ZONED=y
+CONFIG_BLK_CGROUP_IOCOST=y
+CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK=y
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
+CONFIG_IOSCHED_BFQ=y
+CONFIG_BFQ_GROUP_IOSCHED=y
+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
+CONFIG_BINFMT_MISC=y
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+CONFIG_SLAB_FREELIST_RANDOM=y
+CONFIG_SLAB_FREELIST_HARDENED=y
+CONFIG_SHUFFLE_PAGE_ALLOCATOR=y
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MEMORY_HOTPLUG=y
+CONFIG_MEMORY_HOTREMOVE=y
+CONFIG_DEFAULT_MMAP_MIN_ADDR=32768
+CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y
+CONFIG_ANON_VMA_NAME=y
+CONFIG_USERFAULTFD=y
+CONFIG_LRU_GEN=y
+CONFIG_DAMON=y
+CONFIG_DAMON_PADDR=y
+CONFIG_DAMON_RECLAIM=y
+CONFIG_NET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_VSOCKETS=y
+CONFIG_VIRTIO_VSOCKETS=y
+# CONFIG_WIRELESS is not set
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCIEAER=y
+CONFIG_PCI_MSI=y
+CONFIG_PCI_IOV=y
+CONFIG_PCIE_DW_PLAT_EP=y
+CONFIG_PCI_ENDPOINT=y
+CONFIG_FW_LOADER_USER_HELPER=y
+# CONFIG_FW_CACHE is not set
+CONFIG_OF=y
+CONFIG_ZRAM=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=16
+CONFIG_BLK_DEV_RAM=y
+CONFIG_BLK_DEV_RAM_SIZE=8192
+CONFIG_VIRTIO_BLK=y
+CONFIG_MD=y
+CONFIG_BLK_DEV_DM=y
+CONFIG_BLK_INLINE_ENCRYPTION=y
+CONFIG_DM_CRYPT=y
+CONFIG_DM_DEFAULT_KEY=y
+CONFIG_DM_UEVENT=y
+CONFIG_DM_VERITY=y
+CONFIG_DM_VERITY_FEC=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_SERIAL_8250_RUNTIME_UARTS=0
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_NULL_TTY=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
+# CONFIG_DEVMEM is not set
+# CONFIG_DEVPORT is not set
+CONFIG_HPET=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_GENERIC_PLATFORM=y
+# CONFIG_HWMON is not set
+CONFIG_THERMAL_NETLINK=y
+CONFIG_THERMAL_STATISTICS=y
+CONFIG_THERMAL_EMERGENCY_POWEROFF_DELAY_MS=100
+CONFIG_THERMAL_GOV_USER_SPACE=y
+CONFIG_CPU_THERMAL=y
+CONFIG_THERMAL_EMULATION=y
+# CONFIG_X86_PKG_TEMP_THERMAL is not set
+CONFIG_WATCHDOG=y
+CONFIG_WATCHDOG_CORE=y
+CONFIG_MFD_SYSCON=y
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_EDAC=y
+CONFIG_RTC_CLASS=y
+CONFIG_DMABUF_HEAPS=y
+CONFIG_DMABUF_SYSFS_STATS=y
+CONFIG_UIO=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_STAGING=y
+CONFIG_LIBNVDIMM=y
+CONFIG_EXT4_FS=y
+# CONFIG_EXT4_USE_FOR_EXT2 is not set
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_EXT4_FS_SECURITY=y
+# CONFIG_DNOTIFY is not set
+CONFIG_FUSE_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+# CONFIG_EFIVAR_FS is not set
+CONFIG_EROFS_FS=y
+CONFIG_FS_ENCRYPTION=y
+CONFIG_FS_ENCRYPTION_INLINE_CRYPT=y
+# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_NLS_CODEPAGE_437=y
+CONFIG_NLS_CODEPAGE_737=y
+CONFIG_NLS_CODEPAGE_775=y
+CONFIG_NLS_CODEPAGE_850=y
+CONFIG_NLS_CODEPAGE_852=y
+CONFIG_NLS_CODEPAGE_855=y
+CONFIG_NLS_CODEPAGE_857=y
+CONFIG_NLS_CODEPAGE_860=y
+CONFIG_NLS_CODEPAGE_861=y
+CONFIG_NLS_CODEPAGE_862=y
+CONFIG_NLS_CODEPAGE_863=y
+CONFIG_NLS_CODEPAGE_864=y
+CONFIG_NLS_CODEPAGE_865=y
+CONFIG_NLS_CODEPAGE_866=y
+CONFIG_NLS_CODEPAGE_869=y
+CONFIG_NLS_CODEPAGE_936=y
+CONFIG_NLS_CODEPAGE_950=y
+CONFIG_NLS_CODEPAGE_932=y
+CONFIG_NLS_CODEPAGE_949=y
+CONFIG_NLS_CODEPAGE_874=y
+CONFIG_NLS_ISO8859_8=y
+CONFIG_NLS_CODEPAGE_1250=y
+CONFIG_NLS_CODEPAGE_1251=y
+CONFIG_NLS_ASCII=y
+CONFIG_NLS_ISO8859_1=y
+CONFIG_NLS_ISO8859_2=y
+CONFIG_NLS_ISO8859_3=y
+CONFIG_NLS_ISO8859_4=y
+CONFIG_NLS_ISO8859_5=y
+CONFIG_NLS_ISO8859_6=y
+CONFIG_NLS_ISO8859_7=y
+CONFIG_NLS_ISO8859_9=y
+CONFIG_NLS_ISO8859_13=y
+CONFIG_NLS_ISO8859_14=y
+CONFIG_NLS_ISO8859_15=y
+CONFIG_NLS_KOI8_R=y
+CONFIG_NLS_KOI8_U=y
+CONFIG_NLS_MAC_ROMAN=y
+CONFIG_NLS_MAC_CELTIC=y
+CONFIG_NLS_MAC_CENTEURO=y
+CONFIG_NLS_MAC_CROATIAN=y
+CONFIG_NLS_MAC_CYRILLIC=y
+CONFIG_NLS_MAC_GAELIC=y
+CONFIG_NLS_MAC_GREEK=y
+CONFIG_NLS_MAC_ICELAND=y
+CONFIG_NLS_MAC_INUIT=y
+CONFIG_NLS_MAC_ROMANIAN=y
+CONFIG_NLS_MAC_TURKISH=y
+CONFIG_NLS_UTF8=y
+CONFIG_UNICODE=y
+CONFIG_SECURITY=y
+CONFIG_SECURITYFS=y
+CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
+CONFIG_STATIC_USERMODEHELPER=y
+CONFIG_STATIC_USERMODEHELPER_PATH=""
+CONFIG_SECURITY_SELINUX=y
+CONFIG_INIT_ON_ALLOC_DEFAULT_ON=y
+CONFIG_CRYPTO_HCTR2=y
+CONFIG_CRYPTO_XTS=y
+CONFIG_CRYPTO_LZO=y
+CONFIG_CRYPTO_AES_NI_INTEL=y
+CONFIG_CRYPTO_SHA256_SSSE3=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DYNAMIC_DEBUG_CORE=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DEBUG_INFO_DWARF5=y
+CONFIG_DEBUG_INFO_REDUCED=y
+CONFIG_HEADERS_INSTALL=y
+# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_UBSAN=y
+CONFIG_UBSAN_TRAP=y
+CONFIG_UBSAN_LOCAL_BOUNDS=y
+# CONFIG_UBSAN_SHIFT is not set
+# CONFIG_UBSAN_BOOL is not set
+# CONFIG_UBSAN_ENUM is not set
+CONFIG_PAGE_OWNER=y
+CONFIG_DEBUG_STACK_USAGE=y
+CONFIG_KFENCE=y
+CONFIG_KFENCE_SAMPLE_INTERVAL=500
+CONFIG_KFENCE_NUM_OBJECTS=63
+CONFIG_PANIC_ON_OOPS=y
+CONFIG_PANIC_TIMEOUT=-1
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_SCHEDSTATS=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
+CONFIG_HIST_TRIGGERS=y
+CONFIG_UNWINDER_FRAME_POINTER=y
diff --git a/arch/x86/crypto/TEST_MAPPING b/arch/x86/crypto/TEST_MAPPING
new file mode 100644
index 0000000..1fdefa6
--- /dev/null
+++ b/arch/x86/crypto/TEST_MAPPING
@@ -0,0 +1,305 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.DefaultDialerOperationsTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/arch/x86/entry/TEST_MAPPING b/arch/x86/entry/TEST_MAPPING
new file mode 100644
index 0000000..ced3111
--- /dev/null
+++ b/arch/x86/entry/TEST_MAPPING
@@ -0,0 +1,245 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.CallTest"
+ }
+ ]
+ }
+ ]
+}
diff --git a/arch/x86/include/asm/TEST_MAPPING b/arch/x86/include/asm/TEST_MAPPING
new file mode 100644
index 0000000..9e5b44e
--- /dev/null
+++ b/arch/x86/include/asm/TEST_MAPPING
@@ -0,0 +1,329 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.EmergencyCallTests"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/arch/x86/kernel/fpu/TEST_MAPPING b/arch/x86/kernel/fpu/TEST_MAPPING
new file mode 100644
index 0000000..e6b2d78
--- /dev/null
+++ b/arch/x86/kernel/fpu/TEST_MAPPING
@@ -0,0 +1,329 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.RttOperationsTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/arch/x86/net/TEST_MAPPING b/arch/x86/net/TEST_MAPPING
new file mode 100644
index 0000000..0db5170
--- /dev/null
+++ b/arch/x86/net/TEST_MAPPING
@@ -0,0 +1,272 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.DataObjectUnitTests"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ]
+}
diff --git a/bazel/abi.bzl b/bazel/abi.bzl
new file mode 100644
index 0000000..1d6f770
--- /dev/null
+++ b/bazel/abi.bzl
@@ -0,0 +1,156 @@
+# SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+# Copyright (C) 2023 The Android Open Source Project
+
+"""
+ABI aware build rules.
+"""
+
+load("@bazel_skylib//lib:paths.bzl", "paths")
+load("@bazel_skylib//rules:native_binary.bzl", "native_binary")
+load("@rules_cc//cc:cc_binary.bzl", "cc_binary")
+load(
+ "@rules_pkg//pkg:mappings.bzl",
+ "pkg_files",
+ "strip_prefix",
+)
+load(
+ "//build/kernel/kleaf:kernel.bzl",
+ "android_filegroup",
+)
+
+visibility("private")
+
+_ALL_ABIS = ["arm", "arm64", "x86", "x86_64"]
+
+def _copy_with_abi(
+ name,
+ visibility = None,
+ path_prefix = None,
+ abis = None,
+ out = None):
+ if not path_prefix:
+ path_prefix = ""
+ if not abis:
+ abis = _ALL_ABIS
+ if not out:
+ out = name
+
+ for abi in abis:
+ cpu = abi
+ if abi == "x86":
+ cpu = "i386"
+ android_filegroup(
+ name = "{name}_{abi}_bin".format(name = name, abi = abi),
+ srcs = [":{name}".format(name = name)],
+ cpu = cpu,
+ visibility = visibility,
+ )
+ pkg_files(
+ name = "{name}_{abi}".format(name = name, abi = abi),
+ srcs = [":{name}_{abi}_bin".format(name = name, abi = abi)],
+ renames = {
+ ":{name}_{abi}_bin".format(name = name, abi = abi): paths.join(path_prefix, abi, out),
+ },
+ strip_prefix = strip_prefix.from_pkg(),
+ visibility = visibility,
+ )
+
+def cc_binary_with_abi(
+ name,
+ path_prefix = None,
+ abis = None,
+ visibility = None,
+ out = None,
+ **kwargs):
+ """A cc_binary replacement that generates output in each subdirectory named by abi.
+
+ For example:
+ ```
+ cc_binary_with_abi(
+ name = "a_binary",
+ abis = ["x86_64", "arm64"],
+ path_prefix = "my/path",
+ )
+ ```
+ generates 2 rules:
+ * Rule a_binary_x86_64: Builds the cc_binary and put output in my/path/x86_64/a_binary.
+ * Rule a_binary_arm64: Builds the cc_binary and put output in my/path/arm64/a_binary.
+
+ Args:
+ name: the name of the build rule.
+ path_prefix: [Nonconfigurable](https://bazel.build/reference/be/common-definitions#configurable-attributes).
+ The path prefix to attach to output.
+ abis: [Nonconfigurable](https://bazel.build/reference/be/common-definitions#configurable-attributes).
+ The intended abis to generate. Default is arm64 & x86_64.
+ visibility: [Nonconfigurable](https://bazel.build/reference/be/common-definitions#configurable-attributes).
+ The visibility attribute on a target controls whether the target can be used in other packages.
+ out: [Nonconfigurable](https://bazel.build/reference/be/common-definitions#configurable-attributes).
+ The output filename. Default is `name`.
+ **kwargs: the rest args that cc_binary uses.
+ """
+ cc_binary(
+ name = name,
+ visibility = visibility,
+ **kwargs
+ )
+
+ _copy_with_abi(
+ name = name,
+ path_prefix = path_prefix,
+ abis = abis,
+ visibility = visibility,
+ out = out,
+ )
+
+def sh_binary_with_abi(
+ name,
+ path_prefix = None,
+ abis = None,
+ visibility = None,
+ out = None,
+ **kwargs):
+ """A sh_binary replacement that generates output in each subdirectory named by abi.
+
+ For example:
+ ```
+ sh_binary_with_abi(
+ name = "a_binary",
+ abis = ["x86_64", "arm64"],
+ path_prefix = "my/path",
+ )
+ ```
+ generates 2 rules:
+ * Rule a_binary_x86_64: Copies a_binary and put output in my/path/x86_64/a_binary.
+ * Rule a_binary_arm64: Copies a_binary and put output in my/path/arm64/a_binary.
+
+ Args:
+ name: the name of the build rule.
+ path_prefix: [Nonconfigurable](https://bazel.build/reference/be/common-definitions#configurable-attributes).
+ The path prefix to attach to output.
+ abis: [Nonconfigurable](https://bazel.build/reference/be/common-definitions#configurable-attributes).
+ The intended abis to generate. Default is arm64 & x86_64.
+ visibility: [Nonconfigurable](https://bazel.build/reference/be/common-definitions#configurable-attributes).
+ The visibility attribute on a target controls whether the target can be used in other packages.
+ out: [Nonconfigurable](https://bazel.build/reference/be/common-definitions#configurable-attributes).
+ The output filename. Default is `name`.
+ **kwargs: the rest args that native_binary uses.
+ """
+ if not out:
+ out = name
+
+ # Uses native_binary instead of sh_binary because sh_binary is not
+ # compatible with copy_file (sh_binary generates more than 1 outs).
+ native_binary(
+ name = name,
+ visibility = visibility,
+ out = out,
+ **kwargs
+ )
+
+ _copy_with_abi(
+ name = name,
+ path_prefix = path_prefix,
+ abis = abis,
+ visibility = visibility,
+ out = out,
+ )
diff --git a/bazel/constants.scl b/bazel/constants.scl
new file mode 100644
index 0000000..a1cdc9e
--- /dev/null
+++ b/bazel/constants.scl
@@ -0,0 +1,6 @@
+BRANCH="android-mainline"
+CLANG_VERSION="r584948b"
+RUSTC_VERSION="1.93.1"
+AARCH64_NDK_TRIPLE="aarch64-linux-android31"
+X86_64_NDK_TRIPLE="x86_64-linux-android31"
+ARM_NDK_TRIPLE="armv7a-linux-androideabi31"
diff --git a/bazel/modules.bzl b/bazel/modules.bzl
new file mode 100644
index 0000000..b88bfed
--- /dev/null
+++ b/bazel/modules.bzl
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+# Copyright (C) 2025 The Android Open Source Project
+
+"""Re-exports of symbols for external usage regarding to lists of modules.
+"""
+
+load(
+ ":bazel/modules_private.bzl",
+ _get_gki_modules_list = "get_gki_modules_list",
+ _get_kunit_modules_list = "get_kunit_modules_list",
+)
+
+visibility("public")
+
+get_gki_modules_list = _get_gki_modules_list
+get_kunit_modules_list = _get_kunit_modules_list
diff --git a/bazel/modules_private.bzl b/bazel/modules_private.bzl
new file mode 100644
index 0000000..4e69b63
--- /dev/null
+++ b/bazel/modules_private.bzl
@@ -0,0 +1,351 @@
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (C) 2022 The Android Open Source Project
+
+"""
+This module contains a full list of kernel modules
+ compiled by GKI.
+"""
+
+visibility("private")
+
+_COMMON_GKI_MODULES_LIST = [
+ # keep sorted
+ "drivers/block/virtio_blk.ko",
+ "drivers/block/zram/zram.ko",
+ "drivers/bluetooth/btbcm.ko",
+ "drivers/bluetooth/btqca.ko",
+ "drivers/bluetooth/btsdio.ko",
+ "drivers/bluetooth/hci_uart.ko",
+ "drivers/char/virtio_console.ko",
+ "drivers/gnss/gnss.ko",
+ "drivers/misc/vcpu_stall_detector.ko",
+ "drivers/net/can/dev/can-dev.ko",
+ "drivers/net/can/slcan/slcan.ko",
+ "drivers/net/can/vcan.ko",
+ "drivers/net/macsec.ko",
+ "drivers/net/mii.ko",
+ "drivers/net/ppp/bsd_comp.ko",
+ "drivers/net/ppp/ppp_deflate.ko",
+ "drivers/net/ppp/ppp_generic.ko",
+ "drivers/net/ppp/ppp_mppe.ko",
+ "drivers/net/ppp/pppox.ko",
+ "drivers/net/ppp/pptp.ko",
+ "drivers/net/slip/slhc.ko",
+ "drivers/net/usb/aqc111.ko",
+ "drivers/net/usb/asix.ko",
+ "drivers/net/usb/ax88179_178a.ko",
+ "drivers/net/usb/cdc_eem.ko",
+ "drivers/net/usb/cdc_ether.ko",
+ "drivers/net/usb/cdc_ncm.ko",
+ "drivers/net/usb/r8152.ko",
+ "drivers/net/usb/r8153_ecm.ko",
+ "drivers/net/usb/rtl8150.ko",
+ "drivers/net/usb/usbnet.ko",
+ "drivers/net/wwan/wwan.ko",
+ "drivers/pps/pps_core.ko",
+ "drivers/ptp/ptp.ko",
+ "drivers/thunderbolt/thunderbolt.ko",
+ "drivers/usb/class/cdc-acm.ko",
+ "drivers/usb/mon/usbmon.ko",
+ "drivers/usb/serial/ftdi_sio.ko",
+ "drivers/usb/serial/usbserial.ko",
+ "drivers/virtio/virtio_balloon.ko",
+ "drivers/virtio/virtio_pci.ko",
+ "drivers/virtio/virtio_pci_legacy_dev.ko",
+ "drivers/virtio/virtio_pci_modern_dev.ko",
+ "fs/netfs/netfs.ko",
+ "kernel/kheaders.ko",
+ "lib/crc/crc-ccitt.ko",
+ "lib/crypto/libarc4.ko",
+ "mm/zsmalloc.ko",
+ "net/6lowpan/6lowpan.ko",
+ "net/6lowpan/nhc_dest.ko",
+ "net/6lowpan/nhc_fragment.ko",
+ "net/6lowpan/nhc_hop.ko",
+ "net/6lowpan/nhc_ipv6.ko",
+ "net/6lowpan/nhc_mobility.ko",
+ "net/6lowpan/nhc_routing.ko",
+ "net/6lowpan/nhc_udp.ko",
+ "net/8021q/8021q.ko",
+ "net/9p/9pnet.ko",
+ "net/9p/9pnet_fd.ko",
+ "net/bluetooth/bluetooth.ko",
+ "net/bluetooth/hidp/hidp.ko",
+ "net/bluetooth/rfcomm/rfcomm.ko",
+ "net/can/can.ko",
+ "net/can/can-bcm.ko",
+ "net/can/can-gw.ko",
+ "net/can/can-raw.ko",
+ "net/ieee802154/6lowpan/ieee802154_6lowpan.ko",
+ "net/ieee802154/ieee802154.ko",
+ "net/ieee802154/ieee802154_socket.ko",
+ "net/l2tp/l2tp_core.ko",
+ "net/l2tp/l2tp_ppp.ko",
+ "net/mac802154/mac802154.ko",
+ "net/nfc/nfc.ko",
+ "net/rfkill/rfkill.ko",
+ "net/tipc/tipc.ko",
+ "net/tipc/tipc_diag.ko",
+ "net/vmw_vsock/vmw_vsock_virtio_transport.ko",
+]
+
+_RUST_GKI_MODULES_LIST = [
+]
+
+_ARM_GKI_MODULES_LIST = [
+ # keep sorted
+ "drivers/ptp/ptp_kvm.ko",
+]
+
+_ARM64_GKI_MODULES_LIST = [
+ # keep sorted
+ "drivers/char/hw_random/cctrng.ko",
+ "drivers/misc/open-dice.ko",
+ "drivers/ptp/ptp_kvm.ko",
+]
+
+_X86_GKI_MODULES_LIST = [
+ # keep sorted
+ "drivers/power/sequencing/pwrseq-core.ko",
+ "drivers/ptp/ptp_kvm.ko",
+]
+
+_X86_64_GKI_MODULES_LIST = [
+ # keep sorted
+ "drivers/power/sequencing/pwrseq-core.ko",
+ "drivers/ptp/ptp_kvm.ko",
+]
+
+def _apply(map_each, lst):
+ if not map_each:
+ return lst
+ ret = []
+ for elem in lst:
+ mapped = map_each(elem)
+ if mapped:
+ ret.append(mapped)
+ return ret
+
+def _get_gki_modules_list_minus_select(arch, map_each):
+ """ Provides the list of GKI modules, minus those in select() branches.
+
+ Args:
+ arch: One of [arm, arm64, i386, x86_64].
+ map_each: A function that takes the module name as parameter, and returns
+ the mapped value. If the module should be filtered out, the function
+ should return None.
+
+ Returns:
+ The list of GKI modules for the given |arch|.
+ """
+ if not arch in ("arm64", "x86_64", "arm", "i386"):
+ fail("{}: arch {} not supported. Use one of [arm, arm64, i386, x86_64]".format(
+ str(native.package_relative_label(":x")).removesuffix(":x"),
+ arch,
+ ))
+
+ if arch == "arm":
+ return _apply(map_each, _COMMON_GKI_MODULES_LIST + _ARM_GKI_MODULES_LIST)
+
+ if arch == "i386":
+ return _apply(map_each, _COMMON_GKI_MODULES_LIST + _X86_GKI_MODULES_LIST)
+
+ gki_modules_list = _apply(map_each, [] + _COMMON_GKI_MODULES_LIST)
+ if arch == "arm64":
+ gki_modules_list += _apply(map_each, _ARM64_GKI_MODULES_LIST)
+ elif arch == "x86_64":
+ gki_modules_list += _apply(map_each, _X86_64_GKI_MODULES_LIST)
+
+ return gki_modules_list
+
+# buildifier: disable=unnamed-macro
+def get_gki_modules_list(arch = None, map_each = None):
+ """Provides the list of GKI modules.
+
+ Args:
+ arch: One of [arm, arm64, i386, x86_64].
+ map_each: A function that takes the module name as parameter, and
+ returns the mapped value. If the module should be filtered out, the
+ function should return None.
+
+ Returns:
+ An opaque expression that represents the list of GKI modules for the
+ given |arch|. Do not treat the returned value as a list (e.g. use
+ list comprehension); instead, use the |map_each| argument.
+ """
+
+ ret = _get_gki_modules_list_minus_select(arch, map_each)
+
+ # CONFIG_RUST depends on !CONFIG_KASAN_SW_TAGS
+ ret += select({
+ "//build/kernel/kleaf:kasan_sw_tags_is_true": [],
+ "//conditions:default": _apply(map_each, _RUST_GKI_MODULES_LIST),
+ })
+
+ return ret
+
+# buildifier: disable=unnamed-macro
+def get_gki_modules_superset(arch = None, map_each = None):
+ """Provides the list of superset of GKI modules.
+
+ This includes all modules on each branch of the conditionals. For example,
+ Rust modules may always be included regardless of the value of
+ --kasan_sw_tags.
+
+ Args:
+ arch: One of [arm, arm64, i386, x86_64].
+ map_each: A function that takes the module name as parameter, and
+ returns the mapped value. If the module should be filtered out, the
+ function should return None.
+
+ Returns:
+ A list that contains the superset of GKI modules for the given |arch|.
+ """
+ return _get_gki_modules_list_minus_select(arch, map_each) + \
+ _apply(map_each, _RUST_GKI_MODULES_LIST)
+
+_KUNIT_FRAMEWORK_MODULES = [
+ "lib/kunit/kunit.ko",
+]
+
+# Modules defined by tools/testing/kunit/configs/android/kunit_defconfig
+_KUNIT_COMMON_MODULES_LIST = [
+ # keep sorted
+ "drivers/android/tests/binder_alloc_kunit.ko",
+ "drivers/base/regmap/regmap-kunit.ko",
+ "drivers/base/regmap/regmap-ram.ko",
+ "drivers/base/regmap/regmap-raw-ram.ko",
+ "drivers/hid/hid-uclogic-test.ko",
+ "drivers/iio/test/iio-test-format.ko",
+ "drivers/input/tests/input_test.ko",
+ "drivers/of/of_kunit_helpers.ko",
+ "drivers/rtc/test_rtc_lib.ko",
+ "fs/ext4/ext4-inode-test.ko",
+ "fs/fat/fat_test.ko",
+ "kernel/time/time_test.ko",
+ "lib/crypto/tests/blake2b_kunit.ko",
+ "lib/crypto/tests/blake2s_kunit.ko",
+ "lib/crypto/tests/curve25519_kunit.ko",
+ "lib/crypto/tests/md5_kunit.ko",
+ "lib/crypto/tests/poly1305_kunit.ko",
+ "lib/crypto/tests/polyval_kunit.ko",
+ "lib/crypto/tests/sha1_kunit.ko",
+ "lib/crypto/tests/sha224_kunit.ko",
+ "lib/crypto/tests/sha256_kunit.ko",
+ "lib/crypto/tests/sha384_kunit.ko",
+ "lib/crypto/tests/sha3_kunit.ko",
+ "lib/crypto/tests/sha512_kunit.ko",
+ "lib/kunit/kunit-example-test.ko",
+ "lib/kunit/kunit-test.ko",
+ "lib/kunit/platform-test.ko",
+ # "mm/kfence/kfence_test.ko",
+ "net/core/dev_addr_lists_test.ko",
+ "sound/soc/soc-topology-test.ko",
+ "sound/soc/soc-utils-test.ko",
+]
+
+# Modules defined by tools/testing/kunit/configs/android/kunit_clk_defconfig
+_KUNIT_CLK_MODULES_LIST = [
+ "drivers/clk/clk-gate_test.ko",
+ "drivers/clk/clk-test.ko",
+ "drivers/clk/clk_kunit_helpers.ko",
+]
+
+def _get_kunit_modules_list_minus_select(arch, map_each):
+ """ Provides the list of KUnit modules, minus those in select() branches.
+
+ Args:
+ arch: One of [arm, arm64, i386, x86_64].
+ map_each: A function that takes the module name as parameter, and returns
+ the mapped value. If the module should be filtered out, the function
+ should return None.
+ Returns:
+ The list of KUnit modules for the given |arch|.
+ """
+ if not arch in ("arm64", "x86_64", "arm", "i386"):
+ fail("{}: arch {} not supported. Use one of [arm, arm64, i386, x86_64]".format(
+ str(native.package_relative_label(":x")).removesuffix(":x"),
+ arch,
+ ))
+
+ kunit_modules_list = _KUNIT_FRAMEWORK_MODULES + _KUNIT_COMMON_MODULES_LIST
+ if arch == "arm":
+ kunit_modules_list += _KUNIT_CLK_MODULES_LIST
+ elif arch == "arm64":
+ kunit_modules_list += _KUNIT_CLK_MODULES_LIST
+ elif arch == "i386":
+ kunit_modules_list.append("drivers/clk/clk_kunit_helpers.ko")
+ elif arch == "x86_64":
+ kunit_modules_list.append("drivers/clk/clk_kunit_helpers.ko")
+
+ return _apply(map_each, kunit_modules_list)
+
+# buildifier: disable=unnamed-macro
+def get_kunit_modules_list(arch = None, map_each = None):
+ """ Provides the list of KUnit modules.
+
+ Args:
+ arch: One of [arm, arm64, i386, x86_64].
+ map_each: A function that takes the module name as parameter, and returns
+ the mapped value. If the module should be filtered out, the function
+ should return None.
+
+ Returns:
+ An opaque expression that represents the list of Kunit modules for the
+ given |arch|. Do not treat the returned value as a list (e.g. use
+ list comprehension); instead, use the |map_each| argument.
+ """
+
+ return select({
+ "//conditions:default": _get_kunit_modules_list_minus_select(arch, map_each),
+ })
+
+# buildifier: disable=unnamed-macro
+def get_kunit_modules_superset(arch = None, map_each = None):
+ """Provides the list of superset of KUnit modules.
+
+ This includes all modules on each branch of the conditionals.
+
+ Args:
+ arch: One of [arm, arm64, i386, x86_64].
+ map_each: A function that takes the module name as parameter, and
+ returns the mapped value. If the module should be filtered out, the
+ function should return None.
+
+ Returns:
+ A list of superset of KUnit modules for the given |arch|.
+ """
+ return _get_kunit_modules_list_minus_select(arch, map_each)
+
+_COMMON_UNPROTECTED_MODULES_LIST = []
+
+# buildifier: disable=unused-variable
+def get_gki_unprotected_modules_list(arch = None):
+ return select({
+ "//conditions:default": _COMMON_UNPROTECTED_MODULES_LIST,
+ })
+
+# buildifier: disable=unnamed-macro
+def get_gki_kunit_modules(arch, page_size = None):
+ """Returns the list of labels pointing to the GKI modules for KUnit.
+
+ Args:
+ arch: one of arm64, x86_64
+ page_size: if arch is arm64, the page_size ("4k" or "16k")
+
+ Returns:
+ The list of labels pointing to the GKI modules for KUnit.
+ """
+ if arch == "arm64":
+ if page_size == "16k":
+ return get_kunit_modules_list(arch, map_each = lambda e: ":kernel_aarch64_16k/" + e)
+ if page_size == "4k":
+ return get_kunit_modules_list(arch, map_each = lambda e: ":kernel_aarch64/" + e)
+ if arch == "x86_64":
+ return get_kunit_modules_list(arch, map_each = lambda e: ":kernel_x86_64/" + e)
+
+ fail("{}: arch {} (page_size {}) not supported. Use one of [arm64, x86_64]".format(
+ str(native.package_relative_label(":x")).removesuffix(":x"),
+ arch,
+ page_size,
+ ))
diff --git a/bazel/test/BUILD.bazel b/bazel/test/BUILD.bazel
new file mode 100644
index 0000000..cde7be9
--- /dev/null
+++ b/bazel/test/BUILD.bazel
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+# Copyright (C) 2026 The Android Open Source Project
+
+load(":kleaf_test.bzl", "ddk_headers_build_test")
+
+ddk_headers_build_test(
+ name = "ddk_headers_build_test",
+)
+
+test_suite(
+ name = "test",
+ tests = [
+ ":ddk_headers_build_test",
+ ],
+)
diff --git a/bazel/test/kleaf_test.bzl b/bazel/test/kleaf_test.bzl
new file mode 100644
index 0000000..73c42da
--- /dev/null
+++ b/bazel/test/kleaf_test.bzl
@@ -0,0 +1,58 @@
+# SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+# Copyright (C) 2026 The Android Open Source Project
+
+"""Tests on Kleaf using ACK / GKI as a baseline."""
+
+load("@bazel_skylib//rules:build_test.bzl", "build_test")
+
+_ALLOW_DDK_UNSAFE_HEADERS_SETTING = "//build/kernel/kleaf:allow_ddk_unsafe_headers"
+
+def _ddk_unsafe_headers_transition_impl(_settings, _attr):
+ return {_ALLOW_DDK_UNSAFE_HEADERS_SETTING: True}
+
+_ddk_unsafe_headers_transition = transition(
+ implementation = _ddk_unsafe_headers_transition_impl,
+ inputs = [],
+ outputs = [_ALLOW_DDK_UNSAFE_HEADERS_SETTING],
+)
+
+# This is a simple wrapper to trigger the transition to add unsafe headers.
+def _ddk_headers_wrapper_impl(ctx):
+ return ctx.attr.target[DefaultInfo]
+
+_ddk_headers_wrapper = rule(
+ implementation = _ddk_headers_wrapper_impl,
+ # test = True,
+ attrs = {
+ "target": attr.label(
+ cfg = "exec",
+ ),
+ "_allowlist_function_transition": attr.label(
+ default = "@bazel_tools//tools/allowlists/function_transition_allowlist",
+ ),
+ },
+ cfg = _ddk_unsafe_headers_transition,
+)
+
+def ddk_headers_build_test(
+ name,
+ **kwargs):
+ """Define a test to check DDK headers build correctly.
+
+ Args:
+ name: Name of the test
+ **kwargs: additional kwargs common to all rules.
+ """
+
+ _ddk_headers_wrapper(
+ name = name + "_ddk_headers_wrapped",
+ target = "//common:all_headers",
+ )
+
+ build_test(
+ name = name,
+ targets = [
+ name + "_ddk_headers_wrapped",
+ ],
+ **kwargs
+ )
diff --git a/block/OWNERS b/block/OWNERS
new file mode 100644
index 0000000..2641e06
--- /dev/null
+++ b/block/OWNERS
@@ -0,0 +1,2 @@
+bvanassche@google.com
+jaegeuk@google.com
diff --git a/block/TEST_MAPPING b/block/TEST_MAPPING
new file mode 100644
index 0000000..5c31f31
--- /dev/null
+++ b/block/TEST_MAPPING
@@ -0,0 +1,337 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsWifiBroadcastsHostTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.ExtendedInCallServiceTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/block/bio.c b/block/bio.c
index d80d5d2..f1f9538 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -271,6 +271,9 @@ void bio_init(struct bio *bio, struct block_device *bdev, struct bio_vec *table,
#endif
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
bio->bi_crypt_context = NULL;
+#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
+ bio->bi_skip_dm_default_key = false;
+#endif
#endif
#ifdef CONFIG_BLK_DEV_INTEGRITY
bio->bi_integrity = NULL;
diff --git a/block/blk-crypto-fallback.c b/block/blk-crypto-fallback.c
index 61f5954..5b25641 100644
--- a/block/blk-crypto-fallback.c
+++ b/block/blk-crypto-fallback.c
@@ -188,6 +188,7 @@ static struct bio *blk_crypto_alloc_enc_bio(struct bio *bio_src,
bio->bi_write_stream = bio_src->bi_write_stream;
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
bio_clone_blkg_association(bio, bio_src);
+ bio_clone_skip_dm_default_key(bio, bio_src);
/*
* Move page array up in the allocated memory for the bio vecs as far as
diff --git a/block/blk-crypto.c b/block/blk-crypto.c
index 856d3c5..40a99a85 100644
--- a/block/blk-crypto.c
+++ b/block/blk-crypto.c
@@ -116,6 +116,7 @@ void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
bio->bi_crypt_context = bc;
}
+EXPORT_SYMBOL_GPL(bio_crypt_set_ctx);
void __bio_crypt_free_ctx(struct bio *bio)
{
@@ -349,6 +350,7 @@ int blk_crypto_init_key(struct blk_crypto_key *blk_key,
return 0;
}
+EXPORT_SYMBOL_GPL(blk_crypto_init_key);
bool blk_crypto_config_supported_natively(struct block_device *bdev,
const struct blk_crypto_config *cfg)
@@ -399,6 +401,7 @@ int blk_crypto_start_using_key(struct block_device *bdev,
}
return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}
+EXPORT_SYMBOL_GPL(blk_crypto_start_using_key);
/**
* blk_crypto_evict_key() - Evict a blk_crypto_key from a block_device
diff --git a/build.config.constants b/build.config.constants
new file mode 120000
index 0000000..cc08ddcb
--- /dev/null
+++ b/build.config.constants
@@ -0,0 +1 @@
+bazel/constants.scl
\ No newline at end of file
diff --git a/crypto/OWNERS b/crypto/OWNERS
new file mode 100644
index 0000000..8d5734b
--- /dev/null
+++ b/crypto/OWNERS
@@ -0,0 +1,2 @@
+ardb@google.com
+ebiggers@google.com
diff --git a/drivers/OWNERS b/drivers/OWNERS
new file mode 100644
index 0000000..1ee7a9c
--- /dev/null
+++ b/drivers/OWNERS
@@ -0,0 +1,7 @@
+per-file base/**=gregkh@google.com,saravanak@google.com
+per-file block/**=akailash@google.com
+per-file md/**=akailash@google.com,paullawrence@google.com
+per-file md/dm-verity*=ebiggers@google.com,samitolvanen@google.com
+per-file net/**=file:/net/OWNERS
+per-file scsi/**=bvanassche@google.com,jaegeuk@google.com
+per-file {tty,usb}/**=gregkh@google.com
diff --git a/drivers/amba/Kconfig b/drivers/amba/Kconfig
index 14bb61f..de499bb 100644
--- a/drivers/amba/Kconfig
+++ b/drivers/amba/Kconfig
@@ -6,7 +6,7 @@
config TEGRA_AHB
bool "Enable AHB driver for NVIDIA Tegra SoCs" if COMPILE_TEST
- default y if ARCH_TEGRA
+ default y if ARCH_TEGRA && !GKI_HACKS_TO_FIX
help
Adds AHB configuration functionality for NVIDIA Tegra SoCs,
which controls AHB bus master arbitration and some performance
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index e2e402c..6d3efec 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -62,4 +62,61 @@
test-specific freelist, which allows this KUnit module to be loaded
for testing without interfering with a running system.
+config ANDROID_VENDOR_HOOKS
+ bool "Android Vendor Hooks"
+ depends on TRACEPOINTS
+ help
+ Enable vendor hooks implemented as tracepoints
+
+ Allow vendor modules to attach to tracepoint "hooks" defined via
+ DECLARE_HOOK or DECLARE_RESTRICTED_HOOK.
+
+config ANDROID_DEBUG_KINFO
+ bool "Android Debug Kernel Information Support"
+ depends on KALLSYMS
+ help
+ This supports kernel information backup for bootloader usage.
+ Specifics:
+ - The kallsyms symbols for unwind_backtrace
+ - Page directory pointer
+ - UTS_RELEASE
+ - BUILD_INFO(ro.build.fingerprint)
+
+config ANDROID_KABI_RESERVE
+ bool "Android KABI reserve padding"
+ depends on 64BIT
+ default y
+ help
+ This option enables the padding that the Android GKI kernel adds
+ to many different kernel structures to support an in-kernel stable ABI
+ over the lifespan of support for the kernel.
+
+ Only disable this option if you have a system that needs the Android
+ kernel drivers, but is NOT an Android GKI kernel image. If disabled
+ it has the possibility to make the kernel static and runtime image
+ slightly smaller but will NOT be supported by the Google Android
+ kernel team.
+
+ If even slightly unsure, say Y.
+
+config ANDROID_VENDOR_OEM_DATA
+ bool "Android vendor and OEM data padding"
+ depends on 64BIT
+ default y
+ help
+ This option enables the padding that the Android GKI kernel adds
+ to many different kernel structures to support an in-kernel stable ABI
+ over the lifespan of support for the kernel as well as OEM additional
+ fields that are needed by some of the Android kernel tracepoints. The
+ macros enabled by this option are used to enable padding in vendor modules
+ used for the above specified purposes.
+
+ Only disable this option if you have a system that needs the Android
+ kernel drivers, but is NOT an Android GKI kernel image and you do NOT
+ use the Android kernel tracepoints. If disabled it has the possibility
+ to make the kernel static and runtime image slightly smaller but will
+ NOT be supported by the Google Android kernel team.
+
+ If even slightly unsure, say Y.
+
endmenu
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index e0c650d3..265aa16 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -5,3 +5,5 @@
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o binder_netlink.o
obj-$(CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST) += tests/
obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += binder/
+obj-$(CONFIG_ANDROID_VENDOR_HOOKS) += vendor_hooks.o
+obj-$(CONFIG_ANDROID_DEBUG_KINFO) += debug_kinfo.o
diff --git a/drivers/android/TEST_MAPPING b/drivers/android/TEST_MAPPING
new file mode 100644
index 0000000..59a2ca7
--- /dev/null
+++ b/drivers/android/TEST_MAPPING
@@ -0,0 +1,329 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.NonUiInCallServiceTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 21f91d9f..acc9f0f 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -70,6 +70,7 @@
#include <kunit/visibility.h>
+#include <uapi/linux/sched/types.h>
#include <uapi/linux/android/binder.h>
#include <linux/cacheflush.h>
@@ -677,22 +678,189 @@ static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
-static void binder_set_nice(long nice)
+static bool is_rt_policy(int policy)
{
- long min_nice;
+ return policy == SCHED_FIFO || policy == SCHED_RR;
+}
- if (can_nice(current, nice)) {
- set_user_nice(current, nice);
+static bool is_fair_policy(int policy)
+{
+ return policy == SCHED_NORMAL || policy == SCHED_BATCH;
+}
+
+static bool binder_supported_policy(int policy)
+{
+ return is_fair_policy(policy) || is_rt_policy(policy);
+}
+
+static int to_userspace_prio(int policy, int kernel_priority)
+{
+ if (is_fair_policy(policy))
+ return PRIO_TO_NICE(kernel_priority);
+ else
+ return MAX_RT_PRIO - 1 - kernel_priority;
+}
+
+static int to_kernel_prio(int policy, int user_priority)
+{
+ if (is_fair_policy(policy))
+ return NICE_TO_PRIO(user_priority);
+ else
+ return MAX_RT_PRIO - 1 - user_priority;
+}
+
+static void binder_do_set_priority(struct binder_thread *thread,
+ const struct binder_priority *desired,
+ bool verify)
+{
+ struct task_struct *task = thread->task;
+ int priority; /* user-space prio value */
+ bool has_cap_nice;
+ unsigned int policy = desired->sched_policy;
+
+ if (task->policy == policy && task->prio == desired->prio) {
+ spin_lock(&thread->prio_lock);
+ if (thread->prio_state == BINDER_PRIO_PENDING)
+ thread->prio_state = BINDER_PRIO_SET;
+ spin_unlock(&thread->prio_lock);
return;
}
- min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
- binder_debug(BINDER_DEBUG_PRIORITY_CAP,
- "%d: nice value %ld not allowed use %ld instead\n",
- current->pid, nice, min_nice);
- set_user_nice(current, min_nice);
- if (min_nice <= MAX_NICE)
+
+ has_cap_nice = has_capability_noaudit(task, CAP_SYS_NICE);
+
+ priority = to_userspace_prio(policy, desired->prio);
+
+ if (verify && is_rt_policy(policy) && !has_cap_nice) {
+ long max_rtprio = task_rlimit(task, RLIMIT_RTPRIO);
+
+ if (max_rtprio == 0) {
+ policy = SCHED_NORMAL;
+ priority = MIN_NICE;
+ } else if (priority > max_rtprio) {
+ priority = max_rtprio;
+ }
+ }
+
+ if (verify && is_fair_policy(policy) && !has_cap_nice) {
+ long min_nice = rlimit_to_nice(task_rlimit(task, RLIMIT_NICE));
+
+ if (min_nice > MAX_NICE) {
+ binder_user_error("%d RLIMIT_NICE not set\n",
+ task->pid);
+ return;
+ } else if (priority < min_nice) {
+ priority = min_nice;
+ }
+ }
+
+ if (policy != desired->sched_policy ||
+ to_kernel_prio(policy, priority) != desired->prio)
+ binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+ "%d: priority %d not allowed, using %d instead\n",
+ task->pid, desired->prio,
+ to_kernel_prio(policy, priority));
+
+ trace_binder_set_priority(task->tgid, task->pid, task->prio,
+ to_kernel_prio(policy, priority),
+ desired->prio);
+
+ spin_lock(&thread->prio_lock);
+ if (!verify && thread->prio_state == BINDER_PRIO_ABORT) {
+ /*
+ * A new priority has been set by an incoming nested
+ * transaction. Abort this priority restore and allow
+ * the transaction to run at the new desired priority.
+ */
+ spin_unlock(&thread->prio_lock);
+ binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+ "%d: %s: aborting priority restore\n",
+ thread->pid, __func__);
return;
- binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
+ }
+
+ /* Set the actual priority */
+ if (task->policy != policy || is_rt_policy(policy)) {
+ struct sched_param params;
+
+ params.sched_priority = is_rt_policy(policy) ? priority : 0;
+
+ sched_setscheduler_nocheck(task,
+ policy | SCHED_RESET_ON_FORK,
+ ¶ms);
+ }
+ if (is_fair_policy(policy))
+ set_user_nice(task, priority);
+
+ thread->prio_state = BINDER_PRIO_SET;
+ spin_unlock(&thread->prio_lock);
+}
+
+static void binder_set_priority(struct binder_thread *thread,
+ const struct binder_priority *desired)
+{
+ binder_do_set_priority(thread, desired, /* verify = */ true);
+}
+
+static void binder_restore_priority(struct binder_thread *thread,
+ const struct binder_priority *desired)
+{
+ binder_do_set_priority(thread, desired, /* verify = */ false);
+}
+
+static void binder_transaction_priority(struct binder_thread *thread,
+ struct binder_transaction *t,
+ struct binder_node *node)
+{
+ struct task_struct *task = thread->task;
+ struct binder_priority desired = t->priority;
+ const struct binder_priority node_prio = {
+ .sched_policy = node->sched_policy,
+ .prio = node->min_priority,
+ };
+
+ if (t->set_priority_called)
+ return;
+
+ t->set_priority_called = true;
+
+ if (!node->inherit_rt && is_rt_policy(desired.sched_policy)) {
+ desired.prio = NICE_TO_PRIO(0);
+ desired.sched_policy = SCHED_NORMAL;
+ }
+
+ if (node_prio.prio < desired.prio ||
+ (node_prio.prio == desired.prio &&
+ node_prio.sched_policy == SCHED_FIFO)) {
+ /*
+ * In case the minimum priority on the node is
+ * higher (lower value), use that priority. If
+ * the priority is the same, but the node uses
+ * SCHED_FIFO, prefer SCHED_FIFO, since it can
+ * run unbounded, unlike SCHED_RR.
+ */
+ desired = node_prio;
+ }
+
+ spin_lock(&thread->prio_lock);
+ if (thread->prio_state == BINDER_PRIO_PENDING) {
+ /*
+ * Task is in the process of changing priorities
+ * saving its current values would be incorrect.
+ * Instead, save the pending priority and signal
+ * the task to abort the priority restore.
+ */
+ t->saved_priority = thread->prio_next;
+ thread->prio_state = BINDER_PRIO_ABORT;
+ binder_debug(BINDER_DEBUG_PRIORITY_CAP,
+ "%d: saved pending priority %d\n",
+ current->pid, thread->prio_next.prio);
+ } else {
+ t->saved_priority.sched_policy = task->policy;
+ t->saved_priority.prio = task->normal_prio;
+ }
+ spin_unlock(&thread->prio_lock);
+
+ binder_set_priority(thread, &desired);
}
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
@@ -745,6 +913,7 @@ static struct binder_node *binder_init_node_ilocked(
binder_uintptr_t ptr = fp ? fp->binder : 0;
binder_uintptr_t cookie = fp ? fp->cookie : 0;
__u32 flags = fp ? fp->flags : 0;
+ s8 priority;
assert_spin_locked(&proc->inner_lock);
@@ -777,8 +946,12 @@ static struct binder_node *binder_init_node_ilocked(
node->ptr = ptr;
node->cookie = cookie;
node->work.type = BINDER_WORK_NODE;
- node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
+ node->sched_policy = (flags & FLAT_BINDER_FLAG_SCHED_POLICY_MASK) >>
+ FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT;
+ node->min_priority = to_kernel_prio(node->sched_policy, priority);
node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
+ node->inherit_rt = !!(flags & FLAT_BINDER_FLAG_INHERIT_RT);
node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
spin_lock_init(&node->lock);
INIT_LIST_HEAD(&node->work.entry);
@@ -2852,6 +3025,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
BUG_ON(!node);
binder_node_lock(node);
+
if (oneway) {
BUG_ON(thread);
if (node->has_async_transaction)
@@ -2878,6 +3052,7 @@ static int binder_proc_transaction(struct binder_transaction *t,
thread = binder_select_thread_ilocked(proc);
if (thread) {
+ binder_transaction_priority(thread, t, node);
binder_enqueue_thread_work_ilocked(thread, &t->work);
} else if (!pending_async) {
binder_enqueue_work_ilocked(&t->work, &proc->todo);
@@ -3084,6 +3259,7 @@ static void binder_transaction(struct binder_proc *proc,
struct list_head pf_head;
const void __user *user_buffer = (const void __user *)
(uintptr_t)tr->data.ptr.buffer;
+ bool is_nested = false;
INIT_LIST_HEAD(&sgc_head);
INIT_LIST_HEAD(&pf_head);
@@ -3120,7 +3296,6 @@ static void binder_transaction(struct binder_proc *proc,
t->sender_euid = task_euid(proc->tsk);
t->code = tr->code;
t->flags = tr->flags;
- t->priority = task_nice(current);
t->work.type = BINDER_WORK_TRANSACTION;
t->is_async = !reply && (tr->flags & TF_ONE_WAY);
t->is_reply = reply;
@@ -3157,7 +3332,6 @@ static void binder_transaction(struct binder_proc *proc,
}
thread->transaction_stack = in_reply_to->to_parent;
binder_inner_proc_unlock(proc);
- binder_set_nice(in_reply_to->saved_priority);
target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
if (target_thread == NULL) {
/* annotation for sparse */
@@ -3305,6 +3479,7 @@ static void binder_transaction(struct binder_proc *proc,
atomic_inc(&from->tmp_ref);
target_thread = from;
spin_unlock(&tmp->lock);
+ is_nested = true;
break;
}
spin_unlock(&tmp->lock);
@@ -3346,6 +3521,17 @@ static void binder_transaction(struct binder_proc *proc,
(u64)tr->data_size, (u64)tr->offsets_size,
(u64)extra_buffers_size);
+ t->is_nested = is_nested;
+ if (!(t->flags & TF_ONE_WAY) &&
+ binder_supported_policy(current->policy)) {
+ /* Inherit supported policies for synchronous transactions */
+ t->priority.sched_policy = current->policy;
+ t->priority.prio = current->prio;
+ } else {
+ /* Otherwise, fall back to the default priority */
+ t->priority = target_proc->default_priority;
+ }
+
if (target_node && target_node->txn_security_ctx) {
u32 secid;
size_t added_size;
@@ -3759,7 +3945,14 @@ static void binder_transaction(struct binder_proc *proc,
binder_enqueue_thread_work_ilocked(target_thread, &t->work);
target_proc->outstanding_txns++;
binder_inner_proc_unlock(target_proc);
+ if (in_reply_to->is_nested) {
+ spin_lock(&thread->prio_lock);
+ thread->prio_state = BINDER_PRIO_PENDING;
+ thread->prio_next = in_reply_to->saved_priority;
+ spin_unlock(&thread->prio_lock);
+ }
wake_up_interruptible_sync(&target_thread->wait);
+ binder_restore_priority(thread, &in_reply_to->saved_priority);
binder_free_transaction(in_reply_to);
} else if (!(t->flags & TF_ONE_WAY)) {
BUG_ON(t->buffer->async_transaction != 0);
@@ -3903,6 +4096,7 @@ static void binder_transaction(struct binder_proc *proc,
BUG_ON(thread->return_error.cmd != BR_OK);
if (in_reply_to) {
+ binder_restore_priority(thread, &in_reply_to->saved_priority);
binder_set_txn_from_error(in_reply_to, t_debug_id,
return_error, return_error_param);
thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
@@ -4762,7 +4956,7 @@ static int binder_thread_read(struct binder_proc *proc,
wait_event_interruptible(binder_user_error_wait,
binder_stop_on_user_error < 2);
}
- binder_set_nice(proc->default_priority);
+ binder_restore_priority(thread, &proc->default_priority);
}
if (non_block) {
@@ -5035,13 +5229,7 @@ static int binder_thread_read(struct binder_proc *proc,
trd->target.ptr = target_node->ptr;
trd->cookie = target_node->cookie;
- t->saved_priority = task_nice(current);
- if (t->priority < target_node->min_priority &&
- !(t->flags & TF_ONE_WAY))
- binder_set_nice(t->priority);
- else if (!(t->flags & TF_ONE_WAY) ||
- t->saved_priority > target_node->min_priority)
- binder_set_nice(target_node->min_priority);
+ binder_transaction_priority(thread, t, target_node);
cmd = BR_TRANSACTION;
} else {
trd->target.ptr = 0;
@@ -5269,6 +5457,8 @@ static struct binder_thread *binder_get_thread_ilocked(
binder_stats_created(BINDER_STAT_THREAD);
thread->proc = proc;
thread->pid = current->pid;
+ get_task_struct(current);
+ thread->task = current;
atomic_set(&thread->tmp_ref, 0);
init_waitqueue_head(&thread->wait);
INIT_LIST_HEAD(&thread->todo);
@@ -5279,6 +5469,8 @@ static struct binder_thread *binder_get_thread_ilocked(
thread->return_error.cmd = BR_OK;
thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
thread->reply_error.cmd = BR_OK;
+ spin_lock_init(&thread->prio_lock);
+ thread->prio_state = BINDER_PRIO_SET;
thread->ee.command = BR_OK;
INIT_LIST_HEAD(&new_thread->waiting_thread_node);
return thread;
@@ -5333,6 +5525,7 @@ static void binder_free_thread(struct binder_thread *thread)
BUG_ON(!list_empty(&thread->todo));
binder_stats_deleted(BINDER_STAT_THREAD);
binder_proc_dec_tmpref(thread->proc);
+ put_task_struct(thread->task);
kfree(thread);
}
@@ -6072,7 +6265,14 @@ static int binder_open(struct inode *nodp, struct file *filp)
proc->cred = get_cred(filp->f_cred);
INIT_LIST_HEAD(&proc->todo);
init_waitqueue_head(&proc->freeze_wait);
- proc->default_priority = task_nice(current);
+ if (binder_supported_policy(current->policy)) {
+ proc->default_priority.sched_policy = current->policy;
+ proc->default_priority.prio = current->normal_prio;
+ } else {
+ proc->default_priority.sched_policy = SCHED_NORMAL;
+ proc->default_priority.prio = NICE_TO_PRIO(0);
+ }
+
/* binderfs stashes devices in i_private */
if (is_binderfs_device(nodp)) {
binder_dev = nodp->i_private;
@@ -6398,13 +6598,14 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
spin_lock(&t->lock);
to_proc = t->to_proc;
seq_printf(m,
- "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld a%d r%d elapsed %lldms",
+ "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %d:%d a%d r%d elapsed %lldms",
prefix, t->debug_id, t,
t->from_pid,
t->from_tid,
to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
- t->code, t->flags, t->priority, t->is_async, t->is_reply,
+ t->code, t->flags, t->priority.sched_policy,
+ t->priority.prio, t->is_async, t->is_reply,
ktime_ms_delta(current_time, t->start_time));
spin_unlock(&t->lock);
@@ -6541,7 +6742,8 @@ static void print_binder_node_nilocked(struct seq_file *m,
else
seq_printf(m, " node %d: u%016llx c%016llx", node->debug_id,
(u64)node->ptr, (u64)node->cookie);
- seq_printf(m, " hs %d hw %d ls %d lw %d is %d iw %d tr %d",
+ seq_printf(m, " pri %d:%d hs %d hw %d ls %d lw %d is %d iw %d tr %d",
+ node->sched_policy, node->min_priority,
node->has_strong_ref, node->has_weak_ref,
node->local_strong_refs, node->local_weak_refs,
node->internal_strong_refs, count, node->tmp_refs);
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 342574b..a7cb2a4 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -216,10 +216,13 @@ struct binder_error {
* and by @lock)
* @has_async_transaction: async transaction to node in progress
* (protected by @lock)
+ * @sched_policy: minimum scheduling policy for node
+ * (invariant after initialized)
* @accept_fds: file descriptor operations supported for node
* (invariant after initialized)
* @min_priority: minimum scheduling priority
* (invariant after initialized)
+ * @inherit_rt: inherit RT scheduling policy from caller
* @txn_security_ctx: require sender's security context
* (invariant after initialized)
* @async_todo: list of async work items
@@ -257,6 +260,8 @@ struct binder_node {
/*
* invariant after initialization
*/
+ u8 sched_policy:2;
+ u8 inherit_rt:1;
u8 accept_fds:1;
u8 txn_security_ctx:1;
u8 min_priority;
@@ -337,6 +342,28 @@ struct binder_ref {
};
/**
+ * struct binder_priority - scheduler policy and priority
+ * @sched_policy scheduler policy
+ * @prio [100..139] for SCHED_NORMAL, [0..99] for FIFO/RT
+ *
+ * The binder driver supports inheriting the following scheduler policies:
+ * SCHED_NORMAL
+ * SCHED_BATCH
+ * SCHED_FIFO
+ * SCHED_RR
+ */
+struct binder_priority {
+ unsigned int sched_policy;
+ int prio;
+};
+
+enum binder_prio_state {
+ BINDER_PRIO_SET, /* desired priority set */
+ BINDER_PRIO_PENDING, /* initiated a saved priority restore */
+ BINDER_PRIO_ABORT, /* abort the pending priority restore */
+};
+
+/**
* struct binder_proc - binder process bookkeeping
* @proc_node: element for binder_procs list
* @threads: rbtree of binder_threads in this proc
@@ -441,7 +468,7 @@ struct binder_proc {
int requested_threads;
int requested_threads_started;
int tmp_ref;
- long default_priority;
+ struct binder_priority default_priority;
struct dentry *debugfs_entry;
struct binder_alloc alloc;
struct binder_context *context;
@@ -486,6 +513,13 @@ struct binder_proc {
* @is_dead: thread is dead and awaiting free
* when outstanding transactions are cleaned up
* (protected by @proc->inner_lock)
+ * @task: struct task_struct for this thread
+ * @prio_lock: protects thread priority fields
+ * @prio_next: saved priority to be restored next
+ * (protected by @prio_lock)
+ * @prio_state: state of the priority restore process as
+ * defined by enum binder_prio_state
+ * (protected by @prio_lock)
*
* Bookkeeping structure for binder threads.
*/
@@ -506,6 +540,10 @@ struct binder_thread {
struct binder_stats stats;
atomic_t tmp_ref;
bool is_dead;
+ struct task_struct *task;
+ spinlock_t prio_lock;
+ struct binder_priority prio_next;
+ enum binder_prio_state prio_state;
};
/**
@@ -543,8 +581,10 @@ struct binder_transaction {
struct binder_buffer *buffer;
unsigned int code;
unsigned int flags;
- long priority;
- long saved_priority;
+ struct binder_priority priority;
+ struct binder_priority saved_priority;
+ bool set_priority_called;
+ bool is_nested;
kuid_t sender_euid;
ktime_t start_time;
struct list_head fd_fixups;
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index fa5eb61..df216b3e 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -55,6 +55,30 @@ DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_ioctl_done);
DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_write_done);
DEFINE_BINDER_FUNCTION_RETURN_EVENT(binder_read_done);
+TRACE_EVENT(binder_set_priority,
+ TP_PROTO(int proc, int thread, unsigned int old_prio,
+ unsigned int new_prio, unsigned int desired_prio),
+ TP_ARGS(proc, thread, old_prio, new_prio, desired_prio),
+
+ TP_STRUCT__entry(
+ __field(int, proc)
+ __field(int, thread)
+ __field(unsigned int, old_prio)
+ __field(unsigned int, new_prio)
+ __field(unsigned int, desired_prio)
+ ),
+ TP_fast_assign(
+ __entry->proc = proc;
+ __entry->thread = thread;
+ __entry->old_prio = old_prio;
+ __entry->new_prio = new_prio;
+ __entry->desired_prio = desired_prio;
+ ),
+ TP_printk("proc=%d thread=%d old=%d => new=%d desired=%d",
+ __entry->proc, __entry->thread, __entry->old_prio,
+ __entry->new_prio, __entry->desired_prio)
+);
+
TRACE_EVENT(binder_wait_for_work,
TP_PROTO(bool proc_work, bool transaction_stack, bool thread_todo),
TP_ARGS(proc_work, transaction_stack, thread_todo),
diff --git a/drivers/android/debug_kinfo.c b/drivers/android/debug_kinfo.c
new file mode 100644
index 0000000..ef950611
--- /dev/null
+++ b/drivers/android/debug_kinfo.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * debug_kinfo.c - backup kernel information for bootloader usage
+ *
+ * Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
+ * Copyright 2021 Google LLC
+ */
+
+#include <linux/platform_device.h>
+#include <linux/kallsyms.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/pgtable.h>
+#include <asm/module.h>
+#include "debug_kinfo.h"
+
+/*
+ * These will be re-linked against their real values
+ * during the second link stage.
+ */
+extern const int kallsyms_offsets[] __weak;
+extern const u8 kallsyms_names[] __weak;
+extern const u8 kallsyms_seqs_of_names[] __weak;
+
+/*
+ * Tell the compiler that the count isn't in the small data section if the arch
+ * has one (eg: FRV).
+ */
+extern const unsigned int kallsyms_num_syms __weak
+__section(".rodata");
+
+extern const u8 kallsyms_token_table[] __weak;
+extern const u16 kallsyms_token_index[] __weak;
+
+extern const unsigned int kallsyms_markers[] __weak;
+
+static void *all_info_addr;
+static u32 all_info_size;
+
+static void update_kernel_all_info(struct kernel_all_info *all_info)
+{
+ int index;
+ struct kernel_info *info;
+ u32 *checksum_info;
+
+ all_info->magic_number = DEBUG_KINFO_MAGIC;
+ all_info->combined_checksum = 0;
+
+ info = &(all_info->info);
+ checksum_info = (u32 *)info;
+ for (index = 0; index < sizeof(*info) / sizeof(u32); index++)
+ all_info->combined_checksum ^= checksum_info[index];
+}
+
+static int build_info_set(const char *str, const struct kernel_param *kp)
+{
+ struct kernel_all_info *all_info;
+ size_t build_info_size;
+ int ret = 0;
+
+ if (all_info_addr == 0 || all_info_size == 0) {
+ ret = -EPERM;
+ goto Exit;
+ }
+
+ all_info = (struct kernel_all_info *)all_info_addr;
+ build_info_size = sizeof(all_info->info.build_info);
+
+ memcpy(&all_info->info.build_info, str, min(build_info_size - 1, strlen(str)));
+ update_kernel_all_info(all_info);
+
+ if (strlen(str) > build_info_size) {
+ pr_warn("%s: Build info buffer (len: %zd) can't hold entire string '%s'\n",
+ __func__, build_info_size, str);
+ ret = -ENOMEM;
+ }
+
+Exit:
+ return ret;
+}
+
+static const struct kernel_param_ops build_info_op = {
+ .set = build_info_set,
+};
+
+module_param_cb(build_info, &build_info_op, NULL, 0200);
+MODULE_PARM_DESC(build_info, "Write build info to field 'build_info' of debug kinfo.");
+
+static int debug_kinfo_probe(struct platform_device *pdev)
+{
+ struct device_node *mem_region;
+ struct reserved_mem *rmem;
+ struct kernel_all_info *all_info;
+ struct kernel_info *info;
+
+ mem_region = of_parse_phandle(pdev->dev.of_node, "memory-region", 0);
+ if (!mem_region) {
+ dev_warn(&pdev->dev, "no such memory-region\n");
+ return -ENODEV;
+ }
+
+ rmem = of_reserved_mem_lookup(mem_region);
+ if (!rmem) {
+ dev_warn(&pdev->dev, "no such reserved mem of node name %s\n",
+ pdev->dev.of_node->name);
+ return -ENODEV;
+ }
+
+ /* Need to wait for reserved memory to be mapped */
+ if (!rmem->priv) {
+ return -EPROBE_DEFER;
+ }
+
+ if (!rmem->base || !rmem->size) {
+ dev_warn(&pdev->dev, "unexpected reserved memory\n");
+ return -EINVAL;
+ }
+
+ if (rmem->size < sizeof(struct kernel_all_info)) {
+ dev_warn(&pdev->dev, "unexpected reserved memory size\n");
+ return -EINVAL;
+ }
+
+ all_info_addr = rmem->priv;
+ all_info_size = rmem->size;
+
+ memset(all_info_addr, 0, sizeof(struct kernel_all_info));
+ all_info = (struct kernel_all_info *)all_info_addr;
+ info = &(all_info->info);
+ info->enabled_all = IS_ENABLED(CONFIG_KALLSYMS_ALL);
+ info->enabled_absolute_percpu = IS_ENABLED(CONFIG_KALLSYMS_ABSOLUTE_PERCPU);
+ info->enabled_cfi_clang = IS_ENABLED(CONFIG_CFI);
+ info->num_syms = kallsyms_num_syms;
+ info->name_len = KSYM_NAME_LEN;
+ info->bit_per_long = BITS_PER_LONG;
+ info->module_name_len = MODULE_NAME_LEN;
+ info->symbol_len = KSYM_SYMBOL_LEN;
+ info->_offsets_pa = (u64)__pa_symbol((volatile void *)kallsyms_offsets);
+ info->_text_pa = (u64)__pa_symbol(_text);
+ info->_stext_pa = (u64)__pa_symbol(_stext);
+ info->_etext_pa = (u64)__pa_symbol(_etext);
+ info->_sinittext_pa = (u64)__pa_symbol(_sinittext);
+ info->_einittext_pa = (u64)__pa_symbol(_einittext);
+ info->_end_pa = (u64)__pa_symbol(_end);
+ info->_names_pa = (u64)__pa_symbol((volatile void *)kallsyms_names);
+ info->_token_table_pa = (u64)__pa_symbol((volatile void *)kallsyms_token_table);
+ info->_token_index_pa = (u64)__pa_symbol((volatile void *)kallsyms_token_index);
+ info->_markers_pa = (u64)__pa_symbol((volatile void *)kallsyms_markers);
+ info->_seqs_of_names_pa = (u64)__pa_symbol((volatile void *)kallsyms_seqs_of_names);
+ info->thread_size = THREAD_SIZE;
+ info->swapper_pg_dir_pa = (u64)__pa_symbol(swapper_pg_dir);
+ strscpy(info->last_uts_release, init_utsname()->release, sizeof(info->last_uts_release));
+ info->enabled_modules_tree_lookup = IS_ENABLED(CONFIG_MODULES_TREE_LOOKUP);
+ info->mod_mem_offset = offsetof(struct module, mem);
+ info->mod_kallsyms_offset = offsetof(struct module, kallsyms);
+
+ update_kernel_all_info(all_info);
+
+ return 0;
+}
+
+static const struct of_device_id debug_kinfo_of_match[] = {
+ { .compatible = "google,debug-kinfo" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, debug_kinfo_of_match);
+
+static struct platform_driver debug_kinfo_driver = {
+ .probe = debug_kinfo_probe,
+ .driver = {
+ .name = "debug-kinfo",
+ .of_match_table = of_match_ptr(debug_kinfo_of_match),
+ },
+};
+module_platform_driver(debug_kinfo_driver);
+
+MODULE_AUTHOR("Jone Chou <jonechou@google.com>");
+MODULE_DESCRIPTION("Debug Kinfo Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/android/debug_kinfo.h b/drivers/android/debug_kinfo.h
new file mode 100644
index 0000000..07c1500
--- /dev/null
+++ b/drivers/android/debug_kinfo.h
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * debug_kinfo.h - backup kernel information for bootloader usage
+ *
+ * Copyright 2021 Google LLC
+ */
+
+#ifndef DEBUG_KINFO_H
+#define DEBUG_KINFO_H
+
+#include <linux/utsname.h>
+
+#define BUILD_INFO_LEN 256
+#define DEBUG_KINFO_MAGIC 0xCCEEDDFF
+
+/*
+ * Header structure must be byte-packed, since the table is provided to
+ * bootloader.
+ */
+struct kernel_info {
+ /* For kallsyms */
+ __u8 enabled_all;
+ __u8 enabled_base_relative;
+ __u8 enabled_absolute_percpu;
+ __u8 enabled_cfi_clang;
+ __u32 num_syms;
+ __u16 name_len;
+ __u16 bit_per_long;
+ __u16 module_name_len;
+ __u16 symbol_len;
+ /* Start from 6.19 */
+ __u64 _reserved;
+ __u64 _text_pa;
+ __u64 _stext_pa;
+ __u64 _etext_pa;
+ __u64 _sinittext_pa;
+ __u64 _einittext_pa;
+ __u64 _end_pa;
+ __u64 _offsets_pa;
+ __u64 _names_pa;
+ __u64 _token_table_pa;
+ __u64 _token_index_pa;
+ __u64 _markers_pa;
+ __u64 _seqs_of_names_pa;
+
+ /* For frame pointer */
+ __u32 thread_size;
+
+ /* For virt_to_phys */
+ __u64 swapper_pg_dir_pa;
+
+ /* For linux banner */
+ __u8 last_uts_release[__NEW_UTS_LEN];
+
+ /* Info of running build */
+ __u8 build_info[BUILD_INFO_LEN];
+
+ /* For module kallsyms */
+ __u32 enabled_modules_tree_lookup;
+ __u32 mod_mem_offset;
+ __u32 mod_kallsyms_offset;
+} __packed;
+
+struct kernel_all_info {
+ __u32 magic_number;
+ __u32 combined_checksum;
+ struct kernel_info info;
+} __packed;
+
+#endif // DEBUG_KINFO_H
diff --git a/drivers/android/vendor_hooks.c b/drivers/android/vendor_hooks.c
new file mode 100644
index 0000000..207e336
--- /dev/null
+++ b/drivers/android/vendor_hooks.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* vendor_hook.c
+ *
+ * Android Vendor Hook Support
+ *
+ * Copyright 2020 Google LLC
+ */
+
+#define CREATE_TRACE_POINTS
+#include <trace/hooks/vendor_hooks.h>
+#include <linux/tracepoint.h>
+
+/* keep-sorted start */
+#include <trace/hooks/avc.h>
+#include <trace/hooks/cgroup.h>
+#include <trace/hooks/cpufreq.h>
+#include <trace/hooks/cpuidle.h>
+#include <trace/hooks/cpuidle_psci.h>
+#include <trace/hooks/debug.h>
+#include <trace/hooks/epoch.h>
+#include <trace/hooks/fpsimd.h>
+#include <trace/hooks/gic.h>
+#include <trace/hooks/gic_v3.h>
+#include <trace/hooks/iommu.h>
+#include <trace/hooks/mpam.h>
+#include <trace/hooks/net.h>
+#include <trace/hooks/pm_domain.h>
+#include <trace/hooks/printk.h>
+#include <trace/hooks/reboot.h>
+#include <trace/hooks/remoteproc.h>
+#include <trace/hooks/selinux.h>
+#include <trace/hooks/signal.h>
+#include <trace/hooks/sys.h>
+#include <trace/hooks/syscall_check.h>
+#include <trace/hooks/sysrqcrash.h>
+#include <trace/hooks/timer.h>
+#include <trace/hooks/ufshcd.h>
+#include <trace/hooks/vmscan.h>
+/* keep-sorted end */
+
+/*
+ * Export tracepoints that act as a bare tracehook (ie: have no trace event
+ * associated with them) to allow external modules to probe them.
+ */
+
+/* keep-sorted start */
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_attach);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_cgroup_online);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_hw_protection_shutdown);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_iommu_setup_dma_ops);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_insert);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_lookup);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_node_delete);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_avc_node_replace);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_selinux_is_initialized);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_balance_anon_file_reclaim);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_show_max_freq);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ufs_reprogram_all_keys);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_allow_domain_state);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cgroup_attach);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_bpf_syscall);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_file_open);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_check_mmap_file);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_idle_enter);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpu_idle_exit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpufreq_online);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpuidle_psci_enter);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cpuidle_psci_exit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_send_sig_info);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_gic_set_affinity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_iovad_alloc_iova);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_iommu_iovad_free_iova);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ipi_stop);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_is_fpsimd_save);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_mpam_set);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_printk_caller);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_printk_caller_id);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_printk_ext_header);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_printk_hotplug);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ptype_head);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rproc_recovery);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rproc_recovery_set);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_resume_epoch_val);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_show_suspend_epoch_val);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_syscall_prctl_finished);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_sysrq_crash);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_timer_calc_index);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_check_int_errors);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_compl_command);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_fill_prdt);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_prepare_command);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_command);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_tm_command);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_send_uic_command);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_ufs_update_sysfs);
+/* keep-sorted end */
diff --git a/drivers/base/TEST_MAPPING b/drivers/base/TEST_MAPPING
new file mode 100644
index 0000000..d5dea7b
--- /dev/null
+++ b/drivers/base/TEST_MAPPING
@@ -0,0 +1,329 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.InCallServiceFlagChecker"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c
index 8c5e47c..7f4270b 100644
--- a/drivers/base/arch_topology.c
+++ b/drivers/base/arch_topology.c
@@ -26,6 +26,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/hw_pressure.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/sched.h>
+
static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
static struct cpumask scale_freq_counters_mask;
static bool scale_freq_invariant;
@@ -162,6 +165,7 @@ void topology_set_freq_scale(const struct cpumask *cpus, unsigned long cur_freq,
}
DEFINE_PER_CPU(unsigned long, hw_pressure);
+EXPORT_PER_CPU_SYMBOL_GPL(hw_pressure);
/**
* topology_update_hw_pressure() - Update HW pressure for CPUs
@@ -201,8 +205,10 @@ void topology_update_hw_pressure(const struct cpumask *cpus,
trace_hw_pressure_update(cpu, pressure);
- for_each_cpu(cpu, cpus)
+ for_each_cpu(cpu, cpus) {
WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure);
+ trace_android_rvh_update_thermal_stats(cpu);
+ }
}
EXPORT_SYMBOL_GPL(topology_update_hw_pressure);
@@ -210,6 +216,8 @@ static void update_topology_flags_workfn(struct work_struct *work);
static DECLARE_WORK(update_topology_flags_work, update_topology_flags_workfn);
static int update_topology;
+bool topology_update_done;
+EXPORT_SYMBOL_GPL(topology_update_done);
int topology_update_cpu_topology(void)
{
@@ -224,6 +232,8 @@ static void update_topology_flags_workfn(struct work_struct *work)
{
update_topology = 1;
rebuild_sched_domains();
+ topology_update_done = true;
+ trace_android_vh_update_topology_flags_workfn(NULL);
pr_debug("sched_domain hierarchy rebuilt, flags updated\n");
update_topology = 0;
}
diff --git a/drivers/base/base.h b/drivers/base/base.h
index 79d031d..1af95ac 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -179,19 +179,10 @@ void device_release_driver_internal(struct device *dev, const struct device_driv
void driver_detach(const struct device_driver *drv);
void driver_deferred_probe_del(struct device *dev);
void device_set_deferred_probe_reason(const struct device *dev, struct va_format *vaf);
-static inline int driver_match_device_locked(const struct device_driver *drv,
- struct device *dev)
-{
- device_lock_assert(dev);
-
- return drv->bus->match ? drv->bus->match(dev, drv) : 1;
-}
-
static inline int driver_match_device(const struct device_driver *drv,
struct device *dev)
{
- guard(device)(dev);
- return driver_match_device_locked(drv, dev);
+ return drv->bus->match ? drv->bus->match(dev, drv) : 1;
}
static inline void dev_sync_state(struct device *dev)
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 0354f20..bea8da5 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -928,7 +928,7 @@ static int __device_attach_driver(struct device_driver *drv, void *_data)
bool async_allowed;
int ret;
- ret = driver_match_device_locked(drv, dev);
+ ret = driver_match_device(drv, dev);
if (ret == 0) {
/* no match */
return 0;
diff --git a/drivers/base/power/TEST_MAPPING b/drivers/base/power/TEST_MAPPING
new file mode 100644
index 0000000..0917e71
--- /dev/null
+++ b/drivers/base/power/TEST_MAPPING
@@ -0,0 +1,223 @@
+{
+ "imports": [
+ {
+ "path": "frameworks/base/services/core/java/com/android/server"
+ },
+ {
+ "path": "frameworks/base/core/java/com/android/internal/app"
+ },
+ {
+ "path": "frameworks/base/apex/jobscheduler/service/java/com/android/server/job"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsSilentUpdateHostTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsJobSchedulerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsSuspendAppsTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ }
+ ]
+}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 189de52..f04f5a8 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -35,6 +35,7 @@
#include <linux/devfreq.h>
#include <linux/timer.h>
#include <linux/nmi.h>
+#include <linux/wakeup_reason.h>
#include "../base.h"
#include "power.h"
@@ -57,6 +58,7 @@ static LIST_HEAD(dpm_suspended_list);
static LIST_HEAD(dpm_late_early_list);
static LIST_HEAD(dpm_noirq_list);
+struct suspend_stats suspend_stats;
static DEFINE_MUTEX(dpm_list_mtx);
static pm_message_t pm_transition;
@@ -1474,6 +1476,8 @@ static void device_suspend_noirq(struct device *dev, pm_message_t state, bool as
error = dpm_run_callback(callback, dev, state, info);
if (error) {
WRITE_ONCE(async_error, error);
+ log_suspend_abort_reason("Device %s failed to %s noirq: error %d",
+ dev_name(dev), pm_verb(state.event), error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async noirq" : " noirq", error);
goto Complete;
@@ -1684,6 +1688,8 @@ static void device_suspend_late(struct device *dev, pm_message_t state, bool asy
error = dpm_run_callback(callback, dev, state, info);
if (error) {
WRITE_ONCE(async_error, error);
+ log_suspend_abort_reason("Device %s failed to %s late: error %d",
+ dev_name(dev), pm_verb(state.event), error);
dpm_save_failed_dev(dev_name(dev));
pm_dev_err(dev, state, async ? " async late" : " late", error);
pm_runtime_enable(dev);
@@ -1972,6 +1978,9 @@ static void device_suspend(struct device *dev, pm_message_t state, bool async)
dpm_propagate_wakeup_to_parent(dev);
dpm_clear_superiors_direct_complete(dev);
+ } else {
+ log_suspend_abort_reason("Device %s failed to %s: error %d",
+ dev_name(dev), pm_verb(state.event), error);
}
device_unlock(dev);
@@ -2249,6 +2258,9 @@ int dpm_prepare(pm_message_t state)
} else {
dev_info(dev, "not prepared for power transition: code %d\n",
error);
+ log_suspend_abort_reason("Device %s not prepared for power transition: code %d",
+ dev_name(dev), error);
+ dpm_save_failed_dev(dev_name(dev));
}
mutex_unlock(&dpm_list_mtx);
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index b8e48a0..0703b346 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -15,6 +15,9 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pm_wakeirq.h>
+#include <linux/irq.h>
+#include <linux/irqdesc.h>
+#include <linux/wakeup_reason.h>
#include <trace/events/power.h>
#include "power.h"
@@ -834,6 +837,37 @@ void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard)
}
EXPORT_SYMBOL_GPL(pm_wakeup_dev_event);
+void pm_get_active_wakeup_sources(char *pending_wakeup_source, size_t max)
+{
+ struct wakeup_source *ws, *last_active_ws = NULL;
+ int len = 0;
+ bool active = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ws, &wakeup_sources, entry) {
+ if (ws->active && len < max) {
+ if (!active)
+ len += scnprintf(pending_wakeup_source, max,
+ "Pending Wakeup Sources: ");
+ len += scnprintf(pending_wakeup_source + len, max - len,
+ "%s ", ws->name);
+ active = true;
+ } else if (!active &&
+ (!last_active_ws ||
+ ktime_to_ns(ws->last_time) >
+ ktime_to_ns(last_active_ws->last_time))) {
+ last_active_ws = ws;
+ }
+ }
+ if (!active && last_active_ws) {
+ scnprintf(pending_wakeup_source, max,
+ "Last active Wakeup Source: %s",
+ last_active_ws->name);
+ }
+ rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(pm_get_active_wakeup_sources);
+
void pm_print_active_wakeup_sources(void)
{
struct wakeup_source *ws;
@@ -872,6 +906,7 @@ bool pm_wakeup_pending(void)
{
unsigned long flags;
bool ret = false;
+ char suspend_abort[MAX_SUSPEND_ABORT_LEN];
raw_spin_lock_irqsave(&events_lock, flags);
if (events_check_enabled) {
@@ -886,6 +921,10 @@ bool pm_wakeup_pending(void)
if (ret) {
pm_pr_dbg("Wakeup pending, aborting suspend\n");
pm_print_active_wakeup_sources();
+ pm_get_active_wakeup_sources(suspend_abort,
+ MAX_SUSPEND_ABORT_LEN);
+ log_suspend_abort_reason(suspend_abort);
+ pr_info("PM: %s\n", suspend_abort);
}
return ret || atomic_read(&pm_abort_suspend) > 0;
@@ -938,8 +977,21 @@ void pm_system_irq_wakeup(unsigned int irq_number)
raw_spin_unlock_irqrestore(&wakeup_irq_lock, flags);
- if (irq_number)
+ if (irq_number) {
+ struct irq_desc *desc;
+ const char *name = "null";
+
+ desc = irq_to_desc(irq_number);
+ if (desc == NULL)
+ name = "stray irq";
+ else if (desc->action && desc->action->name)
+ name = desc->action->name;
+
+ log_irq_wakeup_reason(irq_number);
+ pr_warn("%s: %d triggered %s\n", __func__, irq_number, name);
+
pm_system_wakeup();
+ }
}
unsigned int pm_wakeup_irq(void)
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index 483adb7..151eedb 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/suspend.h>
#include <trace/events/power.h>
+#include <linux/wakeup_reason.h>
static LIST_HEAD(syscore_list);
static DEFINE_MUTEX(syscore_lock);
@@ -74,6 +75,8 @@ int syscore_suspend(void)
return 0;
err_out:
+ log_suspend_abort_reason("System core suspend callback %pS failed",
+ syscore->ops->suspend);
pr_err("PM: System core suspend callback %pS failed.\n",
syscore->ops->suspend);
diff --git a/drivers/block/TEST_MAPPING b/drivers/block/TEST_MAPPING
new file mode 100644
index 0000000..b6be939
--- /dev/null
+++ b/drivers/block/TEST_MAPPING
@@ -0,0 +1,263 @@
+{
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsJobSchedulerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 0000913..84201b7 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -52,8 +52,7 @@ struct loop_device {
int lo_flags;
char lo_file_name[LO_NAME_SIZE];
- struct file *lo_backing_file;
- unsigned int lo_min_dio_size;
+ struct file * lo_backing_file;
struct block_device *lo_device;
gfp_t old_gfp_mask;
@@ -178,14 +177,29 @@ static loff_t lo_calculate_size(struct loop_device *lo, struct file *file)
* of backing device, and the logical block size of loop is bigger than that of
* the backing device.
*/
+static bool lo_bdev_can_use_dio(struct loop_device *lo,
+ struct block_device *backing_bdev)
+{
+ unsigned int sb_bsize = bdev_logical_block_size(backing_bdev);
+
+ if (queue_logical_block_size(lo->lo_queue) < sb_bsize)
+ return false;
+ if (lo->lo_offset & (sb_bsize - 1))
+ return false;
+ return true;
+}
+
static bool lo_can_use_dio(struct loop_device *lo)
{
+ struct inode *inode = lo->lo_backing_file->f_mapping->host;
+
if (!(lo->lo_backing_file->f_mode & FMODE_CAN_ODIRECT))
return false;
- if (queue_logical_block_size(lo->lo_queue) < lo->lo_min_dio_size)
- return false;
- if (lo->lo_offset & (lo->lo_min_dio_size - 1))
- return false;
+
+ if (S_ISBLK(inode->i_mode))
+ return lo_bdev_can_use_dio(lo, I_BDEV(inode));
+ if (inode->i_sb->s_bdev)
+ return lo_bdev_can_use_dio(lo, inode->i_sb->s_bdev);
return true;
}
@@ -451,28 +465,6 @@ static void loop_reread_partitions(struct loop_device *lo)
__func__, lo->lo_number, lo->lo_file_name, rc);
}
-static unsigned int loop_query_min_dio_size(struct loop_device *lo)
-{
- struct file *file = lo->lo_backing_file;
- struct block_device *sb_bdev = file->f_mapping->host->i_sb->s_bdev;
- struct kstat st;
-
- /*
- * Use the minimal dio alignment of the file system if provided.
- */
- if (!vfs_getattr(&file->f_path, &st, STATX_DIOALIGN, 0) &&
- (st.result_mask & STATX_DIOALIGN))
- return st.dio_offset_align;
-
- /*
- * In a perfect world this wouldn't be needed, but as of Linux 6.13 only
- * a handful of file systems support the STATX_DIOALIGN flag.
- */
- if (sb_bdev)
- return bdev_logical_block_size(sb_bdev);
- return SECTOR_SIZE;
-}
-
static inline int is_loop_device(struct file *file)
{
struct inode *i = file->f_mapping->host;
@@ -513,7 +505,6 @@ static void loop_assign_backing_file(struct loop_device *lo, struct file *file)
lo->old_gfp_mask & ~(__GFP_IO | __GFP_FS));
if (lo->lo_backing_file->f_flags & O_DIRECT)
lo->lo_flags |= LO_FLAGS_DIRECT_IO;
- lo->lo_min_dio_size = loop_query_min_dio_size(lo);
}
static int loop_check_backing_file(struct file *file)
@@ -936,11 +927,12 @@ loop_set_status_from_info(struct loop_device *lo,
return 0;
}
-static unsigned int loop_default_blocksize(struct loop_device *lo)
+static unsigned int loop_default_blocksize(struct loop_device *lo,
+ struct block_device *backing_bdev)
{
- /* In case of direct I/O, match underlying minimum I/O size */
- if (lo->lo_flags & LO_FLAGS_DIRECT_IO)
- return lo->lo_min_dio_size;
+ /* In case of direct I/O, match underlying block size */
+ if ((lo->lo_flags & LO_FLAGS_DIRECT_IO) && backing_bdev)
+ return bdev_logical_block_size(backing_bdev);
return SECTOR_SIZE;
}
@@ -958,7 +950,7 @@ static void loop_update_limits(struct loop_device *lo, struct queue_limits *lim,
backing_bdev = inode->i_sb->s_bdev;
if (!bsize)
- bsize = loop_default_blocksize(lo);
+ bsize = loop_default_blocksize(lo, backing_bdev);
loop_get_discard_config(lo, &granularity, &max_discard_sectors);
diff --git a/drivers/block/zram/TEST_MAPPING b/drivers/block/zram/TEST_MAPPING
new file mode 100644
index 0000000..2bff97b
--- /dev/null
+++ b/drivers/block/zram/TEST_MAPPING
@@ -0,0 +1,245 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.NullBindingTest"
+ }
+ ]
+ }
+ ]
+}
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index bca3340..a6a568f 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -1697,11 +1697,7 @@ static int comp_params_store(struct zram *zram, u32 prio, s32 level,
comp_params_reset(zram, prio);
if (dict_path) {
- sz = kernel_read_file_from_path(dict_path, 0,
- &zram->params[prio].dict,
- INT_MAX,
- NULL,
- READING_POLICY);
+ sz = read_comp_algo_dictionary(&zram->params[prio].dict, dict_path);
if (sz < 0)
return -EINVAL;
}
diff --git a/drivers/char/TEST_MAPPING b/drivers/char/TEST_MAPPING
new file mode 100644
index 0000000..59a2ca7
--- /dev/null
+++ b/drivers/char/TEST_MAPPING
@@ -0,0 +1,329 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.NonUiInCallServiceTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 47093cd..b607784 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -83,6 +83,8 @@ struct clk_core {
unsigned long flags;
bool orphan;
bool rpm_enabled;
+ bool need_sync;
+ bool boot_enabled;
unsigned int enable_count;
unsigned int prepare_count;
unsigned int protect_count;
@@ -1451,6 +1453,10 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
hlist_for_each_entry(child, &core->children, child_node)
clk_unprepare_unused_subtree(child);
+ if (dev_has_sync_state(core->dev) &&
+ !(core->flags & CLK_DONT_HOLD_STATE))
+ return;
+
if (core->prepare_count)
return;
@@ -1477,6 +1483,10 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
hlist_for_each_entry(child, &core->children, child_node)
clk_disable_unused_subtree(child);
+ if (dev_has_sync_state(core->dev) &&
+ !(core->flags & CLK_DONT_HOLD_STATE))
+ return;
+
if (core->flags & CLK_OPS_PARENT_ENABLE)
clk_core_prepare_enable(core->parent);
@@ -1557,6 +1567,38 @@ static int __init clk_disable_unused(void)
}
late_initcall_sync(clk_disable_unused);
+static void clk_unprepare_disable_dev_subtree(struct clk_core *core,
+ struct device *dev)
+{
+ struct clk_core *child;
+
+ lockdep_assert_held(&prepare_lock);
+
+ hlist_for_each_entry(child, &core->children, child_node)
+ clk_unprepare_disable_dev_subtree(child, dev);
+
+ if (core->dev != dev || !core->need_sync)
+ return;
+
+ clk_core_disable_unprepare(core);
+}
+
+void clk_sync_state(struct device *dev)
+{
+ struct clk_core *core;
+
+ clk_prepare_lock();
+
+ hlist_for_each_entry(core, &clk_root_list, child_node)
+ clk_unprepare_disable_dev_subtree(core, dev);
+
+ hlist_for_each_entry(core, &clk_orphan_list, child_node)
+ clk_unprepare_disable_dev_subtree(core, dev);
+
+ clk_prepare_unlock();
+}
+EXPORT_SYMBOL_GPL(clk_sync_state);
+
static int clk_core_determine_round_nolock(struct clk_core *core,
struct clk_rate_request *req)
{
@@ -2062,6 +2104,33 @@ int clk_hw_get_parent_index(struct clk_hw *hw)
}
EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);
+static void clk_core_hold_state(struct clk_core *core)
+{
+ if (core->need_sync || !core->boot_enabled)
+ return;
+
+ if (core->orphan || !dev_has_sync_state(core->dev))
+ return;
+
+ if (core->flags & CLK_DONT_HOLD_STATE)
+ return;
+
+ core->need_sync = !clk_core_prepare_enable(core);
+}
+
+static void __clk_core_update_orphan_hold_state(struct clk_core *core)
+{
+ struct clk_core *child;
+
+ if (core->orphan)
+ return;
+
+ clk_core_hold_state(core);
+
+ hlist_for_each_entry(child, &core->children, child_node)
+ __clk_core_update_orphan_hold_state(child);
+}
+
/*
* Update the orphan status of @core and all its children.
*/
@@ -2369,6 +2438,13 @@ static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
fail_clk = core;
}
+ if (core->ops->pre_rate_change) {
+ ret = core->ops->pre_rate_change(core->hw, core->rate,
+ core->new_rate);
+ if (ret)
+ fail_clk = core;
+ }
+
hlist_for_each_entry(child, &core->children, child_node) {
/* Skip children who will be reparented to another clock */
if (child->new_parent && child->new_parent != core)
@@ -2463,6 +2539,9 @@ static void clk_change_rate(struct clk_core *core)
if (core->flags & CLK_RECALC_NEW_RATES)
(void)clk_calc_new_rates(core, core->new_rate);
+ if (core->ops->post_rate_change)
+ core->ops->post_rate_change(core->hw, old_rate, core->rate);
+
/*
* Use safe iteration, as change_rate can actually swap parents
* for certain clock types.
@@ -3430,7 +3509,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
}
DEFINE_SHOW_ATTRIBUTE(clk_dump);
-#undef CLOCK_ALLOW_WRITE_DEBUGFS
+#define CLOCK_ALLOW_WRITE_DEBUGFS
#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
/*
* This can be dangerous, therefore don't provide any real compile time
@@ -3791,24 +3870,6 @@ static int __init clk_debug_init(void)
{
struct clk_core *core;
-#ifdef CLOCK_ALLOW_WRITE_DEBUGFS
- pr_warn("\n");
- pr_warn("********************************************************************\n");
- pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
- pr_warn("** **\n");
- pr_warn("** WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
- pr_warn("** **\n");
- pr_warn("** This means that this kernel is built to expose clk operations **\n");
- pr_warn("** such as parent or rate setting, enabling, disabling, etc. **\n");
- pr_warn("** to userspace, which may compromise security on your system. **\n");
- pr_warn("** **\n");
- pr_warn("** If you see this message and you are not debugging the **\n");
- pr_warn("** kernel, report this immediately to your vendor! **\n");
- pr_warn("** **\n");
- pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
- pr_warn("********************************************************************\n");
-#endif
-
rootdir = debugfs_create_dir("clk", NULL);
debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
@@ -3861,6 +3922,7 @@ static void clk_core_reparent_orphans_nolock(void)
__clk_set_parent_after(orphan, parent, NULL);
__clk_recalc_accuracies(orphan);
__clk_recalc_rates(orphan, true, 0);
+ __clk_core_update_orphan_hold_state(orphan);
/*
* __clk_init_parent() will set the initial req_rate to
@@ -4046,6 +4108,8 @@ static int __clk_core_init(struct clk_core *core)
rate = 0;
core->rate = core->req_rate = rate;
+ core->boot_enabled = clk_core_is_enabled(core);
+
/*
* Enable CLK_IS_CRITICAL clocks so newly added critical clocks
* don't get accidentally disabled when walking the orphan tree and
@@ -4068,6 +4132,7 @@ static int __clk_core_init(struct clk_core *core)
}
}
+ clk_core_hold_state(core);
clk_core_reparent_orphans_nolock();
out:
clk_pm_runtime_put(core);
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 78e43f6..56c5bc2 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -3,6 +3,7 @@
* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
+#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
@@ -871,6 +872,7 @@ static struct platform_driver disp_cc_sdm845_driver = {
.driver = {
.name = "disp_cc-sdm845",
.of_match_table = disp_cc_sdm845_match_table,
+ .sync_state = clk_sync_state,
},
};
diff --git a/drivers/clk/qcom/gcc-msm8998.c b/drivers/clk/qcom/gcc-msm8998.c
index c9701f7..71f257c 100644
--- a/drivers/clk/qcom/gcc-msm8998.c
+++ b/drivers/clk/qcom/gcc-msm8998.c
@@ -3368,6 +3368,7 @@ static struct platform_driver gcc_msm8998_driver = {
.driver = {
.name = "gcc-msm8998",
.of_match_table = gcc_msm8998_match_table,
+ .sync_state = clk_sync_state,
},
};
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 6d0f9ce..a8e9c1c 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -4014,6 +4014,7 @@ static struct platform_driver gcc_sdm845_driver = {
.driver = {
.name = "gcc-sdm845",
.of_match_table = gcc_sdm845_match_table,
+ .sync_state = clk_sync_state,
},
};
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index 0d63b11..b633d4c 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -201,6 +201,7 @@ static struct platform_driver gpu_cc_sdm845_driver = {
.driver = {
.name = "sdm845-gpucc",
.of_match_table = gpu_cc_sdm845_match_table,
+ .sync_state = clk_sync_state,
},
};
diff --git a/drivers/clk/qcom/videocc-sdm845.c b/drivers/clk/qcom/videocc-sdm845.c
index 6dedc80..4cf163c 100644
--- a/drivers/clk/qcom/videocc-sdm845.c
+++ b/drivers/clk/qcom/videocc-sdm845.c
@@ -337,6 +337,7 @@ static struct platform_driver video_cc_sdm845_driver = {
.driver = {
.name = "sdm845-videocc",
.of_match_table = video_cc_sdm845_match_table,
+ .sync_state = clk_sync_state,
},
};
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index fd91127..ab33a39 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -153,7 +153,7 @@
such as RTL9301, RTL9302 or RTL9303.
config SUN4I_TIMER
- bool "Sun4i timer driver" if COMPILE_TEST
+ bool "Sun4i timer driver"
depends on HAS_IOMEM
select CLKSRC_MMIO
select TIMER_OF
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 78702a0..e6e5b29 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -34,6 +34,13 @@
If in doubt, say N.
+config CPU_FREQ_TIMES
+ bool "CPU frequency time-in-state statistics"
+ help
+ Export CPU time-in-state information through procfs.
+
+ If in doubt, say N.
+
choice
prompt "Default CPUFreq governor"
default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1110_CPUFREQ
@@ -253,6 +260,15 @@
If in doubt, say N.
+config CPUFREQ_DUMMY
+ tristate "Dummy CPU frequency driver"
+ help
+ This option adds a generic dummy CPUfreq driver, which sets a fake
+ 2-frequency table when initializing each policy and otherwise does
+ nothing.
+
+ If in doubt, say N
+
if X86
source "drivers/cpufreq/Kconfig.x86"
endif
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 4014bc9..3e073fe 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -227,7 +227,7 @@
tristate "Tegra20/30 CPUFreq support"
depends on ARCH_TEGRA || COMPILE_TEST
depends on CPUFREQ_DT
- default ARCH_TEGRA
+ default ARCH_TEGRA_2x_SOC || ARCH_TEGRA_3x_SOC
help
This adds the CPUFreq driver support for Tegra20/30 SOCs.
@@ -235,7 +235,7 @@
tristate "Tegra124 CPUFreq support"
depends on ARCH_TEGRA || COMPILE_TEST
depends on CPUFREQ_DT
- default ARCH_TEGRA
+ default ARCH_TEGRA_114_SOC || ARCH_TEGRA_124_SOC || ARCH_TEGRA_132_SOC || ARCH_TEGRA_210_SOC
help
This adds the CPUFreq driver support for Tegra124 SOCs.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 385c9fc..c470369 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -5,7 +5,10 @@
# CPUfreq stats
obj-$(CONFIG_CPU_FREQ_STAT) += cpufreq_stats.o
-# CPUfreq governors
+# CPUfreq times
+obj-$(CONFIG_CPU_FREQ_TIMES) += cpufreq_times.o
+
+# CPUfreq governors
obj-$(CONFIG_CPU_FREQ_GOV_PERFORMANCE) += cpufreq_performance.o
obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
@@ -19,6 +22,8 @@
obj-$(CONFIG_CPUFREQ_DT_PLATDEV) += cpufreq-dt-platdev.o
obj-$(CONFIG_CPUFREQ_VIRT) += virtual-cpufreq.o
+obj-$(CONFIG_CPUFREQ_DUMMY) += dummy-cpufreq.o
+
# Traces
CFLAGS_amd-pstate-trace.o := -I$(src)
CFLAGS_powernv-cpufreq.o := -I$(src)
diff --git a/drivers/cpufreq/TEST_MAPPING b/drivers/cpufreq/TEST_MAPPING
new file mode 100644
index 0000000..947e83a
--- /dev/null
+++ b/drivers/cpufreq/TEST_MAPPING
@@ -0,0 +1,249 @@
+{
+ "imports": [
+ {
+ "path": "frameworks/base/services/core/java/com/android/server"
+ },
+ {
+ "path": "frameworks/base/core/java/com/android/internal/app"
+ },
+ {
+ "path": "frameworks/base/apex/jobscheduler/service/java/com/android/server/job"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsSilentUpdateHostTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsJobSchedulerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsSuspendAppsTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ }
+ ]
+}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 277884d..c437d48 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -16,6 +16,7 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpufreq_times.h>
#include <linux/cpu_cooling.h>
#include <linux/delay.h>
#include <linux/device.h>
@@ -31,6 +32,7 @@
#include <linux/tick.h>
#include <linux/units.h>
#include <trace/events/power.h>
+#include <trace/hooks/cpufreq.h>
static LIST_HEAD(cpufreq_policy_list);
@@ -358,6 +360,7 @@ static void cpufreq_notify_transition(struct cpufreq_policy *policy,
CPUFREQ_POSTCHANGE, freqs);
cpufreq_stats_record_transition(policy, freqs->new);
+ cpufreq_times_record_transition(policy, freqs->new);
policy->cur = freqs->new;
}
}
@@ -712,8 +715,15 @@ static ssize_t show_##file_name \
return sysfs_emit(buf, "%u\n", policy->object); \
}
+static ssize_t show_cpuinfo_max_freq(struct cpufreq_policy *policy, char *buf)
+{
+ unsigned int max_freq = policy->cpuinfo.max_freq;
+
+ trace_android_rvh_show_max_freq(policy, &max_freq);
+ return sprintf(buf, "%u\n", max_freq);
+}
+
show_one(cpuinfo_min_freq, cpuinfo.min_freq);
-show_one(cpuinfo_max_freq, cpuinfo.max_freq);
show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
show_one(scaling_min_freq, min);
show_one(scaling_max_freq, max);
@@ -1485,6 +1495,8 @@ static int cpufreq_policy_online(struct cpufreq_policy *policy,
goto out_destroy_policy;
}
+ trace_android_vh_cpufreq_online(policy);
+
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);
} else {
@@ -1547,6 +1559,7 @@ static int cpufreq_policy_online(struct cpufreq_policy *policy,
goto out_destroy_policy;
cpufreq_stats_create_table(policy);
+ cpufreq_times_create_policy(policy);
write_lock_irqsave(&cpufreq_driver_lock, flags);
list_add(&policy->policy_list, &cpufreq_policy_list);
@@ -2726,6 +2739,7 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
return ret;
}
+EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_frequency_limits);
static void cpufreq_policy_refresh(struct cpufreq_policy *policy)
{
diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c
new file mode 100644
index 0000000..80e271e
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_times.c
@@ -0,0 +1,210 @@
+/* drivers/cpufreq/cpufreq_times.c
+ *
+ * Copyright (C) 2018 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/cpufreq.h>
+#include <linux/cpufreq_times.h>
+#include <linux/jiffies.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+
+static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
+
+/**
+ * struct cpu_freqs - per-cpu frequency information
+ * @offset: start of these freqs' stats in task time_in_state array
+ * @max_state: number of entries in freq_table
+ * @last_index: index in freq_table of last frequency switched to
+ * @freq_table: list of available frequencies
+ */
+struct cpu_freqs {
+ unsigned int offset;
+ unsigned int max_state;
+ unsigned int last_index;
+ unsigned int freq_table[];
+};
+
+static struct cpu_freqs *all_freqs[NR_CPUS];
+
+static unsigned int next_offset;
+
+void cpufreq_task_times_init(struct task_struct *p)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&task_time_in_state_lock, flags);
+ p->time_in_state = NULL;
+ spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+ p->max_state = 0;
+}
+
+void cpufreq_task_times_alloc(struct task_struct *p)
+{
+ void *temp;
+ unsigned long flags;
+ unsigned int max_state = READ_ONCE(next_offset);
+
+ /* We use one array to avoid multiple allocs per task */
+ temp = kcalloc(max_state, sizeof(p->time_in_state[0]), GFP_ATOMIC);
+ if (!temp)
+ return;
+
+ spin_lock_irqsave(&task_time_in_state_lock, flags);
+ p->time_in_state = temp;
+ spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+ p->max_state = max_state;
+}
+
+/* Caller must hold task_time_in_state_lock */
+static int cpufreq_task_times_realloc_locked(struct task_struct *p)
+{
+ void *temp;
+ unsigned int max_state = READ_ONCE(next_offset);
+
+ temp = krealloc(p->time_in_state, max_state * sizeof(u64), GFP_ATOMIC);
+ if (!temp)
+ return -ENOMEM;
+ p->time_in_state = temp;
+ memset(p->time_in_state + p->max_state, 0,
+ (max_state - p->max_state) * sizeof(u64));
+ p->max_state = max_state;
+ return 0;
+}
+
+void cpufreq_task_times_exit(struct task_struct *p)
+{
+ unsigned long flags;
+ void *temp;
+
+ if (!p->time_in_state)
+ return;
+
+ spin_lock_irqsave(&task_time_in_state_lock, flags);
+ temp = p->time_in_state;
+ p->time_in_state = NULL;
+ spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+ kfree(temp);
+}
+
+int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *p)
+{
+ unsigned int cpu, i;
+ u64 cputime;
+ unsigned long flags;
+ struct cpu_freqs *freqs;
+ struct cpu_freqs *last_freqs = NULL;
+
+ spin_lock_irqsave(&task_time_in_state_lock, flags);
+ for_each_possible_cpu(cpu) {
+ freqs = all_freqs[cpu];
+ if (!freqs || freqs == last_freqs)
+ continue;
+ last_freqs = freqs;
+
+ seq_printf(m, "cpu%u\n", cpu);
+ for (i = 0; i < freqs->max_state; i++) {
+ cputime = 0;
+ if (freqs->offset + i < p->max_state &&
+ p->time_in_state)
+ cputime = p->time_in_state[freqs->offset + i];
+ seq_printf(m, "%u %lu\n", freqs->freq_table[i],
+ (unsigned long)nsec_to_clock_t(cputime));
+ }
+ }
+ spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+ return 0;
+}
+
+void cpufreq_acct_update_power(struct task_struct *p, u64 cputime)
+{
+ unsigned long flags;
+ unsigned int state;
+ struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
+
+ if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
+ return;
+
+ state = freqs->offset + READ_ONCE(freqs->last_index);
+
+ spin_lock_irqsave(&task_time_in_state_lock, flags);
+ if ((state < p->max_state || !cpufreq_task_times_realloc_locked(p)) &&
+ p->time_in_state)
+ p->time_in_state[state] += cputime;
+ spin_unlock_irqrestore(&task_time_in_state_lock, flags);
+}
+
+static int cpufreq_times_get_index(struct cpu_freqs *freqs, unsigned int freq)
+{
+ int index;
+ for (index = 0; index < freqs->max_state; ++index) {
+ if (freqs->freq_table[index] == freq)
+ return index;
+ }
+ return -1;
+}
+
+void cpufreq_times_create_policy(struct cpufreq_policy *policy)
+{
+ int cpu, index = 0;
+ unsigned int count = 0;
+ struct cpufreq_frequency_table *pos, *table;
+ struct cpu_freqs *freqs;
+ void *tmp;
+
+ if (all_freqs[policy->cpu])
+ return;
+
+ table = policy->freq_table;
+ if (!table)
+ return;
+
+ cpufreq_for_each_valid_entry(pos, table)
+ count++;
+
+ tmp = kzalloc(struct_size(freqs, freq_table, count), GFP_KERNEL);
+ if (!tmp)
+ return;
+
+ freqs = tmp;
+ freqs->max_state = count;
+
+ cpufreq_for_each_valid_entry(pos, table)
+ freqs->freq_table[index++] = pos->frequency;
+
+ index = cpufreq_times_get_index(freqs, policy->cur);
+ if (index >= 0)
+ WRITE_ONCE(freqs->last_index, index);
+
+ freqs->offset = next_offset;
+ WRITE_ONCE(next_offset, freqs->offset + count);
+ for_each_cpu(cpu, policy->related_cpus)
+ all_freqs[cpu] = freqs;
+}
+
+void cpufreq_times_record_transition(struct cpufreq_policy *policy,
+ unsigned int new_freq)
+{
+ int index;
+ struct cpu_freqs *freqs = all_freqs[policy->cpu];
+ if (!freqs)
+ return;
+
+ index = cpufreq_times_get_index(freqs, new_freq);
+ if (index >= 0)
+ WRITE_ONCE(freqs->last_index, index);
+}
diff --git a/drivers/cpufreq/dummy-cpufreq.c b/drivers/cpufreq/dummy-cpufreq.c
new file mode 100644
index 0000000..173b841
--- /dev/null
+++ b/drivers/cpufreq/dummy-cpufreq.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Google, Inc.
+ */
+#include <linux/cpufreq.h>
+#include <linux/module.h>
+
+static struct cpufreq_frequency_table freq_table[] = {
+ { .frequency = 1 },
+ { .frequency = 2 },
+ { .frequency = CPUFREQ_TABLE_END },
+};
+
+static int dummy_cpufreq_target_index(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ return 0;
+}
+
+static int dummy_cpufreq_driver_init(struct cpufreq_policy *policy)
+{
+ policy->freq_table = freq_table;
+ return 0;
+}
+
+static unsigned int dummy_cpufreq_get(unsigned int cpu)
+{
+ return 1;
+}
+
+static int dummy_cpufreq_verify(struct cpufreq_policy_data *data)
+{
+ return 0;
+}
+
+static struct cpufreq_driver dummy_cpufreq_driver = {
+ .name = "dummy",
+ .target_index = dummy_cpufreq_target_index,
+ .init = dummy_cpufreq_driver_init,
+ .get = dummy_cpufreq_get,
+ .verify = dummy_cpufreq_verify,
+};
+
+static int __init dummy_cpufreq_init(void)
+{
+ return cpufreq_register_driver(&dummy_cpufreq_driver);
+}
+
+static void __exit dummy_cpufreq_exit(void)
+{
+ cpufreq_unregister_driver(&dummy_cpufreq_driver);
+}
+
+module_init(dummy_cpufreq_init);
+module_exit(dummy_cpufreq_exit);
+
+MODULE_AUTHOR("Connor O'Brien <connoro@google.com>");
+MODULE_DESCRIPTION("dummy cpufreq driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 7f251daf0..2de11c3 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -9,6 +9,7 @@
#include <linux/cpufreq.h>
#include <linux/module.h>
+#include <trace/hooks/cpufreq.h>
/*********************************************************************
* FREQUENCY TABLE HELPERS *
diff --git a/drivers/cpuidle/TEST_MAPPING b/drivers/cpuidle/TEST_MAPPING
new file mode 100644
index 0000000..0917e71
--- /dev/null
+++ b/drivers/cpuidle/TEST_MAPPING
@@ -0,0 +1,223 @@
+{
+ "imports": [
+ {
+ "path": "frameworks/base/services/core/java/com/android/server"
+ },
+ {
+ "path": "frameworks/base/core/java/com/android/internal/app"
+ },
+ {
+ "path": "frameworks/base/apex/jobscheduler/service/java/com/android/server/job"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsSilentUpdateHostTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsJobSchedulerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsSuspendAppsTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ }
+ ]
+}
diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c
index dcf20ea5..48d64fe 100644
--- a/drivers/cpuidle/cpuidle-psci.c
+++ b/drivers/cpuidle/cpuidle-psci.c
@@ -25,6 +25,7 @@
#include <linux/syscore_ops.h>
#include <asm/cpuidle.h>
+#include <trace/hooks/cpuidle_psci.h>
#include <trace/events/power.h>
#include "cpuidle-psci.h"
@@ -77,6 +78,8 @@ static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
return -1;
/* Do runtime PM to manage a hierarchical CPU toplogy. */
+ trace_android_vh_cpuidle_psci_enter(dev, s2idle);
+
if (s2idle)
dev_pm_genpd_suspend(pd_dev);
else
@@ -95,6 +98,8 @@ static __cpuidle int __psci_enter_domain_idle_state(struct cpuidle_device *dev,
else
pm_runtime_get_sync(pd_dev);
+ trace_android_vh_cpuidle_psci_exit(dev, s2idle);
+
cpu_pm_exit();
/* Correct domain-idlestate statistics if we failed to enter. */
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 65fbb8e..c4c4f08 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -27,6 +27,7 @@
#include <linux/mmu_context.h>
#include <linux/context_tracking.h>
#include <trace/events/power.h>
+#include <trace/hooks/cpuidle.h>
#include "cpuidle.h"
@@ -220,13 +221,24 @@ noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
{
int entered_state;
- struct cpuidle_state *target_state = &drv->states[index];
- bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
+ struct cpuidle_state *target_state;
+ bool broadcast;
ktime_t time_start, time_end;
instrumentation_begin();
/*
+ * The vendor hook may modify index, which means target_state and
+ * broadcast must be assigned after the vendor hook.
+ */
+ trace_android_vh_cpu_idle_enter(&index, dev);
+ if (index < 0)
+ return index;
+
+ target_state = &drv->states[index];
+ broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
+
+ /*
* Tell the time framework to switch to a broadcast timer because our
* local timer will be shut down. If a local timer is used from another
* CPU as a broadcast timer, this call may fail if it is not available.
@@ -281,6 +293,7 @@ noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
sched_clock_idle_wakeup_event();
time_end = ns_to_ktime(local_clock_noinstr());
trace_cpu_idle(PWR_EVENT_EXIT, dev->cpu);
+ trace_android_vh_cpu_idle_exit(entered_state, dev);
/* The cpu is no longer idle or about to enter idle. */
sched_idle_set_state(NULL);
diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
index 5d0e7f7..80449c6 100644
--- a/drivers/cpuidle/governor.c
+++ b/drivers/cpuidle/governor.c
@@ -101,6 +101,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
return ret;
}
+EXPORT_SYMBOL_GPL(cpuidle_register_governor);
/**
* cpuidle_governor_latency_req - Compute a latency constraint for CPU
@@ -121,3 +122,4 @@ s64 cpuidle_governor_latency_req(unsigned int cpu)
return (s64)device_req * NSEC_PER_USEC;
}
+EXPORT_SYMBOL_GPL(cpuidle_governor_latency_req);
diff --git a/drivers/dma-buf/TEST_MAPPING b/drivers/dma-buf/TEST_MAPPING
new file mode 100644
index 0000000..1a38e0e
--- /dev/null
+++ b/drivers/dma-buf/TEST_MAPPING
@@ -0,0 +1,312 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.SelfManagedConnectionServiceTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index 1171187..a01422e 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -112,6 +112,7 @@ struct dma_buf *dma_buf_iter_begin(void)
mutex_unlock(&dmabuf_list_mutex);
return ret;
}
+EXPORT_SYMBOL_NS_GPL(dma_buf_iter_begin, "DMA_BUF");
/**
* dma_buf_iter_next - continue iteration through global list of all DMA buffers
@@ -146,6 +147,7 @@ struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf)
mutex_unlock(&dmabuf_list_mutex);
return ret;
}
+EXPORT_SYMBOL_NS_GPL(dma_buf_iter_next, "DMA_BUF");
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
@@ -1490,6 +1492,30 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, "DMA_BUF");
+int dma_buf_begin_cpu_access_partial(struct dma_buf *dmabuf,
+ enum dma_data_direction direction,
+ unsigned int offset, unsigned int len)
+{
+ int ret = 0;
+
+ if (WARN_ON(!dmabuf))
+ return -EINVAL;
+
+ if (dmabuf->ops->begin_cpu_access_partial)
+ ret = dmabuf->ops->begin_cpu_access_partial(dmabuf, direction,
+ offset, len);
+
+ /* Ensure that all fences are waited upon - but we first allow
+ * the native handler the chance to do so more efficiently if it
+ * chooses. A double invocation here will be reasonably cheap no-op.
+ */
+ if (ret == 0)
+ ret = __dma_buf_begin_cpu_access(dmabuf, direction);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access_partial);
+
/**
* dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
* cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
@@ -1518,6 +1544,21 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
}
EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, "DMA_BUF");
+int dma_buf_end_cpu_access_partial(struct dma_buf *dmabuf,
+ enum dma_data_direction direction,
+ unsigned int offset, unsigned int len)
+{
+ int ret = 0;
+
+ WARN_ON(!dmabuf);
+
+ if (dmabuf->ops->end_cpu_access_partial)
+ ret = dmabuf->ops->end_cpu_access_partial(dmabuf, direction,
+ offset, len);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access_partial);
/**
* dma_buf_mmap - Setup up a userspace mmap with the given vma
@@ -1682,6 +1723,20 @@ void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, "DMA_BUF");
+int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags)
+{
+ int ret = 0;
+
+ if (WARN_ON(!dmabuf) || !flags)
+ return -EINVAL;
+
+ if (dmabuf->ops->get_flags)
+ ret = dmabuf->ops->get_flags(dmabuf, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_get_flags);
+
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
index ac5f868..a10eebf 100644
--- a/drivers/dma-buf/dma-heap.c
+++ b/drivers/dma-buf/dma-heap.c
@@ -28,9 +28,10 @@
* @name: used for debugging/device-node name
* @ops: ops struct for this heap
* @priv: private data for this heap
- * @heap_devt: heap device node
- * @list: list head connecting to list of heaps
- * @heap_cdev: heap char device
+ * @heap_devt heap device node
+ * @list list head connecting to list of heaps
+ * @heap_cdev heap char device
+ * @heap_dev heap device struct
*
* Represents a heap of memory from which buffers can be made.
*/
@@ -41,6 +42,8 @@ struct dma_heap {
dev_t heap_devt;
struct list_head list;
struct cdev heap_cdev;
+ struct kref refcount;
+ struct device *heap_dev;
};
static LIST_HEAD(heap_list);
@@ -50,26 +53,65 @@ static struct class *dma_heap_class;
static DEFINE_XARRAY_ALLOC(dma_heap_minors);
bool __read_mostly mem_accounting;
+EXPORT_SYMBOL_GPL(mem_accounting);
module_param(mem_accounting, bool, 0444);
MODULE_PARM_DESC(mem_accounting,
"Enable cgroup-based memory accounting for dma-buf heap allocations (default=false).");
-static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
- u32 fd_flags,
- u64 heap_flags)
+struct dma_heap *dma_heap_find(const char *name)
{
- struct dma_buf *dmabuf;
- int fd;
+ struct dma_heap *h;
+ mutex_lock(&heap_list_lock);
+ list_for_each_entry(h, &heap_list, list) {
+ if (!strcmp(h->name, name)) {
+ kref_get(&h->refcount);
+ mutex_unlock(&heap_list_lock);
+ return h;
+ }
+ }
+ mutex_unlock(&heap_list_lock);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(dma_heap_find);
+
+
+void dma_heap_buffer_free(struct dma_buf *dmabuf)
+{
+ dma_buf_put(dmabuf);
+}
+EXPORT_SYMBOL_GPL(dma_heap_buffer_free);
+
+struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
+ u32 fd_flags,
+ u64 heap_flags)
+{
+ if (fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
+ return ERR_PTR(-EINVAL);
+
+ if (heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
+ return ERR_PTR(-EINVAL);
/*
* Allocations from all heaps have to begin
* and end on page boundaries.
*/
len = PAGE_ALIGN(len);
if (!len)
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
- dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags);
+ return heap->ops->allocate(heap, len, fd_flags, heap_flags);
+}
+EXPORT_SYMBOL_GPL(dma_heap_buffer_alloc);
+
+int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len,
+ u32 fd_flags,
+ u64 heap_flags)
+{
+ struct dma_buf *dmabuf;
+ int fd;
+
+ dmabuf = dma_heap_buffer_alloc(heap, len, fd_flags, heap_flags);
+
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
@@ -79,7 +121,9 @@ static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
/* just return, as put will call release and that will free */
}
return fd;
+
}
+EXPORT_SYMBOL_GPL(dma_heap_bufferfd_alloc);
static int dma_heap_open(struct inode *inode, struct file *file)
{
@@ -107,15 +151,9 @@ static long dma_heap_ioctl_allocate(struct file *file, void *data)
if (heap_allocation->fd)
return -EINVAL;
- if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
- return -EINVAL;
-
- if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
- return -EINVAL;
-
- fd = dma_heap_buffer_alloc(heap, heap_allocation->len,
- heap_allocation->fd_flags,
- heap_allocation->heap_flags);
+ fd = dma_heap_bufferfd_alloc(heap, heap_allocation->len,
+ heap_allocation->fd_flags,
+ heap_allocation->heap_flags);
if (fd < 0)
return fd;
@@ -210,6 +248,46 @@ void *dma_heap_get_drvdata(struct dma_heap *heap)
}
EXPORT_SYMBOL_NS_GPL(dma_heap_get_drvdata, "DMA_BUF_HEAP");
+static void dma_heap_release(struct kref *ref)
+{
+ struct dma_heap *heap = container_of(ref, struct dma_heap, refcount);
+ int minor = MINOR(heap->heap_devt);
+
+ /* Note, we already holding the heap_list_lock here */
+ list_del(&heap->list);
+
+ device_destroy(dma_heap_class, heap->heap_devt);
+ cdev_del(&heap->heap_cdev);
+ xa_erase(&dma_heap_minors, minor);
+
+ kfree(heap);
+}
+
+void dma_heap_put(struct dma_heap *h)
+{
+ /*
+ * Take the heap_list_lock now to avoid racing with code
+ * scanning the list and then taking a kref.
+ */
+ mutex_lock(&heap_list_lock);
+ kref_put(&h->refcount, dma_heap_release);
+ mutex_unlock(&heap_list_lock);
+}
+EXPORT_SYMBOL_GPL(dma_heap_put);
+
+/**
+ * dma_heap_get_dev() - get device struct for the heap
+ * @heap: DMA-Heap to retrieve device struct from
+ *
+ * Returns:
+ * The device struct for the heap.
+ */
+struct device *dma_heap_get_dev(struct dma_heap *heap)
+{
+ return heap->heap_dev;
+}
+EXPORT_SYMBOL_GPL(dma_heap_get_dev);
+
/**
* dma_heap_get_name - get heap name
* @heap: DMA-Heap to retrieve the name of
@@ -230,7 +308,6 @@ EXPORT_SYMBOL_NS_GPL(dma_heap_get_name, "DMA_BUF_HEAP");
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
{
struct dma_heap *heap, *h, *err_ret;
- struct device *dev_ret;
unsigned int minor;
int ret;
@@ -248,6 +325,7 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
if (!heap)
return ERR_PTR(-ENOMEM);
+ kref_init(&heap->refcount);
heap->name = exp_info->name;
heap->ops = exp_info->ops;
heap->priv = exp_info->priv;
@@ -272,17 +350,20 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
goto err1;
}
- dev_ret = device_create(dma_heap_class,
- NULL,
- heap->heap_devt,
- NULL,
- heap->name);
- if (IS_ERR(dev_ret)) {
+ heap->heap_dev = device_create(dma_heap_class,
+ NULL,
+ heap->heap_devt,
+ NULL,
+ heap->name);
+ if (IS_ERR(heap->heap_dev)) {
pr_err("dma_heap: Unable to create device\n");
- err_ret = ERR_CAST(dev_ret);
+ err_ret = ERR_CAST(heap->heap_dev);
goto err2;
}
+ /* Make sure it doesn't disappear on us */
+ heap->heap_dev = get_device(heap->heap_dev);
+
mutex_lock(&heap_list_lock);
/* check the name is unique */
list_for_each_entry(h, &heap_list, list) {
@@ -291,6 +372,7 @@ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
pr_err("dma_heap: Already registered heap named %s\n",
exp_info->name);
err_ret = ERR_PTR(-EINVAL);
+ put_device(heap->heap_dev);
goto err3;
}
}
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
index a5eef06..e273fb1 100644
--- a/drivers/dma-buf/heaps/Kconfig
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -1,12 +1,12 @@
config DMABUF_HEAPS_SYSTEM
- bool "DMA-BUF System Heap"
+ tristate "DMA-BUF System Heap"
depends on DMABUF_HEAPS
help
Choose this option to enable the system dmabuf heap. The system heap
is backed by pages from the buddy allocator. If in doubt, say Y.
config DMABUF_HEAPS_CMA
- bool "DMA-BUF CMA Heap"
+ tristate "DMA-BUF CMA Heap"
depends on DMABUF_HEAPS && DMA_CMA
help
Choose this option to enable dma-buf CMA heap. This heap is backed
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index bd3370b..05798886 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -33,6 +33,7 @@
static struct cma *dma_areas[MAX_CMA_AREAS] __initdata;
static unsigned int dma_areas_num __initdata;
+#if IS_BUILTIN(CONFIG_DMABUF_HEAPS_CMA)
int __init dma_heap_cma_register_heap(struct cma *cma)
{
if (dma_areas_num >= ARRAY_SIZE(dma_areas))
@@ -42,6 +43,7 @@ int __init dma_heap_cma_register_heap(struct cma *cma)
return 0;
}
+#endif
struct cma_heap {
struct dma_heap *heap;
@@ -119,9 +121,10 @@ static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachme
{
struct dma_heap_attachment *a = attachment->priv;
struct sg_table *table = &a->table;
+ int attrs = attachment->dma_map_attrs;
int ret;
- ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+ ret = dma_map_sgtable(attachment->dev, table, direction, attrs);
if (ret)
return ERR_PTR(-ENOMEM);
a->mapped = true;
@@ -133,9 +136,10 @@ static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
struct dma_heap_attachment *a = attachment->priv;
+ int attrs = attachment->dma_map_attrs;
a->mapped = false;
- dma_unmap_sgtable(attachment->dev, table, direction, 0);
+ dma_unmap_sgtable(attachment->dev, table, direction, attrs);
}
static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
@@ -438,3 +442,6 @@ static int __init add_cma_heaps(void)
}
module_init(add_cma_heaps);
MODULE_DESCRIPTION("DMA-BUF CMA Heap");
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("DMA_BUF");
+MODULE_IMPORT_NS("DMA_BUF_HEAP");
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
index b3650d8..7b132c3 100644
--- a/drivers/dma-buf/heaps/system_heap.c
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -11,14 +11,17 @@
*/
#include <linux/dma-buf.h>
+#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/highmem.h>
+#include <linux/iommu.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/printk.h>
#include <linux/scatterlist.h>
-#include <linux/slab.h>
+#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
struct system_heap_buffer {
@@ -29,6 +32,8 @@ struct system_heap_buffer {
struct sg_table sg_table;
int vmap_cnt;
void *vaddr;
+
+ bool uncached;
};
struct dma_heap_attachment {
@@ -36,6 +41,8 @@ struct dma_heap_attachment {
struct sg_table table;
struct list_head list;
bool mapped;
+
+ bool uncached;
};
#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO)
@@ -52,6 +59,28 @@ static gfp_t order_flags[] = {HIGH_ORDER_GFP, HIGH_ORDER_GFP, LOW_ORDER_GFP};
static const unsigned int orders[] = {8, 4, 0};
#define NUM_ORDERS ARRAY_SIZE(orders)
+static bool needs_swiotlb_bounce(struct device *dev, struct sg_table *table)
+{
+ struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct scatterlist *sg;
+ int i;
+
+ for_each_sgtable_dma_sg(table, sg, i) {
+ // SG_DMA_SWIOTLB is set only for dma-iommu, not for dma-direct
+ if (domain && IS_ENABLED(CONFIG_NEED_SG_DMA_FLAGS)) {
+ if (sg_dma_is_swiotlb(table->sgl))
+ return true;
+ } else {
+ phys_addr_t paddr = domain ?
+ iommu_iova_to_phys(domain, sg_dma_address(sg)) :
+ dma_to_phys(dev, sg_dma_address(sg));
+ if (swiotlb_find_pool(dev, paddr))
+ return true;
+ }
+ }
+ return false;
+}
+
static int dup_sg_table(struct sg_table *from, struct sg_table *to)
{
struct scatterlist *sg, *new_sg;
@@ -90,7 +119,7 @@ static int system_heap_attach(struct dma_buf *dmabuf,
a->dev = attachment->dev;
INIT_LIST_HEAD(&a->list);
a->mapped = false;
-
+ a->uncached = buffer->uncached;
attachment->priv = a;
mutex_lock(&buffer->lock);
@@ -119,12 +148,23 @@ static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attac
{
struct dma_heap_attachment *a = attachment->priv;
struct sg_table *table = &a->table;
+ int attr = attachment->dma_map_attrs;
int ret;
- ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+ if (a->uncached)
+ attr |= DMA_ATTR_SKIP_CPU_SYNC;
+
+ ret = dma_map_sgtable(attachment->dev, table, direction, attr);
if (ret)
return ERR_PTR(ret);
+ if (a->uncached && needs_swiotlb_bounce(attachment->dev, table)) {
+ pr_err("Cannot map uncached system heap buffer for %s, as it requires SWIOTLB",
+ dev_name(attachment->dev));
+ dma_unmap_sgtable(attachment->dev, table, direction, attr);
+ return ERR_PTR(-EINVAL);
+ }
+
a->mapped = true;
return table;
}
@@ -134,9 +174,12 @@ static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
enum dma_data_direction direction)
{
struct dma_heap_attachment *a = attachment->priv;
+ int attr = attachment->dma_map_attrs;
+ if (a->uncached)
+ attr |= DMA_ATTR_SKIP_CPU_SYNC;
a->mapped = false;
- dma_unmap_sgtable(attachment->dev, table, direction, 0);
+ dma_unmap_sgtable(attachment->dev, table, direction, attr);
}
static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
@@ -150,10 +193,12 @@ static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
if (buffer->vmap_cnt)
invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
- list_for_each_entry(a, &buffer->attachments, list) {
- if (!a->mapped)
- continue;
- dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
+ if (!buffer->uncached) {
+ list_for_each_entry(a, &buffer->attachments, list) {
+ if (!a->mapped)
+ continue;
+ dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
+ }
}
mutex_unlock(&buffer->lock);
@@ -171,10 +216,12 @@ static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
if (buffer->vmap_cnt)
flush_kernel_vmap_range(buffer->vaddr, buffer->len);
- list_for_each_entry(a, &buffer->attachments, list) {
- if (!a->mapped)
- continue;
- dma_sync_sgtable_for_device(a->dev, &a->table, direction);
+ if (!buffer->uncached) {
+ list_for_each_entry(a, &buffer->attachments, list) {
+ if (!a->mapped)
+ continue;
+ dma_sync_sgtable_for_device(a->dev, &a->table, direction);
+ }
}
mutex_unlock(&buffer->lock);
@@ -190,6 +237,9 @@ static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
struct scatterlist *sg;
int i, ret;
+ if (buffer->uncached)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
for_each_sgtable_sg(table, sg, i) {
unsigned long n = sg->length >> PAGE_SHIFT;
@@ -225,17 +275,21 @@ static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
struct page **pages = vmalloc(sizeof(struct page *) * npages);
struct page **tmp = pages;
struct sg_page_iter piter;
+ pgprot_t pgprot = PAGE_KERNEL;
void *vaddr;
if (!pages)
return ERR_PTR(-ENOMEM);
+ if (buffer->uncached)
+ pgprot = pgprot_writecombine(PAGE_KERNEL);
+
for_each_sgtable_page(table, &piter, 0) {
WARN_ON(tmp - pages >= npages);
*tmp++ = sg_page_iter_page(&piter);
}
- vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
+ vaddr = vmap(pages, npages, VM_MAP, pgprot);
vfree(pages);
if (!vaddr)
@@ -338,10 +392,11 @@ static struct page *alloc_largest_available(unsigned long size,
return NULL;
}
-static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
- unsigned long len,
- u32 fd_flags,
- u64 heap_flags)
+static struct dma_buf *system_heap_do_allocate(struct dma_heap *heap,
+ unsigned long len,
+ u32 fd_flags,
+ u64 heap_flags,
+ bool uncached)
{
struct system_heap_buffer *buffer;
DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
@@ -362,6 +417,7 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
mutex_init(&buffer->lock);
buffer->heap = heap;
buffer->len = len;
+ buffer->uncached = uncached;
INIT_LIST_HEAD(&pages);
i = 0;
@@ -407,6 +463,18 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
ret = PTR_ERR(dmabuf);
goto free_pages;
}
+
+ /*
+ * For uncached buffers, we need to initially flush cpu cache, since
+ * the __GFP_ZERO on the allocation means the zeroing was done by the
+ * cpu and thus it is likely cached. Map (and implicitly flush) and
+ * unmap it now so we don't get corruption later on.
+ */
+ if (buffer->uncached) {
+ dma_map_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
+ dma_unmap_sgtable(dma_heap_get_dev(heap), table, DMA_BIDIRECTIONAL, 0);
+ }
+
return dmabuf;
free_pages:
@@ -424,13 +492,44 @@ static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
return ERR_PTR(ret);
}
+static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ u32 fd_flags,
+ u64 heap_flags)
+{
+ return system_heap_do_allocate(heap, len, fd_flags, heap_flags, false);
+}
+
static const struct dma_heap_ops system_heap_ops = {
.allocate = system_heap_allocate,
};
+static struct dma_buf *system_uncached_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ u32 fd_flags,
+ u64 heap_flags)
+{
+ return system_heap_do_allocate(heap, len, fd_flags, heap_flags, true);
+}
+
+/* Dummy function to be used until we can call coerce_mask_and_coherent */
+static struct dma_buf *system_uncached_heap_not_initialized(struct dma_heap *heap,
+ unsigned long len,
+ u32 fd_flags,
+ u64 heap_flags)
+{
+ return ERR_PTR(-EBUSY);
+}
+
+static struct dma_heap_ops system_uncached_heap_ops = {
+ /* After system_heap_create is complete, we will swap this */
+ .allocate = system_uncached_heap_not_initialized,
+};
+
static int __init system_heap_create(void)
{
struct dma_heap_export_info exp_info;
+ struct dma_heap *sys_uncached_heap;
struct dma_heap *sys_heap;
exp_info.name = "system";
@@ -441,6 +540,21 @@ static int __init system_heap_create(void)
if (IS_ERR(sys_heap))
return PTR_ERR(sys_heap);
+ exp_info.name = "system-uncached";
+ exp_info.ops = &system_uncached_heap_ops;
+ exp_info.priv = NULL;
+
+ sys_uncached_heap = dma_heap_add(&exp_info);
+ if (IS_ERR(sys_uncached_heap))
+ return PTR_ERR(sys_uncached_heap);
+
+ dma_coerce_mask_and_coherent(dma_heap_get_dev(sys_uncached_heap), DMA_BIT_MASK(64));
+ mb(); /* make sure we only set allocate after dma_mask is set */
+ system_uncached_heap_ops.allocate = system_uncached_heap_allocate;
+
return 0;
}
module_init(system_heap_create);
+MODULE_LICENSE("GPL v2");
+MODULE_IMPORT_NS("DMA_BUF");
+MODULE_IMPORT_NS("DMA_BUF_HEAP");
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index ef1ac68..69ce681 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -552,6 +552,7 @@ static struct gpio_desc *of_find_gpio_rename(struct device_node *np,
* establish that GPIO properties should be named
* "foo-gpios" so we have this special kludge for them.
*/
+ { "max77759,extbst-ctl", NULL, "maxim,max77759chrg" },
#if IS_ENABLED(CONFIG_REGULATOR_ARIZONA_LDO1)
{ "wlf,ldoena", NULL, NULL }, /* Arizona */
#endif
diff --git a/drivers/gpu/drm/TEST_MAPPING b/drivers/gpu/drm/TEST_MAPPING
new file mode 100644
index 0000000..d9dc244
--- /dev/null
+++ b/drivers/gpu/drm/TEST_MAPPING
@@ -0,0 +1,88 @@
+{
+ "desktop-postsubmit": [
+ {
+ "name": "IgtCoreAuthTestCases"
+ },
+ {
+ "name": "IgtDrmMmTestCases"
+ },
+ {
+ "name": "IgtKmsAddfbBasicTestCases"
+ },
+ {
+ "name": "IgtKmsAtomicTestCases"
+ },
+ {
+ "name": "IgtKmsAtomicInterruptibleTestCases"
+ },
+ {
+ "name": "IgtKmsBwTestCases"
+ },
+ {
+ "name": "IgtKmsColorTestCases"
+ },
+ {
+ "name": "IgtKmsConcurrentTestCases"
+ },
+ {
+ "name": "IgtKmsContentProtectionTestCases"
+ },
+ {
+ "name": "IgtKmsCursorEdgeWalkTestCases"
+ },
+ {
+ "name": "IgtKmsDisplayModesTestCases"
+ },
+ {
+ "name": "IgtKmsHdmiInjectTestCases"
+ },
+ {
+ "name": "IgtKmsHdrTestCases"
+ },
+ {
+ "name": "IgtKmsInvalidModeTestCases"
+ },
+ {
+ "name": "IgtKmsPipeCrcBasicTestCases"
+ },
+ {
+ "name": "IgtKmsPlaneCursorTestCases"
+ },
+ {
+ "name": "IgtKmsPlaneLowresTestCases"
+ },
+ {
+ "name": "IgtKmsPlaneMultipleTestCases"
+ },
+ {
+ "name": "IgtKmsPlaneScalingTestCases"
+ },
+ {
+ "name": "IgtKmsPlaneTestCases"
+ },
+ {
+ "name": "IgtKmsPropertiesTestCases"
+ },
+ {
+ "name": "IgtKmsRotationCrcTestCases"
+ },
+ {
+ "name": "IgtKmsScalingModesTestCases"
+ },
+ {
+ "name": "IgtKmsSetmodeTestCases"
+ },
+ {
+ "name": "IgtKmsSysfsEdidTimingTestCases"
+ },
+ {
+ "name": "IgtKmsTiledDisplayTestCases"
+ },
+ {
+ "name": "IgtKmsVBlankTestCases"
+ },
+ {
+ "name": "IgtKmsVrrTestCases"
+ }
+ ]
+}
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index ff19315..20cc843 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -692,9 +692,9 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CLOSEFB, drm_mode_closefb_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, 0),
- DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_CREATE_DUMB, drm_mode_create_dumb_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_MAP_DUMB, drm_mode_mmap_dumb_ioctl, DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_DESTROY_DUMB, drm_mode_destroy_dumb_ioctl, DRM_RENDER_ALLOW),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_GETPROPERTIES, drm_mode_obj_get_properties_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_OBJ_SETPROPERTY, drm_mode_obj_set_property_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_CURSOR2, drm_mode_cursor2_ioctl, DRM_MASTER),
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 3f8e025..010a734 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -2621,6 +2621,7 @@ void drm_mode_convert_to_umode(struct drm_mode_modeinfo *out,
strscpy_pad(out->name, in->name, sizeof(out->name));
}
+EXPORT_SYMBOL_GPL(drm_mode_convert_to_umode);
/**
* drm_mode_convert_umode - convert a modeinfo into a drm_display_mode
@@ -2696,6 +2697,7 @@ int drm_mode_convert_umode(struct drm_device *dev,
return 0;
}
+EXPORT_SYMBOL_GPL(drm_mode_convert_umode);
/**
* drm_mode_is_420_only - if a given videomode can be only supported in YCBCR420
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index f1dae95..fe1dcd6 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -325,10 +325,6 @@ virtio_gpu_user_framebuffer_create(struct drm_device *dev,
struct virtio_gpu_framebuffer *virtio_gpu_fb;
int ret;
- if (mode_cmd->pixel_format != DRM_FORMAT_HOST_XRGB8888 &&
- mode_cmd->pixel_format != DRM_FORMAT_HOST_ARGB8888)
- return ERR_PTR(-ENOENT);
-
/* lookup object associated with res handle */
obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
if (!obj)
@@ -367,7 +363,6 @@ int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev)
if (ret)
return ret;
- vgdev->ddev->mode_config.quirk_addfb_prefer_host_byte_order = true;
vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs;
/* modes will be validated against the framebuffer size */
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index a126d1b..88b0ae8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -35,7 +35,14 @@
#include "virtgpu_drv.h"
static const uint32_t virtio_gpu_formats[] = {
- DRM_FORMAT_HOST_XRGB8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_ABGR8888,
};
static const uint32_t virtio_gpu_cursor_formats[] = {
@@ -47,6 +54,32 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
uint32_t format;
switch (drm_fourcc) {
+#ifdef __BIG_ENDIAN
+ case DRM_FORMAT_XRGB8888:
+ format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM;
+ break;
+ case DRM_FORMAT_ARGB8888:
+ format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
+ break;
+ case DRM_FORMAT_BGRA8888:
+ format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM;
+ break;
+ case DRM_FORMAT_RGBX8888:
+ format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
+ break;
+ case DRM_FORMAT_RGBA8888:
+ format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
+ break;
+#else
case DRM_FORMAT_XRGB8888:
format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
break;
@@ -59,6 +92,19 @@ uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc)
case DRM_FORMAT_BGRA8888:
format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM;
break;
+ case DRM_FORMAT_RGBX8888:
+ format = VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM;
+ break;
+ case DRM_FORMAT_RGBA8888:
+ format = VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ format = VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ format = VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM;
+ break;
+#endif
default:
/*
* This should not happen, we handle everything listed
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index f20dc46..f44e6e7 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -990,6 +990,9 @@ static const struct hid_usage_entry hid_usage_table[] = {
{ 0x0c, 0x01c9, "ALContactSync" },
{ 0x0c, 0x01ca, "ALNavigation" },
{ 0x0c, 0x01cb, "ALContextawareDesktopAssistant" },
+ { 0x0c, 0x01cc, "ALActionOnSelection" },
+ { 0x0c, 0x01cd, "ALContextualInsertion" },
+ { 0x0c, 0x01ce, "ALContextualQuery" },
{ 0x0c, 0x0200, "GenericGUIApplicationControls" },
{ 0x0c, 0x0201, "ACNew" },
{ 0x0c, 0x0202, "ACOpen" },
@@ -3375,6 +3378,9 @@ static const char *keys[KEY_MAX + 1] = {
[KEY_BRIGHTNESS_MIN] = "BrightnessMin",
[KEY_BRIGHTNESS_MAX] = "BrightnessMax",
[KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
+ [KEY_ACTION_ON_SELECTION] = "ActionOnSelection",
+ [KEY_CONTEXTUAL_INSERT] = "ContextualInsert",
+ [KEY_CONTEXTUAL_QUERY] = "ContextualQuery",
[KEY_KBDINPUTASSIST_PREV] = "KbdInputAssistPrev",
[KEY_KBDINPUTASSIST_NEXT] = "KbdInputAssistNext",
[KEY_KBDINPUTASSIST_PREVGROUP] = "KbdInputAssistPrevGroup",
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index d5308ad..d4d731b 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1223,6 +1223,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
case 0x1bc: map_key_clear(KEY_MESSENGER); break;
case 0x1bd: map_key_clear(KEY_INFO); break;
case 0x1cb: map_key_clear(KEY_ASSISTANT); break;
+ case 0x1cc: map_key_clear(KEY_ACTION_ON_SELECTION); break;
+ case 0x1cd: map_key_clear(KEY_CONTEXTUAL_INSERT); break;
+ case 0x1ce: map_key_clear(KEY_CONTEXTUAL_QUERY); break;
case 0x201: map_key_clear(KEY_NEW); break;
case 0x202: map_key_clear(KEY_OPEN); break;
case 0x203: map_key_clear(KEY_CLOSE); break;
diff --git a/drivers/interconnect/debugfs-client.c b/drivers/interconnect/debugfs-client.c
index 5107bff..8f87b50 100644
--- a/drivers/interconnect/debugfs-client.c
+++ b/drivers/interconnect/debugfs-client.c
@@ -13,7 +13,7 @@
* configuration option for this feature.
* People who want to use this will need to modify the source code directly.
*/
-#undef INTERCONNECT_ALLOW_WRITE_DEBUGFS
+#define INTERCONNECT_ALLOW_WRITE_DEBUGFS
#if defined(INTERCONNECT_ALLOW_WRITE_DEBUGFS) && defined(CONFIG_DEBUG_FS)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 5dac64b..095bfc1 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -33,6 +33,7 @@
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <trace/events/swiotlb.h>
+#include <trace/hooks/iommu.h>
#include "dma-iommu.h"
#include "iommu-pages.h"
@@ -789,6 +790,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);
done:
+ trace_android_vh_iommu_iovad_alloc_iova(dev, iovad, (dma_addr_t)iova << shift, size);
return (dma_addr_t)iova << shift;
}
@@ -807,6 +809,8 @@ static void iommu_dma_free_iova(struct iommu_domain *domain, dma_addr_t iova,
else
free_iova_fast(iovad, iova_pfn(iovad, iova),
size >> iova_shift(iovad));
+
+ trace_android_vh_iommu_iovad_free_iova(iovad, iova, size);
}
static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
@@ -2106,6 +2110,8 @@ void iommu_setup_dma_ops(struct device *dev, struct iommu_domain *domain)
if (dev->dma_iommu && iommu_dma_init_domain(domain, dev))
goto out_err;
+ trace_android_rvh_iommu_setup_dma_ops(dev);
+
return;
out_err:
pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 20f13b6..fb1f8c8 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -21,6 +21,7 @@
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/iopoll.h>
+#include <trace/hooks/gic_v3.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
@@ -1454,6 +1455,9 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
reg = gic_dist_base(d) + offset + (index * 8);
val = gic_cpu_to_affinity(cpu);
+ trace_android_rvh_gic_v3_set_affinity(d, mask_val, &val, force, gic_dist_base(d),
+ gic_data.redist_regions[0].redist_base,
+ gic_data.redist_stride);
gic_write_irouter(val, reg);
/*
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index ec70c84..1a8b1a4 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -40,6 +40,7 @@
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
+#include <trace/hooks/gic.h>
#include <asm/cputype.h>
#include <asm/irq.h>
@@ -814,6 +815,8 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
writeb_relaxed(gic_cpu_map[cpu], reg);
irq_data_update_effective_affinity(d, cpumask_of(cpu));
+ trace_android_vh_gic_set_affinity(d, mask_val, force, gic_cpu_map, reg);
+
return IRQ_SET_MASK_OK_DONE;
}
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index c58a9a8..1a29133 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -313,6 +313,27 @@
If unsure, say N.
+config DM_DEFAULT_KEY
+ tristate "Default-key target support"
+ depends on BLK_DEV_DM
+ depends on BLK_INLINE_ENCRYPTION
+ # dm-default-key doesn't require -o inlinecrypt, but it does currently
+ # rely on the inline encryption hooks being built into the kernel.
+ depends on FS_ENCRYPTION_INLINE_CRYPT
+ help
+ This device-mapper target allows you to create a device that
+ assigns a default encryption key to bios that aren't for the
+ contents of an encrypted file.
+
+ This ensures that all blocks on-disk will be encrypted with
+ some key, without the performance hit of file contents being
+ encrypted twice when fscrypt (File-Based Encryption) is used.
+
+ It is only appropriate to use dm-default-key when key
+ configuration is tightly controlled, like it is in Android,
+ such that all fscrypt keys are at least as hard to compromise
+ as the default key.
+
config DM_SNAPSHOT
tristate "Snapshot target"
depends on BLK_DEV_DM
@@ -688,6 +709,32 @@
Enables audit logging of several security relevant events in the
particular device-mapper targets, especially the integrity target.
+config DM_BOW
+ tristate "Backup block device"
+ depends on BLK_DEV_DM
+ select DM_BUFIO
+ help
+ This device-mapper target takes a device and keeps a log of all
+ changes using free blocks identified by issuing a trim command.
+ This can then be restored by running a command line utility,
+ or committed by simply replacing the target.
+
+ If unsure, say N.
+
+config DM_USER
+ tristate "Block device in userspace"
+ depends on BLK_DEV_DM
+ default y
+ help
+ This device-mapper target allows a userspace daemon to provide the
+ contents of a block device. See
+ <file:Documentation/block/dm-user.rst> for more information.
+
+ To compile this code as a module, choose M here: the module will be
+ called dm-user.
+
+ If unsure, say N.
+
source "drivers/md/dm-vdo/Kconfig"
source "drivers/md/dm-pcache/Kconfig"
diff --git a/drivers/md/Makefile b/drivers/md/Makefile
index c338cc6f..c82ab0d 100644
--- a/drivers/md/Makefile
+++ b/drivers/md/Makefile
@@ -55,6 +55,7 @@
obj-$(CONFIG_DM_BUFIO) += dm-bufio.o
obj-$(CONFIG_DM_BIO_PRISON) += dm-bio-prison.o
obj-$(CONFIG_DM_CRYPT) += dm-crypt.o
+obj-$(CONFIG_DM_DEFAULT_KEY) += dm-default-key.o
obj-$(CONFIG_DM_DELAY) += dm-delay.o
obj-$(CONFIG_DM_DUST) += dm-dust.o
obj-$(CONFIG_DM_FLAKEY) += dm-flakey.o
@@ -84,6 +85,8 @@
obj-$(CONFIG_DM_ZONED) += dm-zoned.o
obj-$(CONFIG_DM_WRITECACHE) += dm-writecache.o
obj-$(CONFIG_SECURITY_LOADPIN_VERITY) += dm-verity-loadpin.o
+obj-$(CONFIG_DM_BOW) += dm-bow.o
+obj-$(CONFIG_DM_USER) += dm-user.o
ifeq ($(CONFIG_DM_INIT),y)
dm-mod-objs += dm-init.o
diff --git a/drivers/md/TEST_MAPPING b/drivers/md/TEST_MAPPING
new file mode 100644
index 0000000..4e4d211
--- /dev/null
+++ b/drivers/md/TEST_MAPPING
@@ -0,0 +1,314 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.ConferenceTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/drivers/md/dm-bow.c b/drivers/md/dm-bow.c
new file mode 100644
index 0000000..bed1597
--- /dev/null
+++ b/drivers/md/dm-bow.c
@@ -0,0 +1,1348 @@
+/*
+ * Copyright (C) 2018 Google Limited.
+ *
+ * This file is released under the GPL.
+ */
+
+#include "dm.h"
+#include "dm-core.h"
+
+#include <linux/crc32.h>
+#include <linux/dm-bufio.h>
+#include <linux/module.h>
+
+#define DM_MSG_PREFIX "bow"
+
+struct log_entry {
+ u64 source;
+ u64 dest;
+ u32 size;
+ u32 checksum;
+} __packed;
+
+struct log_sector {
+ u32 magic;
+ u16 header_version;
+ u16 header_size;
+ u32 block_size;
+ u32 count;
+ u32 sequence;
+ sector_t sector0;
+ struct log_entry entries[];
+} __packed;
+
+/*
+ * MAGIC is BOW in ascii
+ */
+#define MAGIC 0x00574f42
+#define HEADER_VERSION 0x0100
+
+/*
+ * A sorted set of ranges representing the state of the data on the device.
+ * Use an rb_tree for fast lookup of a given sector
+ * Consecutive ranges are always of different type - operations on this
+ * set must merge matching consecutive ranges.
+ *
+ * Top range is always of type TOP
+ */
+struct bow_range {
+ struct rb_node node;
+ sector_t sector;
+ enum {
+ INVALID, /* Type not set */
+ SECTOR0, /* First sector - holds log record */
+ SECTOR0_CURRENT,/* Live contents of sector0 */
+ UNCHANGED, /* Original contents */
+ TRIMMED, /* Range has been trimmed */
+ CHANGED, /* Range has been changed */
+ BACKUP, /* Range is being used as a backup */
+ TOP, /* Final range - sector is size of device */
+ } type;
+ struct list_head trimmed_list; /* list of TRIMMED ranges */
+};
+
+static const char * const readable_type[] = {
+ "Invalid",
+ "Sector0",
+ "Sector0_current",
+ "Unchanged",
+ "Free",
+ "Changed",
+ "Backup",
+ "Top",
+};
+
+enum state {
+ TRIM,
+ CHECKPOINT,
+ COMMITTED,
+};
+
+struct bow_context {
+ struct dm_dev *dev;
+ u32 block_size;
+ u32 block_shift;
+ struct workqueue_struct *workqueue;
+ struct dm_bufio_client *bufio;
+ struct mutex ranges_lock; /* Hold to access this struct and/or ranges */
+ struct rb_root ranges;
+ struct dm_kobject_holder kobj_holder; /* for sysfs attributes */
+ struct mutex state_lock;
+ atomic_t state; /* One of the enum state values above */
+ u64 trims_total;
+ struct log_sector *log_sector;
+ struct list_head trimmed_list;
+ bool forward_trims;
+};
+
+static sector_t range_top(struct bow_range *br)
+{
+ return container_of(rb_next(&br->node), struct bow_range, node)
+ ->sector;
+}
+
+static u64 range_size(struct bow_range *br)
+{
+ return (range_top(br) - br->sector) * SECTOR_SIZE;
+}
+
+static sector_t bvec_top(struct bvec_iter *bi_iter)
+{
+ return bi_iter->bi_sector + bi_iter->bi_size / SECTOR_SIZE;
+}
+
+/*
+ * Find the first range that overlaps with bi_iter
+ * bi_iter is set to the size of the overlapping sub-range
+ */
+static struct bow_range *find_first_overlapping_range(struct rb_root *ranges,
+ struct bvec_iter *bi_iter)
+{
+ struct rb_node *node = ranges->rb_node;
+ struct bow_range *br;
+
+ while (node) {
+ br = container_of(node, struct bow_range, node);
+
+ if (br->sector <= bi_iter->bi_sector
+ && bi_iter->bi_sector < range_top(br))
+ break;
+
+ if (bi_iter->bi_sector < br->sector)
+ node = node->rb_left;
+ else
+ node = node->rb_right;
+ }
+
+ WARN_ON(!node);
+ if (!node)
+ return NULL;
+
+ if (range_top(br) - bi_iter->bi_sector
+ < bi_iter->bi_size >> SECTOR_SHIFT)
+ bi_iter->bi_size = (range_top(br) - bi_iter->bi_sector)
+ << SECTOR_SHIFT;
+
+ return br;
+}
+
+static void add_before(struct rb_root *ranges, struct bow_range *new_br,
+ struct bow_range *existing)
+{
+ struct rb_node *parent = &(existing->node);
+ struct rb_node **link = &(parent->rb_left);
+
+ while (*link) {
+ parent = *link;
+ link = &((*link)->rb_right);
+ }
+
+ rb_link_node(&new_br->node, parent, link);
+ rb_insert_color(&new_br->node, ranges);
+}
+
+/*
+ * Given a range br returned by find_first_overlapping_range, split br into a
+ * leading range, a range matching the bi_iter and a trailing range.
+ * Leading and trailing may end up size 0 and will then be deleted. The
+ * new range matching the bi_iter is then returned and should have its type
+ * and type specific fields populated.
+ * If bi_iter runs off the end of the range, bi_iter is truncated accordingly
+ */
+static int split_range(struct bow_context *bc, struct bow_range **br,
+ struct bvec_iter *bi_iter)
+{
+ struct bow_range *new_br;
+
+ if (bi_iter->bi_sector < (*br)->sector) {
+ WARN_ON(true);
+ return BLK_STS_IOERR;
+ }
+
+ if (bi_iter->bi_sector > (*br)->sector) {
+ struct bow_range *leading_br =
+ kzalloc(sizeof(*leading_br), GFP_KERNEL);
+
+ if (!leading_br)
+ return BLK_STS_RESOURCE;
+
+ *leading_br = **br;
+ if (leading_br->type == TRIMMED)
+ list_add(&leading_br->trimmed_list, &bc->trimmed_list);
+
+ add_before(&bc->ranges, leading_br, *br);
+ (*br)->sector = bi_iter->bi_sector;
+ }
+
+ if (bvec_top(bi_iter) >= range_top(*br)) {
+ bi_iter->bi_size = (range_top(*br) - (*br)->sector)
+ * SECTOR_SIZE;
+ return BLK_STS_OK;
+ }
+
+ /* new_br will be the beginning, existing br will be the tail */
+ new_br = kzalloc(sizeof(*new_br), GFP_KERNEL);
+ if (!new_br)
+ return BLK_STS_RESOURCE;
+
+ new_br->sector = (*br)->sector;
+ (*br)->sector = bvec_top(bi_iter);
+ add_before(&bc->ranges, new_br, *br);
+ *br = new_br;
+
+ return BLK_STS_OK;
+}
+
+/*
+ * Sets type of a range. May merge range into surrounding ranges
+ * Since br may be invalidated, always sets br to NULL to prevent
+ * usage after this is called
+ */
+static void set_type(struct bow_context *bc, struct bow_range **br, int type)
+{
+ struct bow_range *prev = container_of(rb_prev(&(*br)->node),
+ struct bow_range, node);
+ struct bow_range *next = container_of(rb_next(&(*br)->node),
+ struct bow_range, node);
+
+ if ((*br)->type == TRIMMED) {
+ bc->trims_total -= range_size(*br);
+ list_del(&(*br)->trimmed_list);
+ }
+
+ if (type == TRIMMED) {
+ bc->trims_total += range_size(*br);
+ list_add(&(*br)->trimmed_list, &bc->trimmed_list);
+ }
+
+ (*br)->type = type;
+
+ if (next->type == type) {
+ if (type == TRIMMED)
+ list_del(&next->trimmed_list);
+ rb_erase(&next->node, &bc->ranges);
+ kfree(next);
+ }
+
+ if (prev->type == type) {
+ if (type == TRIMMED)
+ list_del(&(*br)->trimmed_list);
+ rb_erase(&(*br)->node, &bc->ranges);
+ kfree(*br);
+ }
+
+ *br = NULL;
+}
+
+static struct bow_range *find_free_range(struct bow_context *bc)
+{
+ if (list_empty(&bc->trimmed_list)) {
+ DMERR("Unable to find free space to back up to");
+ return NULL;
+ }
+
+ return list_first_entry(&bc->trimmed_list, struct bow_range,
+ trimmed_list);
+}
+
+static sector_t sector_to_page(struct bow_context const *bc, sector_t sector)
+{
+ WARN_ON((sector & (((sector_t)1 << (bc->block_shift - SECTOR_SHIFT)) - 1))
+ != 0);
+ return sector >> (bc->block_shift - SECTOR_SHIFT);
+}
+
+static int copy_data(struct bow_context const *bc,
+ struct bow_range *source, struct bow_range *dest,
+ u32 *checksum)
+{
+ int i;
+
+ if (range_size(source) != range_size(dest)) {
+ WARN_ON(1);
+ return BLK_STS_IOERR;
+ }
+
+ if (checksum)
+ *checksum = sector_to_page(bc, source->sector);
+
+ for (i = 0; i < range_size(source) >> bc->block_shift; ++i) {
+ struct dm_buffer *read_buffer, *write_buffer;
+ u8 *read, *write;
+ sector_t page = sector_to_page(bc, source->sector) + i;
+
+ read = dm_bufio_read(bc->bufio, page, &read_buffer);
+ if (IS_ERR(read)) {
+ DMERR("Cannot read page %llu",
+ (unsigned long long)page);
+ return PTR_ERR(read);
+ }
+
+ if (checksum)
+ *checksum = crc32(*checksum, read, bc->block_size);
+
+ write = dm_bufio_new(bc->bufio,
+ sector_to_page(bc, dest->sector) + i,
+ &write_buffer);
+ if (IS_ERR(write)) {
+ DMERR("Cannot write sector");
+ dm_bufio_release(read_buffer);
+ return PTR_ERR(write);
+ }
+
+ memcpy(write, read, bc->block_size);
+
+ dm_bufio_mark_buffer_dirty(write_buffer);
+ dm_bufio_release(write_buffer);
+ dm_bufio_release(read_buffer);
+ }
+
+ dm_bufio_write_dirty_buffers(bc->bufio);
+ return BLK_STS_OK;
+}
+
+/****** logging functions ******/
+
+static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest,
+ unsigned int size, u32 checksum);
+
+static int backup_log_sector(struct bow_context *bc)
+{
+ struct bow_range *first_br, *free_br;
+ struct bvec_iter bi_iter;
+ u32 checksum = 0;
+ int ret;
+
+ first_br = container_of(rb_first(&bc->ranges), struct bow_range, node);
+
+ if (first_br->type != SECTOR0) {
+ WARN_ON(1);
+ return BLK_STS_IOERR;
+ }
+
+ if (range_size(first_br) != bc->block_size) {
+ WARN_ON(1);
+ return BLK_STS_IOERR;
+ }
+
+ free_br = find_free_range(bc);
+ /* No space left - return this error to userspace */
+ if (!free_br)
+ return BLK_STS_NOSPC;
+ bi_iter.bi_sector = free_br->sector;
+ bi_iter.bi_size = bc->block_size;
+ ret = split_range(bc, &free_br, &bi_iter);
+ if (ret)
+ return ret;
+ if (bi_iter.bi_size != bc->block_size) {
+ WARN_ON(1);
+ return BLK_STS_IOERR;
+ }
+
+ ret = copy_data(bc, first_br, free_br, &checksum);
+ if (ret)
+ return ret;
+
+ bc->log_sector->count = 0;
+ bc->log_sector->sequence++;
+ ret = add_log_entry(bc, first_br->sector, free_br->sector,
+ range_size(first_br), checksum);
+ if (ret)
+ return ret;
+
+ set_type(bc, &free_br, BACKUP);
+ return BLK_STS_OK;
+}
+
+static int add_log_entry(struct bow_context *bc, sector_t source, sector_t dest,
+ unsigned int size, u32 checksum)
+{
+ struct dm_buffer *sector_buffer;
+ u8 *sector;
+
+ if (sizeof(struct log_sector)
+ + sizeof(struct log_entry) * (bc->log_sector->count + 1)
+ > bc->block_size) {
+ int ret = backup_log_sector(bc);
+
+ if (ret)
+ return ret;
+ }
+
+ sector = dm_bufio_new(bc->bufio, 0, §or_buffer);
+ if (IS_ERR(sector)) {
+ DMERR("Cannot write boot sector");
+ dm_bufio_release(sector_buffer);
+ return BLK_STS_NOSPC;
+ }
+
+ bc->log_sector->entries[bc->log_sector->count].source = source;
+ bc->log_sector->entries[bc->log_sector->count].dest = dest;
+ bc->log_sector->entries[bc->log_sector->count].size = size;
+ bc->log_sector->entries[bc->log_sector->count].checksum = checksum;
+ bc->log_sector->count++;
+
+ memcpy(sector, bc->log_sector, bc->block_size);
+ dm_bufio_mark_buffer_dirty(sector_buffer);
+ dm_bufio_release(sector_buffer);
+ dm_bufio_write_dirty_buffers(bc->bufio);
+ return BLK_STS_OK;
+}
+
+static int prepare_log(struct bow_context *bc)
+{
+ struct bow_range *free_br, *first_br;
+ struct bvec_iter bi_iter;
+ u32 checksum = 0;
+ int ret;
+
+ /* Carve out first sector as log sector */
+ first_br = container_of(rb_first(&bc->ranges), struct bow_range, node);
+ if (first_br->type != UNCHANGED) {
+ WARN_ON(1);
+ return BLK_STS_IOERR;
+ }
+
+ if (range_size(first_br) < bc->block_size) {
+ WARN_ON(1);
+ return BLK_STS_IOERR;
+ }
+ bi_iter.bi_sector = 0;
+ bi_iter.bi_size = bc->block_size;
+ ret = split_range(bc, &first_br, &bi_iter);
+ if (ret)
+ return ret;
+ first_br->type = SECTOR0;
+ if (range_size(first_br) != bc->block_size) {
+ WARN_ON(1);
+ return BLK_STS_IOERR;
+ }
+
+ /* Find free sector for active sector0 reads/writes */
+ free_br = find_free_range(bc);
+ if (!free_br)
+ return BLK_STS_NOSPC;
+ bi_iter.bi_sector = free_br->sector;
+ bi_iter.bi_size = bc->block_size;
+ ret = split_range(bc, &free_br, &bi_iter);
+ if (ret)
+ return ret;
+
+ /* Copy data */
+ ret = copy_data(bc, first_br, free_br, NULL);
+ if (ret)
+ return ret;
+
+ bc->log_sector->sector0 = free_br->sector;
+
+ set_type(bc, &free_br, SECTOR0_CURRENT);
+
+ /* Find free sector to back up original sector zero */
+ free_br = find_free_range(bc);
+ if (!free_br)
+ return BLK_STS_NOSPC;
+ bi_iter.bi_sector = free_br->sector;
+ bi_iter.bi_size = bc->block_size;
+ ret = split_range(bc, &free_br, &bi_iter);
+ if (ret)
+ return ret;
+
+ /* Back up */
+ ret = copy_data(bc, first_br, free_br, &checksum);
+ if (ret)
+ return ret;
+
+ /*
+ * Set up our replacement boot sector - it will get written when we
+ * add the first log entry, which we do immediately
+ */
+ bc->log_sector->magic = MAGIC;
+ bc->log_sector->header_version = HEADER_VERSION;
+ bc->log_sector->header_size = sizeof(*bc->log_sector);
+ bc->log_sector->block_size = bc->block_size;
+ bc->log_sector->count = 0;
+ bc->log_sector->sequence = 0;
+
+ /* Add log entry */
+ ret = add_log_entry(bc, first_br->sector, free_br->sector,
+ range_size(first_br), checksum);
+ if (ret)
+ return ret;
+
+ set_type(bc, &free_br, BACKUP);
+ return BLK_STS_OK;
+}
+
+static struct bow_range *find_sector0_current(struct bow_context *bc)
+{
+ struct bvec_iter bi_iter;
+
+ bi_iter.bi_sector = bc->log_sector->sector0;
+ bi_iter.bi_size = bc->block_size;
+ return find_first_overlapping_range(&bc->ranges, &bi_iter);
+}
+
+/****** sysfs interface functions ******/
+
+static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct bow_context *bc = container_of(kobj, struct bow_context,
+ kobj_holder.kobj);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", atomic_read(&bc->state));
+}
+
+static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct bow_context *bc = container_of(kobj, struct bow_context,
+ kobj_holder.kobj);
+ enum state state, original_state;
+ int ret;
+
+ state = buf[0] - '0';
+ if (state < TRIM || state > COMMITTED) {
+ DMERR("State value %d out of range", state);
+ return -EINVAL;
+ }
+
+ /* Block new writes until state change is complete. */
+ mutex_lock(&bc->state_lock);
+
+ /* Flush any already-queued writes before the state change. */
+ flush_workqueue(bc->workqueue);
+
+ mutex_lock(&bc->ranges_lock);
+ original_state = atomic_read(&bc->state);
+ if (state != original_state + 1) {
+ DMERR("Invalid state change from %d to %d",
+ original_state, state);
+ ret = -EINVAL;
+ goto bad;
+ }
+
+ DMINFO("Switching to state %s", state == CHECKPOINT ? "Checkpoint"
+ : state == COMMITTED ? "Committed" : "Unknown");
+
+ if (state == CHECKPOINT) {
+ ret = prepare_log(bc);
+ if (ret) {
+ DMERR("Failed to switch to checkpoint state");
+ goto bad;
+ }
+ } else if (state == COMMITTED) {
+ struct bow_range *br = find_sector0_current(bc);
+ struct bow_range *sector0_br =
+ container_of(rb_first(&bc->ranges), struct bow_range,
+ node);
+
+ ret = copy_data(bc, br, sector0_br, 0);
+ if (ret) {
+ DMERR("Failed to switch to committed state");
+ goto bad;
+ }
+ }
+ atomic_inc(&bc->state);
+ ret = count;
+
+bad:
+ mutex_unlock(&bc->ranges_lock);
+ mutex_unlock(&bc->state_lock);
+
+ return ret;
+}
+
+static ssize_t free_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct bow_context *bc = container_of(kobj, struct bow_context,
+ kobj_holder.kobj);
+ u64 trims_total;
+
+ mutex_lock(&bc->ranges_lock);
+ trims_total = bc->trims_total;
+ mutex_unlock(&bc->ranges_lock);
+
+ return scnprintf(buf, PAGE_SIZE, "%llu\n", trims_total);
+}
+
+static struct kobj_attribute attr_state = __ATTR_RW(state);
+static struct kobj_attribute attr_free = __ATTR_RO(free);
+
+static struct attribute *bow_attrs[] = {
+ &attr_state.attr,
+ &attr_free.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(bow);
+
+static struct kobj_type bow_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .default_groups = bow_groups,
+ .release = dm_kobject_release
+};
+
+/****** constructor/destructor ******/
+
+static void dm_bow_dtr(struct dm_target *ti)
+{
+ struct bow_context *bc = (struct bow_context *) ti->private;
+ struct kobject *kobj;
+
+ if (bc->workqueue)
+ destroy_workqueue(bc->workqueue);
+ if (bc->bufio)
+ dm_bufio_client_destroy(bc->bufio);
+
+ kobj = &bc->kobj_holder.kobj;
+ if (kobj->state_initialized) {
+ kobject_put(kobj);
+ wait_for_completion(dm_get_completion_from_kobject(kobj));
+ }
+
+ mutex_lock(&bc->ranges_lock);
+ while (rb_first(&bc->ranges)) {
+ struct bow_range *br = container_of(rb_first(&bc->ranges),
+ struct bow_range, node);
+
+ rb_erase(&br->node, &bc->ranges);
+ kfree(br);
+ }
+ mutex_unlock(&bc->ranges_lock);
+
+ mutex_destroy(&bc->ranges_lock);
+ kfree(bc->log_sector);
+ kfree(bc);
+}
+
+static void dm_bow_io_hints(struct dm_target *ti, struct queue_limits *limits)
+{
+ struct bow_context *bc = ti->private;
+ const unsigned int block_size = bc->block_size;
+
+ limits->logical_block_size =
+ max_t(unsigned int, limits->logical_block_size, block_size);
+ limits->physical_block_size =
+ max_t(unsigned int, limits->physical_block_size, block_size);
+ limits->io_min = max_t(unsigned int, limits->io_min, block_size);
+
+ if (limits->max_discard_sectors == 0) {
+ limits->discard_granularity = 1 << 12;
+ limits->max_hw_discard_sectors = 1 << 15;
+ limits->max_discard_sectors = 1 << 15;
+ bc->forward_trims = false;
+ } else {
+ limits->discard_granularity = 1 << 12;
+ bc->forward_trims = true;
+ }
+}
+
+static int dm_bow_ctr_optional(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct bow_context *bc = ti->private;
+ struct dm_arg_set as;
+ static const struct dm_arg _args[] = {
+ {0, 1, "Invalid number of feature args"},
+ };
+ unsigned int opt_params;
+ const char *opt_string;
+ int err;
+ char dummy;
+
+ as.argc = argc;
+ as.argv = argv;
+
+ err = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
+ if (err)
+ return err;
+
+ while (opt_params--) {
+ opt_string = dm_shift_arg(&as);
+ if (!opt_string) {
+ ti->error = "Not enough feature arguments";
+ return -EINVAL;
+ }
+
+ if (sscanf(opt_string, "block_size:%u%c",
+ &bc->block_size, &dummy) == 1) {
+ if (bc->block_size < SECTOR_SIZE ||
+ bc->block_size > 4096 ||
+ !is_power_of_2(bc->block_size)) {
+ ti->error = "Invalid block_size";
+ return -EINVAL;
+ }
+ } else {
+ ti->error = "Invalid feature arguments";
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int dm_bow_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct bow_context *bc;
+ struct bow_range *br;
+ int ret;
+
+ if (argc < 1) {
+ ti->error = "Invalid argument count";
+ return -EINVAL;
+ }
+
+ bc = kzalloc(sizeof(*bc), GFP_KERNEL);
+ if (!bc) {
+ ti->error = "Cannot allocate bow context";
+ return -ENOMEM;
+ }
+
+ ti->num_flush_bios = 1;
+ ti->num_discard_bios = 1;
+ ti->private = bc;
+
+ ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table),
+ &bc->dev);
+ if (ret) {
+ ti->error = "Device lookup failed";
+ goto bad;
+ }
+
+ bc->block_size =
+ bdev_get_queue(bc->dev->bdev)->limits.logical_block_size;
+ if (argc > 1) {
+ ret = dm_bow_ctr_optional(ti, argc - 1, &argv[1]);
+ if (ret)
+ goto bad;
+ }
+
+ bc->block_shift = ilog2(bc->block_size);
+ bc->log_sector = kzalloc(bc->block_size, GFP_KERNEL);
+ if (!bc->log_sector) {
+ ti->error = "Cannot allocate log sector";
+ goto bad;
+ }
+
+ init_completion(&bc->kobj_holder.completion);
+ mutex_init(&bc->state_lock);
+ mutex_init(&bc->ranges_lock);
+ bc->ranges = RB_ROOT;
+ bc->bufio = dm_bufio_client_create(bc->dev->bdev, bc->block_size, 1, 0,
+ NULL, NULL, 0);
+ if (IS_ERR(bc->bufio)) {
+ ti->error = "Cannot initialize dm-bufio";
+ ret = PTR_ERR(bc->bufio);
+ bc->bufio = NULL;
+ goto bad;
+ }
+
+ bc->workqueue = alloc_workqueue("dm-bow",
+ WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM
+ | WQ_UNBOUND, num_online_cpus());
+ if (!bc->workqueue) {
+ ti->error = "Cannot allocate workqueue";
+ ret = -ENOMEM;
+ goto bad;
+ }
+
+ INIT_LIST_HEAD(&bc->trimmed_list);
+
+ br = kzalloc(sizeof(*br), GFP_KERNEL);
+ if (!br) {
+ ti->error = "Cannot allocate ranges";
+ ret = -ENOMEM;
+ goto bad;
+ }
+
+ br->sector = ti->len;
+ br->type = TOP;
+ rb_link_node(&br->node, NULL, &bc->ranges.rb_node);
+ rb_insert_color(&br->node, &bc->ranges);
+
+ br = kzalloc(sizeof(*br), GFP_KERNEL);
+ if (!br) {
+ ti->error = "Cannot allocate ranges";
+ ret = -ENOMEM;
+ goto bad;
+ }
+
+ br->sector = 0;
+ br->type = UNCHANGED;
+ rb_link_node(&br->node, bc->ranges.rb_node,
+ &bc->ranges.rb_node->rb_left);
+ rb_insert_color(&br->node, &bc->ranges);
+
+ ti->discards_supported = true;
+
+ return 0;
+
+bad:
+ dm_bow_dtr(ti);
+ return ret;
+}
+
+static void dm_bow_resume(struct dm_target *ti)
+{
+ struct mapped_device *md = dm_table_get_md(ti->table);
+ struct bow_context *bc = ti->private;
+ int ret;
+
+ if (bc->kobj_holder.kobj.state_initialized)
+ return;
+
+ ret = kobject_init_and_add(&bc->kobj_holder.kobj, &bow_ktype,
+ &disk_to_dev(dm_disk(md))->kobj, "%s",
+ "bow");
+ if (ret)
+ ti->error = "Cannot create sysfs node";
+}
+
+/****** Handle writes ******/
+
+static int prepare_unchanged_range(struct bow_context *bc, struct bow_range *br,
+ struct bvec_iter *bi_iter,
+ bool record_checksum)
+{
+ struct bow_range *backup_br;
+ struct bvec_iter backup_bi;
+ sector_t log_source, log_dest;
+ unsigned int log_size;
+ u32 checksum = 0;
+ int ret;
+ int original_type;
+ sector_t sector0;
+
+ /* Find a free range */
+ backup_br = find_free_range(bc);
+ if (!backup_br)
+ return BLK_STS_NOSPC;
+
+ /* Carve out a backup range. This may be smaller than the br given */
+ backup_bi.bi_sector = backup_br->sector;
+ backup_bi.bi_size = min(range_size(backup_br), (u64) bi_iter->bi_size);
+ ret = split_range(bc, &backup_br, &backup_bi);
+ if (ret)
+ return ret;
+
+ /*
+ * Carve out a changed range. This will not be smaller than the backup
+ * br since the backup br is smaller than the source range and iterator
+ */
+ bi_iter->bi_size = backup_bi.bi_size;
+ ret = split_range(bc, &br, bi_iter);
+ if (ret)
+ return ret;
+ if (range_size(br) != range_size(backup_br)) {
+ WARN_ON(1);
+ return BLK_STS_IOERR;
+ }
+
+
+ /* Copy data over */
+ ret = copy_data(bc, br, backup_br, record_checksum ? &checksum : NULL);
+ if (ret)
+ return ret;
+
+ /* Add an entry to the log */
+ log_source = br->sector;
+ log_dest = backup_br->sector;
+ log_size = range_size(br);
+
+ /*
+ * Set the types. Note that since set_type also amalgamates ranges
+ * we have to set both sectors to their final type before calling
+ * set_type on either
+ */
+ original_type = br->type;
+ sector0 = backup_br->sector;
+ bc->trims_total -= range_size(backup_br);
+ if (backup_br->type == TRIMMED)
+ list_del(&backup_br->trimmed_list);
+ backup_br->type = br->type == SECTOR0_CURRENT ? SECTOR0_CURRENT
+ : BACKUP;
+ br->type = CHANGED;
+ set_type(bc, &backup_br, backup_br->type);
+
+ /*
+ * Add the log entry after marking the backup sector, since adding a log
+ * can cause another backup
+ */
+ ret = add_log_entry(bc, log_source, log_dest, log_size, checksum);
+ if (ret) {
+ br->type = original_type;
+ return ret;
+ }
+
+ /* Now it is safe to mark this backup successful */
+ if (original_type == SECTOR0_CURRENT)
+ bc->log_sector->sector0 = sector0;
+
+ set_type(bc, &br, br->type);
+ return ret;
+}
+
+static int prepare_free_range(struct bow_context *bc, struct bow_range *br,
+ struct bvec_iter *bi_iter)
+{
+ int ret;
+
+ ret = split_range(bc, &br, bi_iter);
+ if (ret)
+ return ret;
+ set_type(bc, &br, CHANGED);
+ return BLK_STS_OK;
+}
+
+static int prepare_changed_range(struct bow_context *bc, struct bow_range *br,
+ struct bvec_iter *bi_iter)
+{
+ /* Nothing to do ... */
+ return BLK_STS_OK;
+}
+
+static int prepare_one_range(struct bow_context *bc,
+ struct bvec_iter *bi_iter)
+{
+ struct bow_range *br = find_first_overlapping_range(&bc->ranges,
+ bi_iter);
+ switch (br->type) {
+ case CHANGED:
+ return prepare_changed_range(bc, br, bi_iter);
+
+ case TRIMMED:
+ return prepare_free_range(bc, br, bi_iter);
+
+ case UNCHANGED:
+ case BACKUP:
+ return prepare_unchanged_range(bc, br, bi_iter, true);
+
+ /*
+ * We cannot track the checksum for the active sector0, since it
+ * may change at any point.
+ */
+ case SECTOR0_CURRENT:
+ return prepare_unchanged_range(bc, br, bi_iter, false);
+
+ case SECTOR0: /* Handled in the dm_bow_map */
+ case TOP: /* Illegal - top is off the end of the device */
+ default:
+ WARN_ON(1);
+ return BLK_STS_IOERR;
+ }
+}
+
+struct write_work {
+ struct work_struct work;
+ struct bow_context *bc;
+ struct bio *bio;
+};
+
+static void bow_write(struct work_struct *work)
+{
+ struct write_work *ww = container_of(work, struct write_work, work);
+ struct bow_context *bc = ww->bc;
+ struct bio *bio = ww->bio;
+ struct bvec_iter bi_iter = bio->bi_iter;
+ int ret = BLK_STS_OK;
+
+ kfree(ww);
+
+ mutex_lock(&bc->ranges_lock);
+ do {
+ ret = prepare_one_range(bc, &bi_iter);
+ bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
+ bi_iter.bi_size = bio->bi_iter.bi_size
+ - (bi_iter.bi_sector - bio->bi_iter.bi_sector)
+ * SECTOR_SIZE;
+ } while (!ret && bi_iter.bi_size);
+
+ mutex_unlock(&bc->ranges_lock);
+
+ if (!ret) {
+ bio_set_dev(bio, bc->dev->bdev);
+ submit_bio(bio);
+ } else {
+ DMERR("Write failure with error %d", -ret);
+ bio->bi_status = ret;
+ bio_endio(bio);
+ }
+}
+
+static int queue_write(struct bow_context *bc, struct bio *bio)
+{
+ struct write_work *ww = kmalloc(sizeof(*ww), GFP_NOIO | __GFP_NORETRY
+ | __GFP_NOMEMALLOC | __GFP_NOWARN);
+ if (!ww) {
+ DMERR("Failed to allocate write_work");
+ return -ENOMEM;
+ }
+
+ INIT_WORK(&ww->work, bow_write);
+ ww->bc = bc;
+ ww->bio = bio;
+ queue_work(bc->workqueue, &ww->work);
+ return DM_MAPIO_SUBMITTED;
+}
+
+static int handle_sector0(struct bow_context *bc, struct bio *bio)
+{
+ int ret = DM_MAPIO_REMAPPED;
+
+ if (bio->bi_iter.bi_size > bc->block_size) {
+ struct bio * split = bio_split(bio,
+ bc->block_size >> SECTOR_SHIFT,
+ GFP_NOIO,
+ &fs_bio_set);
+ if (!split) {
+ DMERR("Failed to split bio");
+ bio->bi_status = BLK_STS_RESOURCE;
+ bio_endio(bio);
+ return DM_MAPIO_SUBMITTED;
+ }
+
+ bio_chain(split, bio);
+ split->bi_iter.bi_sector = bc->log_sector->sector0;
+ bio_set_dev(split, bc->dev->bdev);
+ submit_bio(split);
+
+ if (bio_data_dir(bio) == WRITE)
+ ret = queue_write(bc, bio);
+ } else {
+ bio->bi_iter.bi_sector = bc->log_sector->sector0;
+ }
+
+ return ret;
+}
+
+static int add_trim(struct bow_context *bc, struct bio *bio)
+{
+ struct bow_range *br;
+ struct bvec_iter bi_iter = bio->bi_iter;
+
+ DMDEBUG("add_trim: %llu, %u",
+ (unsigned long long)bio->bi_iter.bi_sector,
+ bio->bi_iter.bi_size);
+
+ do {
+ br = find_first_overlapping_range(&bc->ranges, &bi_iter);
+
+ switch (br->type) {
+ case UNCHANGED:
+ if (!split_range(bc, &br, &bi_iter))
+ set_type(bc, &br, TRIMMED);
+ break;
+
+ case TRIMMED:
+ /* Nothing to do */
+ break;
+
+ default:
+ /* No other case is legal in TRIM state */
+ WARN_ON(true);
+ break;
+ }
+
+ bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
+ bi_iter.bi_size = bio->bi_iter.bi_size
+ - (bi_iter.bi_sector - bio->bi_iter.bi_sector)
+ * SECTOR_SIZE;
+
+ } while (bi_iter.bi_size);
+
+ bio_endio(bio);
+ return DM_MAPIO_SUBMITTED;
+}
+
+static int remove_trim(struct bow_context *bc, struct bio *bio)
+{
+ struct bow_range *br;
+ struct bvec_iter bi_iter = bio->bi_iter;
+
+ DMDEBUG("remove_trim: %llu, %u",
+ (unsigned long long)bio->bi_iter.bi_sector,
+ bio->bi_iter.bi_size);
+
+ do {
+ br = find_first_overlapping_range(&bc->ranges, &bi_iter);
+
+ switch (br->type) {
+ case UNCHANGED:
+ /* Nothing to do */
+ break;
+
+ case TRIMMED:
+ if (!split_range(bc, &br, &bi_iter))
+ set_type(bc, &br, UNCHANGED);
+ break;
+
+ default:
+ /* No other case is legal in TRIM state */
+ WARN_ON(true);
+ break;
+ }
+
+ bi_iter.bi_sector += bi_iter.bi_size / SECTOR_SIZE;
+ bi_iter.bi_size = bio->bi_iter.bi_size
+ - (bi_iter.bi_sector - bio->bi_iter.bi_sector)
+ * SECTOR_SIZE;
+
+ } while (bi_iter.bi_size);
+
+ return DM_MAPIO_REMAPPED;
+}
+
+static int remap_unless_illegal_trim(struct bow_context *bc, struct bio *bio)
+{
+ if (!bc->forward_trims && bio_op(bio) == REQ_OP_DISCARD) {
+ bio->bi_status = BLK_STS_NOTSUPP;
+ bio_endio(bio);
+ return DM_MAPIO_SUBMITTED;
+ } else {
+ bio_set_dev(bio, bc->dev->bdev);
+ return DM_MAPIO_REMAPPED;
+ }
+}
+
+/****** dm interface ******/
+
+static int dm_bow_map(struct dm_target *ti, struct bio *bio)
+{
+ int ret = DM_MAPIO_REMAPPED;
+ struct bow_context *bc = ti->private;
+
+ /* Fast path when already committed or when performing a read. */
+
+ if (likely(bc->state.counter == COMMITTED))
+ return remap_unless_illegal_trim(bc, bio);
+
+ if (bio_data_dir(bio) == READ && bio->bi_iter.bi_sector != 0)
+ return remap_unless_illegal_trim(bc, bio);
+
+ if (bio->bi_iter.bi_size == 0)
+ return remap_unless_illegal_trim(bc, bio);
+
+ /*
+ * Fall back to the slower path when we may be in TRIM/CHECKPOINT.
+ * Operations must wait for any pending state changes to complete.
+ */
+
+ mutex_lock(&bc->state_lock);
+
+ if (atomic_read(&bc->state) != COMMITTED) {
+ enum state state;
+
+ mutex_lock(&bc->ranges_lock);
+ state = atomic_read(&bc->state);
+ if (state == TRIM) {
+ if (bio_op(bio) == REQ_OP_DISCARD)
+ ret = add_trim(bc, bio);
+ else if (bio_data_dir(bio) == WRITE)
+ ret = remove_trim(bc, bio);
+ /* else pass-through */
+ } else if (state == CHECKPOINT) {
+ if (bio->bi_iter.bi_sector == 0)
+ ret = handle_sector0(bc, bio);
+ else if (bio_op(bio) == REQ_OP_DISCARD) {
+ /*
+ * Ignore discard requests in CHECKPOINT state.
+ * Passing them through would physically erase data that we
+ * are trying to protect, creating a state mismatch.
+ * We complete the bio with success and stop processing.
+ */
+ bio_endio(bio);
+ ret = DM_MAPIO_SUBMITTED;
+ } else if (bio_data_dir(bio) == WRITE)
+ ret = queue_write(bc, bio);
+ /* else pass-through */
+ }
+ /* else pass-through */
+ mutex_unlock(&bc->ranges_lock);
+ }
+
+ mutex_unlock(&bc->state_lock);
+
+ if (ret == DM_MAPIO_REMAPPED)
+ return remap_unless_illegal_trim(bc, bio);
+
+ return ret;
+}
+
+static void dm_bow_tablestatus(struct dm_target *ti, char *result,
+ unsigned int maxlen)
+{
+ char *end = result + maxlen;
+ struct bow_context *bc = ti->private;
+ struct rb_node *i;
+ int trimmed_list_length = 0;
+ int trimmed_range_count = 0;
+ struct bow_range *br;
+
+ if (maxlen == 0)
+ return;
+ result[0] = 0;
+
+ list_for_each_entry(br, &bc->trimmed_list, trimmed_list)
+ if (br->type == TRIMMED) {
+ ++trimmed_list_length;
+ } else {
+ scnprintf(result, end - result,
+ "ERROR: non-trimmed entry in trimmed_list");
+ return;
+ }
+
+ if (!rb_first(&bc->ranges)) {
+ scnprintf(result, end - result, "ERROR: Empty ranges");
+ return;
+ }
+
+ if (container_of(rb_first(&bc->ranges), struct bow_range, node)
+ ->sector) {
+ scnprintf(result, end - result,
+ "ERROR: First range does not start at sector 0");
+ return;
+ }
+
+ mutex_lock(&bc->ranges_lock);
+ for (i = rb_first(&bc->ranges); i; i = rb_next(i)) {
+ struct bow_range *br = container_of(i, struct bow_range, node);
+
+ result += scnprintf(result, end - result, "%s: %llu",
+ readable_type[br->type],
+ (unsigned long long)br->sector);
+ if (result >= end)
+ goto unlock;
+
+ result += scnprintf(result, end - result, "\n");
+ if (result >= end)
+ goto unlock;
+
+ if (br->type == TRIMMED)
+ ++trimmed_range_count;
+
+ if (br->type == TOP) {
+ if (br->sector != ti->len) {
+ scnprintf(result, end - result,
+ "\nERROR: Top sector is incorrect");
+ }
+
+ if (&br->node != rb_last(&bc->ranges)) {
+ scnprintf(result, end - result,
+ "\nERROR: Top sector is not last");
+ }
+
+ break;
+ }
+
+ if (!rb_next(i)) {
+ scnprintf(result, end - result,
+ "\nERROR: Last range not of type TOP");
+ goto unlock;
+ }
+
+ if (br->sector > range_top(br)) {
+ scnprintf(result, end - result,
+ "\nERROR: sectors out of order");
+ goto unlock;
+ }
+ }
+
+ if (trimmed_range_count != trimmed_list_length)
+ scnprintf(result, end - result,
+ "\nERROR: not all trimmed ranges in trimmed list");
+
+unlock:
+ mutex_unlock(&bc->ranges_lock);
+}
+
+static void dm_bow_status(struct dm_target *ti, status_type_t type,
+ unsigned int status_flags, char *result,
+ unsigned int maxlen)
+{
+ switch (type) {
+ case STATUSTYPE_INFO:
+ case STATUSTYPE_IMA:
+ if (maxlen)
+ result[0] = 0;
+ break;
+
+ case STATUSTYPE_TABLE:
+ dm_bow_tablestatus(ti, result, maxlen);
+ break;
+ }
+}
+
+static int dm_bow_prepare_ioctl(struct dm_target *ti,
+ struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
+{
+ struct bow_context *bc = ti->private;
+ struct dm_dev *dev = bc->dev;
+
+ *bdev = dev->bdev;
+ /* Only pass ioctls through if the device sizes match exactly. */
+ return ti->len != bdev_nr_sectors(dev->bdev);
+}
+
+static int dm_bow_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn, void *data)
+{
+ struct bow_context *bc = ti->private;
+
+ return fn(ti, bc->dev, 0, ti->len, data);
+}
+
+static struct target_type bow_target = {
+ .name = "bow",
+ .version = {1, 2, 0},
+ .features = DM_TARGET_PASSES_CRYPTO,
+ .module = THIS_MODULE,
+ .ctr = dm_bow_ctr,
+ .resume = dm_bow_resume,
+ .dtr = dm_bow_dtr,
+ .map = dm_bow_map,
+ .status = dm_bow_status,
+ .prepare_ioctl = dm_bow_prepare_ioctl,
+ .iterate_devices = dm_bow_iterate_devices,
+ .io_hints = dm_bow_io_hints,
+};
+
+static int __init dm_bow_init(void)
+{
+ int r = dm_register_target(&bow_target);
+
+ if (r < 0)
+ DMERR("registering bow failed %d", r);
+ return r;
+}
+
+static void dm_bow_exit(void)
+{
+ dm_unregister_target(&bow_target);
+}
+
+MODULE_LICENSE("GPL");
+
+module_init(dm_bow_init);
+module_exit(dm_bow_exit);
diff --git a/drivers/md/dm-default-key.c b/drivers/md/dm-default-key.c
new file mode 100644
index 0000000..3b2a8e7
--- /dev/null
+++ b/drivers/md/dm-default-key.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Google, Inc.
+ */
+
+#include <linux/blk-crypto.h>
+#include <linux/device-mapper.h>
+#include <linux/hex.h>
+#include <linux/module.h>
+
+#define DM_MSG_PREFIX "default-key"
+
+static const struct dm_default_key_cipher {
+ const char *name;
+ enum blk_crypto_mode_num mode_num;
+ int key_size;
+} dm_default_key_ciphers[] = {
+ {
+ .name = "aes-xts-plain64",
+ .mode_num = BLK_ENCRYPTION_MODE_AES_256_XTS,
+ .key_size = 64,
+ }, {
+ .name = "xchacha12,aes-adiantum-plain64",
+ .mode_num = BLK_ENCRYPTION_MODE_ADIANTUM,
+ .key_size = 32,
+ },
+};
+
+/**
+ * struct dm_default_c - private data of a default-key target
+ * @dev: the underlying device
+ * @start: starting sector of the range of @dev which this target actually maps.
+ * For this purpose a "sector" is 512 bytes.
+ * @cipher_string: the name of the encryption algorithm being used
+ * @iv_offset: starting offset for IVs. IVs are generated as if the target were
+ * preceded by @iv_offset 512-byte sectors.
+ * @sector_size: crypto sector size in bytes (usually 4096)
+ * @sector_bits: log2(sector_size)
+ * @key: the encryption key to use
+ * @max_dun: the maximum DUN that may be used (computed from other params)
+ */
+struct default_key_c {
+ struct dm_dev *dev;
+ sector_t start;
+ const char *cipher_string;
+ u64 iv_offset;
+ unsigned int sector_size;
+ unsigned int sector_bits;
+ struct blk_crypto_key key;
+ enum blk_crypto_key_type key_type;
+ u64 max_dun;
+};
+
+static const struct dm_default_key_cipher *
+lookup_cipher(const char *cipher_string)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dm_default_key_ciphers); i++) {
+ if (strcmp(cipher_string, dm_default_key_ciphers[i].name) == 0)
+ return &dm_default_key_ciphers[i];
+ }
+ return NULL;
+}
+
+static void default_key_dtr(struct dm_target *ti)
+{
+ struct default_key_c *dkc = ti->private;
+ struct blk_crypto_key *blk_key = &dkc->key;
+
+ if (dkc->dev) {
+ if (blk_key->size > 0)
+ blk_crypto_evict_key(dkc->dev->bdev, blk_key);
+ dm_put_device(ti, dkc->dev);
+ }
+ kfree_sensitive(dkc->cipher_string);
+ kfree_sensitive(dkc);
+}
+
+static int default_key_ctr_optional(struct dm_target *ti,
+ unsigned int argc, char **argv)
+{
+ struct default_key_c *dkc = ti->private;
+ struct dm_arg_set as;
+ static const struct dm_arg _args[] = {
+ {0, 4, "Invalid number of feature args"},
+ };
+ unsigned int opt_params;
+ const char *opt_string;
+ bool iv_large_sectors = false;
+ char dummy;
+ int err;
+
+ as.argc = argc;
+ as.argv = argv;
+
+ err = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
+ if (err)
+ return err;
+
+ while (opt_params--) {
+ opt_string = dm_shift_arg(&as);
+ if (!opt_string) {
+ ti->error = "Not enough feature arguments";
+ return -EINVAL;
+ }
+ if (!strcmp(opt_string, "allow_discards")) {
+ ti->num_discard_bios = 1;
+ } else if (sscanf(opt_string, "sector_size:%u%c",
+ &dkc->sector_size, &dummy) == 1) {
+ if (dkc->sector_size < SECTOR_SIZE ||
+ dkc->sector_size > 4096 ||
+ !is_power_of_2(dkc->sector_size)) {
+ ti->error = "Invalid sector_size";
+ return -EINVAL;
+ }
+ } else if (!strcmp(opt_string, "iv_large_sectors")) {
+ iv_large_sectors = true;
+ } else if (!strcmp(opt_string, "wrappedkey_v0")) {
+ dkc->key_type = BLK_CRYPTO_KEY_TYPE_HW_WRAPPED;
+ } else {
+ ti->error = "Invalid feature arguments";
+ return -EINVAL;
+ }
+ }
+
+ /* dm-default-key doesn't implement iv_large_sectors=false. */
+ if (dkc->sector_size != SECTOR_SIZE && !iv_large_sectors) {
+ ti->error = "iv_large_sectors must be specified";
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Construct a default-key mapping:
+ * <cipher> <key> <iv_offset> <dev_path> <start>
+ *
+ * This syntax matches dm-crypt's, but lots of unneeded functionality has been
+ * removed. Also, dm-default-key requires that the "iv_large_sectors" option be
+ * given whenever a non-default sector size is used.
+ */
+static int default_key_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct default_key_c *dkc;
+ const struct dm_default_key_cipher *cipher;
+ u8 key_bytes[BLK_CRYPTO_MAX_ANY_KEY_SIZE];
+ unsigned int key_size;
+ unsigned int dun_bytes;
+ unsigned long long tmpll;
+ char dummy;
+ int err;
+
+ if (argc < 5) {
+ ti->error = "Not enough arguments";
+ return -EINVAL;
+ }
+
+ dkc = kzalloc(sizeof(*dkc), GFP_KERNEL);
+ if (!dkc) {
+ ti->error = "Out of memory";
+ return -ENOMEM;
+ }
+ ti->private = dkc;
+ dkc->key_type = BLK_CRYPTO_KEY_TYPE_RAW;
+
+ /* <cipher> */
+ dkc->cipher_string = kstrdup(argv[0], GFP_KERNEL);
+ if (!dkc->cipher_string) {
+ ti->error = "Out of memory";
+ err = -ENOMEM;
+ goto bad;
+ }
+ cipher = lookup_cipher(dkc->cipher_string);
+ if (!cipher) {
+ ti->error = "Unsupported cipher";
+ err = -EINVAL;
+ goto bad;
+ }
+
+ /* <key> */
+ key_size = strlen(argv[1]);
+ if (key_size > 2 * BLK_CRYPTO_MAX_ANY_KEY_SIZE || key_size % 2) {
+ ti->error = "Invalid keysize";
+ err = -EINVAL;
+ goto bad;
+ }
+ key_size /= 2;
+ if (hex2bin(key_bytes, argv[1], key_size) != 0) {
+ ti->error = "Malformed key string";
+ err = -EINVAL;
+ goto bad;
+ }
+
+ /* <iv_offset> */
+ if (sscanf(argv[2], "%llu%c", &dkc->iv_offset, &dummy) != 1) {
+ ti->error = "Invalid iv_offset sector";
+ err = -EINVAL;
+ goto bad;
+ }
+
+ /* <dev_path> */
+ err = dm_get_device(ti, argv[3], dm_table_get_mode(ti->table),
+ &dkc->dev);
+ if (err) {
+ ti->error = "Device lookup failed";
+ goto bad;
+ }
+
+ /* <start> */
+ if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1 ||
+ tmpll != (sector_t)tmpll) {
+ ti->error = "Invalid start sector";
+ err = -EINVAL;
+ goto bad;
+ }
+ dkc->start = tmpll;
+
+ if (bdev_is_zoned(dkc->dev->bdev)) {
+ /*
+ * All zone append writes to a zone of a zoned block device will
+ * have the same BIO sector, the start of the zone. When the
+ * cypher IV mode uses sector values, all data targeting a
+ * zone will be encrypted using the first sector numbers of the
+ * zone. This will not result in write errors but will
+ * cause most reads to fail as reads will use the sector values
+ * for the actual data locations, resulting in IV mismatch.
+ * To avoid this problem, ask DM core to emulate zone append
+ * operations with regular writes.
+ */
+ DMDEBUG("Zone append operations will be emulated");
+ ti->emulate_zone_append = true;
+ }
+
+ /* optional arguments */
+ dkc->sector_size = SECTOR_SIZE;
+ if (argc > 5) {
+ err = default_key_ctr_optional(ti, argc - 5, &argv[5]);
+ if (err)
+ goto bad;
+ }
+ dkc->sector_bits = ilog2(dkc->sector_size);
+ if (ti->len & ((dkc->sector_size >> SECTOR_SHIFT) - 1)) {
+ ti->error = "Device size is not a multiple of sector_size";
+ err = -EINVAL;
+ goto bad;
+ }
+
+ dkc->max_dun = (dkc->iv_offset + ti->len - 1) >>
+ (dkc->sector_bits - SECTOR_SHIFT);
+ dun_bytes = DIV_ROUND_UP(fls64(dkc->max_dun), 8);
+
+ err = blk_crypto_init_key(&dkc->key, key_bytes, key_size,
+ dkc->key_type, cipher->mode_num,
+ dun_bytes, dkc->sector_size);
+ if (err) {
+ ti->error = "Error initializing blk-crypto key";
+ goto bad;
+ }
+
+ err = blk_crypto_start_using_key(dkc->dev->bdev, &dkc->key);
+ if (err) {
+ ti->error = "Error starting to use blk-crypto";
+ goto bad;
+ }
+
+ ti->num_flush_bios = 1;
+
+ err = 0;
+ goto out;
+
+bad:
+ default_key_dtr(ti);
+out:
+ memzero_explicit(key_bytes, sizeof(key_bytes));
+ return err;
+}
+
+static int default_key_map(struct dm_target *ti, struct bio *bio)
+{
+ const struct default_key_c *dkc = ti->private;
+ sector_t sector_in_target;
+ u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { 0 };
+
+ bio_set_dev(bio, dkc->dev->bdev);
+
+ /*
+ * If the bio is a device-level request which doesn't target a specific
+ * sector, there's nothing more to do.
+ */
+ if (bio_sectors(bio) == 0)
+ return DM_MAPIO_REMAPPED;
+
+ /* Map the bio's sector to the underlying device. (512-byte sectors) */
+ sector_in_target = dm_target_offset(ti, bio->bi_iter.bi_sector);
+ bio->bi_iter.bi_sector = dkc->start + sector_in_target;
+
+ /*
+ * If the bio should skip dm-default-key (i.e. if it's for an encrypted
+ * file's contents), or if it doesn't have any data (e.g. if it's a
+ * DISCARD request), there's nothing more to do.
+ */
+ if (bio_should_skip_dm_default_key(bio) || !bio_has_data(bio))
+ return DM_MAPIO_REMAPPED;
+
+ /*
+ * Else, dm-default-key needs to set this bio's encryption context.
+ * It must not already have one.
+ */
+ if (WARN_ON_ONCE(bio_has_crypt_ctx(bio)))
+ return DM_MAPIO_KILL;
+
+ /* Calculate the DUN and enforce data-unit (crypto sector) alignment. */
+ dun[0] = dkc->iv_offset + sector_in_target; /* 512-byte sectors */
+ if (dun[0] & ((dkc->sector_size >> SECTOR_SHIFT) - 1))
+ return DM_MAPIO_KILL;
+ dun[0] >>= dkc->sector_bits - SECTOR_SHIFT; /* crypto sectors */
+
+ /*
+ * This check isn't necessary as we should have calculated max_dun
+ * correctly, but be safe.
+ */
+ if (WARN_ON_ONCE(dun[0] > dkc->max_dun))
+ return DM_MAPIO_KILL;
+
+ bio_crypt_set_ctx(bio, &dkc->key, dun, GFP_NOIO);
+
+ /*
+ * Since we've added an encryption context to the bio and
+ * blk-crypto-fallback may be needed to process it, it's necessary to
+ * use the fallback-aware bio submission code rather than
+ * unconditionally returning DM_MAPIO_REMAPPED.
+ *
+ * To get the correct accounting for a dm target in the case where
+ * __blk_crypto_submit_bio() doesn't take ownership of the bio (returns
+ * true), call __blk_crypto_submit_bio() directly and return
+ * DM_MAPIO_REMAPPED in that case, rather than relying on
+ * blk_crypto_submit_bio() which calls submit_bio() in that case.
+ */
+ if (__blk_crypto_submit_bio(bio))
+ return DM_MAPIO_REMAPPED;
+ return DM_MAPIO_SUBMITTED;
+}
+
+static void default_key_status(struct dm_target *ti, status_type_t type,
+ unsigned int status_flags, char *result,
+ unsigned int maxlen)
+{
+ const struct default_key_c *dkc = ti->private;
+ unsigned int sz = 0;
+ int num_feature_args = 0;
+
+ switch (type) {
+ case STATUSTYPE_INFO:
+ case STATUSTYPE_IMA:
+ result[0] = '\0';
+ break;
+
+ case STATUSTYPE_TABLE:
+ /* Omit the key for now. */
+ DMEMIT("%s - %llu %s %llu", dkc->cipher_string, dkc->iv_offset,
+ dkc->dev->name, (unsigned long long)dkc->start);
+
+ num_feature_args += !!ti->num_discard_bios;
+ if (dkc->sector_size != SECTOR_SIZE)
+ num_feature_args += 2;
+ if (dkc->key_type == BLK_CRYPTO_KEY_TYPE_HW_WRAPPED)
+ num_feature_args += 1;
+ if (num_feature_args != 0) {
+ DMEMIT(" %d", num_feature_args);
+ if (ti->num_discard_bios)
+ DMEMIT(" allow_discards");
+ if (dkc->sector_size != SECTOR_SIZE) {
+ DMEMIT(" sector_size:%u", dkc->sector_size);
+ DMEMIT(" iv_large_sectors");
+ }
+ if (dkc->key_type == BLK_CRYPTO_KEY_TYPE_HW_WRAPPED)
+ DMEMIT(" wrappedkey_v0");
+ }
+ break;
+ }
+}
+
+static int default_key_prepare_ioctl(struct dm_target *ti,
+ struct block_device **bdev,
+ unsigned int cmd, unsigned long arg,
+ bool *forward)
+{
+ const struct default_key_c *dkc = ti->private;
+ const struct dm_dev *dev = dkc->dev;
+
+ *bdev = dev->bdev;
+
+ /* Only pass ioctls through if the device sizes match exactly. */
+ if (dkc->start != 0 ||
+ ti->len != bdev_nr_sectors(dev->bdev))
+ return 1;
+ return 0;
+}
+
+static int default_key_iterate_devices(struct dm_target *ti,
+ iterate_devices_callout_fn fn,
+ void *data)
+{
+ const struct default_key_c *dkc = ti->private;
+
+ return fn(ti, dkc->dev, dkc->start, ti->len, data);
+}
+
+static void default_key_io_hints(struct dm_target *ti,
+ struct queue_limits *limits)
+{
+ const struct default_key_c *dkc = ti->private;
+ const unsigned int sector_size = dkc->sector_size;
+
+ limits->logical_block_size =
+ max_t(unsigned int, limits->logical_block_size, sector_size);
+ limits->physical_block_size =
+ max_t(unsigned int, limits->physical_block_size, sector_size);
+ limits->io_min = max_t(unsigned int, limits->io_min, sector_size);
+}
+
+#ifdef CONFIG_BLK_DEV_ZONED
+static int default_key_report_zones(struct dm_target *ti,
+ struct dm_report_zones_args *args, unsigned int nr_zones)
+{
+ struct default_key_c *dkc = ti->private;
+
+ return dm_report_zones(dkc->dev->bdev, dkc->start,
+ dkc->start + dm_target_offset(ti, args->next_sector),
+ args, nr_zones);
+}
+#else
+#define default_key_report_zones NULL
+#endif
+
+static struct target_type default_key_target = {
+ .name = "default-key",
+ .version = {2, 1, 0},
+ .features = DM_TARGET_PASSES_CRYPTO | DM_TARGET_ZONED_HM,
+ .report_zones = default_key_report_zones,
+ .module = THIS_MODULE,
+ .ctr = default_key_ctr,
+ .dtr = default_key_dtr,
+ .map = default_key_map,
+ .status = default_key_status,
+ .prepare_ioctl = default_key_prepare_ioctl,
+ .iterate_devices = default_key_iterate_devices,
+ .io_hints = default_key_io_hints,
+};
+
+static int __init dm_default_key_init(void)
+{
+ return dm_register_target(&default_key_target);
+}
+
+static void __exit dm_default_key_exit(void)
+{
+ dm_unregister_target(&default_key_target);
+}
+
+module_init(dm_default_key_init);
+module_exit(dm_default_key_exit);
+
+MODULE_AUTHOR("Paul Lawrence <paullawrence@google.com>");
+MODULE_AUTHOR("Paul Crowley <paulcrowley@google.com>");
+MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
+MODULE_DESCRIPTION(DM_NAME " target for encrypting filesystem metadata");
+MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-user.c b/drivers/md/dm-user.c
new file mode 100644
index 0000000..588855b
--- /dev/null
+++ b/drivers/md/dm-user.c
@@ -0,0 +1,1280 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2020 Google, Inc
+ * Copyright (C) 2020 Palmer Dabbelt <palmerdabbelt@google.com>
+ */
+
+#include <linux/device-mapper.h>
+#include <uapi/linux/dm-user.h>
+
+#include <linux/bio.h>
+#include <linux/init.h>
+#include <linux/mempool.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/uio.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+
+#define DM_MSG_PREFIX "user"
+
+#define MAX_OUTSTANDING_MESSAGES 128
+
+static unsigned int daemon_timeout_msec = 4000;
+module_param_named(dm_user_daemon_timeout_msec, daemon_timeout_msec, uint,
+ 0644);
+MODULE_PARM_DESC(dm_user_daemon_timeout_msec,
+ "IO Timeout in msec if daemon does not process");
+
+/*
+ * dm-user uses four structures:
+ *
+ * - "struct target", the outermost structure, corresponds to a single device
+ * mapper target. This contains the set of outstanding BIOs that have been
+ * provided by DM and are not actively being processed by the user, along
+ * with a misc device that userspace can open to communicate with the
+ * kernel. Each time userspaces opens the misc device a new channel is
+ * created.
+ * - "struct channel", which represents a single active communication channel
+ * with userspace. Userspace may choose arbitrary read/write sizes to use
+ * when processing messages, channels form these into logical accesses.
+ * When userspace responds to a full message the channel completes the BIO
+ * and obtains a new message to process from the target.
+ * - "struct message", which wraps a BIO with the additional information
+ * required by the kernel to sort out what to do with BIOs when they return
+ * from userspace.
+ * - "struct dm_user_message", which is the exact message format that
+ * userspace sees.
+ *
+ * The hot path contains three distinct operations:
+ *
+ * - user_map(), which is provided a BIO from device mapper that is queued
+ * into the target. This allocates and enqueues a new message.
+ * - dev_read(), which dequeues a message, copies it to userspace.
+ * - dev_write(), which looks up a message (keyed by sequence number) and
+ * completes the corresponding BIO.
+ *
+ * Lock ordering (outer to inner)
+ *
+ * 1) miscdevice's global lock. This is held around dev_open, so it has to be
+ * the outermost lock.
+ * 2) target->lock
+ * 3) channel->lock
+ */
+
+struct message {
+ /*
+ * Messages themselves do not need a lock, they're protected by either
+ * the target or channel's lock, depending on which can reference them
+ * directly.
+ */
+ struct dm_user_message msg;
+ struct bio *bio;
+ size_t posn_to_user;
+ size_t total_to_user;
+ size_t posn_from_user;
+ size_t total_from_user;
+
+ struct list_head from_user;
+ struct list_head to_user;
+
+ /*
+ * These are written back from the user. They live in the same spot in
+ * the message, but we need to either keep the old values around or
+ * call a bunch more BIO helpers. These are only valid after write has
+ * adopted the message.
+ */
+ u64 return_type;
+ u64 return_flags;
+
+ struct delayed_work work;
+ bool delayed;
+ struct target *t;
+};
+
+struct target {
+ /*
+ * A target has a single lock, which protects everything in the target
+ * (but does not protect the channels associated with a target).
+ */
+ struct mutex lock;
+
+ /*
+ * There is only one point at which anything blocks: userspace blocks
+ * reading a new message, which is woken up by device mapper providing
+ * a new BIO to process (or tearing down the target). The
+ * corresponding write side doesn't block, instead we treat userspace's
+ * response containing a message that has yet to be mapped as an
+ * invalid operation.
+ */
+ struct wait_queue_head wq;
+
+ /*
+ * Messages are delivered to userspace in order, but may be returned
+ * out of order. This allows userspace to schedule IO if it wants to.
+ */
+ mempool_t message_pool;
+ u64 next_seq_to_map;
+ u64 next_seq_to_user;
+ struct list_head to_user;
+
+ /*
+ * There is a misc device per target. The name is selected by
+ * userspace (via a DM create ioctl argument), and each ends up in
+ * /dev/dm-user/. It looks like a better way to do this may be to have
+ * a filesystem to manage these, but this was more expedient. The
+ * current mechanism is functional, but does result in an arbitrary
+ * number of dynamically created misc devices.
+ */
+ struct miscdevice miscdev;
+
+ /*
+ * Device mapper's target destructor triggers tearing this all down,
+ * but we can't actually free until every channel associated with this
+ * target has been destroyed. Channels each have a reference to their
+ * target, and there is an additional single reference that corresponds
+ * to both DM and the misc device (both of which are destroyed by DM).
+ *
+ * In the common case userspace will be asleep waiting for a new
+ * message when device mapper decides to destroy the target, which
+ * means no new messages will appear. The destroyed flag triggers a
+ * wakeup, which will end up removing the reference.
+ */
+ struct kref references;
+ int dm_destroyed;
+ bool daemon_terminated;
+};
+
+struct channel {
+ struct target *target;
+
+ /*
+ * A channel has a single lock, which prevents multiple reads (or
+ * multiple writes) from conflicting with each other.
+ */
+ struct mutex lock;
+
+ struct message *cur_to_user;
+ struct message *cur_from_user;
+ ssize_t to_user_error;
+ ssize_t from_user_error;
+
+ /*
+ * Once a message has been forwarded to userspace on a channel it must
+ * be responded to on the same channel. This allows us to error out
+ * the messages that have not yet been responded to by a channel when
+ * that channel closes, which makes handling errors more reasonable for
+ * fault-tolerant userspace daemons. It also happens to make avoiding
+ * shared locks between user_map() and dev_read() a lot easier.
+ *
+ * This does preclude a multi-threaded work stealing userspace
+ * implementation (or at least, force a degree of head-of-line blocking
+ * on the response path).
+ */
+ struct list_head from_user;
+
+ /*
+ * Responses from userspace can arrive in arbitrarily small chunks.
+ * We need some place to buffer one up until we can find the
+ * corresponding kernel-side message to continue processing, so instead
+ * of allocating them we just keep one off to the side here. This can
+ * only ever be pointer to by from_user_cur, and will never have a BIO.
+ */
+ struct message scratch_message_from_user;
+};
+
+static void message_kill(struct message *m, mempool_t *pool)
+{
+ m->bio->bi_status = BLK_STS_IOERR;
+ bio_endio(m->bio);
+ mempool_free(m, pool);
+}
+
+static inline bool is_user_space_thread_present(struct target *t)
+{
+ lockdep_assert_held(&t->lock);
+ return (kref_read(&t->references) > 1);
+}
+
+static void process_delayed_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct message *msg = container_of(del_work, struct message, work);
+
+ struct target *t = msg->t;
+
+ mutex_lock(&t->lock);
+
+ /*
+ * There is at least one thread to process the IO.
+ */
+ if (is_user_space_thread_present(t)) {
+ mutex_unlock(&t->lock);
+ return;
+ }
+
+ /*
+ * Terminate the IO with an error
+ */
+ list_del(&msg->to_user);
+ pr_err("I/O error: sector %llu: no user-space daemon for %s target\n",
+ msg->bio->bi_iter.bi_sector,
+ t->miscdev.name);
+ message_kill(msg, &t->message_pool);
+ mutex_unlock(&t->lock);
+}
+
+static void enqueue_delayed_work(struct message *m, bool is_delay)
+{
+ unsigned long delay = 0;
+
+ m->delayed = true;
+ INIT_DELAYED_WORK(&m->work, process_delayed_work);
+
+ /*
+ * Snapuserd daemon is the user-space process
+ * which processes IO request from dm-user
+ * when OTA is applied. Per the current design,
+ * when a dm-user target is created, daemon
+ * attaches to target and starts processing
+ * the IO's. Daemon is terminated only when
+ * dm-user target is destroyed.
+ *
+ * If for some reason, daemon crashes or terminates early,
+ * without destroying the dm-user target; then
+ * there is no mechanism to restart the daemon
+ * and start processing the IO's from the same target.
+ * Theoretically, it is possible but that infrastructure
+ * doesn't exist in the android ecosystem.
+ *
+ * Thus, when the daemon terminates, there is no way the IO's
+ * issued on that target will be processed. Hence,
+ * we set the delay to 0 and fail the IO's immediately.
+ *
+ * On the other hand, when a new dm-user target is created,
+ * we wait for the daemon to get attached for the first time.
+ * This primarily happens when init first stage spins up
+ * the daemon. At this point, since the snapshot device is mounted
+ * of a root filesystem, dm-user target may receive IO request
+ * even though daemon is not fully launched. We don't want
+ * to fail those IO requests immediately. Thus, we queue these
+ * requests with a timeout so that daemon is ready to process
+ * those IO requests. Again, if the daemon fails to launch within
+ * the timeout period, then IO's will be failed.
+ */
+ if (is_delay)
+ delay = msecs_to_jiffies(daemon_timeout_msec);
+
+ queue_delayed_work(system_wq, &m->work, delay);
+}
+
+static inline struct target *target_from_target(struct dm_target *target)
+{
+ WARN_ON(target->private == NULL);
+ return target->private;
+}
+
+static inline struct channel *channel_from_file(struct file *file)
+{
+ WARN_ON(file->private_data == NULL);
+ return file->private_data;
+}
+
+static inline struct target *target_from_channel(struct channel *c)
+{
+ WARN_ON(c->target == NULL);
+ return c->target;
+}
+
+static inline size_t bio_size(struct bio *bio)
+{
+ struct bio_vec bvec;
+ struct bvec_iter iter;
+ size_t out = 0;
+
+ bio_for_each_segment (bvec, bio, iter)
+ out += bio_iter_len(bio, iter);
+ return out;
+}
+
+static inline size_t bio_bytes_needed_to_user(struct bio *bio)
+{
+ switch (bio_op(bio)) {
+ case REQ_OP_WRITE:
+ return sizeof(struct dm_user_message) + bio_size(bio);
+ case REQ_OP_READ:
+ case REQ_OP_FLUSH:
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ case REQ_OP_WRITE_ZEROES:
+ return sizeof(struct dm_user_message);
+
+ /*
+ * These ops are not passed to userspace under the assumption that
+ * they're not going to be particularly useful in that context.
+ */
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static inline size_t bio_bytes_needed_from_user(struct bio *bio)
+{
+ switch (bio_op(bio)) {
+ case REQ_OP_READ:
+ return sizeof(struct dm_user_message) + bio_size(bio);
+ case REQ_OP_WRITE:
+ case REQ_OP_FLUSH:
+ case REQ_OP_DISCARD:
+ case REQ_OP_SECURE_ERASE:
+ case REQ_OP_WRITE_ZEROES:
+ return sizeof(struct dm_user_message);
+
+ /*
+ * These ops are not passed to userspace under the assumption that
+ * they're not going to be particularly useful in that context.
+ */
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static inline long bio_type_to_user_type(struct bio *bio)
+{
+ switch (bio_op(bio)) {
+ case REQ_OP_READ:
+ return DM_USER_REQ_MAP_READ;
+ case REQ_OP_WRITE:
+ return DM_USER_REQ_MAP_WRITE;
+ case REQ_OP_FLUSH:
+ return DM_USER_REQ_MAP_FLUSH;
+ case REQ_OP_DISCARD:
+ return DM_USER_REQ_MAP_DISCARD;
+ case REQ_OP_SECURE_ERASE:
+ return DM_USER_REQ_MAP_SECURE_ERASE;
+ case REQ_OP_WRITE_ZEROES:
+ return DM_USER_REQ_MAP_WRITE_ZEROES;
+
+ /*
+ * These ops are not passed to userspace under the assumption that
+ * they're not going to be particularly useful in that context.
+ */
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static inline long bio_flags_to_user_flags(struct bio *bio)
+{
+ u64 out = 0;
+ typeof(bio->bi_opf) opf = bio->bi_opf & ~REQ_OP_MASK;
+
+ if (opf & REQ_FAILFAST_DEV) {
+ opf &= ~REQ_FAILFAST_DEV;
+ out |= DM_USER_REQ_MAP_FLAG_FAILFAST_DEV;
+ }
+
+ if (opf & REQ_FAILFAST_TRANSPORT) {
+ opf &= ~REQ_FAILFAST_TRANSPORT;
+ out |= DM_USER_REQ_MAP_FLAG_FAILFAST_TRANSPORT;
+ }
+
+ if (opf & REQ_FAILFAST_DRIVER) {
+ opf &= ~REQ_FAILFAST_DRIVER;
+ out |= DM_USER_REQ_MAP_FLAG_FAILFAST_DRIVER;
+ }
+
+ if (opf & REQ_SYNC) {
+ opf &= ~REQ_SYNC;
+ out |= DM_USER_REQ_MAP_FLAG_SYNC;
+ }
+
+ if (opf & REQ_META) {
+ opf &= ~REQ_META;
+ out |= DM_USER_REQ_MAP_FLAG_META;
+ }
+
+ if (opf & REQ_PRIO) {
+ opf &= ~REQ_PRIO;
+ out |= DM_USER_REQ_MAP_FLAG_PRIO;
+ }
+
+ if (opf & REQ_NOMERGE) {
+ opf &= ~REQ_NOMERGE;
+ out |= DM_USER_REQ_MAP_FLAG_NOMERGE;
+ }
+
+ if (opf & REQ_IDLE) {
+ opf &= ~REQ_IDLE;
+ out |= DM_USER_REQ_MAP_FLAG_IDLE;
+ }
+
+ if (opf & REQ_INTEGRITY) {
+ opf &= ~REQ_INTEGRITY;
+ out |= DM_USER_REQ_MAP_FLAG_INTEGRITY;
+ }
+
+ if (opf & REQ_FUA) {
+ opf &= ~REQ_FUA;
+ out |= DM_USER_REQ_MAP_FLAG_FUA;
+ }
+
+ if (opf & REQ_PREFLUSH) {
+ opf &= ~REQ_PREFLUSH;
+ out |= DM_USER_REQ_MAP_FLAG_PREFLUSH;
+ }
+
+ if (opf & REQ_RAHEAD) {
+ opf &= ~REQ_RAHEAD;
+ out |= DM_USER_REQ_MAP_FLAG_RAHEAD;
+ }
+
+ if (opf & REQ_BACKGROUND) {
+ opf &= ~REQ_BACKGROUND;
+ out |= DM_USER_REQ_MAP_FLAG_BACKGROUND;
+ }
+
+ if (opf & REQ_NOWAIT) {
+ opf &= ~REQ_NOWAIT;
+ out |= DM_USER_REQ_MAP_FLAG_NOWAIT;
+ }
+
+ if (opf & REQ_NOUNMAP) {
+ opf &= ~REQ_NOUNMAP;
+ out |= DM_USER_REQ_MAP_FLAG_NOUNMAP;
+ }
+
+ if (unlikely(opf)) {
+ pr_warn("unsupported BIO type %x\n", opf);
+ return -EOPNOTSUPP;
+ }
+ WARN_ON(out < 0);
+ return out;
+}
+
+/*
+ * Not quite what's in blk-map.c, but instead what I thought the functions in
+ * blk-map did. This one seems more generally useful and I think we could
+ * write the blk-map version in terms of this one. The differences are that
+ * this has a return value that counts, and blk-map uses the BIO _all iters.
+ * Neither advance the BIO iter but don't advance the IOV iter, which is a bit
+ * odd here.
+ */
+static ssize_t bio_copy_from_iter(struct bio *bio, struct iov_iter *iter)
+{
+ struct bio_vec bvec;
+ struct bvec_iter biter;
+ ssize_t out = 0;
+
+ bio_for_each_segment (bvec, bio, biter) {
+ ssize_t ret;
+
+ ret = copy_page_from_iter(bvec.bv_page, bvec.bv_offset,
+ bvec.bv_len, iter);
+
+ /*
+ * FIXME: I thought that IOV copies had a mechanism for
+ * terminating early, if for example a signal came in while
+ * sleeping waiting for a page to be mapped, but I don't see
+ * where that would happen.
+ */
+ WARN_ON(ret < 0);
+ out += ret;
+
+ if (!iov_iter_count(iter))
+ break;
+
+ if (ret < bvec.bv_len)
+ return ret;
+ }
+
+ return out;
+}
+
+static ssize_t bio_copy_to_iter(struct bio *bio, struct iov_iter *iter)
+{
+ struct bio_vec bvec;
+ struct bvec_iter biter;
+ ssize_t out = 0;
+
+ bio_for_each_segment (bvec, bio, biter) {
+ ssize_t ret;
+
+ ret = copy_page_to_iter(bvec.bv_page, bvec.bv_offset,
+ bvec.bv_len, iter);
+
+ /* as above */
+ WARN_ON(ret < 0);
+ out += ret;
+
+ if (!iov_iter_count(iter))
+ break;
+
+ if (ret < bvec.bv_len)
+ return ret;
+ }
+
+ return out;
+}
+
+static ssize_t msg_copy_to_iov(struct message *msg, struct iov_iter *to)
+{
+ ssize_t copied = 0;
+
+ if (!iov_iter_count(to))
+ return 0;
+
+ if (msg->posn_to_user < sizeof(msg->msg)) {
+ copied = copy_to_iter((char *)(&msg->msg) + msg->posn_to_user,
+ sizeof(msg->msg) - msg->posn_to_user, to);
+ } else {
+ copied = bio_copy_to_iter(msg->bio, to);
+ if (copied > 0)
+ bio_advance(msg->bio, copied);
+ }
+
+ if (copied < 0)
+ return copied;
+
+ msg->posn_to_user += copied;
+ return copied;
+}
+
+static ssize_t msg_copy_from_iov(struct message *msg, struct iov_iter *from)
+{
+ ssize_t copied = 0;
+
+ if (!iov_iter_count(from))
+ return 0;
+
+ if (msg->posn_from_user < sizeof(msg->msg)) {
+ copied = copy_from_iter(
+ (char *)(&msg->msg) + msg->posn_from_user,
+ sizeof(msg->msg) - msg->posn_from_user, from);
+ } else {
+ copied = bio_copy_from_iter(msg->bio, from);
+ if (copied > 0)
+ bio_advance(msg->bio, copied);
+ }
+
+ if (copied < 0)
+ return copied;
+
+ msg->posn_from_user += copied;
+ return copied;
+}
+
+static struct message *msg_get_map(struct target *t)
+{
+ struct message *m;
+
+ lockdep_assert_held(&t->lock);
+
+ m = mempool_alloc(&t->message_pool, GFP_NOIO);
+ m->msg.seq = t->next_seq_to_map++;
+ INIT_LIST_HEAD(&m->to_user);
+ INIT_LIST_HEAD(&m->from_user);
+ return m;
+}
+
+static struct message *msg_get_to_user(struct target *t)
+{
+ struct message *m;
+
+ lockdep_assert_held(&t->lock);
+
+ if (list_empty(&t->to_user))
+ return NULL;
+
+ m = list_first_entry(&t->to_user, struct message, to_user);
+
+ list_del(&m->to_user);
+
+ /*
+ * If the IO was queued to workqueue since there
+ * was no daemon to service the IO, then we
+ * will have to cancel the delayed work as the
+ * IO will be processed by this user-space thread.
+ *
+ * If the delayed work was already picked up for
+ * processing, then wait for it to complete. Note
+ * that the IO will not be terminated by the work
+ * queue thread.
+ */
+ if (unlikely(m->delayed)) {
+ mutex_unlock(&t->lock);
+ cancel_delayed_work_sync(&m->work);
+ mutex_lock(&t->lock);
+ }
+ return m;
+}
+
+static struct message *msg_get_from_user(struct channel *c, u64 seq)
+{
+ struct message *m;
+ struct list_head *cur, *tmp;
+
+ lockdep_assert_held(&c->lock);
+
+ list_for_each_safe (cur, tmp, &c->from_user) {
+ m = list_entry(cur, struct message, from_user);
+ if (m->msg.seq == seq) {
+ list_del(&m->from_user);
+ return m;
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Returns 0 when there is no work left to do. This must be callable without
+ * holding the target lock, as it is part of the waitqueue's check expression.
+ * When called without the lock it may spuriously indicate there is remaining
+ * work, but when called with the lock it must be accurate.
+ */
+static int target_poll(struct target *t)
+{
+ return !list_empty(&t->to_user) || t->dm_destroyed;
+}
+
+static void target_release(struct kref *ref)
+{
+ struct target *t = container_of(ref, struct target, references);
+ struct list_head *cur, *tmp;
+
+ /*
+ * There may be outstanding BIOs that have not yet been given to
+ * userspace. At this point there's nothing we can do about them, as
+ * there are and will never be any channels.
+ */
+ list_for_each_safe (cur, tmp, &t->to_user) {
+ struct message *m = list_entry(cur, struct message, to_user);
+
+ if (unlikely(m->delayed)) {
+ bool ret;
+
+ mutex_unlock(&t->lock);
+ ret = cancel_delayed_work_sync(&m->work);
+ mutex_lock(&t->lock);
+ if (!ret)
+ continue;
+ }
+ message_kill(m, &t->message_pool);
+ }
+
+ mempool_exit(&t->message_pool);
+ mutex_unlock(&t->lock);
+ mutex_destroy(&t->lock);
+ kfree(t);
+}
+
+static void target_put(struct target *t)
+{
+ /*
+ * This both releases a reference to the target and the lock. We leave
+ * it up to the caller to hold the lock, as they probably needed it for
+ * something else.
+ */
+ lockdep_assert_held(&t->lock);
+
+ if (!kref_put(&t->references, target_release)) {
+ /*
+ * User-space thread is getting terminated.
+ * We need to scan the list for all those
+ * pending IO's which were not processed yet
+ * and put them back to work-queue for delayed
+ * processing.
+ */
+ if (!is_user_space_thread_present(t)) {
+ struct list_head *cur, *tmp;
+
+ list_for_each_safe(cur, tmp, &t->to_user) {
+ struct message *m = list_entry(cur,
+ struct message,
+ to_user);
+ if (!m->delayed)
+ enqueue_delayed_work(m, false);
+ }
+ /*
+ * Daemon attached to this target is terminated.
+ */
+ t->daemon_terminated = true;
+ }
+ mutex_unlock(&t->lock);
+ }
+}
+
+static struct channel *channel_alloc(struct target *t)
+{
+ struct channel *c;
+
+ lockdep_assert_held(&t->lock);
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (c == NULL)
+ return NULL;
+
+ kref_get(&t->references);
+ c->target = t;
+ c->cur_from_user = &c->scratch_message_from_user;
+ mutex_init(&c->lock);
+ INIT_LIST_HEAD(&c->from_user);
+ return c;
+}
+
+static void channel_free(struct channel *c)
+{
+ struct list_head *cur, *tmp;
+
+ lockdep_assert_held(&c->lock);
+
+ /*
+ * There may be outstanding BIOs that have been given to userspace but
+ * have not yet been completed. The channel has been shut down so
+ * there's no way to process the rest of those messages, so we just go
+ * ahead and error out the BIOs. Hopefully whatever's on the other end
+ * can handle the errors. One could imagine splitting the BIOs and
+ * completing as much as we got, but that seems like overkill here.
+ *
+ * Our only other options would be to let the BIO hang around (which
+ * seems way worse) or to resubmit it to userspace in the hope there's
+ * another channel. I don't really like the idea of submitting a
+ * message twice.
+ */
+ if (c->cur_to_user != NULL)
+ message_kill(c->cur_to_user, &c->target->message_pool);
+ if (c->cur_from_user != &c->scratch_message_from_user)
+ message_kill(c->cur_from_user, &c->target->message_pool);
+ list_for_each_safe (cur, tmp, &c->from_user)
+ message_kill(list_entry(cur, struct message, from_user),
+ &c->target->message_pool);
+
+ mutex_lock(&c->target->lock);
+ target_put(c->target);
+ mutex_unlock(&c->lock);
+ mutex_destroy(&c->lock);
+ kfree(c);
+}
+
+static int dev_open(struct inode *inode, struct file *file)
+{
+ struct channel *c;
+ struct target *t;
+
+ /*
+ * This is called by miscdev, which sets private_data to point to the
+ * struct miscdevice that was opened. The rest of our file operations
+ * want to refer to the channel that's been opened, so we swap that
+ * pointer out with a fresh channel.
+ *
+ * This is called with the miscdev lock held, which is also held while
+ * registering/unregistering the miscdev. The miscdev must be
+ * registered for this to get called, which means there must be an
+ * outstanding reference to the target, which means it cannot be freed
+ * out from under us despite us not holding a reference yet.
+ */
+ t = container_of(file->private_data, struct target, miscdev);
+ mutex_lock(&t->lock);
+ file->private_data = c = channel_alloc(t);
+
+ if (c == NULL) {
+ mutex_unlock(&t->lock);
+ return -ENOMEM;
+ }
+
+ mutex_unlock(&t->lock);
+ return 0;
+}
+
+static ssize_t dev_read(struct kiocb *iocb, struct iov_iter *to)
+{
+ struct channel *c = channel_from_file(iocb->ki_filp);
+ ssize_t total_processed = 0;
+ ssize_t processed;
+
+ mutex_lock(&c->lock);
+
+ if (unlikely(c->to_user_error)) {
+ total_processed = c->to_user_error;
+ goto cleanup_unlock;
+ }
+
+ if (c->cur_to_user == NULL) {
+ struct target *t = target_from_channel(c);
+
+ mutex_lock(&t->lock);
+
+ while (!target_poll(t)) {
+ int e;
+
+ mutex_unlock(&t->lock);
+ mutex_unlock(&c->lock);
+ e = wait_event_interruptible(t->wq, target_poll(t));
+ mutex_lock(&c->lock);
+ mutex_lock(&t->lock);
+
+ if (unlikely(e != 0)) {
+ /*
+ * We haven't processed any bytes in either the
+ * BIO or the IOV, so we can just terminate
+ * right now. Elsewhere in the kernel handles
+ * restarting the syscall when appropriate.
+ */
+ total_processed = e;
+ mutex_unlock(&t->lock);
+ goto cleanup_unlock;
+ }
+ }
+
+ if (unlikely(t->dm_destroyed)) {
+ /*
+ * DM has destroyed this target, so just lock
+ * the user out. There's really nothing else
+ * we can do here. Note that we don't actually
+ * tear any thing down until userspace has
+ * closed the FD, as there may still be
+ * outstanding BIOs.
+ *
+ * This is kind of a wacky error code to
+ * return. My goal was really just to try and
+ * find something that wasn't likely to be
+ * returned by anything else in the miscdev
+ * path. The message "block device required"
+ * seems like a somewhat reasonable thing to
+ * say when the target has disappeared out from
+ * under us, but "not block" isn't sensible.
+ */
+ c->to_user_error = total_processed = -ENOTBLK;
+ mutex_unlock(&t->lock);
+ goto cleanup_unlock;
+ }
+
+ /*
+ * Ensures that accesses to the message data are not ordered
+ * before the remote accesses that produce that message data.
+ *
+ * This pairs with the barrier in user_map(), via the
+ * conditional within the while loop above. Also see the lack
+ * of barrier in user_dtr(), which is why this can be after the
+ * destroyed check.
+ */
+ smp_rmb();
+
+ c->cur_to_user = msg_get_to_user(t);
+ WARN_ON(c->cur_to_user == NULL);
+ mutex_unlock(&t->lock);
+ }
+
+ processed = msg_copy_to_iov(c->cur_to_user, to);
+ total_processed += processed;
+
+ WARN_ON(c->cur_to_user->posn_to_user > c->cur_to_user->total_to_user);
+ if (c->cur_to_user->posn_to_user == c->cur_to_user->total_to_user) {
+ struct message *m = c->cur_to_user;
+
+ c->cur_to_user = NULL;
+ list_add_tail(&m->from_user, &c->from_user);
+ }
+
+cleanup_unlock:
+ mutex_unlock(&c->lock);
+ return total_processed;
+}
+
+static ssize_t dev_write(struct kiocb *iocb, struct iov_iter *from)
+{
+ struct channel *c = channel_from_file(iocb->ki_filp);
+ ssize_t total_processed = 0;
+ ssize_t processed;
+
+ mutex_lock(&c->lock);
+
+ if (unlikely(c->from_user_error)) {
+ total_processed = c->from_user_error;
+ goto cleanup_unlock;
+ }
+
+ /*
+ * cur_from_user can never be NULL. If there's no real message it must
+ * point to the scratch space.
+ */
+ WARN_ON(c->cur_from_user == NULL);
+ if (c->cur_from_user->posn_from_user < sizeof(struct dm_user_message)) {
+ struct message *msg, *old;
+
+ processed = msg_copy_from_iov(c->cur_from_user, from);
+ if (processed <= 0) {
+ pr_warn("msg_copy_from_iov() returned %zu\n",
+ processed);
+ c->from_user_error = -EINVAL;
+ goto cleanup_unlock;
+ }
+ total_processed += processed;
+
+ /*
+ * In the unlikely event the user has provided us a very short
+ * write, not even big enough to fill a message, just succeed.
+ * We'll eventually build up enough bytes to do something.
+ */
+ if (unlikely(c->cur_from_user->posn_from_user <
+ sizeof(struct dm_user_message)))
+ goto cleanup_unlock;
+
+ old = c->cur_from_user;
+ mutex_lock(&c->target->lock);
+ msg = msg_get_from_user(c, c->cur_from_user->msg.seq);
+ if (msg == NULL) {
+ pr_info("user provided an invalid messag seq of %llx\n",
+ old->msg.seq);
+ mutex_unlock(&c->target->lock);
+ c->from_user_error = -EINVAL;
+ goto cleanup_unlock;
+ }
+ mutex_unlock(&c->target->lock);
+
+ WARN_ON(old->posn_from_user != sizeof(struct dm_user_message));
+ msg->posn_from_user = sizeof(struct dm_user_message);
+ msg->return_type = old->msg.type;
+ msg->return_flags = old->msg.flags;
+ WARN_ON(msg->posn_from_user > msg->total_from_user);
+ c->cur_from_user = msg;
+ WARN_ON(old != &c->scratch_message_from_user);
+ }
+
+ /*
+ * Userspace can signal an error for single requests by overwriting the
+ * seq field.
+ */
+ switch (c->cur_from_user->return_type) {
+ case DM_USER_RESP_SUCCESS:
+ c->cur_from_user->bio->bi_status = BLK_STS_OK;
+ break;
+ case DM_USER_RESP_ERROR:
+ case DM_USER_RESP_UNSUPPORTED:
+ default:
+ c->cur_from_user->bio->bi_status = BLK_STS_IOERR;
+ goto finish_bio;
+ }
+
+ /*
+ * The op was a success as far as userspace is concerned, so process
+ * whatever data may come along with it. The user may provide the BIO
+ * data in multiple chunks, in which case we don't need to finish the
+ * BIO.
+ */
+ processed = msg_copy_from_iov(c->cur_from_user, from);
+ total_processed += processed;
+
+ if (c->cur_from_user->posn_from_user <
+ c->cur_from_user->total_from_user)
+ goto cleanup_unlock;
+
+finish_bio:
+ /*
+ * When we set up this message the BIO's size matched the
+ * message size, if that's not still the case then something
+ * has gone off the rails.
+ */
+ WARN_ON(bio_size(c->cur_from_user->bio) != 0);
+ bio_endio(c->cur_from_user->bio);
+
+ /*
+ * We don't actually need to take the target lock here, as all
+ * we're doing is freeing the message and mempools have their
+ * own lock. Each channel has its ows scratch message.
+ */
+ WARN_ON(c->cur_from_user == &c->scratch_message_from_user);
+ mempool_free(c->cur_from_user, &c->target->message_pool);
+ c->scratch_message_from_user.posn_from_user = 0;
+ c->cur_from_user = &c->scratch_message_from_user;
+
+cleanup_unlock:
+ mutex_unlock(&c->lock);
+ return total_processed;
+}
+
+static int dev_release(struct inode *inode, struct file *file)
+{
+ struct channel *c;
+
+ c = channel_from_file(file);
+ mutex_lock(&c->lock);
+ channel_free(c);
+
+ return 0;
+}
+
+static const struct file_operations file_operations = {
+ .owner = THIS_MODULE,
+ .open = dev_open,
+ .read_iter = dev_read,
+ .write_iter = dev_write,
+ .release = dev_release,
+};
+
+static int user_ctr(struct dm_target *ti, unsigned int argc, char **argv)
+{
+ struct target *t;
+ int r;
+
+ if (argc != 3) {
+ ti->error = "Invalid argument count";
+ r = -EINVAL;
+ goto cleanup_none;
+ }
+
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (t == NULL) {
+ r = -ENOMEM;
+ goto cleanup_none;
+ }
+ ti->private = t;
+
+ /* Enable more BIO types. */
+ ti->num_discard_bios = 1;
+ ti->discards_supported = true;
+ ti->num_flush_bios = 1;
+ ti->flush_supported = true;
+
+ /*
+ * We begin with a single reference to the target, which is miscdev's
+ * reference. This ensures that the target won't be freed
+ * until after the miscdev has been unregistered and all extant
+ * channels have been closed.
+ */
+ kref_init(&t->references);
+
+ t->daemon_terminated = false;
+ mutex_init(&t->lock);
+ init_waitqueue_head(&t->wq);
+ INIT_LIST_HEAD(&t->to_user);
+ mempool_init_kmalloc_pool(&t->message_pool, MAX_OUTSTANDING_MESSAGES,
+ sizeof(struct message));
+
+ t->miscdev.minor = MISC_DYNAMIC_MINOR;
+ t->miscdev.fops = &file_operations;
+ t->miscdev.name = kasprintf(GFP_KERNEL, "dm-user/%s", argv[2]);
+ if (t->miscdev.name == NULL) {
+ r = -ENOMEM;
+ goto cleanup_message_pool;
+ }
+
+ /*
+ * Once the miscdev is registered it can be opened and therefor
+ * concurrent references to the channel can happen. Holding the target
+ * lock during misc_register() could deadlock. If registration
+ * succeeds then we will not access the target again so we just stick a
+ * barrier here, which pairs with taking the target lock everywhere
+ * else the target is accessed.
+ *
+ * I forgot where we ended up on the RCpc/RCsc locks. IIU RCsc locks
+ * would mean that we could take the target lock earlier and release it
+ * here instead of the memory barrier. I'm not sure that's any better,
+ * though, and this isn't on a hot path so it probably doesn't matter
+ * either way.
+ */
+ smp_mb();
+
+ r = misc_register(&t->miscdev);
+ if (r) {
+ DMERR("Unable to register miscdev %s for dm-user",
+ t->miscdev.name);
+ r = -ENOMEM;
+ goto cleanup_misc_name;
+ }
+
+ return 0;
+
+cleanup_misc_name:
+ kfree(t->miscdev.name);
+cleanup_message_pool:
+ mempool_exit(&t->message_pool);
+ kfree(t);
+cleanup_none:
+ return r;
+}
+
+static void user_dtr(struct dm_target *ti)
+{
+ struct target *t = target_from_target(ti);
+
+ /*
+ * Removes the miscdev. This must be called without the target lock
+ * held to avoid a possible deadlock because our open implementation is
+ * called holding the miscdev lock and must later take the target lock.
+ *
+ * There is no race here because only DM can register/unregister the
+ * miscdev, and DM ensures that doesn't happen twice. The internal
+ * miscdev lock is sufficient to ensure there are no races between
+ * deregistering the miscdev and open.
+ */
+ misc_deregister(&t->miscdev);
+
+ /*
+ * We are now free to take the target's lock and drop our reference to
+ * the target. There are almost certainly tasks sleeping in read on at
+ * least one of the channels associated with this target, this
+ * explicitly wakes them up and terminates the read.
+ */
+ mutex_lock(&t->lock);
+ /*
+ * No barrier here, as wait/wake ensures that the flag visibility is
+ * correct WRT the wake/sleep state of the target tasks.
+ */
+ t->dm_destroyed = true;
+ wake_up_all(&t->wq);
+ target_put(t);
+}
+
+/*
+ * Consumes a BIO from device mapper, queueing it up for userspace.
+ */
+static int user_map(struct dm_target *ti, struct bio *bio)
+{
+ struct target *t;
+ struct message *entry;
+
+ t = target_from_target(ti);
+ /*
+ * FIXME
+ *
+ * This seems like a bad idea. Specifically, here we're
+ * directly on the IO path when we take the target lock, which may also
+ * be taken from a user context. The user context doesn't actively
+ * trigger anything that may sleep while holding the lock, but this
+ * still seems like a bad idea.
+ *
+ * The obvious way to fix this would be to use a proper queue, which
+ * would result in no shared locks between the direct IO path and user
+ * tasks. I had a version that did this, but the head-of-line blocking
+ * from the circular buffer resulted in us needing a fairly large
+ * allocation in order to avoid situations in which the queue fills up
+ * and everything goes off the rails.
+ *
+ * I could jump through a some hoops to avoid a shared lock while still
+ * allowing for a large queue, but I'm not actually sure that allowing
+ * for very large queues is the right thing to do here. Intuitively it
+ * seems better to keep the queues small in here (essentially sized to
+ * the user latency for performance reasons only) and rely on returning
+ * DM_MAPIO_REQUEUE regularly, as that would give the rest of the
+ * kernel more information.
+ *
+ * I'll spend some time trying to figure out what's going on with
+ * DM_MAPIO_REQUEUE, but if someone has a better idea of how to fix
+ * this I'm all ears.
+ */
+ mutex_lock(&t->lock);
+
+ /*
+ * FIXME
+ *
+ * The assumption here is that there's no benefit to returning
+ * DM_MAPIO_KILL as opposed to just erroring out the BIO, but I'm not
+ * sure that's actually true -- for example, I could imagine users
+ * expecting that submitted BIOs are unlikely to fail and therefor
+ * relying on submission failure to indicate an unsupported type.
+ *
+ * There's two ways I can think of to fix this:
+ * - Add DM arguments that are parsed during the constructor that
+ * allow various dm_target flags to be set that indicate the op
+ * types supported by this target. This may make sense for things
+ * like discard, where DM can already transform the BIOs to a form
+ * that's likely to be supported.
+ * - Some sort of pre-filter that allows userspace to hook in here
+ * and kill BIOs before marking them as submitted. My guess would
+ * be that a userspace round trip is a bad idea here, but a BPF
+ * call seems resonable.
+ *
+ * My guess is that we'd likely want to do both. The first one is easy
+ * and gives DM the proper info, so it seems better. The BPF call
+ * seems overly complex for just this, but one could imagine wanting to
+ * sometimes return _MAPPED and a BPF filter would be the way to do
+ * that.
+ *
+ * For example, in Android we have an in-kernel DM device called
+ * "dm-bow" that takes advange of some portion of the space that has
+ * been discarded on a device to provide opportunistic block-level
+ * backups. While one could imagine just implementing this entirely in
+ * userspace, that would come with an appreciable performance penalty.
+ * Instead one could keep a BPF program that forwards most accesses
+ * directly to the backing block device while informing a userspace
+ * daemon of any discarded space and on writes to blocks that are to be
+ * backed up.
+ */
+ if (unlikely((bio_type_to_user_type(bio) < 0) ||
+ (bio_flags_to_user_flags(bio) < 0))) {
+ mutex_unlock(&t->lock);
+ return DM_MAPIO_KILL;
+ }
+
+ entry = msg_get_map(t);
+ if (unlikely(entry == NULL)) {
+ mutex_unlock(&t->lock);
+ return DM_MAPIO_REQUEUE;
+ }
+
+ entry->msg.type = bio_type_to_user_type(bio);
+ entry->msg.flags = bio_flags_to_user_flags(bio);
+ entry->msg.sector = bio->bi_iter.bi_sector;
+ entry->msg.len = bio_size(bio);
+ entry->bio = bio;
+ entry->posn_to_user = 0;
+ entry->total_to_user = bio_bytes_needed_to_user(bio);
+ entry->posn_from_user = 0;
+ entry->total_from_user = bio_bytes_needed_from_user(bio);
+ entry->delayed = false;
+ entry->t = t;
+ /* Pairs with the barrier in dev_read() */
+ smp_wmb();
+ list_add_tail(&entry->to_user, &t->to_user);
+
+ /*
+ * If there is no daemon to process the IO's,
+ * queue these messages into a workqueue with
+ * a timeout.
+ */
+ if (!is_user_space_thread_present(t))
+ enqueue_delayed_work(entry, !t->daemon_terminated);
+
+ wake_up_interruptible(&t->wq);
+ mutex_unlock(&t->lock);
+ return DM_MAPIO_SUBMITTED;
+}
+
+static struct target_type user_target = {
+ .name = "user",
+ .version = { 1, 0, 0 },
+ .module = THIS_MODULE,
+ .ctr = user_ctr,
+ .dtr = user_dtr,
+ .map = user_map,
+};
+
+static int __init dm_user_init(void)
+{
+ int r;
+
+ r = dm_register_target(&user_target);
+ if (r) {
+ DMERR("register failed %d", r);
+ goto error;
+ }
+
+ return 0;
+
+error:
+ return r;
+}
+
+static void __exit dm_user_exit(void)
+{
+ dm_unregister_target(&user_target);
+}
+
+module_init(dm_user_init);
+module_exit(dm_user_exit);
+MODULE_AUTHOR("Palmer Dabbelt <palmerdabbelt@google.com>");
+MODULE_DESCRIPTION(DM_NAME " target returning blocks from userspace");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig
index 6abc930..d251201 100644
--- a/drivers/media/Kconfig
+++ b/drivers/media/Kconfig
@@ -65,8 +65,7 @@
# Multimedia support - automatically enable V4L2 and DVB core
#
config MEDIA_CAMERA_SUPPORT
- bool
- prompt "Cameras and video grabbers" if MEDIA_SUPPORT_FILTER
+ bool "Cameras and video grabbers"
default y if !MEDIA_SUPPORT_FILTER
help
Enable support for webcams and video grabbers.
@@ -74,8 +73,7 @@
Say Y when you have a webcam or a video capture grabber board.
config MEDIA_ANALOG_TV_SUPPORT
- bool
- prompt "Analog TV" if MEDIA_SUPPORT_FILTER
+ bool "Analog TV"
default y if !MEDIA_SUPPORT_FILTER
help
Enable analog TV support.
@@ -88,8 +86,7 @@
will disable support for them.
config MEDIA_DIGITAL_TV_SUPPORT
- bool
- prompt "Digital TV" if MEDIA_SUPPORT_FILTER
+ bool "Digital TV"
default y if !MEDIA_SUPPORT_FILTER
help
Enable digital TV support.
@@ -98,8 +95,7 @@
hybrid digital TV and analog TV.
config MEDIA_RADIO_SUPPORT
- bool
- prompt "AM/FM radio receivers/transmitters" if MEDIA_SUPPORT_FILTER
+ bool "AM/FM radio receivers/transmitters"
default y if !MEDIA_SUPPORT_FILTER
help
Enable AM/FM radio support.
@@ -114,8 +110,7 @@
disable support for them.
config MEDIA_SDR_SUPPORT
- bool
- prompt "Software defined radio" if MEDIA_SUPPORT_FILTER
+ bool "Software defined radio"
default y if !MEDIA_SUPPORT_FILTER
help
Enable software defined radio support.
@@ -123,8 +118,7 @@
Say Y when you have a software defined radio device.
config MEDIA_PLATFORM_SUPPORT
- bool
- prompt "Platform-specific devices" if MEDIA_SUPPORT_FILTER
+ bool "Platform-specific devices"
default y if !MEDIA_SUPPORT_FILTER
help
Enable support for complex cameras, codecs, and other hardware
@@ -137,8 +131,7 @@
Say Y when you want to be able to see such devices.
config MEDIA_TEST_SUPPORT
- bool
- prompt "Test drivers" if MEDIA_SUPPORT_FILTER
+ bool "Test drivers"
default y if !MEDIA_SUPPORT_FILTER
help
These drivers should not be used on production kernels, but
diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c
index 6ce623a..c743657 100644
--- a/drivers/media/v4l2-core/v4l2-dev.c
+++ b/drivers/media/v4l2-core/v4l2-dev.c
@@ -31,7 +31,7 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-event.h>
-#define VIDEO_NUM_DEVICES 256
+#define VIDEO_NUM_DEVICES 512
#define VIDEO_NAME "video4linux"
#define dprintk(fmt, arg...) do { \
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 5cc79d1..5b92152 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -503,6 +503,14 @@
tristate
default MISC_RTSX_PCI || MISC_RTSX_USB
+config UID_SYS_STATS
+ bool "Per-UID statistics"
+ depends on PROFILING && TASK_IO_ACCOUNTING
+ help
+ Per UID based cpu time statistics exported to /proc/uid_cputime
+ Per UID based io statistics exported to /proc/uid_io
+ Per UID based procstat control in /proc/uid_procstat
+
config HISI_HIKEY_USB
tristate "USB GPIO Hub on HiSilicon Hikey 960/970 Platform"
depends on (OF && GPIOLIB) || COMPILE_TEST
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index b32a259..09c70d9 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -73,5 +73,6 @@
lan966x-pci-objs += lan966x_pci.dtbo.o
obj-$(CONFIG_MCHP_LAN966X_PCI) += lan966x-pci.o
obj-y += keba/
+obj-$(CONFIG_UID_SYS_STATS) += uid_sys_stats.o
obj-y += amd-sbi/
obj-$(CONFIG_MISC_RP1) += rp1/
diff --git a/drivers/misc/uid_sys_stats.c b/drivers/misc/uid_sys_stats.c
new file mode 100644
index 0000000..d5c7886
--- /dev/null
+++ b/drivers/misc/uid_sys_stats.c
@@ -0,0 +1,594 @@
+/* drivers/misc/uid_sys_stats.c
+ *
+ * Copyright (C) 2014 - 2015 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/atomic.h>
+#include <linux/err.h>
+#include <linux/hashtable.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/llist.h>
+#include <linux/mm.h>
+#include <linux/proc_fs.h>
+#include <linux/profile.h>
+#include <linux/sched/cputime.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/spinlock_types.h>
+
+#define UID_HASH_BITS 10
+#define UID_HASH_NUMS (1 << UID_HASH_BITS)
+DECLARE_HASHTABLE(hash_table, UID_HASH_BITS);
+/* uid_lock[bkt] ensure consistency of hash_table[bkt] */
+spinlock_t uid_lock[UID_HASH_NUMS];
+
+#define for_each_bkt(bkt) \
+ for (bkt = 0; bkt < HASH_SIZE(hash_table); bkt++)
+
+/* iterate over all uid_entrys hashing to the same bkt */
+#define for_each_uid_entry(uid_entry, bkt) \
+ hlist_for_each_entry(uid_entry, &hash_table[bkt], hash)
+
+#define for_each_uid_entry_safe(uid_entry, tmp, bkt) \
+ hlist_for_each_entry_safe(uid_entry, tmp,\
+ &hash_table[bkt], hash)
+
+static struct proc_dir_entry *cpu_parent;
+static struct proc_dir_entry *io_parent;
+static struct proc_dir_entry *proc_parent;
+
+struct io_stats {
+ u64 read_bytes;
+ u64 write_bytes;
+ u64 rchar;
+ u64 wchar;
+ u64 fsync;
+};
+
+#define UID_STATE_FOREGROUND 0
+#define UID_STATE_BACKGROUND 1
+#define UID_STATE_TOTAL_LAST 2
+#define UID_STATE_DEAD_TASKS 3
+#define UID_STATE_SIZE 4
+
+#define MAX_TASK_COMM_LEN 256
+
+struct task_entry {
+ char comm[MAX_TASK_COMM_LEN];
+ pid_t pid;
+ struct io_stats io[UID_STATE_SIZE];
+ struct hlist_node hash;
+};
+
+struct uid_entry {
+ uid_t uid;
+ u64 utime;
+ u64 stime;
+ int state;
+ struct io_stats io[UID_STATE_SIZE];
+ struct hlist_node hash;
+};
+
+static void init_hash_table_and_lock(void)
+{
+ int i;
+
+ hash_init(hash_table);
+ for (i = 0; i < UID_HASH_NUMS; i++)
+ spin_lock_init(&uid_lock[i]);
+}
+
+static inline int uid_to_bkt(uid_t uid)
+{
+ return hash_min(uid, HASH_BITS(hash_table));
+}
+
+static inline int trylock_uid(uid_t uid)
+{
+ return spin_trylock(&uid_lock[uid_to_bkt(uid)]);
+}
+
+static inline void lock_uid(uid_t uid)
+{
+ spin_lock(&uid_lock[uid_to_bkt(uid)]);
+}
+
+static inline void unlock_uid(uid_t uid)
+{
+ spin_unlock(&uid_lock[uid_to_bkt(uid)]);
+}
+
+static inline void lock_uid_by_bkt(u32 bkt)
+{
+ spin_lock(&uid_lock[bkt]);
+}
+
+static inline void unlock_uid_by_bkt(u32 bkt)
+{
+ spin_unlock(&uid_lock[bkt]);
+}
+
+static u64 compute_write_bytes(struct task_io_accounting *ioac)
+{
+ if (ioac->write_bytes <= ioac->cancelled_write_bytes)
+ return 0;
+
+ return ioac->write_bytes - ioac->cancelled_write_bytes;
+}
+
+static void compute_io_bucket_stats(struct io_stats *io_bucket,
+ struct io_stats *io_curr,
+ struct io_stats *io_last,
+ struct io_stats *io_dead)
+{
+ /* tasks could switch to another uid group, but its io_last in the
+ * previous uid group could still be positive.
+ * therefore before each update, do an overflow check first
+ */
+ int64_t delta;
+
+ delta = io_curr->read_bytes + io_dead->read_bytes -
+ io_last->read_bytes;
+ io_bucket->read_bytes += delta > 0 ? delta : 0;
+ delta = io_curr->write_bytes + io_dead->write_bytes -
+ io_last->write_bytes;
+ io_bucket->write_bytes += delta > 0 ? delta : 0;
+ delta = io_curr->rchar + io_dead->rchar - io_last->rchar;
+ io_bucket->rchar += delta > 0 ? delta : 0;
+ delta = io_curr->wchar + io_dead->wchar - io_last->wchar;
+ io_bucket->wchar += delta > 0 ? delta : 0;
+ delta = io_curr->fsync + io_dead->fsync - io_last->fsync;
+ io_bucket->fsync += delta > 0 ? delta : 0;
+
+ io_last->read_bytes = io_curr->read_bytes;
+ io_last->write_bytes = io_curr->write_bytes;
+ io_last->rchar = io_curr->rchar;
+ io_last->wchar = io_curr->wchar;
+ io_last->fsync = io_curr->fsync;
+
+ memset(io_dead, 0, sizeof(struct io_stats));
+}
+
+static struct uid_entry *find_uid_entry(uid_t uid)
+{
+ struct uid_entry *uid_entry;
+ u32 bkt = uid_to_bkt(uid);
+
+ for_each_uid_entry(uid_entry, bkt) {
+ if (uid_entry->uid == uid)
+ return uid_entry;
+ }
+ return NULL;
+}
+
+static struct uid_entry *find_or_register_uid(uid_t uid)
+{
+ struct uid_entry *uid_entry;
+
+ uid_entry = find_uid_entry(uid);
+ if (uid_entry)
+ return uid_entry;
+
+ uid_entry = kzalloc(sizeof(struct uid_entry), GFP_ATOMIC);
+ if (!uid_entry)
+ return NULL;
+
+ uid_entry->uid = uid;
+ hash_add(hash_table, &uid_entry->hash, uid);
+
+ return uid_entry;
+}
+
+static void calc_uid_cputime(struct uid_entry *uid_entry,
+ u64 *total_utime, u64 *total_stime)
+{
+ struct user_namespace *user_ns = current_user_ns();
+ struct task_struct *p, *t;
+ u64 utime, stime;
+ uid_t uid;
+
+ rcu_read_lock();
+ for_each_process(p) {
+ uid = from_kuid_munged(user_ns, task_uid(p));
+
+ if (uid != uid_entry->uid)
+ continue;
+
+ for_each_thread(p, t) {
+ /* avoid double accounting of dying threads */
+ if (!(t->flags & PF_EXITING)) {
+ task_cputime_adjusted(t, &utime, &stime);
+ *total_utime += utime;
+ *total_stime += stime;
+ }
+ }
+ }
+ rcu_read_unlock();
+}
+
+static int uid_cputime_show(struct seq_file *m, void *v)
+{
+ struct uid_entry *uid_entry = NULL;
+ u32 bkt;
+
+ for_each_bkt(bkt) {
+ lock_uid_by_bkt(bkt);
+ for_each_uid_entry(uid_entry, bkt) {
+ u64 total_utime = uid_entry->utime;
+ u64 total_stime = uid_entry->stime;
+
+ calc_uid_cputime(uid_entry, &total_utime, &total_stime);
+ seq_printf(m, "%d: %llu %llu\n", uid_entry->uid,
+ ktime_to_us(total_utime), ktime_to_us(total_stime));
+ }
+ unlock_uid_by_bkt(bkt);
+ }
+
+ return 0;
+}
+
+static int uid_cputime_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, uid_cputime_show, pde_data(inode));
+}
+
+static const struct proc_ops uid_cputime_fops = {
+ .proc_open = uid_cputime_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+};
+
+static int uid_remove_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, NULL, NULL);
+}
+
+static ssize_t uid_remove_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
+{
+ char uids[128];
+ char *start_uid, *end_uid = NULL;
+ long int uid_start = 0, uid_end = 0;
+
+ if (count >= sizeof(uids))
+ count = sizeof(uids) - 1;
+
+ if (copy_from_user(uids, buffer, count))
+ return -EFAULT;
+
+ uids[count] = '\0';
+ end_uid = uids;
+ start_uid = strsep(&end_uid, "-");
+
+ if (!start_uid || !end_uid)
+ return -EINVAL;
+
+ if (kstrtol(start_uid, 10, &uid_start) != 0 ||
+ kstrtol(end_uid, 10, &uid_end) != 0) {
+ return -EINVAL;
+ }
+
+ for (; uid_start <= uid_end; uid_start++) {
+ struct uid_entry *uid_entry;
+ struct hlist_node *tmp;
+ u32 bkt = uid_to_bkt((uid_t)uid_start);
+
+ lock_uid(uid_start);
+ for_each_uid_entry_safe(uid_entry, tmp, bkt) {
+ if (uid_start == uid_entry->uid) {
+ hash_del(&uid_entry->hash);
+ kfree(uid_entry);
+ }
+ }
+ unlock_uid(uid_start);
+ }
+
+ return count;
+}
+
+static const struct proc_ops uid_remove_fops = {
+ .proc_open = uid_remove_open,
+ .proc_release = single_release,
+ .proc_write = uid_remove_write,
+};
+
+static void __add_uid_io_stats(struct uid_entry *uid_entry,
+ struct task_io_accounting *ioac, int slot)
+{
+ struct io_stats *io_slot = &uid_entry->io[slot];
+
+ io_slot->read_bytes += ioac->read_bytes;
+ io_slot->write_bytes += compute_write_bytes(ioac);
+ io_slot->rchar += ioac->rchar;
+ io_slot->wchar += ioac->wchar;
+}
+
+static void add_uid_io_stats(struct uid_entry *uid_entry,
+ struct task_struct *task, int slot)
+{
+ struct task_entry *task_entry __maybe_unused;
+
+ /* avoid double accounting of dying threads */
+ if (slot != UID_STATE_DEAD_TASKS && (task->flags & PF_EXITING))
+ return;
+
+ __add_uid_io_stats(uid_entry, &task->ioac, slot);
+}
+
+static void update_io_stats_uid(struct uid_entry *uid_entry)
+{
+ struct user_namespace *user_ns = current_user_ns();
+ struct task_struct *p, *t;
+ struct io_stats io;
+
+ memset(&io, 0, sizeof(struct io_stats));
+
+ rcu_read_lock();
+ for_each_process(p) {
+ uid_t uid = from_kuid_munged(user_ns, task_uid(p));
+
+ if (uid != uid_entry->uid)
+ continue;
+
+ for_each_thread(p, t) {
+ /* avoid double accounting of dying threads */
+ if (!(t->flags & PF_EXITING)) {
+ io.read_bytes += t->ioac.read_bytes;
+ io.write_bytes += compute_write_bytes(&t->ioac);
+ io.rchar += t->ioac.rchar;
+ io.wchar += t->ioac.wchar;
+ }
+ }
+ }
+ rcu_read_unlock();
+
+ compute_io_bucket_stats(&uid_entry->io[uid_entry->state], &io,
+ &uid_entry->io[UID_STATE_TOTAL_LAST],
+ &uid_entry->io[UID_STATE_DEAD_TASKS]);
+}
+
+static int uid_io_show(struct seq_file *m, void *v)
+{
+
+ struct uid_entry *uid_entry = NULL;
+ u32 bkt;
+
+ for_each_bkt(bkt) {
+ lock_uid_by_bkt(bkt);
+ for_each_uid_entry(uid_entry, bkt) {
+
+ update_io_stats_uid(uid_entry);
+
+ seq_printf(m, "%d %llu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
+ uid_entry->uid,
+ uid_entry->io[UID_STATE_FOREGROUND].rchar,
+ uid_entry->io[UID_STATE_FOREGROUND].wchar,
+ uid_entry->io[UID_STATE_FOREGROUND].read_bytes,
+ uid_entry->io[UID_STATE_FOREGROUND].write_bytes,
+ uid_entry->io[UID_STATE_BACKGROUND].rchar,
+ uid_entry->io[UID_STATE_BACKGROUND].wchar,
+ uid_entry->io[UID_STATE_BACKGROUND].read_bytes,
+ uid_entry->io[UID_STATE_BACKGROUND].write_bytes,
+ uid_entry->io[UID_STATE_FOREGROUND].fsync,
+ uid_entry->io[UID_STATE_BACKGROUND].fsync);
+ }
+ unlock_uid_by_bkt(bkt);
+ }
+
+ return 0;
+}
+
+static int uid_io_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, uid_io_show, pde_data(inode));
+}
+
+static const struct proc_ops uid_io_fops = {
+ .proc_open = uid_io_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+ .proc_release = single_release,
+};
+
+static int uid_procstat_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, NULL, NULL);
+}
+
+static ssize_t uid_procstat_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
+{
+ struct uid_entry *uid_entry;
+ uid_t uid;
+ int argc, state;
+ char input[128];
+
+ if (count >= sizeof(input))
+ return -EINVAL;
+
+ if (copy_from_user(input, buffer, count))
+ return -EFAULT;
+
+ input[count] = '\0';
+
+ argc = sscanf(input, "%u %d", &uid, &state);
+ if (argc != 2)
+ return -EINVAL;
+
+ if (state != UID_STATE_BACKGROUND && state != UID_STATE_FOREGROUND)
+ return -EINVAL;
+
+ lock_uid(uid);
+ uid_entry = find_or_register_uid(uid);
+ if (!uid_entry) {
+ unlock_uid(uid);
+ return -EINVAL;
+ }
+
+ if (uid_entry->state == state) {
+ unlock_uid(uid);
+ return count;
+ }
+
+ update_io_stats_uid(uid_entry);
+ uid_entry->state = state;
+ unlock_uid(uid);
+
+ return count;
+}
+
+static const struct proc_ops uid_procstat_fops = {
+ .proc_open = uid_procstat_open,
+ .proc_release = single_release,
+ .proc_write = uid_procstat_write,
+};
+
+struct update_stats_work {
+ uid_t uid;
+ struct task_io_accounting ioac;
+ u64 utime;
+ u64 stime;
+ struct llist_node node;
+};
+
+static LLIST_HEAD(work_usw);
+
+static void update_stats_workfn(struct work_struct *work)
+{
+ struct update_stats_work *usw, *t;
+ struct uid_entry *uid_entry;
+ struct task_entry *task_entry __maybe_unused;
+ struct llist_node *node;
+
+ node = llist_del_all(&work_usw);
+ llist_for_each_entry_safe(usw, t, node, node) {
+ lock_uid(usw->uid);
+ uid_entry = find_uid_entry(usw->uid);
+ if (!uid_entry)
+ goto next;
+
+ uid_entry->utime += usw->utime;
+ uid_entry->stime += usw->stime;
+
+ __add_uid_io_stats(uid_entry, &usw->ioac, UID_STATE_DEAD_TASKS);
+next:
+ unlock_uid(usw->uid);
+ kfree(usw);
+ }
+
+}
+static DECLARE_WORK(update_stats_work, update_stats_workfn);
+
+static int process_notifier(struct notifier_block *self,
+ unsigned long cmd, void *v)
+{
+ struct task_struct *task = v;
+ struct uid_entry *uid_entry;
+ u64 utime, stime;
+ uid_t uid;
+
+ if (!task)
+ return NOTIFY_OK;
+
+ uid = from_kuid_munged(current_user_ns(), task_uid(task));
+ if (!trylock_uid(uid)) {
+ struct update_stats_work *usw;
+
+ usw = kmalloc(sizeof(struct update_stats_work), GFP_KERNEL);
+ if (usw) {
+ usw->uid = uid;
+ /*
+ * Copy task->ioac since task might be destroyed before
+ * the work is later performed.
+ */
+ usw->ioac = task->ioac;
+ task_cputime_adjusted(task, &usw->utime, &usw->stime);
+ llist_add(&usw->node, &work_usw);
+ schedule_work(&update_stats_work);
+ }
+ return NOTIFY_OK;
+ }
+
+ uid_entry = find_or_register_uid(uid);
+ if (!uid_entry) {
+ pr_err("%s: failed to find uid %d\n", __func__, uid);
+ goto exit;
+ }
+
+ task_cputime_adjusted(task, &utime, &stime);
+ uid_entry->utime += utime;
+ uid_entry->stime += stime;
+
+ add_uid_io_stats(uid_entry, task, UID_STATE_DEAD_TASKS);
+
+exit:
+ unlock_uid(uid);
+ return NOTIFY_OK;
+}
+
+static struct notifier_block process_notifier_block = {
+ .notifier_call = process_notifier,
+};
+
+static int __init proc_uid_sys_stats_init(void)
+{
+ init_hash_table_and_lock();
+
+ cpu_parent = proc_mkdir("uid_cputime", NULL);
+ if (!cpu_parent) {
+ pr_err("%s: failed to create uid_cputime proc entry\n",
+ __func__);
+ goto err;
+ }
+
+ proc_create_data("remove_uid_range", 0222, cpu_parent,
+ &uid_remove_fops, NULL);
+ proc_create_data("show_uid_stat", 0444, cpu_parent,
+ &uid_cputime_fops, NULL);
+
+ io_parent = proc_mkdir("uid_io", NULL);
+ if (!io_parent) {
+ pr_err("%s: failed to create uid_io proc entry\n",
+ __func__);
+ goto err;
+ }
+
+ proc_create_data("stats", 0444, io_parent,
+ &uid_io_fops, NULL);
+
+ proc_parent = proc_mkdir("uid_procstat", NULL);
+ if (!proc_parent) {
+ pr_err("%s: failed to create uid_procstat proc entry\n",
+ __func__);
+ goto err;
+ }
+
+ proc_create_data("set", 0222, proc_parent,
+ &uid_procstat_fops, NULL);
+
+ profile_event_register(PROFILE_TASK_EXIT, &process_notifier_block);
+
+ return 0;
+
+err:
+ remove_proc_subtree("uid_cputime", NULL);
+ remove_proc_subtree("uid_io", NULL);
+ remove_proc_subtree("uid_procstat", NULL);
+ return -ENOMEM;
+}
+
+early_initcall(proc_uid_sys_stats_init);
diff --git a/drivers/net/TEST_MAPPING b/drivers/net/TEST_MAPPING
new file mode 100644
index 0000000..535cc07
--- /dev/null
+++ b/drivers/net/TEST_MAPPING
@@ -0,0 +1,320 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.AccessibilitySystemActionTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/drivers/of/unittest-data/Makefile b/drivers/of/unittest-data/Makefile
index 01a966e..c0193e9 100644
--- a/drivers/of/unittest-data/Makefile
+++ b/drivers/of/unittest-data/Makefile
@@ -44,9 +44,7 @@
DTC_FLAGS_testcases += -@
# suppress warnings about intentional errors
-DTC_FLAGS_testcases += -Wno-interrupts_property \
- -Wno-node_name_vs_property_name \
- -Wno-interrupt_map
+DTC_FLAGS_testcases += -Wno-interrupts_property
# Apply overlays statically with fdtoverlay. This is a build time test that
# the overlays can be applied successfully by fdtoverlay. This does not
@@ -96,10 +94,6 @@
apply_static_overlay_2 := overlay.dtbo
-DTC_FLAGS_static_base_1 += -Wno-interrupts_property \
- -Wno-node_name_vs_property_name \
- -Wno-interrupt_map
-
static_test_1-dtbs := static_base_1.dtb $(apply_static_overlay_1)
static_test_2-dtbs := static_base_2.dtb $(apply_static_overlay_2)
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 6ae6189e..ba3ca6f 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -83,6 +83,7 @@ void dw_handle_msi_irq(struct dw_pcie_rp *pp)
generic_handle_demux_domain_irq(pp->irq_domain, irq_off + pos);
}
}
+EXPORT_SYMBOL_GPL(dw_handle_msi_irq);
/* Chained MSI interrupt service routine */
static void dw_chained_msi_isr(struct irq_desc *desc)
diff --git a/drivers/pmdomain/governor.c b/drivers/pmdomain/governor.c
index 96737ab..9ab3936 100644
--- a/drivers/pmdomain/governor.c
+++ b/drivers/pmdomain/governor.c
@@ -13,6 +13,8 @@
#include <linux/cpumask.h>
#include <linux/ktime.h>
+#include <trace/hooks/pm_domain.h>
+
static int dev_update_qos_constraint(struct device *dev, void *data)
{
s64 *constraint_ns_p = data;
@@ -182,6 +184,11 @@ static bool __default_power_down_ok(struct dev_pm_domain *pd,
struct pm_domain_data *pdd;
s64 min_off_time_ns;
s64 off_on_time_ns;
+ bool allow = true;
+
+ trace_android_vh_allow_domain_state(genpd, state, &allow);
+ if (!allow)
+ return false;
off_on_time_ns = genpd->states[state].power_off_latency_ns +
genpd->states[state].power_on_latency_ns;
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index a446d3d..5246943 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -37,6 +37,13 @@ static const struct device_type power_supply_dev_type = {
.groups = power_supply_attr_groups,
};
+struct match_fwnode_array_param {
+ struct fwnode_handle *parent_fwnode;
+ struct power_supply **psy;
+ ssize_t psy_size;
+ ssize_t psy_count;
+};
+
#define POWER_SUPPLY_DEFERRED_REGISTER_TIME msecs_to_jiffies(10)
static bool __power_supply_is_supplied_by(struct power_supply *supplier,
@@ -538,6 +545,77 @@ struct power_supply *power_supply_get_by_reference(struct fwnode_handle *fwnode,
}
EXPORT_SYMBOL_GPL(power_supply_get_by_reference);
+static int power_supply_match_device_fwnode_array(struct device *dev,
+ void *data)
+{
+ struct match_fwnode_array_param *param =
+ (struct match_fwnode_array_param *)data;
+ struct power_supply **psy = param->psy;
+ ssize_t size = param->psy_size;
+ ssize_t *count = ¶m->psy_count;
+
+ if (!dev->parent || dev_fwnode(dev->parent) != param->parent_fwnode)
+ return 0;
+
+ if (*count >= size)
+ return -EOVERFLOW;
+
+ psy[*count] = dev_to_psy(dev);
+ atomic_inc(&psy[*count]->use_cnt);
+ (*count)++;
+
+ return 0;
+}
+
+/**
+ * power_supply_get_by_reference_array() - Similar to
+ * power_supply_get_by_reference but returns an array of power supply
+ * objects which are associated with the phandle.
+ * @fwnode: Pointer to fwnode node holding phandle property.
+ * @property: Name of property holding a power supply name.
+ * @psy: Array of power_supply pointers provided by the client, which is
+ * filled by power_supply_get_by_reference_array.
+ * @size: size of power_supply pointer array.
+ *
+ * If power supply was found, it increases reference count for the
+ * internal power supply's device. The user should power_supply_put()
+ * after usage.
+ *
+ * Return: On success returns the number of power supply objects filled
+ * in the @psy array.
+ * -EOVERFLOW when size of @psy array is not suffice.
+ * -EINVAL when @psy is NULL or @size is 0.
+ * -ENODEV when matching fwnode is not found.
+ */
+int power_supply_get_by_reference_array(struct fwnode_handle *fwnode,
+ const char *property,
+ struct power_supply **psy,
+ ssize_t size)
+{
+ struct fwnode_handle *power_supply_fwnode;
+ int ret;
+ struct match_fwnode_array_param param;
+
+ if (!psy || !size)
+ return -EINVAL;
+
+ power_supply_fwnode = fwnode_find_reference(fwnode, property, 0);
+ if (IS_ERR(power_supply_fwnode))
+ return -ENODEV;
+
+ param.parent_fwnode = power_supply_fwnode;
+ param.psy = psy;
+ param.psy_size = size;
+ param.psy_count = 0;
+ ret = class_for_each_device(&power_supply_class, NULL, ¶m,
+ power_supply_match_device_fwnode_array);
+
+ fwnode_handle_put(power_supply_fwnode);
+
+ return param.psy_count;
+}
+EXPORT_SYMBOL_GPL(power_supply_get_by_reference_array);
+
static void devm_power_supply_put(struct device *dev, void *res)
{
struct power_supply **psy = res;
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index dd3a48d..d39d8dec 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -91,6 +91,7 @@ static const char * const POWER_SUPPLY_CHARGE_TYPE_TEXT[] = {
[POWER_SUPPLY_CHARGE_TYPE_CUSTOM] = "Custom",
[POWER_SUPPLY_CHARGE_TYPE_LONGLIFE] = "Long Life",
[POWER_SUPPLY_CHARGE_TYPE_BYPASS] = "Bypass",
+ [POWER_SUPPLY_CHARGE_TYPE_TAPER_EXT] = "Taper",
};
static const char * const POWER_SUPPLY_HEALTH_TEXT[] = {
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index b087ed2..ab7da39 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -35,6 +35,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/virtio_ring.h>
+#include <trace/hooks/remoteproc.h>
#include "remoteproc_internal.h"
@@ -1887,6 +1888,8 @@ static void rproc_crash_handler_work(struct work_struct *work)
rproc_trigger_recovery(rproc);
out:
+ trace_android_vh_rproc_recovery(rproc);
+
pm_relax(rproc->dev.parent);
}
diff --git a/drivers/remoteproc/remoteproc_sysfs.c b/drivers/remoteproc/remoteproc_sysfs.c
index 138e752..6beaca6 100644
--- a/drivers/remoteproc/remoteproc_sysfs.c
+++ b/drivers/remoteproc/remoteproc_sysfs.c
@@ -5,6 +5,7 @@
#include <linux/remoteproc.h>
#include <linux/slab.h>
+#include <trace/hooks/remoteproc.h>
#include "remoteproc_internal.h"
@@ -50,10 +51,16 @@ static ssize_t recovery_store(struct device *dev,
if (sysfs_streq(buf, "enabled")) {
/* change the flag and begin the recovery process if needed */
+ mutex_lock(&rproc->lock);
rproc->recovery_disabled = false;
+ trace_android_vh_rproc_recovery_set(rproc);
+ mutex_unlock(&rproc->lock);
rproc_trigger_recovery(rproc);
} else if (sysfs_streq(buf, "disabled")) {
+ mutex_lock(&rproc->lock);
rproc->recovery_disabled = true;
+ trace_android_vh_rproc_recovery_set(rproc);
+ mutex_unlock(&rproc->lock);
} else if (sysfs_streq(buf, "recover")) {
/* begin the recovery process without changing the flag */
rproc_trigger_recovery(rproc);
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 2f92cd6..1196375 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -36,6 +36,8 @@
source "drivers/staging/media/Kconfig"
+source "drivers/staging/android/Kconfig"
+
source "drivers/staging/fbtft/Kconfig"
source "drivers/staging/most/Kconfig"
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index f5b8876..f66b8e79 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -8,6 +8,7 @@
obj-$(CONFIG_IIO) += iio/
obj-$(CONFIG_FB_SM750) += sm750fb/
obj-$(CONFIG_MFD_NVEC) += nvec/
+obj-$(CONFIG_ASHMEM) += android/
obj-$(CONFIG_FB_TFT) += fbtft/
obj-$(CONFIG_MOST) += most/
obj-$(CONFIG_GREYBUS) += greybus/
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
new file mode 100644
index 0000000..14586a5
--- /dev/null
+++ b/drivers/staging/android/Kconfig
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: GPL-2.0
+config ASHMEM
+ bool "Enable the Anonymous Shared Memory Subsystem"
+ depends on SHMEM
+ select ANDROID_STAGING
+ help
+ The ashmem subsystem is a new shared memory allocator, similar to
+ POSIX SHM but with different behavior and sporting a simpler
+ file-based API.
+
+ It is, in theory, a good memory allocator for low-memory devices,
+ because it can discard shared memory units when under memory pressure.
+
+config ASHMEM_RUST
+ bool "Use the Rust implementation of Ashmem"
+ depends on ASHMEM && RUST
+
+config ASHMEM_C
+ def_bool ASHMEM && !ASHMEM_RUST
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
new file mode 100644
index 0000000..40baf1e
--- /dev/null
+++ b/drivers/staging/android/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+ccflags-y += -I$(src) # needed for trace events
+
+obj-$(CONFIG_ASHMEM_C) += ashmem-legacy.o
+obj-$(CONFIG_ASHMEM_RUST) += ashmem.o
+ashmem-objs += ashmem.o ashmem_exports.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
new file mode 100644
index 0000000..f74eb44
--- /dev/null
+++ b/drivers/staging/android/TODO
@@ -0,0 +1,8 @@
+TODO:
+ - sparse fixes
+ - rename files to be not so "generic"
+ - add proper arch dependencies as needed
+ - audit userspace interfaces to make sure they are sane
+
+Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
+Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
diff --git a/drivers/staging/android/ashmem-legacy.c b/drivers/staging/android/ashmem-legacy.c
new file mode 100644
index 0000000..b08e1e0f
--- /dev/null
+++ b/drivers/staging/android/ashmem-legacy.c
@@ -0,0 +1,1083 @@
+// SPDX-License-Identifier: GPL-2.0
+/* mm/ashmem.c
+ *
+ * Anonymous Shared Memory Subsystem, ashmem
+ *
+ * Copyright (C) 2008 Google, Inc.
+ *
+ * Robert Love <rlove@google.com>
+ */
+
+#define pr_fmt(fmt) "ashmem: " fmt
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/falloc.h>
+#include <linux/miscdevice.h>
+#include <linux/security.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/uaccess.h>
+#include <linux/personality.h>
+#include <linux/bitops.h>
+#include <linux/mutex.h>
+#include <linux/shmem_fs.h>
+#include "ashmem.h"
+
+/**
+ * struct ashmem_area - The anonymous shared memory area
+ * @name: The optional name in /proc/pid/maps
+ * @unpinned_list: The list of all ashmem areas
+ * @file: The shmem-based backing file
+ * @size: The size of the mapping, in bytes
+ * @prot_mask: The allowed protection bits, as vm_flags
+ *
+ * The lifecycle of this structure is from our parent file's open() until
+ * its release(). It is also protected by 'ashmem_mutex'
+ *
+ * Warning: Mappings do NOT pin this structure; It dies on close()
+ */
+struct ashmem_area {
+ char name[ASHMEM_FULL_NAME_LEN];
+ struct list_head unpinned_list;
+ struct file *file;
+ size_t size;
+ unsigned long prot_mask;
+};
+
+/**
+ * struct ashmem_range - A range of unpinned/evictable pages
+ * @lru: The entry in the LRU list
+ * @unpinned: The entry in its area's unpinned list
+ * @asma: The associated anonymous shared memory area.
+ * @pgstart: The starting page (inclusive)
+ * @pgend: The ending page (inclusive)
+ * @purged: The purge status (ASHMEM_NOT or ASHMEM_WAS_PURGED)
+ *
+ * The lifecycle of this structure is from unpin to pin.
+ * It is protected by 'ashmem_mutex'
+ */
+struct ashmem_range {
+ struct list_head lru;
+ struct list_head unpinned;
+ struct ashmem_area *asma;
+ size_t pgstart;
+ size_t pgend;
+ unsigned int purged;
+};
+
+/* LRU list of unpinned pages, protected by ashmem_mutex */
+static LIST_HEAD(ashmem_lru_list);
+
+static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
+
+/*
+ * long lru_count - The count of pages on our LRU list.
+ *
+ * This is protected by ashmem_mutex.
+ */
+static unsigned long lru_count;
+
+/*
+ * ashmem_mutex - protects the list of and each individual ashmem_area
+ *
+ * Lock Ordering: ashmex_mutex -> i_mutex -> i_alloc_sem
+ */
+static DEFINE_MUTEX(ashmem_mutex);
+
+static struct kmem_cache *ashmem_area_cachep __read_mostly;
+static struct kmem_cache *ashmem_range_cachep __read_mostly;
+
+/*
+ * A separate lockdep class for the backing shmem inodes to resolve the lockdep
+ * warning about the race between kswapd taking fs_reclaim before inode_lock
+ * and write syscall taking inode_lock and then fs_reclaim.
+ * Note that such race is impossible because ashmem does not support write
+ * syscalls operating on the backing shmem.
+ */
+static struct lock_class_key backing_shmem_inode_class;
+
+static inline unsigned long range_size(struct ashmem_range *range)
+{
+ return range->pgend - range->pgstart + 1;
+}
+
+static inline bool range_on_lru(struct ashmem_range *range)
+{
+ return range->purged == ASHMEM_NOT_PURGED;
+}
+
+static inline bool page_range_subsumes_range(struct ashmem_range *range,
+ size_t start, size_t end)
+{
+ return (range->pgstart >= start) && (range->pgend <= end);
+}
+
+static inline bool page_range_subsumed_by_range(struct ashmem_range *range,
+ size_t start, size_t end)
+{
+ return (range->pgstart <= start) && (range->pgend >= end);
+}
+
+static inline bool page_in_range(struct ashmem_range *range, size_t page)
+{
+ return (range->pgstart <= page) && (range->pgend >= page);
+}
+
+static inline bool page_range_in_range(struct ashmem_range *range,
+ size_t start, size_t end)
+{
+ return page_in_range(range, start) || page_in_range(range, end) ||
+ page_range_subsumes_range(range, start, end);
+}
+
+static inline bool range_before_page(struct ashmem_range *range,
+ size_t page)
+{
+ return range->pgend < page;
+}
+
+#define PROT_MASK (PROT_EXEC | PROT_READ | PROT_WRITE)
+
+/**
+ * lru_add() - Adds a range of memory to the LRU list
+ * @range: The memory range being added.
+ *
+ * The range is first added to the end (tail) of the LRU list.
+ * After this, the size of the range is added to @lru_count
+ */
+static inline void lru_add(struct ashmem_range *range)
+{
+ list_add_tail(&range->lru, &ashmem_lru_list);
+ lru_count += range_size(range);
+}
+
+/**
+ * lru_del() - Removes a range of memory from the LRU list
+ * @range: The memory range being removed
+ *
+ * The range is first deleted from the LRU list.
+ * After this, the size of the range is removed from @lru_count
+ */
+static inline void lru_del(struct ashmem_range *range)
+{
+ list_del(&range->lru);
+ lru_count -= range_size(range);
+}
+
+/**
+ * range_alloc() - Allocates and initializes a new ashmem_range structure
+ * @asma: The associated ashmem_area
+ * @prev_range: The previous ashmem_range in the sorted asma->unpinned list
+ * @purged: Initial purge status (ASMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
+ * @start: The starting page (inclusive)
+ * @end: The ending page (inclusive)
+ * @new_range: The placeholder for the new range
+ *
+ * This function is protected by ashmem_mutex.
+ */
+static void range_alloc(struct ashmem_area *asma,
+ struct ashmem_range *prev_range, unsigned int purged,
+ size_t start, size_t end,
+ struct ashmem_range **new_range)
+{
+ struct ashmem_range *range = *new_range;
+
+ *new_range = NULL;
+ range->asma = asma;
+ range->pgstart = start;
+ range->pgend = end;
+ range->purged = purged;
+
+ list_add_tail(&range->unpinned, &prev_range->unpinned);
+
+ if (range_on_lru(range))
+ lru_add(range);
+}
+
+/**
+ * range_del() - Deletes and deallocates an ashmem_range structure
+ * @range: The associated ashmem_range that has previously been allocated
+ */
+static void range_del(struct ashmem_range *range)
+{
+ list_del(&range->unpinned);
+ if (range_on_lru(range))
+ lru_del(range);
+ kmem_cache_free(ashmem_range_cachep, range);
+}
+
+/**
+ * range_shrink() - Shrinks an ashmem_range
+ * @range: The associated ashmem_range being shrunk
+ * @start: The starting byte of the new range
+ * @end: The ending byte of the new range
+ *
+ * This does not modify the data inside the existing range in any way - It
+ * simply shrinks the boundaries of the range.
+ *
+ * Theoretically, with a little tweaking, this could eventually be changed
+ * to range_resize, and expand the lru_count if the new range is larger.
+ */
+static inline void range_shrink(struct ashmem_range *range,
+ size_t start, size_t end)
+{
+ size_t pre = range_size(range);
+
+ range->pgstart = start;
+ range->pgend = end;
+
+ if (range_on_lru(range))
+ lru_count -= pre - range_size(range);
+}
+
+/**
+ * ashmem_open() - Opens an Anonymous Shared Memory structure
+ * @inode: The backing file's index node(?)
+ * @file: The backing file
+ *
+ * Please note that the ashmem_area is not returned by this function - It is
+ * instead written to "file->private_data".
+ *
+ * Return: 0 if successful, or another code if unsuccessful.
+ */
+static int ashmem_open(struct inode *inode, struct file *file)
+{
+ struct ashmem_area *asma;
+ int ret;
+
+ ret = generic_file_open(inode, file);
+ if (ret)
+ return ret;
+
+ asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
+ if (!asma)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&asma->unpinned_list);
+ memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
+ asma->prot_mask = PROT_MASK;
+ file->private_data = asma;
+
+ return 0;
+}
+
+/**
+ * ashmem_release() - Releases an Anonymous Shared Memory structure
+ * @ignored: The backing file's Index Node(?) - It is ignored here.
+ * @file: The backing file
+ *
+ * Return: 0 if successful. If it is anything else, go have a coffee and
+ * try again.
+ */
+static int ashmem_release(struct inode *ignored, struct file *file)
+{
+ struct ashmem_area *asma = file->private_data;
+ struct ashmem_range *range, *next;
+
+ mutex_lock(&ashmem_mutex);
+ list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
+ range_del(range);
+ mutex_unlock(&ashmem_mutex);
+
+ if (asma->file)
+ fput(asma->file);
+ kmem_cache_free(ashmem_area_cachep, asma);
+
+ return 0;
+}
+
+static ssize_t ashmem_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+ struct ashmem_area *asma = iocb->ki_filp->private_data;
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* If size is not set, or set to 0, always return EOF. */
+ if (asma->size == 0)
+ goto out_unlock;
+
+ if (!asma->file) {
+ ret = -EBADF;
+ goto out_unlock;
+ }
+
+ /*
+ * asma and asma->file are used outside the lock here. We assume
+ * once asma->file is set it will never be changed, and will not
+ * be destroyed until all references to the file are dropped and
+ * ashmem_release is called.
+ */
+ mutex_unlock(&ashmem_mutex);
+ ret = vfs_iter_read(asma->file, iter, &iocb->ki_pos, 0);
+ mutex_lock(&ashmem_mutex);
+ if (ret > 0)
+ asma->file->f_pos = iocb->ki_pos;
+out_unlock:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
+{
+ struct ashmem_area *asma = file->private_data;
+ loff_t ret;
+
+ mutex_lock(&ashmem_mutex);
+
+ if (asma->size == 0) {
+ mutex_unlock(&ashmem_mutex);
+ return -EINVAL;
+ }
+
+ if (!asma->file) {
+ mutex_unlock(&ashmem_mutex);
+ return -EBADF;
+ }
+
+ mutex_unlock(&ashmem_mutex);
+
+ ret = vfs_llseek(asma->file, offset, origin);
+ if (ret < 0)
+ return ret;
+
+ /** Copy f_pos from backing file, since f_ops->llseek() sets it */
+ file->f_pos = asma->file->f_pos;
+ return ret;
+}
+
+static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
+{
+ return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) |
+ _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
+ _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
+}
+
+static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ /* do not allow to mmap ashmem backing shmem file directly */
+ return -EPERM;
+}
+
+static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ static struct file_operations vmfile_fops;
+ struct ashmem_area *asma = file->private_data;
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* user needs to SET_SIZE before mapping */
+ if (!asma->size) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* requested mapping size larger than object size */
+ if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* requested protection bits must match our allowed protection mask */
+ if ((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
+ calc_vm_prot_bits(PROT_MASK, 0)) {
+ ret = -EPERM;
+ goto out;
+ }
+ vm_flags_clear(vma, calc_vm_may_flags(~asma->prot_mask));
+
+ if (!asma->file) {
+ char *name = ASHMEM_NAME_DEF;
+ struct file *vmfile;
+ struct inode *inode;
+
+ if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
+ name = asma->name;
+
+ /* ... and allocate the backing shmem file */
+ vmfile = shmem_file_setup(name, asma->size, vma->flags);
+ if (IS_ERR(vmfile)) {
+ ret = PTR_ERR(vmfile);
+ goto out;
+ }
+ vmfile->f_mode |= FMODE_LSEEK;
+ inode = file_inode(vmfile);
+ lockdep_set_class(&inode->i_rwsem, &backing_shmem_inode_class);
+ asma->file = vmfile;
+ /*
+ * override mmap operation of the vmfile so that it can't be
+ * remapped which would lead to creation of a new vma with no
+ * asma permission checks. Have to override get_unmapped_area
+ * as well to prevent VM_BUG_ON check for f_ops modification.
+ */
+ if (!vmfile_fops.mmap) {
+ vmfile_fops = *vmfile->f_op;
+ vmfile_fops.mmap = ashmem_vmfile_mmap;
+ vmfile_fops.get_unmapped_area = mm_get_unmapped_area;
+ }
+ vmfile->f_op = &vmfile_fops;
+ }
+ get_file(asma->file);
+
+ /*
+ * XXX - Reworked to use shmem_zero_setup() instead of
+ * shmem_set_file while we're in staging. -jstultz
+ */
+ if (vma->vm_flags & VM_SHARED) {
+ ret = shmem_zero_setup(vma);
+ if (ret) {
+ fput(asma->file);
+ goto out;
+ }
+ } else {
+ vma_set_anonymous(vma);
+ }
+
+ vma_set_file(vma, asma->file);
+ /* XXX: merge this with the get_file() above if possible */
+ fput(asma->file);
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+/*
+ * ashmem_shrink - our cache shrinker, called from mm/vmscan.c
+ *
+ * 'nr_to_scan' is the number of objects to scan for freeing.
+ *
+ * 'gfp_mask' is the mask of the allocation that got us into this mess.
+ *
+ * Return value is the number of objects freed or -1 if we cannot
+ * proceed without risk of deadlock (due to gfp_mask).
+ *
+ * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
+ * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
+ * pages freed.
+ */
+static unsigned long
+ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned long freed = 0;
+
+ /* We might recurse into filesystem code, so bail out if necessary */
+ if (!(sc->gfp_mask & __GFP_FS))
+ return SHRINK_STOP;
+
+ if (!mutex_trylock(&ashmem_mutex))
+ return -1;
+
+ while (!list_empty(&ashmem_lru_list)) {
+ struct ashmem_range *range =
+ list_first_entry(&ashmem_lru_list, typeof(*range), lru);
+ loff_t start = range->pgstart * PAGE_SIZE;
+ loff_t end = (range->pgend + 1) * PAGE_SIZE;
+ struct file *f = range->asma->file;
+
+ get_file(f);
+ atomic_inc(&ashmem_shrink_inflight);
+ range->purged = ASHMEM_WAS_PURGED;
+ lru_del(range);
+
+ freed += range_size(range);
+ mutex_unlock(&ashmem_mutex);
+ f->f_op->fallocate(f,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ start, end - start);
+ fput(f);
+ if (atomic_dec_and_test(&ashmem_shrink_inflight))
+ wake_up_all(&ashmem_shrink_wait);
+ if (!mutex_trylock(&ashmem_mutex))
+ goto out;
+ if (--sc->nr_to_scan <= 0)
+ break;
+ }
+ mutex_unlock(&ashmem_mutex);
+out:
+ return freed;
+}
+
+static unsigned long
+ashmem_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ /*
+ * note that lru_count is count of pages on the lru, not a count of
+ * objects on the list. This means the scan function needs to return the
+ * number of pages freed, not the number of objects scanned.
+ */
+ return lru_count;
+}
+
+static struct shrinker *ashmem_shrinker;
+
+static int __init ashmem_init_shrinker(void)
+{
+ ashmem_shrinker = shrinker_alloc(0, "android-ashmem");
+ if (!ashmem_shrinker)
+ return -ENOMEM;
+
+ ashmem_shrinker->count_objects = ashmem_shrink_count;
+ ashmem_shrinker->scan_objects = ashmem_shrink_scan;
+ /*
+ * XXX (dchinner): I wish people would comment on why they need on
+ * significant changes to the default value here
+ */
+ ashmem_shrinker->seeks = DEFAULT_SEEKS * 4;
+
+ shrinker_register(ashmem_shrinker);
+
+ return 0;
+}
+
+static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
+{
+ int ret = 0;
+
+ mutex_lock(&ashmem_mutex);
+
+ /* the user can only remove, not add, protection bits */
+ if ((asma->prot_mask & prot) != prot) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* does the application expect PROT_READ to imply PROT_EXEC? */
+ if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
+ prot |= PROT_EXEC;
+
+ asma->prot_mask = prot;
+
+out:
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+static int set_name(struct ashmem_area *asma, void __user *name)
+{
+ int len;
+ int ret = 0;
+ char local_name[ASHMEM_NAME_LEN];
+
+ /*
+ * Holding the ashmem_mutex while doing a copy_from_user might cause
+ * an data abort which would try to access mmap_lock. If another
+ * thread has invoked ashmem_mmap then it will be holding the
+ * semaphore and will be waiting for ashmem_mutex, there by leading to
+ * deadlock. We'll release the mutex and take the name to a local
+ * variable that does not need protection and later copy the local
+ * variable to the structure member with lock held.
+ */
+ len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
+ if (len < 0)
+ return len;
+
+ mutex_lock(&ashmem_mutex);
+ /* cannot change an existing mapping's name */
+ if (asma->file)
+ ret = -EINVAL;
+ else
+ strscpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name,
+ ASHMEM_NAME_LEN);
+
+ mutex_unlock(&ashmem_mutex);
+ return ret;
+}
+
+static int get_name(struct ashmem_area *asma, void __user *name)
+{
+ int ret = 0;
+ size_t len;
+ /*
+ * Have a local variable to which we'll copy the content
+ * from asma with the lock held. Later we can copy this to the user
+ * space safely without holding any locks. So even if we proceed to
+ * wait for mmap_lock, it won't lead to deadlock.
+ */
+ char local_name[ASHMEM_NAME_LEN];
+
+ mutex_lock(&ashmem_mutex);
+ if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
+ /*
+ * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
+ * prevents us from revealing one user's stack to another.
+ */
+ len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
+ memcpy(local_name, asma->name + ASHMEM_NAME_PREFIX_LEN, len);
+ } else {
+ len = sizeof(ASHMEM_NAME_DEF);
+ memcpy(local_name, ASHMEM_NAME_DEF, len);
+ }
+ mutex_unlock(&ashmem_mutex);
+
+ /*
+ * Now we are just copying from the stack variable to userland
+ * No lock held
+ */
+ if (copy_to_user(name, local_name, len))
+ ret = -EFAULT;
+ return ret;
+}
+
+/*
+ * ashmem_pin - pin the given ashmem region, returning whether it was
+ * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+ struct ashmem_range **new_range)
+{
+ struct ashmem_range *range, *next;
+ int ret = ASHMEM_NOT_PURGED;
+
+ list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
+ /* moved past last applicable page; we can short circuit */
+ if (range_before_page(range, pgstart))
+ break;
+
+ /*
+ * The user can ask us to pin pages that span multiple ranges,
+ * or to pin pages that aren't even unpinned, so this is messy.
+ *
+ * Four cases:
+ * 1. The requested range subsumes an existing range, so we
+ * just remove the entire matching range.
+ * 2. The requested range overlaps the start of an existing
+ * range, so we just update that range.
+ * 3. The requested range overlaps the end of an existing
+ * range, so we just update that range.
+ * 4. The requested range punches a hole in an existing range,
+ * so we have to update one side of the range and then
+ * create a new range for the other side.
+ */
+ if (page_range_in_range(range, pgstart, pgend)) {
+ ret |= range->purged;
+
+ /* Case #1: Easy. Just nuke the whole thing. */
+ if (page_range_subsumes_range(range, pgstart, pgend)) {
+ range_del(range);
+ continue;
+ }
+
+ /* Case #2: We overlap from the start, so adjust it */
+ if (range->pgstart >= pgstart) {
+ range_shrink(range, pgend + 1, range->pgend);
+ continue;
+ }
+
+ /* Case #3: We overlap from the rear, so adjust it */
+ if (range->pgend <= pgend) {
+ range_shrink(range, range->pgstart,
+ pgstart - 1);
+ continue;
+ }
+
+ /*
+ * Case #4: We eat a chunk out of the middle. A bit
+ * more complicated, we allocate a new range for the
+ * second half and adjust the first chunk's endpoint.
+ */
+ range_alloc(asma, range, range->purged,
+ pgend + 1, range->pgend, new_range);
+ range_shrink(range, range->pgstart, pgstart - 1);
+ break;
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * ashmem_unpin - unpin the given range of pages. Returns zero on success.
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+ struct ashmem_range **new_range)
+{
+ struct ashmem_range *range = NULL, *iter, *next;
+ unsigned int purged = ASHMEM_NOT_PURGED;
+
+restart:
+ list_for_each_entry_safe(iter, next, &asma->unpinned_list, unpinned) {
+ /* short circuit: this is our insertion point */
+ if (range_before_page(iter, pgstart)) {
+ range = iter;
+ break;
+ }
+
+ /*
+ * The user can ask us to unpin pages that are already entirely
+ * or partially pinned. We handle those two cases here.
+ */
+ if (page_range_subsumed_by_range(iter, pgstart, pgend))
+ return 0;
+ if (page_range_in_range(iter, pgstart, pgend)) {
+ pgstart = min(iter->pgstart, pgstart);
+ pgend = max(iter->pgend, pgend);
+ purged |= iter->purged;
+ range_del(iter);
+ goto restart;
+ }
+ }
+
+ range = list_prepare_entry(range, &asma->unpinned_list, unpinned);
+ range_alloc(asma, range, purged, pgstart, pgend, new_range);
+ return 0;
+}
+
+/*
+ * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
+ * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
+ *
+ * Caller must hold ashmem_mutex.
+ */
+static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
+ size_t pgend)
+{
+ struct ashmem_range *range;
+ int ret = ASHMEM_IS_PINNED;
+
+ list_for_each_entry(range, &asma->unpinned_list, unpinned) {
+ if (range_before_page(range, pgstart))
+ break;
+ if (page_range_in_range(range, pgstart, pgend)) {
+ ret = ASHMEM_IS_UNPINNED;
+ break;
+ }
+ }
+
+ return ret;
+}
+
+static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
+ void __user *p)
+{
+ struct ashmem_pin pin;
+ size_t pgstart, pgend;
+ int ret = -EINVAL;
+ struct ashmem_range *range = NULL;
+
+ if (copy_from_user(&pin, p, sizeof(pin)))
+ return -EFAULT;
+
+ if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
+ range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+ if (!range)
+ return -ENOMEM;
+ }
+
+ mutex_lock(&ashmem_mutex);
+ wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
+
+ if (!asma->file)
+ goto out_unlock;
+
+ /* per custom, you can pass zero for len to mean "everything onward" */
+ if (!pin.len)
+ pin.len = PAGE_ALIGN(asma->size) - pin.offset;
+
+ if ((pin.offset | pin.len) & ~PAGE_MASK)
+ goto out_unlock;
+
+ if (((__u32)-1) - pin.offset < pin.len)
+ goto out_unlock;
+
+ if (PAGE_ALIGN(asma->size) < pin.offset + pin.len)
+ goto out_unlock;
+
+ pgstart = pin.offset / PAGE_SIZE;
+ pgend = pgstart + (pin.len / PAGE_SIZE) - 1;
+
+ switch (cmd) {
+ case ASHMEM_PIN:
+ ret = ashmem_pin(asma, pgstart, pgend, &range);
+ break;
+ case ASHMEM_UNPIN:
+ ret = ashmem_unpin(asma, pgstart, pgend, &range);
+ break;
+ case ASHMEM_GET_PIN_STATUS:
+ ret = ashmem_get_pin_status(asma, pgstart, pgend);
+ break;
+ }
+
+out_unlock:
+ mutex_unlock(&ashmem_mutex);
+ if (range)
+ kmem_cache_free(ashmem_range_cachep, range);
+
+ return ret;
+}
+
+static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ struct ashmem_area *asma = file->private_data;
+ unsigned long ino;
+ long ret = -ENOTTY;
+
+ switch (cmd) {
+ case ASHMEM_SET_NAME:
+ ret = set_name(asma, (void __user *)arg);
+ break;
+ case ASHMEM_GET_NAME:
+ ret = get_name(asma, (void __user *)arg);
+ break;
+ case ASHMEM_SET_SIZE:
+ ret = -EINVAL;
+ mutex_lock(&ashmem_mutex);
+ if (!asma->file) {
+ ret = 0;
+ asma->size = (size_t)arg;
+ }
+ mutex_unlock(&ashmem_mutex);
+ break;
+ case ASHMEM_GET_SIZE:
+ ret = asma->size;
+ break;
+ case ASHMEM_SET_PROT_MASK:
+ ret = set_prot_mask(asma, arg);
+ break;
+ case ASHMEM_GET_PROT_MASK:
+ ret = asma->prot_mask;
+ break;
+ case ASHMEM_PIN:
+ case ASHMEM_UNPIN:
+ case ASHMEM_GET_PIN_STATUS:
+ ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
+ break;
+ case ASHMEM_PURGE_ALL_CACHES:
+ ret = -EPERM;
+ if (capable(CAP_SYS_ADMIN)) {
+ struct shrink_control sc = {
+ .gfp_mask = GFP_KERNEL,
+ .nr_to_scan = LONG_MAX,
+ };
+ ret = ashmem_shrink_count(ashmem_shrinker, &sc);
+ ashmem_shrink_scan(ashmem_shrinker, &sc);
+ }
+ break;
+ case ASHMEM_GET_FILE_ID:
+ /* Lock around our check to avoid racing with ashmem_mmap(). */
+ mutex_lock(&ashmem_mutex);
+ if (!asma || !asma->file) {
+ mutex_unlock(&ashmem_mutex);
+ ret = -EINVAL;
+ break;
+ }
+ ino = file_inode(asma->file)->i_ino;
+ mutex_unlock(&ashmem_mutex);
+
+ if (copy_to_user((void __user *)arg, &ino, sizeof(ino))) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
+
+/* support of 32bit userspace on 64bit platforms */
+#ifdef CONFIG_COMPAT
+static long compat_ashmem_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case COMPAT_ASHMEM_SET_SIZE:
+ cmd = ASHMEM_SET_SIZE;
+ break;
+ case COMPAT_ASHMEM_SET_PROT_MASK:
+ cmd = ASHMEM_SET_PROT_MASK;
+ break;
+ }
+ return ashmem_ioctl(file, cmd, arg);
+}
+#endif
+#ifdef CONFIG_PROC_FS
+static void ashmem_show_fdinfo(struct seq_file *m, struct file *file)
+{
+ struct ashmem_area *asma = file->private_data;
+
+ mutex_lock(&ashmem_mutex);
+
+ if (asma->file)
+ seq_printf(m, "inode:\t%ld\n", file_inode(asma->file)->i_ino);
+
+ if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
+ seq_printf(m, "name:\t%s\n",
+ asma->name + ASHMEM_NAME_PREFIX_LEN);
+
+ seq_printf(m, "size:\t%zu\n", asma->size);
+
+ mutex_unlock(&ashmem_mutex);
+}
+#endif
+static const struct file_operations ashmem_fops = {
+ .owner = THIS_MODULE,
+ .open = ashmem_open,
+ .release = ashmem_release,
+ .read_iter = ashmem_read_iter,
+ .llseek = ashmem_llseek,
+ .mmap = ashmem_mmap,
+ .unlocked_ioctl = ashmem_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_ashmem_ioctl,
+#endif
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = ashmem_show_fdinfo,
+#endif
+};
+
+static struct miscdevice ashmem_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "ashmem",
+ .fops = &ashmem_fops,
+};
+
+/**
+ * is_ashmem_file() - Indicates whether a given file structure belongs to an ashmem file.
+ * @file: A pointer to the file structure being inspected.
+ *
+ */
+bool is_ashmem_file(struct file *file)
+{
+ if (file && (file->f_op == &ashmem_fops))
+ return true;
+ return false;
+}
+EXPORT_SYMBOL_GPL(is_ashmem_file);
+
+/**
+ * ashmem_area_name() - Provides the name of a region associated with a given ashmem file.
+ * @file: A pointer to the file structure being inspected.
+ * @name: A pointer to a buffer of at least ASHMEM_FULL_NAME_LEN + 1 (for the NULL terminator).
+ *
+ * This function populates @name with the name of a given ashmem file. If the name has not been
+ * set yet, the buffer is populated with "/dev/ashmem". Otherwise, it is populated with
+ * "/dev/ashmem/name". If the file is not an ashmem file, -EINVAL is returned and the buffer is
+ * not touched.
+ */
+int ashmem_area_name(struct file *file, char *name)
+{
+ struct ashmem_area *asma;
+
+ if (!is_ashmem_file(file) || !name)
+ return -EINVAL;
+
+ asma = file->private_data;
+ mutex_lock(&ashmem_mutex);
+ strscpy(name, asma->name, ASHMEM_FULL_NAME_LEN);
+ mutex_unlock(&ashmem_mutex);
+
+ /*
+ * If the name hasn't been set, the Rust driver will return /dev/ashmem (i.e. it removes
+ * the trailing /, so lets do that here.
+ */
+ if (name[ASHMEM_NAME_PREFIX_LEN] == '\0')
+ name[ASHMEM_NAME_PREFIX_LEN] = '\0';
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ashmem_area_name);
+
+/**
+ * ashmem_area_size() - Provides the size of a region associated with a given ashmem file.
+ * @file: A pointer to the file structure being inspected.
+ *
+ * Returns the size of the region if the file is an ashmem buffer, or 0 otherwise.
+ */
+long ashmem_area_size(struct file *file)
+{
+ struct ashmem_area *asma;
+ ssize_t size;
+
+ if (!is_ashmem_file(file))
+ return 0;
+
+ asma = file->private_data;
+ mutex_lock(&ashmem_mutex);
+ size = asma->size;
+ mutex_unlock(&ashmem_mutex);
+ return size;
+}
+EXPORT_SYMBOL_GPL(ashmem_area_size);
+
+/**
+ * ashmem_area_vmfile() - Provides a pointer to the shmem file structure for an ashmem file.
+ * @file: A pointer to the file structure being inspected.
+ *
+ * Returns a pointer to the underlying shmem file structure for an ashmem file, with its reference
+ * count elevated by 1. It is the caller's responsibility to decrement this reference by invoking
+ * fput() on the return value of this function if it is not NULL. If the given file is not an ashmem
+ * file, then NULL is returned.
+ */
+struct file *ashmem_area_vmfile(struct file *file)
+{
+ struct ashmem_area *asma;
+ struct file *vmfile = NULL;
+
+ if (!is_ashmem_file(file))
+ return NULL;
+
+ asma = file->private_data;
+ mutex_lock(&ashmem_mutex);
+ if (asma->file)
+ vmfile = get_file(asma->file);
+ mutex_unlock(&ashmem_mutex);
+ return vmfile;
+}
+EXPORT_SYMBOL_GPL(ashmem_area_vmfile);
+
+static int __init ashmem_init(void)
+{
+ int ret = -ENOMEM;
+
+ ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
+ sizeof(struct ashmem_area),
+ 0, 0, NULL);
+ if (!ashmem_area_cachep) {
+ pr_err("failed to create slab cache\n");
+ goto out;
+ }
+
+ ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
+ sizeof(struct ashmem_range),
+ 0, SLAB_RECLAIM_ACCOUNT, NULL);
+ if (!ashmem_range_cachep) {
+ pr_err("failed to create slab cache\n");
+ goto out_free1;
+ }
+
+ ret = misc_register(&ashmem_misc);
+ if (ret) {
+ pr_err("failed to register misc device!\n");
+ goto out_free2;
+ }
+
+ ret = ashmem_init_shrinker();
+ if (ret) {
+ pr_err("failed to register shrinker!\n");
+ goto out_demisc;
+ }
+
+ pr_info("initialized\n");
+
+ return 0;
+
+out_demisc:
+ misc_deregister(&ashmem_misc);
+out_free2:
+ kmem_cache_destroy(ashmem_range_cachep);
+out_free1:
+ kmem_cache_destroy(ashmem_area_cachep);
+out:
+ return ret;
+}
+device_initcall(ashmem_init);
diff --git a/drivers/staging/android/ashmem.h b/drivers/staging/android/ashmem.h
new file mode 100644
index 0000000..8011a15
--- /dev/null
+++ b/drivers/staging/android/ashmem.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Apache-2.0 */
+/*
+ * include/linux/ashmem.h
+ *
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ */
+
+#ifndef _LINUX_ASHMEM_H
+#define _LINUX_ASHMEM_H
+
+#include <linux/limits.h>
+#include <linux/ioctl.h>
+#include <linux/compat.h>
+
+#include "uapi/ashmem.h"
+
+#include <linux/shrinker.h>
+static const gfp_t RUST_CONST_HELPER___GFP_FS = ___GFP_FS;
+static const gfp_t RUST_CONST_HELPER___GFP_IO = ___GFP_IO;
+
+#define ASHMEM_NAME_PREFIX "dev/ashmem/"
+#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
+#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
+
+/* support of 32bit userspace on 64bit platforms */
+#ifdef CONFIG_COMPAT
+enum {
+ COMPAT_ASHMEM_SET_SIZE = _IOW(__ASHMEMIOC, 3, compat_size_t),
+ COMPAT_ASHMEM_SET_PROT_MASK = _IOW(__ASHMEMIOC, 5, unsigned int),
+};
+#endif
+
+bool is_ashmem_file(struct file *file);
+int ashmem_area_name(struct file *file, char *name);
+long ashmem_area_size(struct file *file);
+struct file *ashmem_area_vmfile(struct file *file);
+
+long ashmem_memfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
+#endif /* _LINUX_ASHMEM_H */
diff --git a/drivers/staging/android/ashmem.rs b/drivers/staging/android/ashmem.rs
new file mode 100644
index 0000000..83a3f87
--- /dev/null
+++ b/drivers/staging/android/ashmem.rs
@@ -0,0 +1,759 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Anonymous Shared Memory Subsystem for Android.
+//!
+//! The ashmem subsystem is a new shared memory allocator, similar to POSIX SHM but with different
+//! behavior and sporting a simpler file-based API.
+//!
+//! It is, in theory, a good memory allocator for low-memory devices, because it can discard shared
+//! memory units when under memory pressure.
+
+use core::{
+ pin::Pin,
+ ptr::null_mut,
+ sync::atomic::{AtomicBool, AtomicPtr, AtomicUsize, Ordering},
+};
+use kernel::{
+ bindings::{self, ASHMEM_GET_PIN_STATUS, ASHMEM_PIN, ASHMEM_UNPIN},
+ c_str,
+ error::Result,
+ ffi::c_int,
+ fs::{File, Kiocb, LocalFile},
+ ioctl::_IOC_SIZE,
+ iov::IovIterDest,
+ miscdevice::{loff_t, MiscDevice, MiscDeviceOptions, MiscDeviceRegistration},
+ mm::virt::{flags as vma_flags, VmaNew},
+ page::{page_align, PAGE_MASK, PAGE_SIZE},
+ prelude::*,
+ seq_file::{seq_print, SeqFile},
+ sync::{new_mutex, Mutex, UniqueArc},
+ task::Task,
+ types::ForeignOwnable,
+ uaccess::{UserSlice, UserSliceReader, UserSliceWriter},
+};
+
+const ASHMEM_NAME_LEN: usize = bindings::ASHMEM_NAME_LEN as usize;
+const ASHMEM_FULL_NAME_LEN: usize = bindings::ASHMEM_FULL_NAME_LEN as usize;
+const ASHMEM_NAME_PREFIX_LEN: usize = bindings::ASHMEM_NAME_PREFIX_LEN as usize;
+const ASHMEM_NAME_PREFIX: [u8; ASHMEM_NAME_PREFIX_LEN] = *b"dev/ashmem/";
+
+const ASHMEM_MAX_SIZE: usize = usize::MAX >> 1;
+
+const PROT_READ: usize = bindings::PROT_READ as usize;
+const PROT_EXEC: usize = bindings::PROT_EXEC as usize;
+const PROT_WRITE: usize = bindings::PROT_WRITE as usize;
+const PROT_MASK: usize = PROT_EXEC | PROT_READ | PROT_WRITE;
+
+mod ashmem_shrinker;
+
+mod ashmem_range;
+use ashmem_range::{Area, AshmemGuard, NewRange, ASHMEM_MUTEX, LRU_COUNT};
+
+mod shmem;
+use shmem::ShmemFile;
+
+mod ashmem_toggle;
+use ashmem_toggle::{AshmemToggleExec, AshmemToggleMisc, AshmemToggleRead, AshmemToggleShrinker};
+
+/// Does PROT_READ imply PROT_EXEC for this task?
+fn read_implies_exec(task: &Task) -> bool {
+ // SAFETY: Always safe to read.
+ let personality = unsafe { (*task.as_ptr()).personality };
+ (personality & bindings::READ_IMPLIES_EXEC) != 0
+}
+
+/// Calls `capable(CAP_SYS_ADMIN)`.
+fn has_cap_sys_admin() -> bool {
+ use kernel::bindings::CAP_SYS_ADMIN;
+ unsafe { bindings::capable(CAP_SYS_ADMIN as c_int) }
+}
+
+static NUM_PIN_IOCTLS_WAITING: AtomicUsize = AtomicUsize::new(0);
+static IGNORE_UNSET_PROT_READ: AtomicBool = AtomicBool::new(false);
+static IGNORE_UNSET_PROT_EXEC: AtomicBool = AtomicBool::new(false);
+static ASHMEM_FOPS_PTR: AtomicPtr<bindings::file_operations> = AtomicPtr::new(null_mut());
+
+fn shrinker_should_stop() -> bool {
+ NUM_PIN_IOCTLS_WAITING.load(Ordering::Relaxed) > 0
+}
+
+module! {
+ type: AshmemModule,
+ name: "ashmem_rust",
+ authors: ["Alice Ryhl"],
+ description: "Anonymous Shared Memory Subsystem",
+ license: "GPL",
+}
+
+struct AshmemModule {
+ _misc: Pin<KBox<MiscDeviceRegistration<Ashmem>>>,
+ _toggle_unpin: Pin<KBox<MiscDeviceRegistration<AshmemToggleMisc<AshmemToggleShrinker>>>>,
+ _toggle_read: Pin<KBox<MiscDeviceRegistration<AshmemToggleMisc<AshmemToggleRead>>>>,
+ _toggle_exec: Pin<KBox<MiscDeviceRegistration<AshmemToggleMisc<AshmemToggleExec>>>>,
+}
+
+impl kernel::Module for AshmemModule {
+ fn init(_module: &'static kernel::ThisModule) -> Result<Self> {
+ // SAFETY: Called once since this is the module initializer.
+ unsafe { shmem::SHMEM_FOPS_ONCE.init() };
+ // SAFETY: Called once since this is the module initializer.
+ unsafe { ASHMEM_MUTEX.init() };
+ // SAFETY: Called once since this is the module initializer.
+ unsafe { ashmem_range::ASHMEM_SHRINKER.init() };
+
+ pr_info!("Using Rust implementation.");
+
+ ashmem_range::set_shrinker_enabled(true)?;
+
+ let ashmem_miscdevice_registration = KBox::pin_init(
+ MiscDeviceRegistration::register(MiscDeviceOptions {
+ name: c_str!("ashmem"),
+ }),
+ GFP_KERNEL,
+ )?;
+ let ashmem_miscdevice_ptr = ashmem_miscdevice_registration.as_raw();
+ // SAFETY: ashmem_miscdevice_registration is pinned and is never destroyed, so reading
+ // and storing the fops pointer this way should be fine.
+ let fops_ptr = unsafe { (*ashmem_miscdevice_ptr).fops };
+ ASHMEM_FOPS_PTR.store(fops_ptr.cast_mut(), Ordering::Relaxed);
+
+ Ok(Self {
+ _misc: ashmem_miscdevice_registration,
+ _toggle_unpin: AshmemToggleMisc::<AshmemToggleShrinker>::new()?,
+ _toggle_read: AshmemToggleMisc::<AshmemToggleRead>::new()?,
+ _toggle_exec: AshmemToggleMisc::<AshmemToggleExec>::new()?,
+ })
+ }
+}
+
+/// Represents an open ashmem file.
+#[pin_data]
+struct Ashmem {
+ #[pin]
+ inner: Mutex<AshmemInner>,
+}
+
+#[pin_data]
+struct AshmemInner {
+ size: usize,
+ prot_mask: usize,
+ /// If set, then this holds the ashmem name without the dev/ashmem/ prefix. No zero terminator.
+ name: Option<KVec<u8>>,
+ file: Option<ShmemFile>,
+ area: Area,
+}
+
+#[vtable]
+impl MiscDevice for Ashmem {
+ type Ptr = Pin<KBox<Self>>;
+
+ fn open(_: &File, _: &MiscDeviceRegistration<Ashmem>) -> Result<Pin<KBox<Self>>> {
+ KBox::try_pin_init(
+ try_pin_init! {
+ Ashmem {
+ inner <- new_mutex!(AshmemInner {
+ size: 0,
+ prot_mask: PROT_MASK,
+ name: None,
+ file: None,
+ area: Area::new(),
+ }),
+ }
+ },
+ GFP_KERNEL,
+ )
+ }
+
+ fn mmap(me: Pin<&Ashmem>, _file: &File, vma: &VmaNew) -> Result<()> {
+ let asma = &mut *me.inner.lock();
+
+ // User needs to SET_SIZE before mapping.
+ if asma.size == 0 || asma.size >= ASHMEM_MAX_SIZE {
+ return Err(EINVAL);
+ }
+
+ // Requested mapping size larger than object size.
+ if vma.end() - vma.start() > page_align(asma.size).ok_or(EINVAL)? {
+ return Err(EINVAL);
+ }
+
+ if asma.prot_mask & PROT_WRITE == 0 {
+ vma.try_clear_maywrite().map_err(|_| EPERM)?;
+ }
+ if asma.prot_mask & PROT_EXEC == 0 {
+ vma.try_clear_mayexec().map_err(|_| EPERM)?;
+ }
+ if asma.prot_mask & PROT_READ == 0 {
+ vma.try_clear_mayread().map_err(|_| EPERM)?;
+ }
+
+ let file = match asma.file.as_ref() {
+ Some(file) => file,
+ None => {
+ let mut name_buffer = [0u8; ASHMEM_FULL_NAME_LEN];
+ let name = asma.full_name(&mut name_buffer);
+ asma.file
+ .insert(ShmemFile::new(name, asma.size, vma.flags())?)
+ }
+ };
+
+ if vma.flags() & vma_flags::SHARED != 0 {
+ // We're really using this just to set vm_ops to `shmem_anon_vm_ops`. Anything else it
+ // does is undone by the call to `set_file` below.
+ shmem::zero_setup(vma)?;
+ } else {
+ shmem::vma_set_anonymous(vma);
+ }
+
+ shmem::set_file(vma, file.file());
+ Ok(())
+ }
+
+ fn llseek(me: Pin<&Ashmem>, file: &LocalFile, offset: loff_t, whence: c_int) -> Result<loff_t> {
+ let asma_file = {
+ let asma = me.inner.lock();
+ if asma.size == 0 {
+ return Err(EINVAL);
+ }
+ match asma.file.as_ref() {
+ Some(asma_file) => asma_file.clone(),
+ None => return Err(EBADF),
+ }
+ };
+
+ let ret = asma_file.vfs_llseek(offset, whence)?;
+
+ // SAFETY: We protect the shmem file with the same mechanism as the ashmem file. We are in
+ // llseek, so our caller ensures that accessing f_pos is okay.
+ unsafe { shmem::file_set_fpos(file, shmem::file_get_fpos(asma_file.file())) };
+
+ Ok(ret)
+ }
+
+ fn read_iter(mut kiocb: Kiocb<'_, Self::Ptr>, iov: &mut IovIterDest<'_>) -> Result<usize> {
+ let me = kiocb.file();
+ let asma_file = {
+ let asma = me.inner.lock();
+ if asma.size == 0 {
+ // If size is not set, or set to 0, always return EOF.
+ return Ok(0);
+ }
+ match asma.file.as_ref() {
+ Some(asma_file) => asma_file.clone(),
+ None => return Err(EBADF),
+ }
+ };
+
+ let ret = asma_file.vfs_iter_read(iov, kiocb.ki_pos_mut())?;
+
+ // SAFETY: We protect the shmem file with the same mechanism as the ashmem file. We are in
+ // read_iter, so our caller ensures that accessing f_pos is okay.
+ unsafe { shmem::file_set_fpos(asma_file.file(), kiocb.ki_pos()) };
+
+ Ok(ret as usize)
+ }
+
+ fn ioctl(me: Pin<&Ashmem>, _file: &File, cmd: u32, arg: usize) -> Result<isize> {
+ let size = _IOC_SIZE(cmd);
+ match cmd {
+ bindings::ASHMEM_SET_NAME => {
+ me.set_name(UserSlice::new(UserPtr::from_addr(arg), size).reader())
+ }
+ bindings::ASHMEM_GET_NAME => {
+ me.get_name(UserSlice::new(UserPtr::from_addr(arg), size).writer())
+ }
+ bindings::ASHMEM_SET_SIZE => me.set_size(arg),
+ bindings::ASHMEM_GET_SIZE => me.get_size(),
+ bindings::ASHMEM_SET_PROT_MASK => me.set_prot_mask(arg),
+ bindings::ASHMEM_GET_PROT_MASK => me.get_prot_mask(),
+ bindings::ASHMEM_GET_FILE_ID => {
+ me.get_file_id(UserSlice::new(UserPtr::from_addr(arg), size).writer())
+ }
+ ASHMEM_PIN | ASHMEM_UNPIN | ASHMEM_GET_PIN_STATUS => {
+ me.pin_unpin(cmd, UserSlice::new(UserPtr::from_addr(arg), size).reader())
+ }
+ bindings::ASHMEM_PURGE_ALL_CACHES => me.purge_all_caches(),
+ _ => Err(ENOTTY),
+ }
+ }
+
+ #[cfg(CONFIG_COMPAT)]
+ fn compat_ioctl(me: Pin<&Ashmem>, file: &File, compat_cmd: u32, arg: usize) -> Result<isize> {
+ let cmd = match compat_cmd {
+ bindings::COMPAT_ASHMEM_SET_SIZE => bindings::ASHMEM_SET_SIZE,
+ bindings::COMPAT_ASHMEM_SET_PROT_MASK => bindings::ASHMEM_SET_PROT_MASK,
+ _ => compat_cmd,
+ };
+ Self::ioctl(me, file, cmd, arg)
+ }
+
+ fn show_fdinfo(me: Pin<&Ashmem>, m: &SeqFile, _file: &File) {
+ let asma = me.inner.lock();
+
+ if let Some(file) = asma.file.as_ref() {
+ seq_print!(m, "inode:\t{}\n", file.inode_ino());
+ }
+ if let Some(name) = asma.name.as_ref() {
+ let name = core::str::from_utf8(name).unwrap_or("<invalid utf-8>");
+ seq_print!(m, "name:\t{}\n", name);
+ }
+ seq_print!(m, "size\t{}\n", asma.size);
+ }
+}
+
+impl Ashmem {
+ fn set_name(&self, reader: UserSliceReader) -> Result<isize> {
+ let mut buf = [0u8; ASHMEM_NAME_LEN];
+ let name = reader.strcpy_into_buf(&mut buf)?.to_bytes();
+
+ let mut v = KVec::with_capacity(name.len(), GFP_KERNEL)?;
+ v.extend_from_slice(name, GFP_KERNEL)?;
+
+ let mut asma = self.inner.lock();
+ if asma.file.is_some() {
+ return Err(EINVAL);
+ }
+ asma.name = Some(v);
+ Ok(0)
+ }
+
+ fn get_name(&self, mut writer: UserSliceWriter) -> Result<isize> {
+ let mut local_name = [0u8; ASHMEM_NAME_LEN];
+ let asma = self.inner.lock();
+ let name = asma.name.as_deref().unwrap_or(b"dev/ashmem");
+ let len = name.len();
+ let len_with_nul = len + 1;
+ if local_name.len() < len_with_nul {
+ // This shouldn't happen in practice since `set_name` will refuse to store a string
+ // that is too long.
+ return Err(EINVAL);
+ }
+ local_name[..len].copy_from_slice(name);
+ local_name[len] = 0;
+ drop(asma);
+
+ writer.write_slice(&local_name[..len_with_nul])?;
+ Ok(0)
+ }
+
+ fn set_size(&self, size: usize) -> Result<isize> {
+ let mut asma = self.inner.lock();
+ if asma.file.is_some() {
+ return Err(EINVAL);
+ }
+ asma.size = size;
+ Ok(0)
+ }
+
+ fn get_size(&self) -> Result<isize> {
+ Ok(self.inner.lock().size as isize)
+ }
+
+ fn set_prot_mask(&self, mut prot: usize) -> Result<isize> {
+ let mut asma = self.inner.lock();
+
+ if (prot & PROT_READ != 0) && read_implies_exec(current!()) {
+ prot |= PROT_EXEC;
+ }
+
+ if IGNORE_UNSET_PROT_READ.load(Ordering::Relaxed) {
+ // Add back PROT_READ if asma.prot_mask has it.
+ prot |= asma.prot_mask & PROT_READ;
+ }
+
+ if IGNORE_UNSET_PROT_EXEC.load(Ordering::Relaxed) {
+ // Add back PROT_EXEC if asma.prot_mask has it.
+ prot |= asma.prot_mask & PROT_EXEC;
+ }
+
+ // The user can only remove, not add, protection bits.
+ if (asma.prot_mask & prot) != prot {
+ return Err(EINVAL);
+ }
+
+ asma.prot_mask = prot;
+ Ok(0)
+ }
+
+ fn get_prot_mask(&self) -> Result<isize> {
+ Ok(self.inner.lock().prot_mask as isize)
+ }
+
+ fn get_file_id(&self, mut writer: UserSliceWriter) -> Result<isize> {
+ let ino = {
+ let asma = self.inner.lock();
+ let Some(file) = asma.file.as_ref() else {
+ return Err(EINVAL);
+ };
+ file.inode_ino()
+ };
+ writer.write(&ino)?;
+ Ok(0)
+ }
+
+ fn pin_unpin(&self, cmd: u32, mut reader: UserSliceReader) -> Result<isize> {
+ let (offset, cmd_len) = {
+ #[allow(dead_code)] // spurious warning because it is never explicitly constructed
+ #[repr(transparent)]
+ struct AshmemPin(bindings::ashmem_pin);
+ // SAFETY: All bit-patterns are valid for `ashmem_pin`.
+ unsafe impl kernel::transmute::FromBytes for AshmemPin {}
+ let AshmemPin(pin) = reader.read()?;
+ (pin.offset as usize, pin.len as usize)
+ };
+
+ // If `pin`/`unpin` needs a new range, they will take it from this `Option`. Otherwise,
+ // they will leave it here, and it gets dropped after the mutexes are released.
+ let new_range = if cmd == ASHMEM_GET_PIN_STATUS {
+ None
+ } else {
+ Some(UniqueArc::new_uninit(GFP_KERNEL)?)
+ };
+
+ NUM_PIN_IOCTLS_WAITING.fetch_add(1, Ordering::Relaxed);
+ let mut guard = AshmemGuard(ASHMEM_MUTEX.lock());
+ NUM_PIN_IOCTLS_WAITING.fetch_sub(1, Ordering::Relaxed);
+
+ // C ashmem waits for in-flight shrinkers here using a separate mechanism, but we don't
+ // release the lock when calling `punch_hole` in the shrinker, so we don't need to do that.
+
+ let asma = &mut *self.inner.lock();
+ let mut new_range = match asma.file.as_ref() {
+ Some(file) => new_range.map(|alloc| NewRange { file, alloc }),
+ None => return Err(EINVAL),
+ };
+
+ let max_size = page_align(asma.size).ok_or(EINVAL)?;
+ let remaining = max_size.checked_sub(offset).ok_or(EINVAL)?;
+
+ // Per custom, you can pass zero for len to mean "everything onward".
+ let len = if cmd_len == 0 { remaining } else { cmd_len };
+
+ if (offset | len) & !PAGE_MASK != 0 {
+ return Err(EINVAL);
+ }
+ let len_plus_offset = offset.checked_add(len).ok_or(EINVAL)?;
+ if max_size < len_plus_offset {
+ return Err(EINVAL);
+ }
+
+ let pgstart = offset / PAGE_SIZE;
+ let pgend = pgstart + (len / PAGE_SIZE) - 1;
+
+ match cmd {
+ ASHMEM_PIN => {
+ if asma.area.pin(pgstart, pgend, &mut new_range, &mut guard) {
+ Ok(bindings::ASHMEM_WAS_PURGED as isize)
+ } else {
+ Ok(bindings::ASHMEM_NOT_PURGED as isize)
+ }
+ }
+ ASHMEM_UNPIN => {
+ asma.area.unpin(pgstart, pgend, &mut new_range, &mut guard);
+ Ok(0)
+ }
+ ASHMEM_GET_PIN_STATUS => {
+ if asma
+ .area
+ .range_has_unpinned_page(pgstart, pgend, &mut guard)
+ {
+ Ok(bindings::ASHMEM_IS_UNPINNED as isize)
+ } else {
+ Ok(bindings::ASHMEM_IS_PINNED as isize)
+ }
+ }
+ _ => unreachable!(),
+ }
+ }
+
+ fn purge_all_caches(&self) -> Result<isize> {
+ if !has_cap_sys_admin() {
+ return Err(EPERM);
+ }
+ let mut guard = AshmemGuard(ASHMEM_MUTEX.lock());
+ let total_num_pages = LRU_COUNT.load(Ordering::Relaxed);
+ let _num_freed = guard.free_lru(usize::MAX);
+ // ASHMEM_PURGE_ALL_CACHES returns the total number of pages even if we stopped early.
+ Ok(isize::try_from(total_num_pages).unwrap_or(isize::MAX))
+ }
+}
+
+impl AshmemInner {
+ /// Get the full name.
+ ///
+ /// If the name is `Some(name)`, then this returns `dev/ashmem/name\0`.
+ ///
+ /// If the name is `None`, then this returns `dev/ashmem\0`.
+ fn full_name<'name>(&self, name: &'name mut [u8; ASHMEM_FULL_NAME_LEN]) -> &'name CStr {
+ name[..ASHMEM_NAME_PREFIX_LEN].copy_from_slice(&ASHMEM_NAME_PREFIX);
+ if let Some(set_name) = self.name.as_deref() {
+ name[ASHMEM_NAME_PREFIX_LEN..][..set_name.len()].copy_from_slice(set_name);
+ } else {
+ // Remove last slash if no name set.
+ name[ASHMEM_NAME_PREFIX_LEN - 1] = 0;
+ }
+ name[ASHMEM_FULL_NAME_LEN - 1] = 0;
+
+ // This unwrap only fails if there's no nul-byte, but we just added one at the end above.
+ let len_with_nul = name
+ .iter()
+ .position(|&c| c == 0)
+ .map(|len| len + 1)
+ .unwrap();
+
+ // This unwrap fails if the last byte is not a nul-byte, or if there are any nul-bytes
+ // before the last byte. Neither of those are possible here since `len_with_nul` is the
+ // index of the first nul-byte in `name`.
+ CStr::from_bytes_with_nul(&name[..len_with_nul]).unwrap()
+ }
+}
+
+#[no_mangle]
+unsafe extern "C" fn ashmem_memfd_ioctl(file: *mut bindings::file, cmd: u32, arg: usize) -> isize {
+ #[cfg(CONFIG_COMPAT)]
+ let cmd = match cmd {
+ bindings::COMPAT_ASHMEM_SET_SIZE => bindings::ASHMEM_SET_SIZE,
+ bindings::COMPAT_ASHMEM_SET_PROT_MASK => bindings::ASHMEM_SET_PROT_MASK,
+ cmd => cmd,
+ };
+
+ // SAFETY:
+ // * The file is valid for the duration of this call.
+ // * There is no active fdget_pos region on the file on this thread.
+ let file = unsafe { File::from_raw_file(file) };
+
+ match ashmem_memfd_ioctl_inner(file, cmd, arg) {
+ Ok(ret) => ret,
+ Err(err) => err.to_errno() as isize,
+ }
+}
+
+fn ashmem_memfd_ioctl_inner(file: &File, cmd: u32, arg: usize) -> Result<isize> {
+ use kernel::bindings::{F_ADD_SEALS, F_GET_SEALS, F_SEAL_FUTURE_WRITE, F_SEAL_WRITE};
+ const WRITE_SEALS_MASK: usize = (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE) as usize;
+
+ /// # Safety
+ /// The file must be a memfd file.
+ unsafe fn get_seals(file: &File) -> Result<usize> {
+ // SAFETY: This is a memfd file.
+ let seals: isize = unsafe { bindings::memfd_fcntl(file.as_ptr(), F_GET_SEALS, 0) };
+ if seals < 0 {
+ return Err(Error::from_errno(seals as i32));
+ }
+ Ok(seals as usize)
+ }
+
+ let size = _IOC_SIZE(cmd);
+ match cmd {
+ bindings::ASHMEM_GET_NAME => {
+ let file_ptr = file.as_ptr();
+ // SAFETY: It's safe to access a file's dentry.
+ let dentry = unsafe { (*file_ptr).__bindgen_anon_1.f_path.dentry };
+ // SAFETY: memfd stores the supplied name at this location. A default value is stored
+ // when no name is supplied, so this is always a valid string.
+ let full_name = unsafe {
+ core::slice::from_raw_parts(
+ (*dentry).__bindgen_anon_1.d_name.name,
+ (*dentry)
+ .__bindgen_anon_1
+ .d_name
+ .__bindgen_anon_1
+ .__bindgen_anon_1
+ .len as usize,
+ )
+ };
+
+ let name = full_name.strip_prefix(b"memfd:").unwrap_or(full_name);
+ let max = usize::min(name.len(), ASHMEM_NAME_LEN);
+
+ let mut local_name = [0u8; ASHMEM_NAME_LEN];
+ local_name[..max].copy_from_slice(&name[..max]);
+ local_name[ASHMEM_NAME_LEN - 1] = 0;
+
+ let mut writer = UserSlice::new(UserPtr::from_addr(arg), size).writer();
+ writer.write_slice(&local_name)?;
+ Ok(0)
+ }
+ bindings::ASHMEM_GET_SIZE => {
+ let file_ptr = file.as_ptr();
+ // SAFETY: It's safe to access a file's inode.
+ let inode = unsafe { (*file_ptr).f_inode };
+ // SAFETY: It's safe to read the size of an inode.
+ let size = unsafe { bindings::i_size_read(inode) };
+ Ok(size as isize)
+ }
+ bindings::ASHMEM_SET_PROT_MASK => {
+ // SAFETY: This is a memfd file.
+ let seals = unsafe { get_seals(file) }?;
+ let mut prot = arg;
+
+ // The memfd compat layer does not support unsetting these.
+ prot |= PROT_READ | PROT_EXEC;
+
+ let is_writable = seals & WRITE_SEALS_MASK == 0;
+ let should_be_writable = prot & PROT_WRITE != 0;
+
+ if !is_writable && should_be_writable {
+ // Can't add PROT bits.
+ return Err(EINVAL);
+ }
+
+ if is_writable && !should_be_writable {
+ // SAFETY: This is a memfd file.
+ let ret = unsafe {
+ bindings::memfd_fcntl(file.as_ptr(), F_ADD_SEALS, F_SEAL_FUTURE_WRITE)
+ };
+ if ret < 0 {
+ return Err(Error::from_errno(ret as i32));
+ }
+ }
+ Ok(0)
+ }
+ bindings::ASHMEM_GET_PROT_MASK => {
+ // SAFETY: This is a memfd file.
+ let seals = unsafe { get_seals(file) }?;
+
+ let mut prot = PROT_READ | PROT_EXEC;
+ if seals & WRITE_SEALS_MASK == 0 {
+ prot |= PROT_WRITE;
+ }
+ Ok(prot as isize)
+ }
+ bindings::ASHMEM_GET_FILE_ID => {
+ // SAFETY: Accessing the ino is always okay.
+ let ino = unsafe { (*(*file.as_ptr()).f_inode).i_ino as usize };
+
+ let mut writer = UserSlice::new(UserPtr::from_addr(arg), size).writer();
+ writer.write(&ino)?;
+ Ok(0)
+ }
+ // Just ignore unpin requests.
+ ASHMEM_PIN => Ok(bindings::ASHMEM_NOT_PURGED as isize),
+ ASHMEM_UNPIN => Ok(0),
+ ASHMEM_GET_PIN_STATUS => Ok(bindings::ASHMEM_IS_PINNED as isize),
+ bindings::ASHMEM_PURGE_ALL_CACHES => {
+ if !has_cap_sys_admin() {
+ return Err(EPERM);
+ }
+ Ok(0)
+ }
+ // We do not need to implement SET_NAME or SET_SIZE. The ioctls in this function are only
+ // called when you:
+ //
+ // 1. Think you have an ashmem fd.
+ // 2. But actually have a memfd fd.
+ //
+ // This can only happen if you created the fd through the libcutils library, and that
+ // library sets the name and size in the fd constructor where it knows whether ashmem or
+ // memfd is used, so we should never end up here.
+ bindings::ASHMEM_SET_NAME => Err(EINVAL),
+ bindings::ASHMEM_SET_SIZE => Err(EINVAL),
+ _ => Err(EINVAL),
+ }
+}
+
+/// # Safety
+///
+/// The caller must ensure that `file` is valid for the duration of this function.
+#[no_mangle]
+unsafe extern "C" fn is_ashmem_file(file: *mut bindings::file) -> bool {
+ let ashmem_fops_ptr = ASHMEM_FOPS_PTR.load(Ordering::Relaxed);
+ if file.is_null() || ashmem_fops_ptr.is_null() {
+ return false;
+ }
+
+ // SAFETY: Accessing the f_op field of a non-NULL file structure is always okay.
+ let fops_ptr = unsafe { (*file).f_op };
+ fops_ptr == ashmem_fops_ptr
+}
+
+/// # Safety
+///
+/// The caller must ensure that `file` references a valid file for the duration of 'a.
+unsafe fn get_ashmem_area<'a>(file: *mut bindings::file) -> Result<&'a Ashmem, Error> {
+ // SAFETY: Caller ensures that file is valid, so this should be safe.
+ if unsafe { is_ashmem_file(file) } {
+ return Err(EINVAL);
+ }
+
+ // SAFETY: Given that this is an ashmem file, it should be safe to access the private_data
+ // field containing the Ashmem struct.
+ let private = unsafe { (*file).private_data };
+ // SAFETY: Since this is an ashmem file, we know the type of the struct and can reference it
+ // safely.
+ let ashmem = unsafe { <<Ashmem as MiscDevice>::Ptr as ForeignOwnable>::borrow(private) };
+ Ok(ashmem.get_ref())
+}
+
+/// # Safety
+///
+/// The caller must ensure the following prior to invoking this function:
+/// 1. `name` is valid for writing and at least of size ASHMEM_FULL_NAME_LEN.
+/// 2. `file` is valid for the duration of this function.
+#[no_mangle]
+unsafe extern "C" fn ashmem_area_name(
+ file: *mut bindings::file,
+ name: *mut kernel::ffi::c_char,
+) -> c_int {
+ if name.is_null() {
+ return EINVAL.to_errno() as c_int;
+ }
+
+ // SAFETY: file is valid for the duration of this function.
+ match unsafe { get_ashmem_area(file) } {
+ Ok(ashmem) => {
+ let name_buffer = name.cast::<[u8; ASHMEM_FULL_NAME_LEN]>();
+ // SAFETY: Caller guarantees that the pointer is valid for writing.
+ ashmem.inner.lock().full_name(unsafe { &mut *name_buffer });
+ 0
+ }
+ Err(err) => err.to_errno() as c_int,
+ }
+}
+
+/// # Safety
+///
+/// The caller must ensure that `file` is valid for the duration of this function.
+#[no_mangle]
+unsafe extern "C" fn ashmem_area_size(file: *mut bindings::file) -> isize {
+ // SAFETY: file is valid for the duration of this function.
+ let ashmem = match unsafe { get_ashmem_area(file) } {
+ Ok(area) => area,
+ Err(_err) => return 0,
+ };
+
+ match ashmem.get_size() {
+ Ok(size) => size,
+ Err(_err) => 0,
+ }
+}
+
+/// # Safety
+///
+/// The caller must ensure that `file` is valid for the duration of this function.
+///
+/// If this function returns a non-NULL pointer to a file structure, the refcount for that
+/// file will be incremented by 1. It is the caller's responsibility to decrement the refcount
+/// when the file is no longer needed.
+#[no_mangle]
+unsafe extern "C" fn ashmem_area_vmfile(file: *mut bindings::file) -> *mut bindings::file {
+ // SAFETY: file is valid for the duration of this function.
+ let ashmem = match unsafe { get_ashmem_area(file) } {
+ Ok(area) => area,
+ Err(_err) => return null_mut(),
+ };
+
+ let asma = &mut *ashmem.inner.lock();
+ match asma.file.as_ref() {
+ Some(shmem_file) => {
+ let shmem_file_ptr = shmem_file.file().as_ptr();
+ // SAFETY: file is valid for the duration of the function, which means shmem file is
+ // also valid at this point.
+ unsafe { bindings::get_file(shmem_file_ptr) };
+ shmem_file_ptr
+ }
+ None => null_mut(),
+ }
+}
diff --git a/drivers/staging/android/ashmem_exports.c b/drivers/staging/android/ashmem_exports.c
new file mode 100644
index 0000000..3731104
--- /dev/null
+++ b/drivers/staging/android/ashmem_exports.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Symbols exported from the Ashmem Rust driver for loadable kernel modules to use.
+ *
+ * Copyright (c) 2025, Google LLC.
+ */
+
+#include <linux/export.h>
+
+#include "ashmem.h"
+
+/*
+ * List symbols that need to be exported to loadable kernel modules below. This is needed because
+ * the logic that exports symbols from Rust crates only considers the crates under the rust/
+ * directory at the root of the kernel repo. It currently does not support exporting symbols from
+ * other crates.
+ */
+EXPORT_SYMBOL_GPL(is_ashmem_file);
+EXPORT_SYMBOL_GPL(ashmem_area_name);
+EXPORT_SYMBOL_GPL(ashmem_area_size);
+EXPORT_SYMBOL_GPL(ashmem_area_vmfile);
diff --git a/drivers/staging/android/ashmem_range.rs b/drivers/staging/android/ashmem_range.rs
new file mode 100644
index 0000000..ed198d0
--- /dev/null
+++ b/drivers/staging/android/ashmem_range.rs
@@ -0,0 +1,543 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Keeps track of unpinned ranges in an ashmem file.
+
+use crate::{
+ ashmem_shrinker::{
+ self, CountObjects, ScanObjects, ShrinkControl, Shrinker, ShrinkerBuilder,
+ ShrinkerRegistration,
+ },
+ shmem::ShmemFile,
+ AshmemModule,
+};
+use core::{
+ mem::MaybeUninit,
+ pin::Pin,
+ sync::atomic::{AtomicUsize, Ordering},
+};
+use kernel::{
+ c_str,
+ list::{List, ListArc, ListLinks},
+ page::PAGE_SIZE,
+ prelude::*,
+ sync::{GlobalGuard, GlobalLockedBy, UniqueArc},
+};
+
+// Only updated with ASHMEM_MUTEX held, but the shrinker will read it without the mutex.
+pub(crate) static LRU_COUNT: AtomicUsize = AtomicUsize::new(0);
+
+pub(crate) struct AshmemLru {
+ lru_list: List<Range, 0>,
+}
+
+/// Represents ownership of the `ASHMEM_MUTEX` lock.
+///
+/// Using a wrapper struct around `GlobalGuard` so we can add our own methods to the guard.
+pub(crate) struct AshmemGuard(pub(crate) GlobalGuard<ASHMEM_MUTEX>);
+
+// These make `AshmemGuard` inherit the behavior of `GlobalGuard`.
+impl core::ops::Deref for AshmemGuard {
+ type Target = GlobalGuard<ASHMEM_MUTEX>;
+ fn deref(&self) -> &GlobalGuard<ASHMEM_MUTEX> {
+ &self.0
+ }
+}
+impl core::ops::DerefMut for AshmemGuard {
+ fn deref_mut(&mut self) -> &mut GlobalGuard<ASHMEM_MUTEX> {
+ &mut self.0
+ }
+}
+
+impl AshmemGuard {
+ fn shrink_range(&mut self, range: &Range, pgstart: usize, pgend: usize) {
+ let old_size = range.size(self);
+ {
+ let inner = range.inner.as_mut(self);
+ inner.pgstart = pgstart;
+ inner.pgend = pgend;
+ }
+ let new_size = range.size(self);
+
+ // Only change the counter if the range is on the lru list.
+ if !range.purged(self) {
+ let mut lru_count = LRU_COUNT.load(Ordering::Relaxed);
+ lru_count -= old_size;
+ lru_count += new_size;
+ LRU_COUNT.store(lru_count, Ordering::Relaxed);
+ }
+ }
+
+ fn insert_lru(&mut self, range: ListArc<Range>) {
+ // Don't insert the range if it's already purged.
+ if !range.purged(self) {
+ let mut lru_count = LRU_COUNT.load(Ordering::Relaxed);
+ lru_count += range.size(self);
+ LRU_COUNT.store(lru_count, Ordering::Relaxed);
+ self.lru_list.push_front(range);
+ }
+ }
+
+ fn remove_lru(&mut self, range: &Range) -> Option<ListArc<Range>> {
+ // SAFETY: The only list with ID 0 is this list, so the range can't be in some other list
+ // with the same ID.
+ let ret = unsafe { self.lru_list.remove(range) };
+
+ // Only decrement lru_count if the range was actually in the list.
+ if ret.is_some() {
+ let mut lru_count = LRU_COUNT.load(Ordering::Relaxed);
+ lru_count -= range.size(self);
+ LRU_COUNT.store(lru_count, Ordering::Relaxed);
+ }
+
+ ret
+ }
+}
+
+kernel::sync::global_lock! {
+ // SAFETY: We call `init` as the very first thing in the initialization of this module, so
+ // there are no calls to `lock` before `init` is called.
+ pub(crate) unsafe(uninit) static ASHMEM_MUTEX: Mutex<AshmemLru> = AshmemLru {
+ lru_list: List::new(),
+ };
+}
+
+#[pin_data]
+pub(crate) struct Range {
+ /// prev/next pointers for `Area::unpinned_list`.
+ ///
+ /// Note that "unpinned" here refers to the ASHMEM_PIN/UNPIN ioctls, which is unrelated to
+ /// Rust's concept of pinning.
+ #[pin]
+ lru: ListLinks<0>,
+ #[pin]
+ unpinned: ListLinks<1>,
+ file: ShmemFile,
+ pub(crate) inner: GlobalLockedBy<RangeInner, ASHMEM_MUTEX>,
+}
+
+pub(crate) struct RangeInner {
+ pub(crate) pgstart: usize,
+ pub(crate) pgend: usize,
+ pub(crate) purged: bool,
+}
+
+impl Range {
+ pub(crate) fn set_purged(&self, guard: &mut AshmemGuard) {
+ self.inner.as_mut(guard).purged = true;
+ }
+
+ pub(crate) fn purged(&self, guard: &AshmemGuard) -> bool {
+ self.inner.as_ref(guard).purged
+ }
+
+ pub(crate) fn pgstart(&self, guard: &AshmemGuard) -> usize {
+ self.inner.as_ref(guard).pgstart
+ }
+
+ pub(crate) fn pgend(&self, guard: &AshmemGuard) -> usize {
+ self.inner.as_ref(guard).pgend
+ }
+
+ pub(crate) fn size(&self, guard: &AshmemGuard) -> usize {
+ let inner = self.inner.as_ref(guard);
+ inner.pgend - inner.pgstart + 1
+ }
+
+ pub(crate) fn is_before_page(&self, page: usize, guard: &AshmemGuard) -> bool {
+ let inner = self.inner.as_ref(guard);
+ inner.pgend < page
+ }
+
+ pub(crate) fn contains_page(&self, page: usize, guard: &AshmemGuard) -> bool {
+ let inner = self.inner.as_ref(guard);
+ inner.pgstart <= page && inner.pgend >= page
+ }
+
+ pub(crate) fn is_superset_of_range(
+ &self,
+ pgstart: usize,
+ pgend: usize,
+ guard: &AshmemGuard,
+ ) -> bool {
+ let inner = self.inner.as_ref(guard);
+ inner.pgstart <= pgstart && inner.pgend >= pgend
+ }
+
+ pub(crate) fn is_subset_of_range(
+ &self,
+ pgstart: usize,
+ pgend: usize,
+ guard: &AshmemGuard,
+ ) -> bool {
+ let inner = self.inner.as_ref(guard);
+ inner.pgstart >= pgstart && inner.pgend <= pgend
+ }
+
+ pub(crate) fn overlaps_with_range(
+ &self,
+ pgstart: usize,
+ pgend: usize,
+ guard: &AshmemGuard,
+ ) -> bool {
+ self.contains_page(pgstart, guard)
+ || self.contains_page(pgend, guard)
+ || self.is_subset_of_range(pgstart, pgend, guard)
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Range { untracked; }
+ impl ListArcSafe<1> for Range { untracked; }
+}
+
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Range { using ListLinks { self.lru }; }
+ impl ListItem<1> for Range { using ListLinks { self.unpinned }; }
+}
+
+pub(crate) struct Area {
+ /// List of page ranges that have been unpinned by `ASHMEM_UNPIN`.
+ ///
+ /// The ranges are sorted in descending order.
+ unpinned_list: List<Range, 1>,
+}
+
+impl Drop for Area {
+ fn drop(&mut self) {
+ let mut guard = AshmemGuard(super::ASHMEM_MUTEX.lock());
+ for range in &self.unpinned_list {
+ guard.remove_lru(&range);
+ }
+ }
+}
+
+impl Area {
+ pub(crate) fn new() -> Self {
+ Self {
+ unpinned_list: List::new(),
+ }
+ }
+
+ /// Mark the given range of pages as unpinned so they can be reclaimed.
+ ///
+ /// The `new_range` argument must be `Some` when calling this method. If this call needs an
+ /// allocation, it will take it from the option. Otherwise, the allocation is left in the
+ /// option so that the caller can free it after releasing the mutex.
+ pub(crate) fn unpin(
+ &mut self,
+ mut pgstart: usize,
+ mut pgend: usize,
+ new_range: &mut Option<NewRange<'_>>,
+ guard: &mut AshmemGuard,
+ ) {
+ let mut purged = false;
+ let mut cursor = self.unpinned_list.cursor_front();
+ while let Some(next) = cursor.peek_next() {
+ // Short-circuit: this is our insertion point.
+ if next.is_before_page(pgstart, guard) {
+ break;
+ }
+
+ // If the entire range is already unpinned, just return.
+ if next.is_superset_of_range(pgstart, pgend, guard) {
+ return;
+ }
+
+ if next.overlaps_with_range(pgstart, pgend, guard) {
+ pgstart = usize::min(pgstart, next.pgstart(guard));
+ pgend = usize::max(pgend, next.pgend(guard));
+ purged |= next.purged(guard);
+ guard.remove_lru(&next.remove());
+
+ // restart loop
+ cursor = self.unpinned_list.cursor_front();
+ continue;
+ }
+
+ cursor.move_next();
+ }
+
+ let new_range = new_range.take().unwrap().init(RangeInner {
+ pgstart,
+ pgend,
+ purged,
+ });
+
+ let (range_lru, new_range) = ListArc::<Range, 0>::pair_from_pin_unique::<1>(new_range);
+ guard.insert_lru(range_lru);
+ cursor.insert(new_range);
+ }
+
+ /// Mark the given range of pages as pinned so they can't be reclaimed.
+ ///
+ /// Returns whether any of the pages have been reclaimed.
+ ///
+ /// The `new_range` argument must be `Some` when calling this method. If this call needs an
+ /// allocation, it will take it from the option. Otherwise, the allocation is left in the
+ /// option so that the caller can free it after releasing the mutex.
+ pub(crate) fn pin(
+ &mut self,
+ pgstart: usize,
+ pgend: usize,
+ new_range: &mut Option<NewRange<'_>>,
+ guard: &mut AshmemGuard,
+ ) -> bool {
+ let mut purged = false;
+ let mut cursor = self.unpinned_list.cursor_front();
+ while let Some(next) = cursor.peek_next() {
+ // moved past last applicable page; we can short circuit
+ if next.is_before_page(pgstart, guard) {
+ break;
+ }
+
+ // The user can ask us to pin pages that span multiple ranges,
+ // or to pin pages that aren't even unpinned, so this is messy.
+ //
+ // Four cases:
+ // 1. The requested range subsumes an existing range, so we
+ // just remove the entire matching range.
+ // 2. The requested range overlaps the start of an existing
+ // range, so we just update that range.
+ // 3. The requested range overlaps the end of an existing
+ // range, so we just update that range.
+ // 4. The requested range punches a hole in an existing range,
+ // so we have to update one side of the range and then
+ // create a new range for the other side.
+ if next.overlaps_with_range(pgstart, pgend, guard) {
+ purged |= next.purged(guard);
+
+ let curr_pgstart = next.pgstart(guard);
+ let curr_pgend = next.pgend(guard);
+
+ if next.is_subset_of_range(pgstart, pgend, guard) {
+ // Case #1: Easy. Just nuke the whole thing.
+ let removed = next.remove();
+ guard.remove_lru(&removed);
+ continue;
+ } else if curr_pgstart >= pgstart {
+ // Case #2: We overlap from the start, so adjust it.
+ guard.shrink_range(&next, pgend + 1, curr_pgend);
+ } else if curr_pgend <= pgend {
+ // Case #3: We overlap from the rear, so adjust it.
+ guard.shrink_range(&next, curr_pgstart, pgstart - 1);
+ } else {
+ // Case #4: We eat a chunk out of the middle. A bit
+ // more complicated, we allocate a new range for the
+ // second half and adjust the first chunk's endpoint.
+ guard.shrink_range(&next, curr_pgstart, pgstart - 1);
+ let purged = next.purged(guard);
+
+ let new_range = new_range.take().unwrap().init(RangeInner {
+ pgstart: pgend + 1,
+ pgend: curr_pgend,
+ purged,
+ });
+
+ let (range_lru, new_range) =
+ ListArc::<Range, 0>::pair_from_pin_unique::<1>(new_range);
+ guard.insert_lru(range_lru);
+ cursor.insert(new_range);
+ break;
+ }
+ }
+
+ cursor.move_next();
+ }
+ purged
+ }
+
+ pub(crate) fn range_has_unpinned_page(
+ &self,
+ pgstart: usize,
+ pgend: usize,
+ guard: &mut AshmemGuard,
+ ) -> bool {
+ for range in &self.unpinned_list {
+ if range.overlaps_with_range(pgstart, pgend, guard) {
+ return true;
+ }
+ }
+ false
+ }
+}
+
+pub(crate) struct NewRange<'a> {
+ pub(crate) file: &'a ShmemFile,
+ pub(crate) alloc: UniqueArc<MaybeUninit<Range>>,
+}
+
+impl<'a> NewRange<'a> {
+ fn init(self, inner: RangeInner) -> Pin<UniqueArc<Range>> {
+ let new_range = self.alloc.pin_init_with(pin_init!(Range {
+ lru <- ListLinks::new(),
+ unpinned <- ListLinks::new(),
+ file: self.file.clone(),
+ inner: GlobalLockedBy::new(inner),
+ }));
+
+ match new_range {
+ Ok(new_range) => new_range,
+ Err(infallible) => match infallible {},
+ }
+ }
+}
+
+impl AshmemGuard {
+ pub(crate) fn free_lru(&mut self, stop_after: usize) -> usize {
+ let mut freed = 0;
+ while let Some(range) = self.lru_list.pop_back() {
+ let start = range.pgstart(self) * PAGE_SIZE;
+ let end = (range.pgend(self) + 1) * PAGE_SIZE;
+ range.set_purged(self);
+ self.remove_lru(&range);
+ freed += range.size(self);
+
+ // C ashmem releases the mutex and uses a different mechanism to ensure mutual
+ // exclusion with `pin_unpin` operations, but we only hold `ASHMEM_MUTEX` here and in
+ // `pin_unpin`, so we don't need to release the mutex. A different mutex is used for
+ // all of the other ashmem operations.
+ range.file.punch_hole(start, end - start);
+
+ if freed >= stop_after {
+ break;
+ }
+
+ if super::shrinker_should_stop() {
+ break;
+ }
+ }
+ freed
+ }
+}
+
+impl Shrinker for super::AshmemModule {
+ // Our shrinker data is in a global, so we don't need to set the private data.
+ type Ptr = ();
+
+ fn count_objects(_: (), _sc: ShrinkControl<'_>) -> CountObjects {
+ let count = LRU_COUNT.load(super::Ordering::Relaxed);
+ if count == 0 {
+ CountObjects::EMPTY
+ } else {
+ CountObjects::new(count)
+ }
+ }
+
+ fn scan_objects(_: (), sc: ShrinkControl<'_>) -> ScanObjects {
+ if !sc.reclaim_fs_allowed() {
+ return ScanObjects::STOP;
+ }
+
+ let Some(guard) = super::ASHMEM_MUTEX.try_lock() else {
+ return ScanObjects::STOP;
+ };
+ let mut guard = AshmemGuard(guard);
+
+ let num_freed = guard.free_lru(sc.nr_to_scan());
+ ScanObjects::from_count(num_freed)
+ }
+}
+
+/// Make line below shorter.
+type AshmemShrinkerType = Option<ShrinkerRegistration<AshmemModule>>;
+
+kernel::sync::global_lock! {
+ // SAFETY: We call `init` as the very first thing in the initialization of this module, so
+ // there are no calls to `lock` before `init` is called.
+ pub(crate) unsafe(uninit) static ASHMEM_SHRINKER: Mutex<AshmemShrinkerType> = None;
+}
+
+pub(crate) fn set_shrinker_enabled(enabled: bool) -> Result<()> {
+ let mut shrinker = ASHMEM_SHRINKER.lock();
+ if enabled {
+ if shrinker.is_none() {
+ let mut builder = ShrinkerBuilder::new(c_str!("android-ashmem"))?;
+ builder.set_seeks(4 * ashmem_shrinker::DEFAULT_SEEKS);
+ *shrinker = Some(builder.register(()));
+ }
+ } else {
+ *shrinker = None;
+ }
+ Ok(())
+}
+
+pub(crate) fn get_shrinker_enabled() -> bool {
+ ASHMEM_SHRINKER.lock().is_some()
+}
+
+#[cfg(test)]
+fn range_test() -> Result {
+ fn get_random(max: usize) -> usize {
+ let rng = unsafe { kernel::bindings::get_random_u64() };
+ (rng % max as u64) as usize
+ }
+
+ fn memset(slice: &mut [bool], value: bool) {
+ for ptr in slice {
+ *ptr = value;
+ }
+ }
+
+ const SIZE: usize = 16;
+
+ let file = ShmemFile::new(c_str!("test_file"), SIZE * PAGE_SIZE, 0)?;
+ let mut area = Area::new();
+ let mut unpinned = [false; SIZE];
+
+ let mut new_range = None;
+ for _ in 0..SIZE {
+ let start = get_random(SIZE);
+ let end = get_random(SIZE - start) + start;
+ let op = get_random(2) == 0;
+
+ if new_range.is_none() {
+ new_range = Some(NewRange {
+ file: &file,
+ alloc: UniqueArc::new_uninit(GFP_KERNEL)?,
+ });
+ }
+ let mut lock = AshmemGuard(ASHMEM_MUTEX.lock());
+ if op {
+ pr_err!("Unpinning {start} to {end}.");
+ area.unpin(start, end, &mut new_range, &mut lock);
+ memset(&mut unpinned[start..=end], true);
+ } else {
+ pr_err!("Pinning {start} to {end}.");
+ area.pin(start, end, &mut new_range, &mut lock);
+ memset(&mut unpinned[start..=end], false);
+ }
+
+ for item in &area.unpinned_list {
+ pr_err!(
+ "Seeing range {} to {}.",
+ item.pgstart(&lock),
+ item.pgend(&lock)
+ );
+ }
+
+ let mut cursor = area.unpinned_list.cursor_back();
+ let mut fail = false;
+ for i in 0..SIZE {
+ let mut target = false;
+ while let Some(prev) = cursor.peek_prev() {
+ if prev.pgend(&lock) < i {
+ cursor.move_prev();
+ continue;
+ }
+ target = prev.pgstart(&lock) <= i;
+ break;
+ }
+ if target != unpinned[i] {
+ pr_err!("Mismatch on {i}!");
+ fail = true;
+ }
+ }
+ if fail {
+ return Err(EINVAL);
+ }
+ }
+ pr_err!("Test completed successfully!");
+ Ok(())
+}
diff --git a/drivers/staging/android/ashmem_shrinker.rs b/drivers/staging/android/ashmem_shrinker.rs
new file mode 100644
index 0000000..a62c141
--- /dev/null
+++ b/drivers/staging/android/ashmem_shrinker.rs
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+#![allow(unreachable_pub, dead_code)]
+//! Shrinker for handling memory pressure.
+//!
+//! C header: [`include/linux/shrinker.h`](srctree/include/linux/shrinker.h)
+
+use kernel::{
+ alloc::AllocError,
+ bindings, c_str,
+ ffi::{c_int, c_long, c_ulong, c_void},
+ str::{CStr, CStrExt as _},
+ types::ForeignOwnable,
+};
+
+use core::{marker::PhantomData, ptr::NonNull};
+
+const SHRINK_STOP: c_ulong = bindings::SHRINK_STOP as c_ulong;
+const SHRINK_EMPTY: c_ulong = bindings::SHRINK_EMPTY as c_ulong;
+
+/// The default value for the number of seeks needed to recreate an object.
+pub const DEFAULT_SEEKS: u32 = bindings::DEFAULT_SEEKS;
+
+/// An unregistered shrinker.
+///
+/// This type can be used to modify the settings of the shrinker before it is registered.
+///
+/// # Invariants
+///
+/// The `shrinker` pointer references an unregistered shrinker.
+pub struct ShrinkerBuilder {
+ shrinker: NonNull<bindings::shrinker>,
+}
+
+// SAFETY: Moving an unregistered shrinker between threads is okay.
+unsafe impl Send for ShrinkerBuilder {}
+// SAFETY: An unregistered shrinker is thread safe.
+unsafe impl Sync for ShrinkerBuilder {}
+
+impl ShrinkerBuilder {
+ /// Create a new shrinker.
+ pub fn new(name: &CStr) -> Result<Self, AllocError> {
+ // TODO: Support numa/memcg aware shrinkers once list_lru is available.
+ let flags = 0;
+
+ // SAFETY: Passing `0` as flags is okay. Using `%s` as the format string is okay when we
+ // pass a nul-terminated string as the string for `%s` to print.
+ let ptr = unsafe {
+ bindings::shrinker_alloc(flags, c_str!("%s").as_char_ptr(), name.as_char_ptr())
+ };
+
+ let shrinker = NonNull::new(ptr).ok_or(AllocError)?;
+
+ // INVARIANT: The allocated shrinker is unregistered.
+ Ok(Self { shrinker })
+ }
+
+ /// Create a new shrinker using format arguments for the name.
+ pub fn new_fmt(name: core::fmt::Arguments<'_>) -> Result<Self, AllocError> {
+ // TODO: Support numa/memcg aware shrinkers once list_lru is available.
+ let flags = 0;
+
+ // SAFETY: Passing `0` as flags is okay. Using `%pA` as the format string is okay when we
+ // pass a `fmt::Arguments` as the value to print.
+ let ptr = unsafe {
+ bindings::shrinker_alloc(
+ flags,
+ c_str!("%pA").as_char_ptr(),
+ &name as *const _ as *const c_void,
+ )
+ };
+
+ let shrinker = NonNull::new(ptr).ok_or(AllocError)?;
+
+ // INVARIANT: The allocated shrinker is unregistered.
+ Ok(Self { shrinker })
+ }
+
+ /// Set the number of seeks needed to recreate an object.
+ pub fn set_seeks(&mut self, seeks: u32) {
+ unsafe { (*self.shrinker.as_ptr()).seeks = seeks as c_int };
+ }
+
+ /// Set the batch size for reclaiming on this shrinker.
+ pub fn set_batch(&mut self, batch: usize) {
+ unsafe { (*self.shrinker.as_ptr()).batch = batch as c_long };
+ }
+
+ /// Register the shrinker.
+ ///
+ /// The provided pointer is used as the private data, and the type `T` determines the callbacks
+ /// that the shrinker will use.
+ pub fn register<T: Shrinker>(self, private_data: T::Ptr) -> ShrinkerRegistration<T> {
+ let shrinker = self.shrinker;
+ let ptr = shrinker.as_ptr();
+
+ // The destructor of `self` calls `shrinker_free`, so skip the destructor.
+ core::mem::forget(self);
+
+ let private_data_ptr = <T::Ptr as ForeignOwnable>::into_foreign(private_data);
+
+ // SAFETY: We own the private data, so we can assign to it.
+ unsafe { (*ptr).private_data = private_data_ptr };
+ // SAFETY: The shrinker is not yet registered, so we can update this field.
+ unsafe { (*ptr).count_objects = Some(rust_count_objects::<T>) };
+ // SAFETY: The shrinker is not yet registered, so we can update this field.
+ unsafe { (*ptr).scan_objects = Some(rust_scan_objects::<T>) };
+
+ // SAFETY: The shrinker is unregistered, so it's safe to register it.
+ unsafe { bindings::shrinker_register(ptr) };
+
+ ShrinkerRegistration {
+ shrinker,
+ _phantom: PhantomData,
+ }
+ }
+}
+
+impl Drop for ShrinkerBuilder {
+ fn drop(&mut self) {
+ // SAFETY: The shrinker is a valid but unregistered shrinker, and we will not use it
+ // anymore.
+ unsafe { bindings::shrinker_free(self.shrinker.as_ptr()) };
+ }
+}
+
+/// A shrinker that is registered with the kernel.
+///
+/// # Invariants
+///
+/// The `shrinker` pointer refers to a registered shrinker using `T` as the private data.
+pub struct ShrinkerRegistration<T: Shrinker> {
+ shrinker: NonNull<bindings::shrinker>,
+ _phantom: PhantomData<T::Ptr>,
+}
+
+// SAFETY: This allows you to deregister the shrinker from a different thread, which means that
+// private data could be dropped from any thread.
+unsafe impl<T: Shrinker> Send for ShrinkerRegistration<T> where T::Ptr: Send {}
+// SAFETY: The only thing you can do with an immutable reference is access the private data, which
+// is okay to access in parallel as the `Shrinker` trait requires the private data to be `Sync`.
+unsafe impl<T: Shrinker> Sync for ShrinkerRegistration<T> {}
+
+impl<T: Shrinker> ShrinkerRegistration<T> {
+ /// Access the private data in this shrinker.
+ pub fn private_data(&self) -> <T::Ptr as ForeignOwnable>::Borrowed<'_> {
+ // SAFETY: We own the private data, so we can access it.
+ let private = unsafe { (*self.shrinker.as_ptr()).private_data };
+ // SAFETY: By the type invariants, the private data is `T`. This access could happen in
+ // parallel with a shrinker callback, but that's okay as the `Shrinker` trait ensures that
+ // `T::Ptr` is `Sync`.
+ unsafe { <T::Ptr as ForeignOwnable>::borrow(private) }
+ }
+}
+
+impl<T: Shrinker> Drop for ShrinkerRegistration<T> {
+ fn drop(&mut self) {
+ // SAFETY: We own the private data, so we can access it.
+ let private = unsafe { (*self.shrinker.as_ptr()).private_data };
+ // SAFETY: We will not access the shrinker after this call.
+ unsafe { bindings::shrinker_free(self.shrinker.as_ptr()) };
+ // SAFETY: The above call blocked until the completion of any shrinker callbacks, so there
+ // are no longer any users of the private data.
+ drop(unsafe { <T::Ptr as ForeignOwnable>::from_foreign(private) });
+ }
+}
+
+/// Callbacks for a shrinker.
+pub trait Shrinker {
+ /// The pointer type used to store the private data of the shrinker.
+ ///
+ /// Needs to be `Sync` because the shrinker callback could access this value immutably from
+ /// several thread in parallel.
+ type Ptr: ForeignOwnable + Sync;
+
+ /// Count the number of freeable items in the cache.
+ fn count_objects(
+ me: <Self::Ptr as ForeignOwnable>::Borrowed<'_>,
+ sc: ShrinkControl<'_>,
+ ) -> CountObjects;
+
+ /// Free some objects in this cache.
+ fn scan_objects(
+ me: <Self::Ptr as ForeignOwnable>::Borrowed<'_>,
+ sc: ShrinkControl<'_>,
+ ) -> ScanObjects;
+}
+
+/// How many objects are there in the cache?
+///
+/// This is used as the return value of [`Shrinker::count_objects`].
+pub struct CountObjects {
+ inner: c_ulong,
+}
+
+impl CountObjects {
+ /// Indicates that the number of objects is zero.
+ pub const EMPTY: Self = Self {
+ inner: SHRINK_EMPTY,
+ };
+
+ /// The maximum possible number of freeable objects.
+ pub const MAX: Self = Self {
+ // The shrinker code assumes that it can multiply this value by two without overflow.
+ inner: c_ulong::MAX / 2,
+ };
+
+ /// Creates a new `CountObjects` with the given value.
+ ///
+ /// This should be the number of objects that were actually freed. Objects that were scanned
+ /// but not freed should be counted in `nr_scanned` but not here.
+ ///
+ /// If `count` is zero, then this indicates that the real count is unknown. Use
+ /// `CountObjects::EMPTY` to indicate that the shrinker is empty.
+ pub fn new(count: usize) -> Self {
+ if count > Self::MAX.inner as usize {
+ return Self::MAX;
+ }
+
+ Self {
+ inner: count as c_ulong,
+ }
+ }
+}
+
+/// How many objects were freed?
+///
+/// This is used as the return value of [`Shrinker::scan_objects`].
+pub struct ScanObjects {
+ inner: c_ulong,
+}
+
+impl ScanObjects {
+ /// Indicates that the shrinker should stop trying to free objects from this cache due to
+ /// potential deadlocks.
+ pub const STOP: Self = Self { inner: SHRINK_STOP };
+
+ /// The maximum possible number of freeable objects.
+ pub const MAX: Self = Self {
+ inner: SHRINK_STOP - 1,
+ };
+
+ /// Creates a new `CountObjects` with the given value.
+ pub fn from_count(count: usize) -> Self {
+ if count > Self::MAX.inner as usize {
+ return Self::MAX;
+ }
+
+ Self {
+ inner: count as c_ulong,
+ }
+ }
+}
+
+/// This struct is used to pass information from page reclaim to the shrinkers.
+///
+/// # Invariants
+///
+/// `ptr` has exclusive access to a valid `struct shrink_control`.
+pub struct ShrinkControl<'a> {
+ ptr: NonNull<bindings::shrink_control>,
+ _phantom: PhantomData<&'a bindings::shrink_control>,
+}
+
+impl<'a> ShrinkControl<'a> {
+ /// Create a `ShrinkControl` from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// The pointer should point at a valid `shrink_control` for the duration of 'a.
+ pub unsafe fn from_raw(ptr: *mut bindings::shrink_control) -> Self {
+ Self {
+ // SAFETY: Caller promises that this pointer is valid.
+ ptr: unsafe { NonNull::new_unchecked(ptr) },
+ _phantom: PhantomData,
+ }
+ }
+
+ /// Determines whether it is safe to call into filesystem code.
+ pub fn reclaim_fs_allowed(&self) -> bool {
+ // SAFETY: Okay by type invariants.
+ let mask = unsafe { (*self.ptr.as_ptr()).gfp_mask };
+
+ (mask & bindings::__GFP_FS) != 0
+ }
+
+ /// Determines whether it is safe to call into IO code.
+ pub fn reclaim_io_allowed(&self) -> bool {
+ // SAFETY: Okay by type invariants.
+ let mask = unsafe { (*self.ptr.as_ptr()).gfp_mask };
+
+ (mask & bindings::__GFP_IO) != 0
+ }
+
+ /// Returns the number of objects that `scan_objects` should try to reclaim.
+ pub fn nr_to_scan(&self) -> usize {
+ // SAFETY: Okay by type invariants.
+ unsafe { (*self.ptr.as_ptr()).nr_to_scan as usize }
+ }
+
+ /// The callback should set this value to the number of objects inspected by the shrinker.
+ pub fn set_nr_scanned(&mut self, val: usize) {
+ // SAFETY: Okay by type invariants.
+ unsafe { (*self.ptr.as_ptr()).nr_scanned = val as c_ulong };
+ }
+}
+
+unsafe extern "C" fn rust_count_objects<T: Shrinker>(
+ shrink: *mut bindings::shrinker,
+ sc: *mut bindings::shrink_control,
+) -> c_ulong {
+ // SAFETY: We own the private data, so we can access it.
+ let private = unsafe { (*shrink).private_data };
+ // SAFETY: This function is only used with shrinkers where `T` is the type of the private data.
+ let private = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
+ // SAFETY: The caller passes a valid `sc` pointer.
+ let sc = unsafe { ShrinkControl::from_raw(sc) };
+
+ let ret = T::count_objects(private, sc);
+ ret.inner
+}
+
+unsafe extern "C" fn rust_scan_objects<T: Shrinker>(
+ shrink: *mut bindings::shrinker,
+ sc: *mut bindings::shrink_control,
+) -> c_ulong {
+ // SAFETY: We own the private data, so we can access it.
+ let private = unsafe { (*shrink).private_data };
+ // SAFETY: This function is only used with shrinkers where `T` is the type of the private data.
+ let private = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
+ // SAFETY: The caller passes a valid `sc` pointer.
+ let sc = unsafe { ShrinkControl::from_raw(sc) };
+
+ let ret = T::scan_objects(private, sc);
+ ret.inner
+}
diff --git a/drivers/staging/android/ashmem_toggle.rs b/drivers/staging/android/ashmem_toggle.rs
new file mode 100644
index 0000000..21a126f3
--- /dev/null
+++ b/drivers/staging/android/ashmem_toggle.rs
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Provides knobs for Rust ashmem.
+
+use crate::{ashmem_range, IGNORE_UNSET_PROT_EXEC, IGNORE_UNSET_PROT_READ};
+use core::{marker::PhantomData, sync::atomic::Ordering};
+use kernel::{
+ c_str,
+ error::to_result,
+ fs::{File, Kiocb},
+ iov::{IovIterDest, IovIterSource},
+ miscdevice::{MiscDevice, MiscDeviceOptions, MiscDeviceRegistration},
+ prelude::*,
+};
+
+fn kstrtobool(kstr: &CStr) -> Result<bool> {
+ let mut res = false;
+ to_result(unsafe { kernel::bindings::kstrtobool(kstr.as_char_ptr(), &mut res) })?;
+ Ok(res)
+}
+
+pub(crate) trait AshmemToggle {
+ const NAME: &'static CStr;
+ fn set(enabled: bool) -> Result<()>;
+ fn get() -> bool;
+}
+
+pub(crate) struct AshmemToggleMisc<T>(PhantomData<T>);
+
+impl<T: AshmemToggle> AshmemToggleMisc<T> {
+ pub(crate) fn new() -> Result<Pin<KBox<MiscDeviceRegistration<AshmemToggleMisc<T>>>>> {
+ KBox::pin_init(
+ MiscDeviceRegistration::register(MiscDeviceOptions { name: T::NAME }),
+ GFP_KERNEL,
+ )
+ }
+}
+
+#[vtable]
+impl<T: AshmemToggle> MiscDevice for AshmemToggleMisc<T> {
+ type Ptr = ();
+ fn open(_: &File, _: &MiscDeviceRegistration<Self>) -> Result<()> {
+ Ok(())
+ }
+ fn read_iter(mut kiocb: Kiocb<'_, Self::Ptr>, iov: &mut IovIterDest<'_>) -> Result<usize> {
+ if kiocb.ki_pos() != 0 {
+ return Ok(0);
+ }
+
+ let data = match T::get() {
+ false => b"0\n",
+ true => b"1\n",
+ };
+
+ // You better give me a buffer with space for at least two bytes.
+ iov.copy_to_iter(data);
+ *kiocb.ki_pos_mut() = 2;
+ Ok(2)
+ }
+ fn write_iter(_kiocb: Kiocb<'_, Self::Ptr>, iov: &mut IovIterSource<'_>) -> Result<usize> {
+ let mut data = [0; 16];
+ let len = iov.copy_from_iter(&mut data[..15]);
+ data[len] = 0;
+ let data = CStr::from_bytes_with_nul(&data[..len + 1]).map_err(|_| EINVAL)?;
+ T::set(kstrtobool(data)?)?;
+ Ok(len)
+ }
+}
+
+pub(crate) struct AshmemToggleShrinker;
+
+impl AshmemToggle for AshmemToggleShrinker {
+ const NAME: &'static CStr = c_str!("ashmem_unpinning_enable");
+ fn set(enabled: bool) -> Result<()> {
+ ashmem_range::set_shrinker_enabled(enabled)
+ }
+ fn get() -> bool {
+ ashmem_range::get_shrinker_enabled()
+ }
+}
+
+pub(crate) struct AshmemToggleRead;
+
+impl AshmemToggle for AshmemToggleRead {
+ const NAME: &'static CStr = c_str!("ashmem_ignore_unset_prot_read");
+ fn set(enabled: bool) -> Result<()> {
+ IGNORE_UNSET_PROT_READ.store(enabled, Ordering::Relaxed);
+ Ok(())
+ }
+ fn get() -> bool {
+ IGNORE_UNSET_PROT_READ.load(Ordering::Relaxed)
+ }
+}
+
+pub(crate) struct AshmemToggleExec;
+
+impl AshmemToggle for AshmemToggleExec {
+ const NAME: &'static CStr = c_str!("ashmem_ignore_unset_prot_exec");
+ fn set(enabled: bool) -> Result<()> {
+ IGNORE_UNSET_PROT_EXEC.store(enabled, Ordering::Relaxed);
+ Ok(())
+ }
+ fn get() -> bool {
+ IGNORE_UNSET_PROT_EXEC.load(Ordering::Relaxed)
+ }
+}
diff --git a/drivers/staging/android/shmem.rs b/drivers/staging/android/shmem.rs
new file mode 100644
index 0000000..3343d7f
--- /dev/null
+++ b/drivers/staging/android/shmem.rs
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Safe rust abstraction around a shmem file for use by ashmem.
+
+use kernel::{
+ bindings,
+ error::{from_err_ptr, to_result, Result},
+ fs::file::{File, LocalFile},
+ iov::IovIterDest,
+ miscdevice::loff_t,
+ mm::virt::{vm_flags_t, VmaNew},
+ prelude::*,
+ str::CStr,
+ types::ARef,
+};
+
+use core::{
+ cell::UnsafeCell,
+ ptr::{addr_of_mut, NonNull},
+};
+
+/// # Safety
+///
+/// Caller must ensure that access to the file position is properly synchronized.
+pub(crate) unsafe fn file_get_fpos(file: &LocalFile) -> loff_t {
+ // SAFETY: Caller ensures that this is okay.
+ unsafe { (*file.as_ptr()).f_pos }
+}
+
+/// # Safety
+///
+/// Caller must ensure that access to the file position is properly synchronized.
+pub(crate) unsafe fn file_set_fpos(file: &LocalFile, pos: loff_t) {
+ // SAFETY: Caller ensures that this is okay.
+ unsafe { (*file.as_ptr()).f_pos = pos };
+}
+
+pub(crate) fn vma_set_anonymous(vma: &VmaNew) {
+ // SAFETY: The `VmaNew` type is only used when the vma is being set up, so this operation is
+ // safe.
+ unsafe { (*vma.as_ptr()).vm_ops = core::ptr::null_mut() };
+}
+
+/// Wrapper around a file that is known to be a shmem file.
+#[derive(Clone)]
+pub(crate) struct ShmemFile {
+ inner: ARef<File>,
+}
+
+impl ShmemFile {
+ /// Create a shmem file for use by ashmem.
+ ///
+ /// This sets up the file with the exact configuration that ashmem needs.
+ pub(crate) fn new(name: &CStr, size: usize, flags: vm_flags_t) -> Result<Self> {
+ // SAFETY: The name is a nul-terminated string.
+ let vmfile = from_err_ptr(unsafe {
+ // VmaNew needs to be converted to use the new type vma_flags_t. In the mean time,
+ // let's do the manual translation similar to the C helper vma_flags_set_word(). The
+ // entire bitmap is first zeroed out and then the flags are stored in the first word.
+ let mut vma_flags: bindings::vma_flags_t = core::mem::zeroed();
+ vma_flags.__vma_flags[0] = flags as _;
+ bindings::shmem_file_setup(name.as_char_ptr(), size as _, vma_flags)
+ })?;
+
+ // SAFETY: The call to `shmem_file_setup` was successful, so `vmfile` is a valid pointer to
+ // a file and we can transfer ownership of the refcount it created to an `ARef<File>`.
+ let vmfile = unsafe { ARef::<File>::from_raw(NonNull::new_unchecked(vmfile.cast())) };
+
+ // The C driver sets the FMODE_LSEEK bit in `f_mode` here. However, that is not necessary
+ // anymore. It was added to the C driver in commit 97fbfef6bd59 ("staging: android: ashmem:
+ // lseek failed due to no FMODE_LSEEK.") since they started using the VFS implementation of
+ // lseek rather than a custom hook, and the VFS version actually checks the permissions.
+ //
+ // However, commit e7478158e137 ("fs: clear or set FMODE_LSEEK based on llseek function")
+ // has since made it so that if lseek is implemented, then FMODE_LSEEK will be set on
+ // pseudo-files by default. Since llseek is implemented on shmem files, we no longer need
+ // to set FMODE_LSEEK.
+
+ set_inode_lockdep_class(&vmfile);
+
+ // SAFETY: We just created the file and have not yet published it, so nobody else is
+ // looking at this field yet.
+ unsafe { (*vmfile.as_ptr()).f_op = get_shmem_fops((*vmfile.as_ptr()).f_op) };
+
+ Ok(Self { inner: vmfile })
+ }
+
+ pub(crate) fn file(&self) -> &File {
+ &self.inner
+ }
+
+ pub(crate) fn vfs_llseek(&self, offset: loff_t, whence: c_int) -> Result<loff_t> {
+ // SAFETY: Just an FFI call. The file is valid.
+ let ret = unsafe { bindings::vfs_llseek(self.inner.as_ptr(), offset, whence) };
+
+ if ret < 0 {
+ Err(Error::from_errno(ret as i32))
+ } else {
+ Ok(ret)
+ }
+ }
+
+ pub(crate) fn vfs_iter_read(
+ &self,
+ iov: &mut IovIterDest<'_>,
+ pos: &mut loff_t,
+ ) -> Result<loff_t> {
+ // SAFETY: Just an FFI call. The file and iov is valid.
+ let ret = unsafe { bindings::vfs_iter_read(self.inner.as_ptr(), iov.as_raw(), pos, 0) };
+
+ if ret < 0 {
+ Err(Error::from_errno(ret as i32))
+ } else {
+ Ok(ret as loff_t)
+ }
+ }
+
+ pub(crate) fn punch_hole(&self, start: usize, len: usize) {
+ use kernel::bindings::{FALLOC_FL_KEEP_SIZE, FALLOC_FL_PUNCH_HOLE};
+
+ let f = self.inner.as_ptr();
+ // SAFETY: f_op of a file is immutable, so okay to read.
+ let fallocate = unsafe { (*(*f).f_op).fallocate };
+
+ if let Some(fallocate) = fallocate {
+ unsafe {
+ fallocate(
+ f,
+ (FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE) as _,
+ start as _,
+ len as _,
+ )
+ };
+ }
+ }
+
+ pub(crate) fn inode_ino(&self) -> usize {
+ // SAFETY: Accessing the ino is always okay.
+ unsafe { (*(*self.inner.as_ptr()).f_inode).i_ino as usize }
+ }
+}
+
+/// Fix the lockdep class of the shmem inode.
+///
+/// A separate lockdep class for the backing shmem inodes to resolve the lockdep warning about the
+/// race between kswapd taking fs_reclaim before inode_lock and write syscall taking inode_lock and
+/// then fs_reclaim. Note that such race is impossible because ashmem does not support write
+/// syscalls operating on the backing shmem.
+fn set_inode_lockdep_class(vmfile: &File) {
+ // SAFETY: This sets the lockdep class correctly.
+ unsafe {
+ let inode = (*vmfile.as_ptr()).f_inode;
+ let lock = addr_of_mut!((*inode).i_rwsem);
+ bindings::lockdep_set_class_rwsem(
+ lock,
+ kernel::static_lock_class!().as_ptr(),
+ kernel::c_str!("backing_shmem_inode_class").as_char_ptr(),
+ )
+ }
+}
+
+pub(crate) fn zero_setup(vma: &VmaNew) -> Result<()> {
+ // SAFETY: The `VmaNew` type is only used when the vma is being set up, so we can set up the
+ // vma.
+ to_result(unsafe { bindings::shmem_zero_setup(vma.as_ptr()) })
+}
+
+pub(crate) fn set_file(vma: &VmaNew, file: &File) {
+ let file = ARef::from(file);
+ // SAFETY: We're setting up the vma, so we can read the file pointer.
+ let old_file = unsafe { (*vma.as_ptr()).vm_file };
+
+ // INVARIANT: This transfers ownership of the refcount we just created to the vma.
+ //
+ // SAFETY: We're setting up the vma, so we can write to the file pointer.
+ unsafe { (*vma.as_ptr()).vm_file = ARef::into_raw(file).as_ptr().cast() };
+
+ if let Some(old_file) = NonNull::new(old_file) {
+ // SAFETY: We took ownership of the file refcount from the vma, so we can drop it.
+ drop(unsafe { ARef::<File>::from_raw(old_file.cast()) });
+ }
+}
+
+// Used to synchronize the initialization of `VMFILE_FOPS`.
+//
+// INVARIANT: Once `SHMEM_FOPS_ONCE` becomes true, `VMFILE_FOPS` is permanently immutable.
+kernel::sync::global_lock! {
+ // SAFETY: We call `init` as the very first thing in the initialization of this module, so
+ // there are no calls to `lock` before `init` is called.
+ pub(super) unsafe(uninit) static SHMEM_FOPS_ONCE: Mutex<bool> = false;
+}
+
+/// # Safety
+///
+/// Must only be used with the fops of a shmem file.
+unsafe fn get_shmem_fops(
+ shmem_fops: *const bindings::file_operations,
+) -> &'static bindings::file_operations {
+ struct FopsHelper {
+ inner: UnsafeCell<bindings::file_operations>,
+ }
+ unsafe impl Sync for FopsHelper {}
+
+ static VMFILE_FOPS: FopsHelper = FopsHelper {
+ // SAFETY: All zeros is valid for `struct file_operations`.
+ inner: UnsafeCell::new(unsafe { core::mem::zeroed() }),
+ };
+
+ let fops_ptr = VMFILE_FOPS.inner.get();
+
+ let mut once_guard = SHMEM_FOPS_ONCE.lock();
+ if !*once_guard {
+ // SAFETY: This points at the file operations of an existing file, so the contents must be
+ // immutable.
+ let mut new_fops = unsafe { *shmem_fops };
+ new_fops.mmap = Some(ashmem_vmfile_mmap);
+ new_fops.get_unmapped_area = Some(bindings::mm_get_unmapped_area);
+ // SAFETY: We hold the `SHMEM_FOPS_ONCE` guard, so there are no other writers. The value of
+ // `SHMEM_FOPS_ONCE` is false, so there are no readers either.
+ unsafe { *fops_ptr = new_fops };
+ *once_guard = true;
+ }
+ drop(once_guard);
+
+ // SAFETY: The value of `SHMEM_FOPS_ONCE` is true, so `VMFILE_FOPS` is never going to change
+ // again.
+ unsafe { &*fops_ptr }
+}
+
+extern "C" fn ashmem_vmfile_mmap(
+ _file: *mut bindings::file,
+ _vma: *mut bindings::vm_area_struct,
+) -> c_int {
+ EPERM.to_errno()
+}
diff --git a/drivers/staging/android/uapi/ashmem.h b/drivers/staging/android/uapi/ashmem.h
new file mode 100644
index 0000000..db88011
--- /dev/null
+++ b/drivers/staging/android/uapi/ashmem.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Apache-2.0 */
+/*
+ * Copyright 2008 Google Inc.
+ * Author: Robert Love
+ */
+
+#ifndef _UAPI_LINUX_ASHMEM_H
+#define _UAPI_LINUX_ASHMEM_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#define ASHMEM_NAME_LEN 256
+
+#define ASHMEM_NAME_DEF "dev/ashmem"
+
+/* Return values from ASHMEM_PIN: Was the mapping purged while unpinned? */
+#define ASHMEM_NOT_PURGED 0
+#define ASHMEM_WAS_PURGED 1
+
+/* Return values from ASHMEM_GET_PIN_STATUS: Is the mapping pinned? */
+#define ASHMEM_IS_UNPINNED 0
+#define ASHMEM_IS_PINNED 1
+
+struct ashmem_pin {
+ __u32 offset; /* offset into region, in bytes, page-aligned */
+ __u32 len; /* length forward from offset, in bytes, page-aligned */
+};
+
+#define __ASHMEMIOC 0x77
+
+enum {
+ ASHMEM_SET_NAME = _IOW(__ASHMEMIOC, 1, char[ASHMEM_NAME_LEN]),
+ ASHMEM_GET_NAME = _IOR(__ASHMEMIOC, 2, char[ASHMEM_NAME_LEN]),
+ ASHMEM_SET_SIZE = _IOW(__ASHMEMIOC, 3, size_t),
+ ASHMEM_GET_SIZE = _IO(__ASHMEMIOC, 4),
+ ASHMEM_SET_PROT_MASK = _IOW(__ASHMEMIOC, 5, unsigned long),
+ ASHMEM_GET_PROT_MASK = _IO(__ASHMEMIOC, 6),
+ ASHMEM_PIN = _IOW(__ASHMEMIOC, 7, struct ashmem_pin),
+ ASHMEM_UNPIN = _IOW(__ASHMEMIOC, 8, struct ashmem_pin),
+ ASHMEM_GET_PIN_STATUS = _IO(__ASHMEMIOC, 9),
+ ASHMEM_PURGE_ALL_CACHES = _IO(__ASHMEMIOC, 10),
+ ASHMEM_GET_FILE_ID = _IOR(__ASHMEMIOC, 11, unsigned long),
+};
+
+#endif /* _UAPI_LINUX_ASHMEM_H */
diff --git a/drivers/thermal/thermal_helpers.c b/drivers/thermal/thermal_helpers.c
index b1152ad..5310656 100644
--- a/drivers/thermal/thermal_helpers.c
+++ b/drivers/thermal/thermal_helpers.c
@@ -208,6 +208,7 @@ void thermal_cdev_update_nocheck(struct thermal_cooling_device *cdev)
__thermal_cdev_update(cdev);
}
+EXPORT_SYMBOL_GPL(thermal_cdev_update);
/**
* thermal_zone_get_slope - return the slope attribute of the thermal zone
diff --git a/drivers/tty/hvc/hvc_console.h b/drivers/tty/hvc/hvc_console.h
index cf4c1af..d2aa2a5 100644
--- a/drivers/tty/hvc/hvc_console.h
+++ b/drivers/tty/hvc/hvc_console.h
@@ -30,7 +30,7 @@
* for the tty device. Since this driver supports hotplug of vty adapters we
* need to make sure we have enough allocated.
*/
-#define HVC_ALLOC_TTY_ADAPTERS 8
+#define HVC_ALLOC_TTY_ADAPTERS 64
struct hvc_struct {
struct tty_port port;
diff --git a/drivers/tty/hvc/hvc_dcc.c b/drivers/tty/hvc/hvc_dcc.c
index dfc5c9c..831d4be 100644
--- a/drivers/tty/hvc/hvc_dcc.c
+++ b/drivers/tty/hvc/hvc_dcc.c
@@ -6,6 +6,7 @@
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kfifo.h>
+#include <linux/moduleparam.h>
#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/smp.h>
@@ -16,6 +17,13 @@
#include "hvc_console.h"
+/*
+ * Disable DCC driver at runtime. Want driver enabled for GKI, but some devices
+ * do not support the registers and crash when driver pokes the registers
+ */
+static bool enable;
+module_param(enable, bool, 0444);
+
/* DCC Status Bits */
#define DCC_STATUS_RX (1 << 30)
#define DCC_STATUS_TX (1 << 29)
@@ -265,7 +273,7 @@ static int __init hvc_dcc_console_init(void)
{
int ret;
- if (!hvc_dcc_check())
+ if (!enable || !hvc_dcc_check())
return -ENODEV;
/* Returns -1 if error */
@@ -279,7 +287,7 @@ static int __init hvc_dcc_init(void)
{
struct hvc_struct *p;
- if (!hvc_dcc_check())
+ if (!enable || !hvc_dcc_check())
return -ENODEV;
if (IS_ENABLED(CONFIG_HVC_DCC_SERIALIZE_SMP)) {
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index f86775c..e1f2cd3 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -219,7 +219,6 @@
config SERIAL_SAMSUNG
tristate "Samsung SoC serial support"
- depends on PLAT_SAMSUNG || ARCH_S5PV210 || ARCH_EXYNOS || ARCH_APPLE || ARCH_ARTPEC || COMPILE_TEST
select SERIAL_CORE
help
Support for the on-chip UARTs on the Samsung
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index c2e4b31..39d0f28 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -55,6 +55,8 @@
#include <asm/ptrace.h>
#include <asm/irq_regs.h>
+#include <trace/hooks/sysrqcrash.h>
+
/* Whether we react on sysrq keys or just ignore them */
static int __read_mostly sysrq_enabled = CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE;
static bool __read_mostly sysrq_always_enabled;
@@ -151,6 +153,8 @@ static void sysrq_handle_crash(u8 key)
/* release the RCU read lock before crashing */
rcu_read_unlock();
+ trace_android_vh_sysrq_crash(current);
+
panic("sysrq triggered crash\n");
}
static const struct sysrq_key_op sysrq_crash_op = {
diff --git a/drivers/ufs/core/ufshcd-crypto.c b/drivers/ufs/core/ufshcd-crypto.c
index 9e63a9d..572080b 100644
--- a/drivers/ufs/core/ufshcd-crypto.c
+++ b/drivers/ufs/core/ufshcd-crypto.c
@@ -6,6 +6,9 @@
#include <ufs/ufshcd.h>
#include "ufshcd-crypto.h"
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/ufshcd.h>
+
/* Blk-crypto modes supported by UFS crypto */
static const struct ufs_crypto_alg_entry {
enum ufs_crypto_alg ufs_alg;
@@ -106,11 +109,15 @@ static int ufshcd_crypto_keyslot_evict(struct blk_crypto_profile *profile,
*/
bool ufshcd_crypto_enable(struct ufs_hba *hba)
{
+ int err = -EOPNOTSUPP;
+
if (!(hba->caps & UFSHCD_CAP_CRYPTO))
return false;
/* Reset might clear all keys, so reprogram all the keys. */
- blk_crypto_reprogram_all_keys(&hba->crypto_profile);
+ trace_android_rvh_ufs_reprogram_all_keys(hba, &err);
+ if (err == -EOPNOTSUPP)
+ blk_crypto_reprogram_all_keys(&hba->crypto_profile);
if (hba->quirks & UFSHCD_QUIRK_BROKEN_CRYPTO_ENABLE)
return false;
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 37c3207..47c215c 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -78,6 +78,7 @@ int ufshcd_mcq_sq_cleanup(struct ufs_hba *hba, int task_tag);
int ufshcd_mcq_abort(struct scsi_cmnd *cmd);
int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
void ufshcd_release_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd);
+bool ufshcd_is_scsi_cmd(struct scsi_cmnd *cmd);
/**
* enum ufs_descr_fmt - UFS string descriptor format
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 899e663f..e8461a4 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -44,6 +44,9 @@
#define CREATE_TRACE_POINTS
#include "ufs_trace.h"
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/ufshcd.h>
+
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
UTP_TASK_REQ_COMPL |\
UFSHCD_ERROR_MASK)
@@ -442,6 +445,8 @@ static void ufshcd_add_tm_upiu_trace(struct ufs_hba *hba, unsigned int tag,
{
struct utp_task_req_desc *descp = &hba->utmrdl_base_addr[tag];
+ trace_android_vh_ufs_send_tm_command(hba, tag, (int)str_t);
+
if (!trace_ufshcd_upiu_enabled())
return;
@@ -463,6 +468,8 @@ static void ufshcd_add_uic_command_trace(struct ufs_hba *hba,
{
u32 cmd;
+ trace_android_vh_ufs_send_uic_command(hba, ucmd, (int)str_t);
+
if (!trace_ufshcd_uic_command_enabled())
return;
@@ -2354,10 +2361,11 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, struct scsi_cmnd *cmd)
}
/* Returns %true for SCSI commands and %false for device management commands. */
-static bool ufshcd_is_scsi_cmd(struct scsi_cmnd *cmd)
+bool ufshcd_is_scsi_cmd(struct scsi_cmnd *cmd)
{
return !blk_mq_is_reserved_rq(scsi_cmd_to_rq(cmd));
}
+EXPORT_SYMBOL_GPL(ufshcd_is_scsi_cmd);
/**
* ufshcd_send_command - Send SCSI or device management commands
@@ -2378,6 +2386,7 @@ static inline void ufshcd_send_command(struct ufs_hba *hba,
lrbp->issue_time_stamp_local_clock = local_clock();
lrbp->compl_time_stamp = ktime_set(0, 0);
lrbp->compl_time_stamp_local_clock = 0;
+ trace_android_vh_ufs_send_command(hba, cmd);
}
if (ufshcd_is_scsi_cmd(cmd)) {
ufshcd_add_command_trace(hba, cmd, UFS_CMD_SEND);
@@ -2726,12 +2735,22 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct scsi_cmnd *cmd)
{
struct ufshcd_lrb *lrbp = scsi_cmd_priv(cmd);
int sg_segments = scsi_dma_map(cmd);
+ int err;
if (sg_segments < 0)
return sg_segments;
ufshcd_sgl_to_prdt(hba, lrbp, sg_segments, scsi_sglist(cmd));
+ /*
+ * TODO(b/160883801): remove this vendor hook in favor of the upstream
+ * variant op. This isn't possible yet because the upstream variant op
+ * doesn't yet make it possible for the host driver to get the keyslot.
+ */
+ err = 0;
+ trace_android_vh_ufs_fill_prdt(hba, cmd, sg_segments, &err);
+ if (err)
+ return err;
return ufshcd_crypto_fill_prdt(hba, cmd);
}
@@ -3093,6 +3112,12 @@ static enum scsi_qc_status ufshcd_queuecommand(struct Scsi_Host *host,
ufshcd_setup_scsi_cmd(hba, cmd,
ufshcd_scsi_to_upiu_lun(cmd->device->lun), tag);
+ trace_android_vh_ufs_prepare_command(hba, cmd, &err);
+ if (err) {
+ ufshcd_release(hba);
+ goto out;
+ }
+
err = ufshcd_map_sg(hba, cmd);
if (err) {
ufshcd_release(hba);
@@ -3379,7 +3404,7 @@ static inline void ufshcd_init_query(struct ufs_hba *hba,
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
* < 0 if another error occurred.
*/
-static int ufshcd_query_flag_retry(struct ufs_hba *hba,
+int ufshcd_query_flag_retry(struct ufs_hba *hba,
enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res)
{
int ret;
@@ -3401,6 +3426,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba,
__func__, opcode, idn, ret, retries);
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_query_flag_retry);
/**
* ufshcd_query_flag() - API function for sending flag query requests
@@ -3469,6 +3495,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
ufshcd_dev_man_unlock(hba);
return err;
}
+EXPORT_SYMBOL_GPL(ufshcd_query_flag);
/**
* ufshcd_query_attr - API function for sending attribute requests
@@ -3531,6 +3558,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
ufshcd_dev_man_unlock(hba);
return err;
}
+EXPORT_SYMBOL_GPL(ufshcd_query_attr);
/**
* ufshcd_query_attr_retry() - API function for sending query
@@ -3569,6 +3597,7 @@ int ufshcd_query_attr_retry(struct ufs_hba *hba,
__func__, idn, ret, QUERY_REQ_RETRIES);
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_query_attr_retry);
/*
* Return: 0 upon success; > 0 in case the UFS device reported an OCS error;
@@ -3668,6 +3697,7 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
return err;
}
+EXPORT_SYMBOL_GPL(ufshcd_query_descriptor_retry);
/**
* ufshcd_read_desc_param - read the specified descriptor parameter
@@ -3748,6 +3778,7 @@ int ufshcd_read_desc_param(struct ufs_hba *hba,
kfree(desc_buf);
return ret;
}
+EXPORT_SYMBOL_GPL(ufshcd_read_desc_param);
/**
* struct uc_string_id - unicode string
@@ -5637,6 +5668,7 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
lrbp->compl_time_stamp = ktime_get();
lrbp->compl_time_stamp_local_clock = local_clock();
}
+ trace_android_vh_ufs_compl_command(hba, cmd);
if (ufshcd_is_scsi_cmd(cmd)) {
if (unlikely(ufshcd_should_inform_monitor(hba, cmd)))
ufshcd_update_monitor(hba, cmd);
@@ -6026,7 +6058,7 @@ static inline int ufshcd_get_bkops_status(struct ufs_hba *hba, u32 *status)
* to know whether auto bkops is enabled or disabled after this function
* returns control to it.
*/
-static int ufshcd_bkops_ctrl(struct ufs_hba *hba)
+int ufshcd_bkops_ctrl(struct ufs_hba *hba)
{
enum bkops_status status = hba->urgent_bkops_lvl;
u32 curr_status = 0;
@@ -6051,6 +6083,7 @@ static int ufshcd_bkops_ctrl(struct ufs_hba *hba)
out:
return err;
}
+EXPORT_SYMBOL_GPL(ufshcd_bkops_ctrl);
static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
{
@@ -7030,6 +7063,8 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
queue_eh_work = true;
}
+ trace_android_vh_ufs_check_int_errors(hba, queue_eh_work);
+
if (queue_eh_work) {
/*
* update the transfer error masks to sticky bits, let's do this
@@ -11072,6 +11107,8 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
ufs_sysfs_add_nodes(hba->dev);
async_schedule(ufshcd_async_scan, hba);
+ trace_android_vh_ufs_update_sysfs(hba);
+
device_enable_async_suspend(dev);
ufshcd_pm_qos_init(hba);
return 0;
diff --git a/drivers/usb/TEST_MAPPING b/drivers/usb/TEST_MAPPING
new file mode 100644
index 0000000..896c037
--- /dev/null
+++ b/drivers/usb/TEST_MAPPING
@@ -0,0 +1,194 @@
+{
+ "presubmit": [
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ }
+ ]
+}
diff --git a/drivers/usb/core/TEST_MAPPING b/drivers/usb/core/TEST_MAPPING
new file mode 100644
index 0000000..5a5c8c9
--- /dev/null
+++ b/drivers/usb/core/TEST_MAPPING
@@ -0,0 +1,213 @@
+{
+ "presubmit": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ]
+}
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 7652155..56efb82 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -237,6 +237,17 @@
appropriate symbolic links.
For more information see Documentation/usb/gadget_configfs.rst.
+config ANDROID_USB_CONFIGFS_UEVENT
+ bool "Uevent notification of Gadget State"
+ depends on USB_CONFIGFS
+ help
+ Enable uevent notifications to userspace when gadget state changes.
+ The gadget can be in any of the following three states:
+ "CONNECTED", "DISCONNECTED" or "CONFIGURED".
+ Additionally, selecting this will create the android_usb class of
+ devices, including a "state" attribute for the android_device which
+ shows the gadget state.
+
config USB_CONFIGFS_SERIAL
bool "Generic serial bulk in/out"
depends on USB_CONFIGFS
diff --git a/drivers/usb/gadget/Makefile b/drivers/usb/gadget/Makefile
index 33f1ef9..370b393 100644
--- a/drivers/usb/gadget/Makefile
+++ b/drivers/usb/gadget/Makefile
@@ -8,5 +8,6 @@
obj-$(CONFIG_USB_LIBCOMPOSITE) += libcomposite.o
libcomposite-y := usbstring.o config.o epautoconf.o
libcomposite-y += composite.o functions.o configfs.o u_f.o
+libcomposite-$(CONFIG_ANDROID_USB_CONFIGFS_UEVENT) += android_configfs_uevent.o
obj-$(CONFIG_USB_GADGET) += udc/ function/ legacy/
diff --git a/drivers/usb/gadget/android_configfs_uevent.c b/drivers/usb/gadget/android_configfs_uevent.c
new file mode 100644
index 0000000..10922226
--- /dev/null
+++ b/drivers/usb/gadget/android_configfs_uevent.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2011-2024 Google LLC
+ */
+#include "android_configfs_uevent.h"
+#include <linux/device.h>
+#include <linux/device/class.h>
+#include <linux/err.h>
+#include <linux/kdev_t.h>
+#include <linux/spinlock.h>
+
+static struct android_uevent_opts *android_opts;
+
+static DEFINE_SPINLOCK(opts_lock);
+static DEFINE_IDA(android_ida);
+
+static void android_work(struct work_struct *data)
+{
+ struct android_uevent_opts *opts = container_of(data,
+ struct android_uevent_opts, work);
+
+ char *disconnected_strs[2] = { "USB_STATE=DISCONNECTED", NULL };
+ char *connected_strs[2] = { "USB_STATE=CONNECTED", NULL };
+ char *configured_strs[2] = { "USB_STATE=CONFIGURED", NULL };
+ unsigned long flags;
+ bool disconnected = false;
+ bool connected = false;
+ bool configured = false;
+ bool uevent_sent = false;
+ struct device *dev;
+
+ /*
+ * I believe locking is important due to the fact that we are checking
+ * several conditions here, and if the state changes after checking one
+ * we could potentially drop a uevent to userspace. Additionally, we
+ * want to prevent teardown until after events are sent.
+ */
+ spin_lock_irqsave(&opts_lock, flags);
+
+ /*
+ * If the device does not exist, it means we were torn down after
+ * scheduling this work, but before the work ran, so return to prevent
+ * use after free.
+ */
+ if (!opts->dev) {
+ spin_unlock_irqrestore(&opts_lock, flags);
+ return;
+ }
+
+ /*
+ * Cache the dev pointer in the locked area incase it gets cleared by
+ * android_device_destroy() after we release the lock. The call to
+ * flush_work in the cleanup path ensures we finish our work prior to
+ * destroying the dev which we have cached the pointer to. Ideally,
+ * this would be handled differently (using reference counting), but
+ * for now this should work.
+ */
+ dev = opts->dev;
+
+ if (opts->connected != opts->sw_connected) {
+ if (opts->connected)
+ connected = true;
+ else
+ disconnected = true;
+ opts->sw_connected = opts->connected;
+ }
+ if (opts->configured)
+ configured = true;
+
+ spin_unlock_irqrestore(&opts_lock, flags);
+
+ /*
+ * This is an abuse of uevents, however the android userspace parses
+ * the uevent string for information instead of reading the state from
+ * sysfs entries. This is one of several things about this driver which
+ * would need to change to upstream it. In an attempt to keep the
+ * exising userspace api unmodified until either an upstream solution
+ * is implemented or this functionality is otherwise replaced, leave
+ * the pre-existing logic in place.
+ */
+ if (connected) {
+ if (kobject_uevent_env(&dev->kobj, KOBJ_CHANGE,
+ connected_strs)) {
+ dev_err(dev, "Failed to send connected uevent\n");
+ } else {
+ dev_dbg(dev, "sent uevent %s\n", connected_strs[0]);
+ uevent_sent = true;
+ }
+ }
+
+ if (configured) {
+ if (kobject_uevent_env(&dev->kobj, KOBJ_CHANGE,
+ configured_strs)) {
+ dev_err(dev, "Failed to send configured uevent\n");
+ } else {
+ dev_dbg(dev, "sent uevent %s\n", configured_strs[0]);
+ uevent_sent = true;
+ }
+ }
+
+ if (disconnected) {
+ if (kobject_uevent_env(&dev->kobj, KOBJ_CHANGE,
+ disconnected_strs)) {
+ dev_err(dev, "Failed to send disconnected uevent\n");
+ } else {
+ dev_dbg(dev, "sent uevent %s\n", disconnected_strs[0]);
+ uevent_sent = true;
+ }
+ }
+
+ if (!uevent_sent) {
+ /*
+ * This is an odd case, but not necessarily an error- the state
+ * of the device may have changed since the work was scheduled,
+ * and if the state changed, there is likely another scheduled
+ * work which will send a uevent.
+ */
+ dev_dbg(dev, "did not send uevent\n");
+ }
+}
+
+static ssize_t state_show(struct device *pdev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct android_uevent_opts *opts = dev_get_drvdata(pdev);
+ char *state = "DISCONNECTED";
+
+ if (opts->configured)
+ state = "CONFIGURED";
+ else if (opts->connected)
+ state = "CONNECTED";
+
+ return sysfs_emit(buf, "%s\n", state);
+}
+static DEVICE_ATTR_RO(state);
+
+static struct attribute *android_usb_attrs[] = {
+ &dev_attr_state.attr,
+ NULL,
+};
+
+ATTRIBUTE_GROUPS(android_usb);
+
+static struct class android_usb_class = {
+ .name = "android_usb",
+ .dev_groups = android_usb_groups,
+};
+
+int android_class_create(void)
+{
+ return class_register(&android_usb_class);
+}
+EXPORT_SYMBOL_GPL(android_class_create);
+
+void android_class_destroy(void)
+{
+ class_unregister(&android_usb_class);
+}
+EXPORT_SYMBOL_GPL(android_class_destroy);
+
+int android_device_create(struct android_uevent_opts *opts)
+{
+ unsigned long flags;
+ struct device *dev;
+
+ spin_lock_irqsave(&opts_lock, flags);
+ INIT_WORK(&opts->work, android_work);
+
+ opts->device_id = ida_alloc(&android_ida, GFP_ATOMIC);
+ //Unlock prior to calling device_create() since it may sleep
+ spin_unlock_irqrestore(&opts_lock, flags);
+ if (opts->device_id < 0)
+ return opts->device_id;
+
+ dev = device_create(&android_usb_class, NULL, MKDEV(0, 0),
+ opts, "android%d", opts->device_id);
+
+ spin_lock_irqsave(&opts_lock, flags);
+ if (IS_ERR(dev)) {
+ ida_free(&android_ida, opts->device_id);
+ opts->device_id = -1;
+ spin_unlock_irqrestore(&opts_lock, flags);
+ return PTR_ERR(dev);
+ }
+ opts->dev = dev;
+ ida_init(&opts->function_ida);
+ if (!android_opts)
+ android_opts = opts;
+ spin_unlock_irqrestore(&opts_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(android_device_create);
+
+void android_device_destroy(struct android_uevent_opts *opts)
+{
+ unsigned long flags;
+ struct device *dev;
+
+ /*
+ * This scheme is used to safely cleanup any remaining work. Once
+ * opts->dev is set to NULL, any newly scheduled work will return
+ * after getting the lock and checking for NULL. Any currently
+ * running work finishes with the flush_work (the worker caches
+ * opts->dev so it can continue), before we free the device.
+ *
+ * Ideally, this cleanup would be handled via reference counting, but
+ * there are nuances around device destroy (or the fact that we are
+ * currently statically allocating opts) which prevent this from
+ * being implemented without a significant refactor.
+ */
+ spin_lock_irqsave(&opts_lock, flags);
+ dev = opts->dev;
+ opts->dev = NULL;
+ spin_unlock_irqrestore(&opts_lock, flags);
+
+ flush_work(&opts->work);
+
+ spin_lock_irqsave(&opts_lock, flags);
+ if (opts->device_id >= 0)
+ ida_free(&android_ida, opts->device_id);
+
+ android_opts = NULL;
+ ida_destroy(&opts->function_ida);
+ device_destroy(dev->class, dev->devt);
+ spin_unlock_irqrestore(&opts_lock, flags);
+}
+EXPORT_SYMBOL_GPL(android_device_destroy);
+
+static void __android_set_connected(struct android_uevent_opts *opts,
+ bool connected)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&opts_lock, flags);
+ // Don't send the uevent if connected state is not changed
+ if (opts->connected != connected) {
+ opts->connected = connected;
+ schedule_work(&opts->work);
+ }
+ spin_unlock_irqrestore(&opts_lock, flags);
+}
+
+static void __android_set_configured(struct android_uevent_opts *opts,
+ bool configured)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&opts_lock, flags);
+ // Don't send the uevent if configure state is not changed
+ if (opts->configured != configured) {
+ opts->configured = configured;
+ schedule_work(&opts->work);
+ }
+ spin_unlock_irqrestore(&opts_lock, flags);
+}
+
+void android_set_connected(struct android_uevent_opts *opts)
+{
+ __android_set_connected(opts, true);
+}
+EXPORT_SYMBOL_GPL(android_set_connected);
+
+void android_set_disconnected(struct android_uevent_opts *opts)
+{
+ __android_set_connected(opts, false);
+}
+EXPORT_SYMBOL_GPL(android_set_disconnected);
+
+void android_set_configured(struct android_uevent_opts *opts)
+{
+ __android_set_configured(opts, true);
+}
+EXPORT_SYMBOL_GPL(android_set_configured);
+
+void android_set_unconfigured(struct android_uevent_opts *opts)
+{
+ __android_set_configured(opts, false);
+}
+EXPORT_SYMBOL_GPL(android_set_unconfigured);
+
+struct device *android_create_function_device(char *name, void *drvdata,
+ const struct attribute_group **groups)
+{
+ struct android_uevent_opts *opts;
+ struct device *dev;
+ unsigned long flags;
+ int id;
+
+ spin_lock_irqsave(&opts_lock, flags);
+ opts = android_opts;
+ if (IS_ERR_OR_NULL(opts) || IS_ERR_OR_NULL(opts->dev)) {
+ spin_unlock_irqrestore(&opts_lock, flags);
+ return ERR_PTR(-ENODEV);
+ }
+
+ id = ida_alloc(&opts->function_ida, GFP_ATOMIC);
+ if (id < 0) {
+ spin_unlock_irqrestore(&opts_lock, flags);
+ return ERR_PTR(id);
+ }
+ // device_create_with_groups can sleep, so we must unlock first
+ spin_unlock_irqrestore(&opts_lock, flags);
+ dev = device_create_with_groups(&android_usb_class, opts->dev,
+ MKDEV(0, id), drvdata, groups, name);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(android_create_function_device);
+
+void android_remove_function_device(struct device *dev)
+{
+ struct android_uevent_opts *opts;
+ unsigned long flags;
+
+ device_destroy(&android_usb_class, dev->devt);
+
+ spin_lock_irqsave(&opts_lock, flags);
+ opts = android_opts;
+ if (IS_ERR_OR_NULL(opts)) {
+ spin_unlock_irqrestore(&opts_lock, flags);
+ return;
+ }
+
+ ida_free(&opts->function_ida, MINOR(dev->devt));
+ spin_unlock_irqrestore(&opts_lock, flags);
+}
+EXPORT_SYMBOL_GPL(android_remove_function_device);
diff --git a/drivers/usb/gadget/android_configfs_uevent.h b/drivers/usb/gadget/android_configfs_uevent.h
new file mode 100644
index 0000000..6a56efd
--- /dev/null
+++ b/drivers/usb/gadget/android_configfs_uevent.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2024 Google LLC
+ */
+#ifndef __ANDROID_CONFIGFS_UEVENT_H
+#define __ANDROID_CONFIGFS_UEVENT_H
+
+#ifdef CONFIG_ANDROID_USB_CONFIGFS_UEVENT
+#include <linux/usb/android_configfs_uevent.h>
+
+/**
+ * android_class_create - essentially the __init() function for the
+ * configfs_uevent library, since it is not a standalone driver.
+ *
+ * Creates the android_usb class of device
+ *
+ * Returns: the result of class_register (0 for success, err otherwise)
+ */
+int android_class_create(void);
+
+/**
+ * android_class_destroy - essentially the __exit() function for the
+ * configfs_uevent library, since it is not a standalone driver.
+ *
+ * Removes the android_usb class of devices and performs any necessary
+ * cleanup.
+ */
+void android_class_destroy(void);
+
+/**
+ * android_device_create - Creates an android device instance and
+ * a state attribute file which can be read to determine the state of the
+ * usb gadget.
+ * @opts: contextual data for the configfs_uevent library.
+ *
+ * Note: the state file created by this function mimics the functionaltiy
+ * of the UDC driver and is likely redundant, but maintained for legacy
+ * support.
+ *
+ * The state can be one of "DISCONNECTED", "CONNECTED", or "CONFIGURED"
+ *
+ * Returns: 0 for success, or if an error is encountered during ida_allocation
+ * or device_creation, that error is returned.
+ */
+int android_device_create(struct android_uevent_opts *opts);
+
+/**
+ * android_device_destroy - Removes the android device instance and performs
+ * any necessary cleanup.
+ * @opts: contextual data for the configfs_uevent library.
+ */
+void android_device_destroy(struct android_uevent_opts *opts);
+
+/**
+ * android_set_connected - set the internal state of android_uevent_opts to
+ * connected and schedule the work to emit a uevent with this status update.
+ * @opts: contextual data for the configfs_uevent library
+ *
+ * This should be called by the gadget composite driver when a usb_ctrlrequest
+ * is received by the gadget driver.
+ *
+ * This function locks the android specific android_uevent_opts->lock and
+ * therefore should not require locking the containing composite device
+ * structure as the internal lock is also used in the teardown path of the
+ * composite driver in android_device_destroy().
+ */
+void android_set_connected(struct android_uevent_opts *opts);
+
+/**
+ * android_set_disconnected - reset the internal state of android_uevent_opts to
+ * disconnected and schedule the work to emit a uevent with this status update.
+ * @opts: contextual data for the configfs_uevent library
+ *
+ * This should be called by the gadget composite driver when the link is
+ * disconnected.
+ *
+ * This function locks the android specific android_uevent_opts->lock and
+ * therefore should not require locking the containing composite device
+ * structure as the internal lock is also used in the teardown path of the
+ * composite driver in android_device_destroy().
+ */
+void android_set_disconnected(struct android_uevent_opts *opts);
+
+/**
+ * android_set_configured - set the internal state of android_uevent_opts to
+ * configured and schedule the work to emit a uevent with this status update.
+ * @opts: contextual data for the configfs_uevent library
+ *
+ * This should be called by the gadget composite driver when the configuration
+ * is applied to the gadget composite device
+ *
+ * This function locks the android specific android_uevent_opts->lock and
+ * therefore should not require locking the containing composite device
+ * structure as the internal lock is also used in the teardown path of the
+ * composite driver in android_device_destroy().
+ */
+void android_set_configured(struct android_uevent_opts *opts);
+
+/**
+ * android_set_unconfigured - reset the internal state of android_uevent_opts to
+ * unconfigured and schedule the work to emit a uevent with this status update.
+ * @opts: contextual data for the configfs_uevent library
+ *
+ * This should be called by the gadget composite driver when the gadget
+ * configuration is torn down.
+ *
+ * This function locks the android specific android_uevent_opts->lock and
+ * therefore should not require locking the containing composite device
+ * structure as the internal lock is also used in the teardown path of the
+ * composite driver in android_device_destroy().
+ */
+void android_set_unconfigured(struct android_uevent_opts *opts);
+
+#else
+
+static inline int android_class_create(void)
+{
+ return 0;
+}
+
+static inline void android_class_destroy(void)
+{
+}
+
+static inline int android_device_create(struct android_uevent_opts *opts)
+{
+ return 0;
+}
+
+static inline void android_device_destroy(struct android_uevent_opts *opts)
+{
+}
+
+static inline void android_set_connected(struct android_uevent_opts *opts)
+{
+}
+
+static inline void android_set_disconnected(struct android_uevent_opts *opts)
+{
+}
+
+static inline void android_set_configured(struct android_uevent_opts *opts)
+{
+}
+
+static inline void android_set_unconfigured(struct android_uevent_opts *opts)
+{
+}
+#endif /* CONFIG_ANDROID_USB_CONFIGFS_UEVENT */
+#endif /* __ANDROID_CONFIGFS_UEVENT_H */
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index a902184..579700a 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -22,6 +22,7 @@
#include <linux/unaligned.h>
#include "u_os_desc.h"
+#include "android_configfs_uevent.h"
/**
* struct usb_os_string - represents OS String to be reported by a gadget
@@ -941,6 +942,7 @@ static void reset_config(struct usb_composite_dev *cdev)
bitmap_zero(f->endpoints, 32);
}
cdev->config = NULL;
+ android_set_unconfigured(&cdev->android_opts);
cdev->delayed_status = 0;
}
@@ -1765,6 +1767,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
struct usb_function *iter;
u8 endp;
+ android_set_connected(&cdev->android_opts);
+
if (w_length > USB_COMP_EP0_BUFSIZ) {
if (ctrl->bRequestType & USB_DIR_IN) {
/* Cast away the const, we are going to overwrite on purpose. */
@@ -1899,6 +1903,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
spin_lock(&cdev->lock);
value = set_config(cdev, ctrl, w_value);
spin_unlock(&cdev->lock);
+ android_set_configured(&cdev->android_opts);
break;
case USB_REQ_GET_CONFIGURATION:
if (ctrl->bRequestType != USB_DIR_IN)
@@ -2286,6 +2291,8 @@ static void __composite_disconnect(struct usb_gadget *gadget)
struct usb_composite_dev *cdev = get_gadget_data(gadget);
unsigned long flags;
+ android_set_disconnected(&cdev->android_opts);
+
/* REVISIT: should we have config and device level
* disconnect callbacks?
*/
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 183a25f..2689c94 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -11,6 +11,7 @@
#include <linux/usb/webusb.h>
#include "configfs.h"
#include "u_os_desc.h"
+#include "android_configfs_uevent.h"
static int check_user_usb_string(const char *name,
struct usb_gadget_strings *stringtab_dev)
@@ -286,7 +287,11 @@ static ssize_t gadget_dev_desc_UDC_store(struct config_item *item,
mutex_lock(&gi->lock);
- if (!strlen(name)) {
+ /*
+ * ANDROID: Not exactly sure why we need this "none", but worried it
+ * would break something if removed.
+ */
+ if (!strlen(name) || strcmp(name, "none") == 0) {
ret = unregister_gadget(gi);
if (ret)
goto err;
@@ -2046,10 +2051,16 @@ static struct config_group *gadgets_make(
if (!gi->composite.gadget_driver.function)
goto out_free_driver_name;
+ if (android_device_create(&gi->cdev.android_opts))
+ goto out_free_driver_name_and_function;
+
return &gi->group;
out_free_driver_name:
kfree(gi->composite.gadget_driver.driver.name);
+out_free_driver_name_and_function:
+ kfree(gi->composite.gadget_driver.driver.name);
+ kfree(gi->composite.gadget_driver.function);
err:
kfree(gi);
return ERR_PTR(-ENOMEM);
@@ -2057,6 +2068,10 @@ static struct config_group *gadgets_make(
static void gadgets_drop(struct config_group *group, struct config_item *item)
{
+ struct gadget_info *gi;
+
+ gi = container_of(to_config_group(item), struct gadget_info, group);
+ android_device_destroy(&gi->cdev.android_opts);
config_item_put(item);
}
@@ -2096,7 +2111,13 @@ static int __init gadget_cfs_init(void)
config_group_init(&gadget_subsys.su_group);
+ ret = android_class_create();
+ if (ret)
+ return ret;
+
ret = configfs_register_subsystem(&gadget_subsys);
+ if (ret)
+ android_class_destroy();
return ret;
}
module_init(gadget_cfs_init);
@@ -2104,5 +2125,6 @@ module_init(gadget_cfs_init);
static void __exit gadget_cfs_exit(void)
{
configfs_unregister_subsystem(&gadget_subsys);
+ android_class_destroy();
}
module_exit(gadget_cfs_exit);
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 002c3441..730b440 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -3741,13 +3741,13 @@ static int ffs_func_set_alt(struct usb_function *f,
unsigned long flags;
int ret = 0, intf;
- if (alt > MAX_ALT_SETTINGS)
- return -EINVAL;
-
intf = ffs_func_revmap_intf(func, interface);
if (intf < 0)
return intf;
+ if (alt > MAX_ALT_SETTINGS)
+ return -EINVAL;
+
if (ffs->func)
ffs_func_eps_disable(ffs->func);
diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig
index ce5bc0d..25fbd575 100644
--- a/drivers/virtio/Kconfig
+++ b/drivers/virtio/Kconfig
@@ -172,7 +172,7 @@
If unsure, say 'N'.
config VIRTIO_DMA_SHARED_BUFFER
- tristate
+ tristate "Virtio DMA shared buffer support"
depends on DMA_SHARED_BUFFER
help
This option adds a flavor of dma buffers that are backed by
diff --git a/drivers/virtio/TEST_MAPPING b/drivers/virtio/TEST_MAPPING
new file mode 100644
index 0000000..63f27d41
--- /dev/null
+++ b/drivers/virtio/TEST_MAPPING
@@ -0,0 +1,329 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.BluetoothCallQualityReportTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/fs/Kconfig b/fs/Kconfig
index 0bfdaeca..fb3d6cf 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -130,6 +130,7 @@
source "fs/autofs/Kconfig"
source "fs/fuse/Kconfig"
source "fs/overlayfs/Kconfig"
+source "fs/incfs/Kconfig"
menu "Caches"
diff --git a/fs/Makefile b/fs/Makefile
index cf4a745..00ab650 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -104,6 +104,7 @@
obj-$(CONFIG_FUSE_FS) += fuse/
obj-$(CONFIG_OVERLAY_FS) += overlayfs/
obj-$(CONFIG_ORANGEFS_FS) += orangefs/
+obj-$(CONFIG_INCREMENTAL_FS) += incfs/
obj-$(CONFIG_UDF_FS) += udf/
obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/
obj-$(CONFIG_OMFS_FS) += omfs/
diff --git a/fs/OWNERS b/fs/OWNERS
new file mode 100644
index 0000000..7780f6b
--- /dev/null
+++ b/fs/OWNERS
@@ -0,0 +1 @@
+per-file {crypto,verity}/**=ebiggers@google.com
diff --git a/fs/TEST_MAPPING b/fs/TEST_MAPPING
new file mode 100644
index 0000000..1261773
--- /dev/null
+++ b/fs/TEST_MAPPING
@@ -0,0 +1,352 @@
+{
+ "imports": [
+ {
+ "path": "frameworks/base/packages/PackageInstaller"
+ },
+ {
+ "path": "packages/modules/AdServices/sdksandbox"
+ },
+ {
+ "path": "frameworks/base/core/java/android/content"
+ },
+ {
+ "path": "system/core/fs_mgr"
+ },
+ {
+ "path": "test/vts-testcase/kernel/dynamic_partitions"
+ }
+
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsPackageInstallTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsWifiBroadcastsHostTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "CtsJobSchedulerTestCases",
+ "options": [
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testCellularConstraintExecutedAndStopped"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testConnectivityConstraintExecutes_transitionNetworks"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testConnectivityConstraintExecutes_withMobile"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testEJMeteredConstraintFails_withMobile_DataSaverOn"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testMeteredConstraintFails_withMobile_DataSaverOn"
+ }
+ ]
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsContentTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "libdm_test"
+ },
+ {
+ "name": "liblp_test"
+ },
+ {
+ "name": "vab_legacy_tests"
+ },
+ {
+ "name": "snapuserd_test"
+ },
+ {
+ "name": "KernelApiSysfsTest"
+ },
+ {
+ "name": "KernelDynamicPartitionsTest"
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index 4e8e82a..9431ff7 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -521,6 +521,9 @@ struct fscrypt_master_key_secret {
*/
bool is_hw_wrapped;
+ /* True if this key was added using __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED */
+ bool android_compat;
+
/*
* Size of the key in bytes. This remains set even if ->bytes was
* zeroized due to no longer being needed. I.e. we still remember the
diff --git a/fs/crypto/inline_crypt.c b/fs/crypto/inline_crypt.c
index c0852b9..42b974e 100644
--- a/fs/crypto/inline_crypt.c
+++ b/fs/crypto/inline_crypt.c
@@ -298,6 +298,8 @@ static void fscrypt_generate_dun(const struct fscrypt_inode_info *ci,
* otherwise fscrypt_mergeable_bio() won't work as intended.
*
* The encryption context will be freed automatically when the bio is freed.
+ *
+ * This function also handles setting bi_skip_dm_default_key when needed.
*/
void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
u64 first_lblk, gfp_t gfp_mask)
@@ -305,6 +307,9 @@ void fscrypt_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
const struct fscrypt_inode_info *ci;
u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE];
+ if (fscrypt_inode_should_skip_dm_default_key(inode))
+ bio_set_skip_dm_default_key(bio);
+
if (!fscrypt_inode_uses_inline_crypto(inode))
return;
ci = fscrypt_get_inode_info_raw(inode);
@@ -378,6 +383,9 @@ EXPORT_SYMBOL_GPL(fscrypt_set_bio_crypt_ctx_bh);
* another way, such as I/O targeting only a single file (and thus a single key)
* combined with fscrypt_limit_io_blocks() to ensure DUN contiguity.
*
+ * This function also returns false if the next part of the I/O would need to
+ * have a different value for the bi_skip_dm_default_key flag.
+ *
* Return: true iff the I/O is mergeable
*/
bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
@@ -389,6 +397,9 @@ bool fscrypt_mergeable_bio(struct bio *bio, const struct inode *inode,
if (!!bc != fscrypt_inode_uses_inline_crypto(inode))
return false;
+ if (bio_should_skip_dm_default_key(bio) !=
+ fscrypt_inode_should_skip_dm_default_key(inode))
+ return false;
if (!bc)
return true;
ci = fscrypt_get_inode_info_raw(inode);
@@ -423,7 +434,8 @@ bool fscrypt_mergeable_bio_bh(struct bio *bio,
u64 next_lblk;
if (!bh_get_inode_and_lblk_num(next_bh, &inode, &next_lblk))
- return !bio->bi_crypt_context;
+ return !bio->bi_crypt_context &&
+ !bio_should_skip_dm_default_key(bio);
return fscrypt_mergeable_bio(bio, inode, next_lblk);
}
diff --git a/fs/crypto/keyring.c b/fs/crypto/keyring.c
index 9ec6e5e..bb6f341 100644
--- a/fs/crypto/keyring.c
+++ b/fs/crypto/keyring.c
@@ -583,7 +583,8 @@ static int add_master_key(struct super_block *sb,
* different key identifiers by deriving their key
* identifiers using different KDF contexts.
*/
- keyid_kdf_ctx =
+ keyid_kdf_ctx = secret->android_compat ?
+ HKDF_CONTEXT_KEY_IDENTIFIER_FOR_RAW_KEY :
HKDF_CONTEXT_KEY_IDENTIFIER_FOR_HW_WRAPPED_KEY;
}
fscrypt_init_hkdf(&secret->hkdf, kdf_key, kdf_key_size);
@@ -777,6 +778,16 @@ int fscrypt_ioctl_add_key(struct file *filp, void __user *_uarg)
memset(&secret, 0, sizeof(secret));
+ if (arg.__flags) {
+ /* Support for the original Android flag */
+ if (arg.__flags & ~__FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED)
+ return -EINVAL; /* unknown flags */
+ if (arg.flags & FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED)
+ return -EINVAL; /* conflicting flags */
+ arg.flags |= FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED;
+ secret.android_compat = true;
+ }
+
if (arg.flags) {
if (arg.flags & ~FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED)
return -EINVAL;
diff --git a/fs/f2fs/OWNERS b/fs/f2fs/OWNERS
new file mode 100644
index 0000000..6a5c01163
--- /dev/null
+++ b/fs/f2fs/OWNERS
@@ -0,0 +1 @@
+jaegeuk@google.com
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 338df7a..a132570 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -528,6 +528,8 @@ static void f2fs_set_bio_crypt_ctx(struct bio *bio, const struct inode *inode,
*/
if (!fio || !fio->encrypted_page)
fscrypt_set_bio_crypt_ctx(bio, inode, first_idx, gfp_mask);
+ else if (fscrypt_inode_should_skip_dm_default_key(inode))
+ bio_set_skip_dm_default_key(bio);
}
static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
@@ -539,7 +541,9 @@ static bool f2fs_crypt_mergeable_bio(struct bio *bio, const struct inode *inode,
* read/write raw data without encryption.
*/
if (fio && fio->encrypted_page)
- return !bio_has_crypt_ctx(bio);
+ return !bio_has_crypt_ctx(bio) &&
+ (bio_should_skip_dm_default_key(bio) ==
+ fscrypt_inode_should_skip_dm_default_key(inode));
return fscrypt_mergeable_bio(bio, inode, next_idx);
}
diff --git a/fs/fuse/backing.c b/fs/fuse/backing.c
index d95dfa4..07db7f3 100644
--- a/fs/fuse/backing.c
+++ b/fs/fuse/backing.c
@@ -89,9 +89,13 @@ int fuse_backing_open(struct fuse_conn *fc, struct fuse_backing_map *map)
pr_debug("%s: fd=%d flags=0x%x\n", __func__, map->fd, map->flags);
/* TODO: relax CAP_SYS_ADMIN once backing files are visible to lsof */
+ /* Android already restricts access here, and we don't want to grant extra
+ * Permissions to the daemon */
+#if 0
res = -EPERM;
if (!fc->passthrough || !capable(CAP_SYS_ADMIN))
goto out;
+#endif
res = -EINVAL;
if (map->flags || map->padding)
@@ -145,9 +149,13 @@ int fuse_backing_close(struct fuse_conn *fc, int backing_id)
pr_debug("%s: backing_id=%d\n", __func__, backing_id);
/* TODO: relax CAP_SYS_ADMIN once backing files are visible to lsof */
+ /* Android already restricts access here, and we don't want to grant extra
+ * Permissions to the daemon */
+#if 0
err = -EPERM;
if (!fc->passthrough || !capable(CAP_SYS_ADMIN))
goto out;
+#endif
err = -EINVAL;
if (backing_id <= 0)
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 0b0241f..51a055d 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -16,6 +16,7 @@
#include <linux/sched/signal.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
+#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
@@ -2253,6 +2254,14 @@ static ssize_t fuse_dev_do_write(struct fuse_dev *fud,
err = fuse_copy_out_args(cs, req->args, nbytes);
fuse_copy_finish(cs);
+ if (!err && req->in.h.opcode == FUSE_CANONICAL_PATH) {
+ char *path = (char *)req->args->out_args[0].value;
+
+ path[req->args->out_args[0].size - 1] = 0;
+ req->out.h.error =
+ kern_path(path, 0, req->args->canonical_path);
+ }
+
spin_lock(&fpq->lock);
clear_bit(FR_LOCKED, &req->flags);
if (!fpq->connected)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 7ac6b23..c69dc92 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -523,12 +523,52 @@ static struct vfsmount *fuse_dentry_automount(struct path *path)
return mnt;
}
+/*
+ * Get the canonical path. Since we must translate to a path, this must be done
+ * in the context of the userspace daemon, however, the userspace daemon cannot
+ * look up paths on its own. Instead, we handle the lookup as a special case
+ * inside of the write request.
+ */
+static void fuse_dentry_canonical_path(const struct path *path,
+ struct path *canonical_path)
+{
+ struct inode *inode = d_inode(path->dentry);
+ //struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_mount *fm = get_fuse_mount_super(path->mnt->mnt_sb);
+ FUSE_ARGS(args);
+ char *path_name;
+ int err;
+
+ path_name = (char *)get_zeroed_page(GFP_KERNEL);
+ if (!path_name)
+ goto default_path;
+
+ args.opcode = FUSE_CANONICAL_PATH;
+ args.nodeid = get_node_id(inode);
+ args.in_numargs = 0;
+ args.out_numargs = 1;
+ args.out_args[0].size = PATH_MAX;
+ args.out_args[0].value = path_name;
+ args.canonical_path = canonical_path;
+ args.out_argvar = 1;
+
+ err = fuse_simple_request(fm, &args);
+ free_page((unsigned long)path_name);
+ if (err > 0)
+ return;
+default_path:
+ canonical_path->dentry = path->dentry;
+ canonical_path->mnt = path->mnt;
+ path_get(canonical_path);
+}
+
const struct dentry_operations fuse_dentry_operations = {
.d_revalidate = fuse_dentry_revalidate,
.d_delete = fuse_dentry_delete,
.d_init = fuse_dentry_init,
.d_release = fuse_dentry_release,
.d_automount = fuse_dentry_automount,
+ .d_canonical_path = fuse_dentry_canonical_path,
};
int fuse_valid_type(int m)
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index b1bb715..524b25e 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -2383,8 +2383,16 @@ static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
*/
if (fuse_file_passthrough(ff))
return fuse_passthrough_mmap(file, vma);
+ /*
+ * Old Android passthrough did not handle this case, but did allow the mmap to continue.
+ * This will not cleanly handle the case of a shared mmap across passthrough and
+ * nonpassthrough at the same time, although shared mmap through cache and file io through
+ * the lower filesystem should work as expected, at a performance penalty.
+ */
+#if 0
else if (fuse_inode_backing(get_fuse_inode(inode)))
return -ENODEV;
+#endif
/*
* FOPEN_DIRECT_IO handling is special compared to O_DIRECT,
@@ -3210,6 +3218,7 @@ void fuse_init_file_inode(struct inode *inode, unsigned int flags)
INIT_LIST_HEAD(&fi->queued_writes);
fi->writectr = 0;
fi->iocachectr = 0;
+ fi->iopassctr = 0;
init_waitqueue_head(&fi->page_waitq);
init_waitqueue_head(&fi->direct_io_waitq);
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 7f16049..f0b6c3a 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -163,6 +163,9 @@ struct fuse_inode {
/** Number of files/maps using page cache */
int iocachectr;
+ /* Number of files using passthrough */
+ int iopassctr;
+
/* Waitq for writepage completion */
wait_queue_head_t page_waitq;
@@ -296,7 +299,7 @@ struct fuse_file {
wait_queue_head_t poll_wait;
/** Does file hold a fi->iocachectr refcount? */
- enum { IOM_NONE, IOM_CACHED, IOM_UNCACHED } iomode;
+ enum { IOM_NONE, IOM_CACHED, IOM_UNCACHED, IOM_PASSTHROUGH } iomode;
#ifdef CONFIG_FUSE_PASSTHROUGH
/** Reference to backing file in passthrough mode */
@@ -348,8 +351,12 @@ struct fuse_args {
struct fuse_in_arg in_args[4];
struct fuse_arg out_args[2];
void (*end)(struct fuse_mount *fm, struct fuse_args *args, int error);
+
/* Used for kvec iter backed by vmalloc address */
void *vmap_base;
+
+ /* Path used for completing d_canonical_path */
+ struct path *canonical_path;
};
struct fuse_args_pages {
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index e57b8af..5227e77a 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -193,6 +193,7 @@ static void fuse_evict_inode(struct inode *inode)
}
if (S_ISREG(inode->i_mode) && !fuse_is_bad(inode)) {
WARN_ON(fi->iocachectr != 0);
+ WARN_ON(fi->iopassctr != 0);
WARN_ON(!list_empty(&fi->write_files));
WARN_ON(!list_empty(&fi->queued_writes));
}
@@ -1429,16 +1430,11 @@ static void process_init_reply(struct fuse_mount *fm, struct fuse_args *args,
* on a stacked fs (e.g. overlayfs) themselves and with
* max_stack_depth == 1, FUSE fs can be stacked as the
* underlying fs of a stacked fs (e.g. overlayfs).
- *
- * Also don't allow the combination of FUSE_PASSTHROUGH
- * and FUSE_WRITEBACK_CACHE, current design doesn't handle
- * them together.
*/
if (IS_ENABLED(CONFIG_FUSE_PASSTHROUGH) &&
(flags & FUSE_PASSTHROUGH) &&
arg->max_stack_depth > 0 &&
- arg->max_stack_depth <= FILESYSTEM_MAX_STACK_DEPTH &&
- !(flags & FUSE_WRITEBACK_CACHE)) {
+ arg->max_stack_depth <= FILESYSTEM_MAX_STACK_DEPTH) {
fc->passthrough = 1;
fc->max_stack_depth = arg->max_stack_depth;
fm->sb->s_stack_depth = arg->max_stack_depth;
@@ -2234,6 +2230,34 @@ static void fuse_fs_cleanup(void)
static struct kobject *fuse_kobj;
+#ifdef CONFIG_FUSE_PASSTHROUGH
+static ssize_t fuse_passthrough_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buff)
+{
+ return sysfs_emit(buff, "supported\n");
+}
+
+static struct kobj_attribute fuse_passthrough_attr =
+ __ATTR_RO(fuse_passthrough);
+#endif
+
+static struct attribute *fuse_features[] = {
+#ifdef CONFIG_FUSE_PASSTHROUGH
+ &fuse_passthrough_attr.attr,
+#endif
+ NULL,
+};
+
+static const struct attribute_group fuse_features_group = {
+ .name = "features",
+ .attrs = fuse_features,
+};
+
+static const struct attribute_group *attribute_groups[] = {
+ &fuse_features_group,
+ NULL
+};
+
static int fuse_sysfs_init(void)
{
int err;
@@ -2248,8 +2272,13 @@ static int fuse_sysfs_init(void)
if (err)
goto out_fuse_unregister;
+ err = sysfs_create_groups(fuse_kobj, attribute_groups);
+ if (err)
+ goto out_fuse_remove_mount_point;
return 0;
+out_fuse_remove_mount_point:
+ sysfs_remove_mount_point(fuse_kobj, "connections");
out_fuse_unregister:
kobject_put(fuse_kobj);
out_err:
@@ -2258,6 +2287,7 @@ static int fuse_sysfs_init(void)
static void fuse_sysfs_cleanup(void)
{
+ sysfs_remove_groups(fuse_kobj, attribute_groups);
sysfs_remove_mount_point(fuse_kobj, "connections");
kobject_put(fuse_kobj);
}
diff --git a/fs/fuse/iomode.c b/fs/fuse/iomode.c
index 3728933..321562e 100644
--- a/fs/fuse/iomode.c
+++ b/fs/fuse/iomode.c
@@ -51,13 +51,18 @@ int fuse_file_cached_io_open(struct inode *inode, struct fuse_file *ff)
* Check if inode entered passthrough io mode while waiting for parallel
* dio write completion.
*/
+ /* Android's use case requires opening files in both passthrough and non
+ * passthrough modes */
+#if 0
if (fuse_inode_backing(fi)) {
clear_bit(FUSE_I_CACHE_IO_MODE, &fi->state);
spin_unlock(&fi->lock);
return -ETXTBSY;
}
+#endif
WARN_ON(ff->iomode == IOM_UNCACHED);
+ WARN_ON(ff->iomode == IOM_PASSTHROUGH);
if (ff->iomode == IOM_NONE) {
ff->iomode = IOM_CACHED;
if (fi->iocachectr == 0)
@@ -81,7 +86,7 @@ static void fuse_file_cached_io_release(struct fuse_file *ff,
spin_unlock(&fi->lock);
}
-/* Start strictly uncached io mode where cache access is not allowed */
+/* Start strictly uncached io mode where cache access is not allowed if not in passthrough mode */
int fuse_inode_uncached_io_start(struct fuse_inode *fi, struct fuse_backing *fb)
{
struct fuse_backing *oldfb;
@@ -94,11 +99,14 @@ int fuse_inode_uncached_io_start(struct fuse_inode *fi, struct fuse_backing *fb)
err = -EBUSY;
goto unlock;
}
- if (fi->iocachectr > 0) {
+ if (!fb && fi->iocachectr > 0) {
err = -ETXTBSY;
goto unlock;
}
- fi->iocachectr--;
+ if (fb)
+ fi->iopassctr++;
+ else
+ fi->iocachectr--;
/* fuse inode holds a single refcount of backing file */
if (fb && !oldfb) {
@@ -125,7 +133,7 @@ static int fuse_file_uncached_io_open(struct inode *inode,
return err;
WARN_ON(ff->iomode != IOM_NONE);
- ff->iomode = IOM_UNCACHED;
+ ff->iomode = IOM_PASSTHROUGH;
return 0;
}
@@ -154,6 +162,30 @@ static void fuse_file_uncached_io_release(struct fuse_file *ff,
fuse_inode_uncached_io_end(fi);
}
+static void fuse_inode_passthrough_io_end(struct fuse_inode *fi)
+{
+ struct fuse_backing *oldfb = NULL;
+
+ spin_lock(&fi->lock);
+ WARN_ON(fi->iopassctr == 0);
+ fi->iopassctr--;
+ if (!fi->iopassctr) {
+ oldfb = fuse_inode_backing_set(fi, NULL);
+ }
+ spin_unlock(&fi->lock);
+ if (oldfb)
+ fuse_backing_put(oldfb);
+}
+
+/* Drop uncached_io reference from passthrough open */
+static void fuse_file_passthrough_io_release(struct fuse_file *ff,
+ struct fuse_inode *fi)
+{
+ WARN_ON(ff->iomode != IOM_PASSTHROUGH);
+ ff->iomode = IOM_NONE;
+ fuse_inode_passthrough_io_end(fi);
+}
+
/*
* Open flags that are allowed in combination with FOPEN_PASSTHROUGH.
* A combination of FOPEN_PASSTHROUGH and FOPEN_DIRECT_IO means that read/write
@@ -163,7 +195,7 @@ static void fuse_file_uncached_io_release(struct fuse_file *ff,
*/
#define FOPEN_PASSTHROUGH_MASK \
(FOPEN_PASSTHROUGH | FOPEN_DIRECT_IO | FOPEN_PARALLEL_DIRECT_WRITES | \
- FOPEN_NOFLUSH)
+ FOPEN_NOFLUSH | FOPEN_KEEP_CACHE)
static int fuse_file_passthrough_open(struct inode *inode, struct file *file)
{
@@ -197,6 +229,7 @@ int fuse_file_io_open(struct file *file, struct inode *inode)
{
struct fuse_file *ff = file->private_data;
struct fuse_inode *fi = get_fuse_inode(inode);
+ struct fuse_conn *fc = get_fuse_conn(inode);
int err;
/*
@@ -211,7 +244,7 @@ int fuse_file_io_open(struct file *file, struct inode *inode)
* which is already open for passthrough.
*/
err = -EINVAL;
- if (fuse_inode_backing(fi) && !(ff->open_flags & FOPEN_PASSTHROUGH))
+ if (fuse_inode_backing(fi) && !(ff->open_flags & FOPEN_PASSTHROUGH) && !fc->writeback_cache)
goto fail;
/*
@@ -271,5 +304,8 @@ void fuse_file_io_release(struct fuse_file *ff, struct inode *inode)
case IOM_CACHED:
fuse_file_cached_io_release(ff, fi);
break;
+ case IOM_PASSTHROUGH:
+ fuse_file_passthrough_io_release(ff, fi);
+ break;
}
}
diff --git a/fs/fuse/passthrough.c b/fs/fuse/passthrough.c
index 72de97c..66db4f4 100644
--- a/fs/fuse/passthrough.c
+++ b/fs/fuse/passthrough.c
@@ -10,6 +10,7 @@
#include <linux/file.h>
#include <linux/backing-file.h>
#include <linux/splice.h>
+#include <linux/pagemap.h>
static void fuse_file_accessed(struct file *file)
{
@@ -21,8 +22,23 @@ static void fuse_file_accessed(struct file *file)
static void fuse_passthrough_end_write(struct kiocb *iocb, ssize_t ret)
{
struct inode *inode = file_inode(iocb->ki_filp);
+ struct fuse_conn *fc = get_fuse_conn(inode);
+ struct fuse_file *ff = iocb->ki_filp->private_data;
+ struct file *backing_file = fuse_file_passthrough(ff);
+ struct inode *backing_inode = file_inode(backing_file);
- fuse_write_update_attr(inode, iocb->ki_pos, ret);
+ if (!fc->writeback_cache) {
+ fuse_write_update_attr(inode, iocb->ki_pos, ret);
+ } else {
+ inode_set_mtime_to_ts(inode, inode_get_mtime(backing_inode));
+ inode_set_ctime_to_ts(inode, inode_get_ctime(backing_inode));
+ inode->i_blocks = backing_inode->i_blocks;
+ i_size_write(inode, i_size_read(backing_inode));
+ }
+ if (ret > 0) {
+ invalidate_inode_pages2_range(inode->i_mapping,
+ (iocb->ki_pos - ret) >> PAGE_SHIFT, iocb->ki_pos >> PAGE_SHIFT);
+ }
}
ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *iter)
@@ -44,6 +60,8 @@ ssize_t fuse_passthrough_read_iter(struct kiocb *iocb, struct iov_iter *iter)
if (!count)
return 0;
+ /* Flush any dirtied cache pages from fuse cache */
+ write_inode_now(file_inode(file), 1);
ret = backing_file_read_iter(backing_file, iter, iocb, iocb->ki_flags,
&ctx);
@@ -94,6 +112,9 @@ ssize_t fuse_passthrough_splice_read(struct file *in, loff_t *ppos,
pr_debug("%s: backing_file=0x%p, pos=%lld, len=%zu, flags=0x%x\n", __func__,
backing_file, *ppos, len, flags);
+ /* Flush any dirtied cache pages from fuse cache */
+ write_inode_now(file_inode(in), 1);
+
init_sync_kiocb(&iocb, in);
iocb.ki_pos = *ppos;
ret = backing_file_splice_read(backing_file, &iocb, pipe, len, flags, &ctx);
diff --git a/fs/incfs/Kconfig b/fs/incfs/Kconfig
new file mode 100644
index 0000000..d4ee0ccd
--- /dev/null
+++ b/fs/incfs/Kconfig
@@ -0,0 +1,15 @@
+config INCREMENTAL_FS
+ tristate "Incremental file system support"
+ depends on BLOCK
+ # incfs does not verify fsverity builtin signatures.
+ depends on !CONFIG_FS_VERITY_BUILTIN_SIGNATURES
+ select DECOMPRESS_LZ4
+ select DECOMPRESS_ZSTD
+ select CRYPTO_LIB_SHA256
+ help
+ Incremental FS is a read-only virtual file system that facilitates execution
+ of programs while their binaries are still being lazily downloaded over the
+ network, USB or pigeon post.
+
+ To compile this file system support as a module, choose M here: the
+ module will be called incrementalfs.
diff --git a/fs/incfs/Makefile b/fs/incfs/Makefile
new file mode 100644
index 0000000..05795d1
--- /dev/null
+++ b/fs/incfs/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_INCREMENTAL_FS) += incrementalfs.o
+
+incrementalfs-y := \
+ data_mgmt.o \
+ format.o \
+ integrity.o \
+ main.o \
+ pseudo_files.o \
+ sysfs.o \
+ vfs.o
+
+incrementalfs-$(CONFIG_FS_VERITY) += verity.o
diff --git a/fs/incfs/OWNERS b/fs/incfs/OWNERS
new file mode 100644
index 0000000..1b97669
--- /dev/null
+++ b/fs/incfs/OWNERS
@@ -0,0 +1,2 @@
+akailash@google.com
+paullawrence@google.com
diff --git a/fs/incfs/data_mgmt.c b/fs/incfs/data_mgmt.c
new file mode 100644
index 0000000..ee8c734
--- /dev/null
+++ b/fs/incfs/data_mgmt.c
@@ -0,0 +1,1891 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Google LLC
+ */
+#include <linux/crc32.h>
+#include <linux/file.h>
+#include <linux/fsverity.h>
+#include <linux/gfp.h>
+#include <linux/hex.h>
+#include <linux/kobject.h>
+#include <linux/ktime.h>
+#include <linux/lz4.h>
+#include <linux/mm.h>
+#include <linux/namei.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#include "data_mgmt.h"
+#include "format.h"
+#include "integrity.h"
+#include "sysfs.h"
+#include "verity.h"
+
+static int incfs_scan_metadata_chain(struct data_file *df);
+
+static void log_wake_up_all(struct work_struct *work)
+{
+ struct delayed_work *dw = container_of(work, struct delayed_work, work);
+ struct read_log *rl = container_of(dw, struct read_log, ml_wakeup_work);
+ wake_up_all(&rl->ml_notif_wq);
+}
+
+static void zstd_free_workspace(struct work_struct *work)
+{
+ struct delayed_work *dw = container_of(work, struct delayed_work, work);
+ struct mount_info *mi =
+ container_of(dw, struct mount_info, mi_zstd_cleanup_work);
+
+ mutex_lock(&mi->mi_zstd_workspace_mutex);
+ kvfree(mi->mi_zstd_workspace);
+ mi->mi_zstd_workspace = NULL;
+ mi->mi_zstd_stream = NULL;
+ mutex_unlock(&mi->mi_zstd_workspace_mutex);
+}
+
+struct mount_info *incfs_alloc_mount_info(struct super_block *sb,
+ struct mount_options *options,
+ struct path *backing_dir_path)
+{
+ struct mount_info *mi = NULL;
+ int error = 0;
+ struct incfs_sysfs_node *node;
+
+ mi = kzalloc(sizeof(*mi), GFP_NOFS);
+ if (!mi)
+ return ERR_PTR(-ENOMEM);
+
+ mi->mi_sb = sb;
+ mi->mi_backing_dir_path = *backing_dir_path;
+ mi->mi_owner = get_current_cred();
+ path_get(&mi->mi_backing_dir_path);
+ mutex_init(&mi->mi_dir_struct_mutex);
+ init_waitqueue_head(&mi->mi_pending_reads_notif_wq);
+ init_waitqueue_head(&mi->mi_log.ml_notif_wq);
+ init_waitqueue_head(&mi->mi_blocks_written_notif_wq);
+ atomic_set(&mi->mi_blocks_written, 0);
+ INIT_DELAYED_WORK(&mi->mi_log.ml_wakeup_work, log_wake_up_all);
+ spin_lock_init(&mi->mi_log.rl_lock);
+ spin_lock_init(&mi->pending_read_lock);
+ INIT_LIST_HEAD(&mi->mi_reads_list_head);
+ spin_lock_init(&mi->mi_per_uid_read_timeouts_lock);
+ mutex_init(&mi->mi_zstd_workspace_mutex);
+ INIT_DELAYED_WORK(&mi->mi_zstd_cleanup_work, zstd_free_workspace);
+ mutex_init(&mi->mi_le_mutex);
+
+ node = incfs_add_sysfs_node(options->sysfs_name, mi);
+ if (IS_ERR(node)) {
+ error = PTR_ERR(node);
+ goto err;
+ }
+ mi->mi_sysfs_node = node;
+
+ error = incfs_realloc_mount_info(mi, options);
+ if (error)
+ goto err;
+
+ return mi;
+
+err:
+ incfs_free_mount_info(mi);
+ return ERR_PTR(error);
+}
+
+int incfs_realloc_mount_info(struct mount_info *mi,
+ struct mount_options *options)
+{
+ void *new_buffer = NULL;
+ void *old_buffer;
+ size_t new_buffer_size = 0;
+
+ if (options->read_log_pages != mi->mi_options.read_log_pages) {
+ struct read_log_state log_state;
+ /*
+ * Even though having two buffers allocated at once isn't
+ * usually good, allocating a multipage buffer under a spinlock
+ * is even worse, so let's optimize for the shorter lock
+ * duration. It's not end of the world if we fail to increase
+ * the buffer size anyway.
+ */
+ if (options->read_log_pages > 0) {
+ new_buffer_size = PAGE_SIZE * options->read_log_pages;
+ new_buffer = kzalloc(new_buffer_size, GFP_NOFS);
+ if (!new_buffer)
+ return -ENOMEM;
+ }
+
+ spin_lock(&mi->mi_log.rl_lock);
+ old_buffer = mi->mi_log.rl_ring_buf;
+ mi->mi_log.rl_ring_buf = new_buffer;
+ mi->mi_log.rl_size = new_buffer_size;
+ log_state = (struct read_log_state){
+ .generation_id = mi->mi_log.rl_head.generation_id + 1,
+ };
+ mi->mi_log.rl_head = log_state;
+ mi->mi_log.rl_tail = log_state;
+ spin_unlock(&mi->mi_log.rl_lock);
+
+ kfree(old_buffer);
+ }
+
+ if (options->sysfs_name && !mi->mi_sysfs_node)
+ mi->mi_sysfs_node = incfs_add_sysfs_node(options->sysfs_name,
+ mi);
+ else if (!options->sysfs_name && mi->mi_sysfs_node) {
+ incfs_free_sysfs_node(mi->mi_sysfs_node);
+ mi->mi_sysfs_node = NULL;
+ } else if (options->sysfs_name &&
+ strcmp(options->sysfs_name,
+ kobject_name(&mi->mi_sysfs_node->isn_sysfs_node))) {
+ incfs_free_sysfs_node(mi->mi_sysfs_node);
+ mi->mi_sysfs_node = incfs_add_sysfs_node(options->sysfs_name,
+ mi);
+ }
+
+ if (IS_ERR(mi->mi_sysfs_node)) {
+ int err = PTR_ERR(mi->mi_sysfs_node);
+
+ mi->mi_sysfs_node = NULL;
+ return err;
+ }
+
+ mi->mi_options = *options;
+ return 0;
+}
+
+void incfs_free_mount_info(struct mount_info *mi)
+{
+ int i;
+ if (!mi)
+ return;
+
+ flush_delayed_work(&mi->mi_log.ml_wakeup_work);
+ flush_delayed_work(&mi->mi_zstd_cleanup_work);
+
+ dput(mi->mi_index_dir);
+ dput(mi->mi_incomplete_dir);
+ path_put(&mi->mi_backing_dir_path);
+ mutex_destroy(&mi->mi_dir_struct_mutex);
+ mutex_destroy(&mi->mi_zstd_workspace_mutex);
+ put_cred(mi->mi_owner);
+ kfree(mi->mi_log.rl_ring_buf);
+ for (i = 0; i < ARRAY_SIZE(mi->pseudo_file_xattr); ++i)
+ kfree(mi->pseudo_file_xattr[i].data);
+ kfree(mi->mi_per_uid_read_timeouts);
+ incfs_free_sysfs_node(mi->mi_sysfs_node);
+ kfree(mi);
+}
+
+static void data_file_segment_init(struct data_file_segment *segment)
+{
+ init_waitqueue_head(&segment->new_data_arrival_wq);
+ init_rwsem(&segment->rwsem);
+ INIT_LIST_HEAD(&segment->reads_list_head);
+}
+
+char *file_id_to_str(incfs_uuid_t id)
+{
+ char *result = kmalloc(1 + sizeof(id.bytes) * 2, GFP_NOFS);
+ char *end;
+
+ if (!result)
+ return NULL;
+
+ end = bin2hex(result, id.bytes, sizeof(id.bytes));
+ *end = 0;
+ return result;
+}
+
+struct dentry *incfs_lookup_dentry(struct dentry *parent, const char *name)
+{
+ struct inode *inode;
+ struct dentry *result = NULL;
+
+ if (!parent)
+ return ERR_PTR(-EFAULT);
+
+ inode = d_inode(parent);
+ inode_lock_nested(inode, I_MUTEX_PARENT);
+ result = lookup_noperm(&QSTR(name), parent);
+ inode_unlock(inode);
+
+ if (IS_ERR(result))
+ pr_warn("%s err:%ld\n", __func__, PTR_ERR(result));
+
+ return result;
+}
+
+static struct data_file *handle_mapped_file(struct mount_info *mi,
+ struct data_file *df)
+{
+ char *file_id_str;
+ struct dentry *index_file_dentry;
+ struct path path;
+ struct file *bf;
+ struct data_file *result = NULL;
+ const struct cred *old_cred;
+
+ file_id_str = file_id_to_str(df->df_id);
+ if (!file_id_str)
+ return ERR_PTR(-ENOENT);
+
+ index_file_dentry = incfs_lookup_dentry(mi->mi_index_dir,
+ file_id_str);
+ kfree(file_id_str);
+ if (!index_file_dentry)
+ return ERR_PTR(-ENOENT);
+ if (IS_ERR(index_file_dentry))
+ return ERR_CAST(index_file_dentry);
+ if (!d_really_is_positive(index_file_dentry)) {
+ result = ERR_PTR(-ENOENT);
+ goto out;
+ }
+
+ path = (struct path) {
+ .mnt = mi->mi_backing_dir_path.mnt,
+ .dentry = index_file_dentry
+ };
+
+ old_cred = override_creds(mi->mi_owner);
+ bf = dentry_open(&path, O_RDWR | O_NOATIME | O_LARGEFILE,
+ current_cred());
+ revert_creds(old_cred);
+
+ if (IS_ERR(bf)) {
+ result = ERR_CAST(bf);
+ goto out;
+ }
+
+ result = incfs_open_data_file(mi, bf);
+ fput(bf);
+ if (IS_ERR(result))
+ goto out;
+
+ result->df_mapped_offset = df->df_metadata_off;
+
+out:
+ dput(index_file_dentry);
+ return result;
+}
+
+struct data_file *incfs_open_data_file(struct mount_info *mi, struct file *bf)
+{
+ struct data_file *df = NULL;
+ struct backing_file_context *bfc = NULL;
+ int md_records;
+ u64 size;
+ int error = 0;
+ int i;
+
+ if (!bf || !mi)
+ return ERR_PTR(-EFAULT);
+
+ if (!S_ISREG(bf->f_inode->i_mode))
+ return ERR_PTR(-EBADF);
+
+ bfc = incfs_alloc_bfc(mi, bf);
+ if (IS_ERR(bfc))
+ return ERR_CAST(bfc);
+
+ df = kzalloc(sizeof(*df), GFP_NOFS);
+ if (!df) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ mutex_init(&df->df_enable_verity);
+
+ df->df_backing_file_context = bfc;
+ df->df_mount_info = mi;
+ for (i = 0; i < ARRAY_SIZE(df->df_segments); i++)
+ data_file_segment_init(&df->df_segments[i]);
+
+ error = incfs_read_file_header(bfc, &df->df_metadata_off, &df->df_id,
+ &size, &df->df_header_flags);
+
+ if (error)
+ goto out;
+
+ df->df_size = size;
+ if (size > 0)
+ df->df_data_block_count = get_blocks_count_for_size(size);
+
+ if (df->df_header_flags & INCFS_FILE_MAPPED) {
+ struct data_file *mapped_df = handle_mapped_file(mi, df);
+
+ incfs_free_data_file(df);
+ return mapped_df;
+ }
+
+ md_records = incfs_scan_metadata_chain(df);
+ if (md_records < 0)
+ error = md_records;
+
+out:
+ if (error) {
+ incfs_free_bfc(bfc);
+ if (df)
+ df->df_backing_file_context = NULL;
+ incfs_free_data_file(df);
+ return ERR_PTR(error);
+ }
+ return df;
+}
+
+void incfs_free_data_file(struct data_file *df)
+{
+ u32 data_blocks_written, hash_blocks_written;
+
+ if (!df)
+ return;
+
+ data_blocks_written = atomic_read(&df->df_data_blocks_written);
+ hash_blocks_written = atomic_read(&df->df_hash_blocks_written);
+
+ if (data_blocks_written != df->df_initial_data_blocks_written ||
+ hash_blocks_written != df->df_initial_hash_blocks_written) {
+ struct backing_file_context *bfc = df->df_backing_file_context;
+ int error = -1;
+
+ if (bfc && !mutex_lock_interruptible(&bfc->bc_mutex)) {
+ error = incfs_write_status_to_backing_file(
+ df->df_backing_file_context,
+ df->df_status_offset,
+ data_blocks_written,
+ hash_blocks_written);
+ mutex_unlock(&bfc->bc_mutex);
+ }
+
+ if (error)
+ /* Nothing can be done, just warn */
+ pr_warn("incfs: failed to write status to backing file\n");
+ }
+
+ incfs_free_mtree(df->df_hash_tree);
+ incfs_free_bfc(df->df_backing_file_context);
+ kfree(df->df_signature);
+ kfree(df->df_verity_file_digest.data);
+ kfree(df->df_verity_signature);
+ mutex_destroy(&df->df_enable_verity);
+ kfree(df);
+}
+
+int make_inode_ready_for_data_ops(struct mount_info *mi,
+ struct inode *inode,
+ struct file *backing_file)
+{
+ struct inode_info *node = get_incfs_node(inode);
+ struct data_file *df = NULL;
+ int err = 0;
+
+ inode_lock(inode);
+ if (S_ISREG(inode->i_mode)) {
+ if (!node->n_file) {
+ df = incfs_open_data_file(mi, backing_file);
+
+ if (IS_ERR(df))
+ err = PTR_ERR(df);
+ else
+ node->n_file = df;
+ }
+ } else
+ err = -EBADF;
+ inode_unlock(inode);
+ return err;
+}
+
+struct dir_file *incfs_open_dir_file(struct mount_info *mi, struct file *bf)
+{
+ struct dir_file *dir = NULL;
+
+ if (!S_ISDIR(bf->f_inode->i_mode))
+ return ERR_PTR(-EBADF);
+
+ dir = kzalloc(sizeof(*dir), GFP_NOFS);
+ if (!dir)
+ return ERR_PTR(-ENOMEM);
+
+ dir->backing_dir = get_file(bf);
+ dir->mount_info = mi;
+ return dir;
+}
+
+void incfs_free_dir_file(struct dir_file *dir)
+{
+ if (!dir)
+ return;
+ if (dir->backing_dir)
+ fput(dir->backing_dir);
+ kfree(dir);
+}
+
+static ssize_t zstd_decompress_safe(struct mount_info *mi,
+ struct mem_range src, struct mem_range dst)
+{
+ ssize_t result;
+ ZSTD_inBuffer inbuf = {.src = src.data, .size = src.len};
+ ZSTD_outBuffer outbuf = {.dst = dst.data, .size = dst.len};
+
+ result = mutex_lock_interruptible(&mi->mi_zstd_workspace_mutex);
+ if (result)
+ return result;
+
+ if (!mi->mi_zstd_stream) {
+ unsigned int workspace_size = zstd_dstream_workspace_bound(
+ INCFS_DATA_FILE_BLOCK_SIZE);
+ void *workspace = kvmalloc(workspace_size, GFP_NOFS);
+ ZSTD_DStream *stream;
+
+ if (!workspace) {
+ result = -ENOMEM;
+ goto out;
+ }
+
+ stream = zstd_init_dstream(INCFS_DATA_FILE_BLOCK_SIZE, workspace,
+ workspace_size);
+ if (!stream) {
+ kvfree(workspace);
+ result = -EIO;
+ goto out;
+ }
+
+ mi->mi_zstd_workspace = workspace;
+ mi->mi_zstd_stream = stream;
+ }
+
+ result = zstd_decompress_stream(mi->mi_zstd_stream, &outbuf, &inbuf) ?
+ -EBADMSG : outbuf.pos;
+
+ mod_delayed_work(system_wq, &mi->mi_zstd_cleanup_work,
+ msecs_to_jiffies(5000));
+
+out:
+ mutex_unlock(&mi->mi_zstd_workspace_mutex);
+ return result;
+}
+
+static ssize_t decompress(struct mount_info *mi,
+ struct mem_range src, struct mem_range dst, int alg)
+{
+ int result;
+
+ switch (alg) {
+ case INCFS_BLOCK_COMPRESSED_LZ4:
+ result = LZ4_decompress_safe(src.data, dst.data, src.len,
+ dst.len);
+ if (result < 0)
+ return -EBADMSG;
+ return result;
+
+ case INCFS_BLOCK_COMPRESSED_ZSTD:
+ return zstd_decompress_safe(mi, src, dst);
+
+ default:
+ WARN_ON(true);
+ return -EOPNOTSUPP;
+ }
+}
+
+static void log_read_one_record(struct read_log *rl, struct read_log_state *rs)
+{
+ union log_record *record =
+ (union log_record *)((u8 *)rl->rl_ring_buf + rs->next_offset);
+ size_t record_size;
+
+ switch (record->full_record.type) {
+ case FULL:
+ rs->base_record = record->full_record;
+ record_size = sizeof(record->full_record);
+ break;
+
+ case SAME_FILE:
+ rs->base_record.block_index =
+ record->same_file.block_index;
+ rs->base_record.absolute_ts_us +=
+ record->same_file.relative_ts_us;
+ rs->base_record.uid = record->same_file.uid;
+ record_size = sizeof(record->same_file);
+ break;
+
+ case SAME_FILE_CLOSE_BLOCK:
+ rs->base_record.block_index +=
+ record->same_file_close_block.block_index_delta;
+ rs->base_record.absolute_ts_us +=
+ record->same_file_close_block.relative_ts_us;
+ record_size = sizeof(record->same_file_close_block);
+ break;
+
+ case SAME_FILE_CLOSE_BLOCK_SHORT:
+ rs->base_record.block_index +=
+ record->same_file_close_block_short.block_index_delta;
+ rs->base_record.absolute_ts_us +=
+ record->same_file_close_block_short.relative_ts_tens_us * 10;
+ record_size = sizeof(record->same_file_close_block_short);
+ break;
+
+ case SAME_FILE_NEXT_BLOCK:
+ ++rs->base_record.block_index;
+ rs->base_record.absolute_ts_us +=
+ record->same_file_next_block.relative_ts_us;
+ record_size = sizeof(record->same_file_next_block);
+ break;
+
+ case SAME_FILE_NEXT_BLOCK_SHORT:
+ ++rs->base_record.block_index;
+ rs->base_record.absolute_ts_us +=
+ record->same_file_next_block_short.relative_ts_tens_us * 10;
+ record_size = sizeof(record->same_file_next_block_short);
+ break;
+ }
+
+ rs->next_offset += record_size;
+ if (rs->next_offset > rl->rl_size - sizeof(*record)) {
+ rs->next_offset = 0;
+ ++rs->current_pass_no;
+ }
+ ++rs->current_record_no;
+}
+
+static void log_block_read(struct mount_info *mi, incfs_uuid_t *id,
+ int block_index)
+{
+ struct read_log *log = &mi->mi_log;
+ struct read_log_state *head, *tail;
+ s64 now_us;
+ s64 relative_us;
+ union log_record record;
+ size_t record_size;
+ uid_t uid = current_uid().val;
+ int block_delta;
+ bool same_file, same_uid;
+ bool next_block, close_block, very_close_block;
+ bool close_time, very_close_time, very_very_close_time;
+
+ /*
+ * This may read the old value, but it's OK to delay the logging start
+ * right after the configuration update.
+ */
+ if (READ_ONCE(log->rl_size) == 0)
+ return;
+
+ now_us = ktime_to_us(ktime_get());
+
+ spin_lock(&log->rl_lock);
+ if (log->rl_size == 0) {
+ spin_unlock(&log->rl_lock);
+ return;
+ }
+
+ head = &log->rl_head;
+ tail = &log->rl_tail;
+ relative_us = now_us - head->base_record.absolute_ts_us;
+
+ same_file = !memcmp(id, &head->base_record.file_id,
+ sizeof(incfs_uuid_t));
+ same_uid = uid == head->base_record.uid;
+
+ block_delta = block_index - head->base_record.block_index;
+ next_block = block_delta == 1;
+ very_close_block = block_delta >= S8_MIN && block_delta <= S8_MAX;
+ close_block = block_delta >= S16_MIN && block_delta <= S16_MAX;
+
+ very_very_close_time = relative_us < (1 << 5) * 10;
+ very_close_time = relative_us < (1 << 13);
+ close_time = relative_us < (1 << 16);
+
+ if (same_file && same_uid && next_block && very_very_close_time) {
+ record.same_file_next_block_short =
+ (struct same_file_next_block_short){
+ .type = SAME_FILE_NEXT_BLOCK_SHORT,
+ .relative_ts_tens_us = div_s64(relative_us, 10),
+ };
+ record_size = sizeof(struct same_file_next_block_short);
+ } else if (same_file && same_uid && next_block && very_close_time) {
+ record.same_file_next_block = (struct same_file_next_block){
+ .type = SAME_FILE_NEXT_BLOCK,
+ .relative_ts_us = relative_us,
+ };
+ record_size = sizeof(struct same_file_next_block);
+ } else if (same_file && same_uid && very_close_block &&
+ very_very_close_time) {
+ record.same_file_close_block_short =
+ (struct same_file_close_block_short){
+ .type = SAME_FILE_CLOSE_BLOCK_SHORT,
+ .relative_ts_tens_us = div_s64(relative_us, 10),
+ .block_index_delta = block_delta,
+ };
+ record_size = sizeof(struct same_file_close_block_short);
+ } else if (same_file && same_uid && close_block && very_close_time) {
+ record.same_file_close_block = (struct same_file_close_block){
+ .type = SAME_FILE_CLOSE_BLOCK,
+ .relative_ts_us = relative_us,
+ .block_index_delta = block_delta,
+ };
+ record_size = sizeof(struct same_file_close_block);
+ } else if (same_file && close_time) {
+ record.same_file = (struct same_file){
+ .type = SAME_FILE,
+ .block_index = block_index,
+ .relative_ts_us = relative_us,
+ .uid = uid,
+ };
+ record_size = sizeof(struct same_file);
+ } else {
+ record.full_record = (struct full_record){
+ .type = FULL,
+ .block_index = block_index,
+ .file_id = *id,
+ .absolute_ts_us = now_us,
+ .uid = uid,
+ };
+ head->base_record.file_id = *id;
+ record_size = sizeof(struct full_record);
+ }
+
+ head->base_record.block_index = block_index;
+ head->base_record.absolute_ts_us = now_us;
+
+ /* Advance tail beyond area we are going to overwrite */
+ while (tail->current_pass_no < head->current_pass_no &&
+ tail->next_offset < head->next_offset + record_size)
+ log_read_one_record(log, tail);
+
+ memcpy(((u8 *)log->rl_ring_buf) + head->next_offset, &record,
+ record_size);
+ head->next_offset += record_size;
+ if (head->next_offset > log->rl_size - sizeof(record)) {
+ head->next_offset = 0;
+ ++head->current_pass_no;
+ }
+ ++head->current_record_no;
+
+ spin_unlock(&log->rl_lock);
+ schedule_delayed_work(&log->ml_wakeup_work, msecs_to_jiffies(16));
+}
+
+static int validate_hash_tree(struct backing_file_context *bfc, struct file *f,
+ int block_index, struct mem_range data, u8 *buf)
+{
+ struct data_file *df = get_incfs_data_file(f);
+ u8 stored_digest[INCFS_MAX_HASH_SIZE] = {};
+ u8 calculated_digest[INCFS_MAX_HASH_SIZE] = {};
+ struct mtree *tree = NULL;
+ struct incfs_df_signature *sig = NULL;
+ int digest_size;
+ int hash_block_index = block_index;
+ int lvl;
+ int res;
+ loff_t hash_block_offset[INCFS_MAX_MTREE_LEVELS];
+ size_t hash_offset_in_block[INCFS_MAX_MTREE_LEVELS];
+ int hash_per_block;
+ pgoff_t file_pages;
+
+ /*
+ * Memory barrier to make sure tree is fully present if added via enable
+ * verity
+ */
+ tree = smp_load_acquire(&df->df_hash_tree);
+ sig = df->df_signature;
+ if (!tree || !sig)
+ return 0;
+
+ digest_size = tree->alg->digest_size;
+ hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / digest_size;
+ for (lvl = 0; lvl < tree->depth; lvl++) {
+ loff_t lvl_off = tree->hash_level_suboffset[lvl];
+
+ hash_block_offset[lvl] =
+ lvl_off + round_down(hash_block_index * digest_size,
+ INCFS_DATA_FILE_BLOCK_SIZE);
+ hash_offset_in_block[lvl] = hash_block_index * digest_size %
+ INCFS_DATA_FILE_BLOCK_SIZE;
+ hash_block_index /= hash_per_block;
+ }
+
+ memcpy(stored_digest, tree->root_hash, digest_size);
+
+ file_pages = DIV_ROUND_UP(df->df_size, INCFS_DATA_FILE_BLOCK_SIZE);
+ for (lvl = tree->depth - 1; lvl >= 0; lvl--) {
+ pgoff_t hash_page =
+ file_pages +
+ hash_block_offset[lvl] / INCFS_DATA_FILE_BLOCK_SIZE;
+ struct page *page = find_get_page_flags(
+ f->f_inode->i_mapping, hash_page, FGP_ACCESSED);
+ struct folio *folio;
+
+ if (page && PageChecked(page)) {
+ u8 *addr = kmap_atomic(page);
+
+ memcpy(stored_digest, addr + hash_offset_in_block[lvl],
+ digest_size);
+
+ kunmap_atomic(addr);
+ put_page(page);
+ continue;
+ }
+
+ if (page)
+ put_page(page);
+
+ res = incfs_kread(bfc, buf, INCFS_DATA_FILE_BLOCK_SIZE,
+ hash_block_offset[lvl] + sig->hash_offset);
+ if (res < 0)
+ return res;
+ if (res != INCFS_DATA_FILE_BLOCK_SIZE)
+ return -EIO;
+ res = incfs_hash_block(tree->alg,
+ range(buf, INCFS_DATA_FILE_BLOCK_SIZE),
+ range(calculated_digest, digest_size));
+ if (res)
+ return res;
+
+ if (memcmp(stored_digest, calculated_digest, digest_size)) {
+ int i;
+ bool zero = true;
+
+ pr_warn("incfs: Hash mismatch lvl:%d blk:%d\n",
+ lvl, block_index);
+ for (i = 0; i < digest_size; i++)
+ if (stored_digest[i]) {
+ zero = false;
+ break;
+ }
+
+ if (zero)
+ pr_debug("Note saved_digest all zero - did you forget to load the hashes?\n");
+ return -EBADMSG;
+ }
+
+ memcpy(stored_digest, buf + hash_offset_in_block[lvl],
+ digest_size);
+
+ folio = filemap_grab_folio(f->f_inode->i_mapping, hash_page);
+ if (!IS_ERR(folio)) {
+ u8 *addr = kmap_local_folio(folio, 0);
+
+ memcpy(addr, buf, INCFS_DATA_FILE_BLOCK_SIZE);
+ kunmap_local(addr);
+ folio_set_checked(folio);
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
+ folio_put(folio);
+ }
+ }
+
+ res = incfs_hash_block(tree->alg, data,
+ range(calculated_digest, digest_size));
+ if (res)
+ return res;
+
+ if (memcmp(stored_digest, calculated_digest, digest_size)) {
+ pr_debug("Leaf hash mismatch blk:%d\n", block_index);
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+static struct data_file_segment *get_file_segment(struct data_file *df,
+ int block_index)
+{
+ int seg_idx = block_index % ARRAY_SIZE(df->df_segments);
+
+ return &df->df_segments[seg_idx];
+}
+
+static bool is_data_block_present(struct data_file_block *block)
+{
+ return (block->db_backing_file_data_offset != 0) &&
+ (block->db_stored_size != 0);
+}
+
+static void convert_data_file_block(struct incfs_blockmap_entry *bme,
+ struct data_file_block *res_block)
+{
+ u16 flags = le16_to_cpu(bme->me_flags);
+
+ res_block->db_backing_file_data_offset =
+ le16_to_cpu(bme->me_data_offset_hi);
+ res_block->db_backing_file_data_offset <<= 32;
+ res_block->db_backing_file_data_offset |=
+ le32_to_cpu(bme->me_data_offset_lo);
+ res_block->db_stored_size = le16_to_cpu(bme->me_data_size);
+ res_block->db_comp_alg = flags & INCFS_BLOCK_COMPRESSED_MASK;
+}
+
+static int get_data_file_block(struct data_file *df, int index,
+ struct data_file_block *res_block)
+{
+ struct incfs_blockmap_entry bme = {};
+ struct backing_file_context *bfc = NULL;
+ loff_t blockmap_off = 0;
+ int error = 0;
+
+ if (!df || !res_block)
+ return -EFAULT;
+
+ blockmap_off = df->df_blockmap_off;
+ bfc = df->df_backing_file_context;
+
+ if (index < 0 || blockmap_off == 0)
+ return -EINVAL;
+
+ error = incfs_read_blockmap_entry(bfc, index, blockmap_off, &bme);
+ if (error)
+ return error;
+
+ convert_data_file_block(&bme, res_block);
+ return 0;
+}
+
+static int check_room_for_one_range(u32 size, u32 size_out)
+{
+ if (size_out + sizeof(struct incfs_filled_range) > size)
+ return -ERANGE;
+ return 0;
+}
+
+static int copy_one_range(struct incfs_filled_range *range, void __user *buffer,
+ u32 size, u32 *size_out)
+{
+ int error = check_room_for_one_range(size, *size_out);
+ if (error)
+ return error;
+
+ if (copy_to_user(((char __user *)buffer) + *size_out, range,
+ sizeof(*range)))
+ return -EFAULT;
+
+ *size_out += sizeof(*range);
+ return 0;
+}
+
+#define READ_BLOCKMAP_ENTRIES 512
+int incfs_get_filled_blocks(struct data_file *df,
+ struct incfs_file_data *fd,
+ struct incfs_get_filled_blocks_args *arg)
+{
+ int error = 0;
+ bool in_range = false;
+ struct incfs_filled_range range;
+ void __user *buffer = u64_to_user_ptr(arg->range_buffer);
+ u32 size = arg->range_buffer_size;
+ u32 end_index =
+ arg->end_index ? arg->end_index : df->df_total_block_count;
+ u32 *size_out = &arg->range_buffer_size_out;
+ int i = READ_BLOCKMAP_ENTRIES - 1;
+ int entries_read = 0;
+ struct incfs_blockmap_entry *bme;
+ int data_blocks_filled = 0;
+ int hash_blocks_filled = 0;
+
+ *size_out = 0;
+ if (end_index > df->df_total_block_count)
+ end_index = df->df_total_block_count;
+ arg->total_blocks_out = df->df_total_block_count;
+ arg->data_blocks_out = df->df_data_block_count;
+
+ if (atomic_read(&df->df_data_blocks_written) ==
+ df->df_data_block_count) {
+ pr_debug("File marked full, fast get_filled_blocks");
+ if (arg->start_index > end_index) {
+ arg->index_out = arg->start_index;
+ return 0;
+ }
+ arg->index_out = arg->start_index;
+
+ error = check_room_for_one_range(size, *size_out);
+ if (error)
+ return error;
+
+ range = (struct incfs_filled_range){
+ .begin = arg->start_index,
+ .end = end_index,
+ };
+
+ error = copy_one_range(&range, buffer, size, size_out);
+ if (error)
+ return error;
+ arg->index_out = end_index;
+ return 0;
+ }
+
+ bme = kzalloc(sizeof(*bme) * READ_BLOCKMAP_ENTRIES,
+ GFP_NOFS | __GFP_COMP);
+ if (!bme)
+ return -ENOMEM;
+
+ for (arg->index_out = arg->start_index; arg->index_out < end_index;
+ ++arg->index_out) {
+ struct data_file_block dfb;
+
+ if (++i == READ_BLOCKMAP_ENTRIES) {
+ entries_read = incfs_read_blockmap_entries(
+ df->df_backing_file_context, bme,
+ arg->index_out, READ_BLOCKMAP_ENTRIES,
+ df->df_blockmap_off);
+ if (entries_read < 0) {
+ error = entries_read;
+ break;
+ }
+
+ i = 0;
+ }
+
+ if (i >= entries_read) {
+ error = -EIO;
+ break;
+ }
+
+ convert_data_file_block(bme + i, &dfb);
+
+ if (is_data_block_present(&dfb)) {
+ if (arg->index_out >= df->df_data_block_count)
+ ++hash_blocks_filled;
+ else
+ ++data_blocks_filled;
+ }
+
+ if (is_data_block_present(&dfb) == in_range)
+ continue;
+
+ if (!in_range) {
+ error = check_room_for_one_range(size, *size_out);
+ if (error)
+ break;
+ in_range = true;
+ range.begin = arg->index_out;
+ } else {
+ range.end = arg->index_out;
+ error = copy_one_range(&range, buffer, size, size_out);
+ if (error) {
+ /* there will be another try out of the loop,
+ * it will reset the index_out if it fails too
+ */
+ break;
+ }
+ in_range = false;
+ }
+ }
+
+ if (in_range) {
+ range.end = arg->index_out;
+ error = copy_one_range(&range, buffer, size, size_out);
+ if (error)
+ arg->index_out = range.begin;
+ }
+
+ if (arg->start_index == 0) {
+ fd->fd_get_block_pos = 0;
+ fd->fd_filled_data_blocks = 0;
+ fd->fd_filled_hash_blocks = 0;
+ }
+
+ if (arg->start_index == fd->fd_get_block_pos) {
+ fd->fd_get_block_pos = arg->index_out + 1;
+ fd->fd_filled_data_blocks += data_blocks_filled;
+ fd->fd_filled_hash_blocks += hash_blocks_filled;
+ }
+
+ if (fd->fd_get_block_pos == df->df_total_block_count + 1) {
+ if (fd->fd_filled_data_blocks >
+ atomic_read(&df->df_data_blocks_written))
+ atomic_set(&df->df_data_blocks_written,
+ fd->fd_filled_data_blocks);
+
+ if (fd->fd_filled_hash_blocks >
+ atomic_read(&df->df_hash_blocks_written))
+ atomic_set(&df->df_hash_blocks_written,
+ fd->fd_filled_hash_blocks);
+ }
+
+ kfree(bme);
+ return error;
+}
+
+static bool is_read_done(struct pending_read *read)
+{
+ return atomic_read_acquire(&read->done) != 0;
+}
+
+static void set_read_done(struct pending_read *read)
+{
+ atomic_set_release(&read->done, 1);
+}
+
+/*
+ * Notifies a given data file about pending read from a given block.
+ * Returns a new pending read entry.
+ */
+static struct pending_read *add_pending_read(struct data_file *df,
+ int block_index)
+{
+ struct pending_read *result = NULL;
+ struct data_file_segment *segment = NULL;
+ struct mount_info *mi = NULL;
+
+ segment = get_file_segment(df, block_index);
+ mi = df->df_mount_info;
+
+ result = kzalloc(sizeof(*result), GFP_NOFS);
+ if (!result)
+ return NULL;
+
+ result->file_id = df->df_id;
+ result->block_index = block_index;
+ result->timestamp_us = ktime_to_us(ktime_get());
+ result->uid = current_uid().val;
+
+ spin_lock(&mi->pending_read_lock);
+
+ result->serial_number = ++mi->mi_last_pending_read_number;
+ mi->mi_pending_reads_count++;
+
+ list_add_rcu(&result->mi_reads_list, &mi->mi_reads_list_head);
+ list_add_rcu(&result->segment_reads_list, &segment->reads_list_head);
+
+ spin_unlock(&mi->pending_read_lock);
+
+ wake_up_all(&mi->mi_pending_reads_notif_wq);
+ return result;
+}
+
+static void free_pending_read_entry(struct rcu_head *entry)
+{
+ struct pending_read *read;
+
+ read = container_of(entry, struct pending_read, rcu);
+
+ kfree(read);
+}
+
+/* Notifies a given data file that pending read is completed. */
+static void remove_pending_read(struct data_file *df, struct pending_read *read)
+{
+ struct mount_info *mi = NULL;
+
+ if (!df || !read) {
+ WARN_ON(!df);
+ WARN_ON(!read);
+ return;
+ }
+
+ mi = df->df_mount_info;
+
+ spin_lock(&mi->pending_read_lock);
+
+ list_del_rcu(&read->mi_reads_list);
+ list_del_rcu(&read->segment_reads_list);
+
+ mi->mi_pending_reads_count--;
+
+ spin_unlock(&mi->pending_read_lock);
+
+ /* Don't free. Wait for readers */
+ call_rcu(&read->rcu, free_pending_read_entry);
+}
+
+static void notify_pending_reads(struct mount_info *mi,
+ struct data_file_segment *segment,
+ int index)
+{
+ struct pending_read *entry = NULL;
+
+ /* Notify pending reads waiting for this block. */
+ rcu_read_lock();
+ list_for_each_entry_rcu(entry, &segment->reads_list_head,
+ segment_reads_list) {
+ if (entry->block_index == index)
+ set_read_done(entry);
+ }
+ rcu_read_unlock();
+ wake_up_all(&segment->new_data_arrival_wq);
+
+ atomic_inc(&mi->mi_blocks_written);
+ wake_up_all(&mi->mi_blocks_written_notif_wq);
+}
+
+static int wait_for_data_block(struct data_file *df, int block_index,
+ struct data_file_block *res_block,
+ struct incfs_read_data_file_timeouts *timeouts,
+ unsigned int *delayed_min_us)
+{
+ struct data_file_block block = {};
+ struct data_file_segment *segment = NULL;
+ struct pending_read *read = NULL;
+ struct mount_info *mi = NULL;
+ int error;
+ int wait_res = 0;
+ unsigned int delayed_pending_us = 0;
+ bool delayed_pending = false;
+
+ if (!df || !res_block)
+ return -EFAULT;
+
+ if (block_index < 0 || block_index >= df->df_data_block_count)
+ return -EINVAL;
+
+ if (df->df_blockmap_off <= 0 || !df->df_mount_info)
+ return -ENODATA;
+
+ mi = df->df_mount_info;
+ segment = get_file_segment(df, block_index);
+
+ error = down_read_killable(&segment->rwsem);
+ if (error)
+ return error;
+
+ /* Look up the given block */
+ error = get_data_file_block(df, block_index, &block);
+
+ up_read(&segment->rwsem);
+
+ if (error)
+ return error;
+
+ /* If the block was found, just return it. No need to wait. */
+ if (is_data_block_present(&block)) {
+ *res_block = block;
+ if (timeouts && timeouts->min_time_us) {
+ *delayed_min_us = timeouts->min_time_us;
+ goto out;
+ }
+ return 0;
+ } else {
+ /* If it's not found, create a pending read */
+ if (timeouts && timeouts->max_pending_time_us) {
+ read = add_pending_read(df, block_index);
+ if (!read)
+ return -ENOMEM;
+ } else {
+ log_block_read(mi, &df->df_id, block_index);
+ return -ETIME;
+ }
+ }
+
+ /* Rest of function only applies if timeouts != NULL */
+ if (!timeouts) {
+ pr_warn("incfs: timeouts unexpectedly NULL\n");
+ return -EFSCORRUPTED;
+ }
+
+ /* Wait for notifications about block's arrival */
+ wait_res =
+ wait_event_interruptible_timeout(segment->new_data_arrival_wq,
+ (is_read_done(read)),
+ usecs_to_jiffies(timeouts->max_pending_time_us));
+
+ /* Woke up, the pending read is no longer needed. */
+ remove_pending_read(df, read);
+
+ if (wait_res == 0) {
+ /* Wait has timed out */
+ log_block_read(mi, &df->df_id, block_index);
+ return -ETIME;
+ }
+ if (wait_res < 0) {
+ /*
+ * Only ERESTARTSYS is really expected here when a signal
+ * comes while we wait.
+ */
+ return wait_res;
+ }
+
+ delayed_pending = true;
+ delayed_pending_us = timeouts->max_pending_time_us -
+ jiffies_to_usecs(wait_res);
+ if (timeouts->min_pending_time_us > delayed_pending_us)
+ *delayed_min_us = timeouts->min_pending_time_us -
+ delayed_pending_us;
+
+ error = down_read_killable(&segment->rwsem);
+ if (error)
+ return error;
+
+ /*
+ * Re-read blocks info now, it has just arrived and
+ * should be available.
+ */
+ error = get_data_file_block(df, block_index, &block);
+ if (!error) {
+ if (is_data_block_present(&block))
+ *res_block = block;
+ else {
+ /*
+ * Somehow wait finished successfully but block still
+ * can't be found. It's not normal.
+ */
+ pr_warn("incfs: Wait succeeded but block not found.\n");
+ error = -ENODATA;
+ }
+ }
+ up_read(&segment->rwsem);
+
+out:
+ if (error)
+ return error;
+
+ if (delayed_pending) {
+ mi->mi_reads_delayed_pending++;
+ mi->mi_reads_delayed_pending_us +=
+ delayed_pending_us;
+ }
+
+ if (delayed_min_us && *delayed_min_us) {
+ mi->mi_reads_delayed_min++;
+ mi->mi_reads_delayed_min_us += *delayed_min_us;
+ }
+
+ return 0;
+}
+
+static int incfs_update_sysfs_error(struct file *file, int index, int result,
+ struct mount_info *mi, struct data_file *df)
+{
+ int error;
+
+ if (result >= 0)
+ return 0;
+
+ error = mutex_lock_interruptible(&mi->mi_le_mutex);
+ if (error)
+ return error;
+
+ mi->mi_le_file_id = df->df_id;
+ mi->mi_le_time_us = ktime_to_us(ktime_get());
+ mi->mi_le_page = index;
+ mi->mi_le_errno = result;
+ mi->mi_le_uid = current_uid().val;
+ mutex_unlock(&mi->mi_le_mutex);
+
+ return 0;
+}
+
+ssize_t incfs_read_data_file_block(struct mem_range dst, struct file *f,
+ int index, struct mem_range tmp,
+ struct incfs_read_data_file_timeouts *timeouts,
+ unsigned int *delayed_min_us)
+{
+ loff_t pos;
+ ssize_t result;
+ size_t bytes_to_read;
+ struct mount_info *mi = NULL;
+ struct backing_file_context *bfc = NULL;
+ struct data_file_block block = {};
+ struct data_file *df = get_incfs_data_file(f);
+
+ if (!dst.data || !df || !tmp.data)
+ return -EFAULT;
+
+ if (tmp.len < 2 * INCFS_DATA_FILE_BLOCK_SIZE)
+ return -ERANGE;
+
+ mi = df->df_mount_info;
+ bfc = df->df_backing_file_context;
+
+ result = wait_for_data_block(df, index, &block, timeouts,
+ delayed_min_us);
+ if (result < 0)
+ goto out;
+
+ pos = block.db_backing_file_data_offset;
+ if (block.db_comp_alg == COMPRESSION_NONE) {
+ bytes_to_read = min(dst.len, block.db_stored_size);
+ result = incfs_kread(bfc, dst.data, bytes_to_read, pos);
+
+ /* Some data was read, but not enough */
+ if (result >= 0 && result != bytes_to_read)
+ result = -EIO;
+ } else {
+ bytes_to_read = min(tmp.len, block.db_stored_size);
+ result = incfs_kread(bfc, tmp.data, bytes_to_read, pos);
+ if (result == bytes_to_read) {
+ result =
+ decompress(mi, range(tmp.data, bytes_to_read),
+ dst, block.db_comp_alg);
+ if (result < 0) {
+ const char *name =
+ bfc->bc_file->f_path.dentry->d_name.name;
+
+ pr_warn_once("incfs: Decompression error. %s",
+ name);
+ }
+ } else if (result >= 0) {
+ /* Some data was read, but not enough */
+ result = -EIO;
+ }
+ }
+
+ if (result > 0) {
+ int err = validate_hash_tree(bfc, f, index, dst, tmp.data);
+
+ if (err < 0)
+ result = err;
+ }
+
+ if (result >= 0)
+ log_block_read(mi, &df->df_id, index);
+
+out:
+ if (result == -ETIME)
+ mi->mi_reads_failed_timed_out++;
+ else if (result == -EBADMSG)
+ mi->mi_reads_failed_hash_verification++;
+ else if (result < 0)
+ mi->mi_reads_failed_other++;
+
+ incfs_update_sysfs_error(f, index, result, mi, df);
+
+ return result;
+}
+
+ssize_t incfs_read_merkle_tree_blocks(struct mem_range dst,
+ struct data_file *df, size_t offset)
+{
+ struct backing_file_context *bfc = NULL;
+ struct incfs_df_signature *sig = NULL;
+ size_t to_read = dst.len;
+
+ if (!dst.data || !df)
+ return -EFAULT;
+
+ sig = df->df_signature;
+ bfc = df->df_backing_file_context;
+
+ if (offset > sig->hash_size)
+ return -ERANGE;
+
+ if (offset + to_read > sig->hash_size)
+ to_read = sig->hash_size - offset;
+
+ return incfs_kread(bfc, dst.data, to_read, sig->hash_offset + offset);
+}
+
+int incfs_process_new_data_block(struct data_file *df,
+ struct incfs_fill_block *block, u8 *data,
+ bool *complete)
+{
+ struct mount_info *mi = NULL;
+ struct backing_file_context *bfc = NULL;
+ struct data_file_segment *segment = NULL;
+ struct data_file_block existing_block = {};
+ u16 flags = 0;
+ int error = 0;
+
+ if (!df || !block)
+ return -EFAULT;
+
+ bfc = df->df_backing_file_context;
+ mi = df->df_mount_info;
+
+ if (block->block_index >= df->df_data_block_count)
+ return -ERANGE;
+
+ segment = get_file_segment(df, block->block_index);
+ if (!segment)
+ return -EFAULT;
+
+ if (block->compression == COMPRESSION_LZ4)
+ flags |= INCFS_BLOCK_COMPRESSED_LZ4;
+ else if (block->compression == COMPRESSION_ZSTD)
+ flags |= INCFS_BLOCK_COMPRESSED_ZSTD;
+ else if (block->compression)
+ return -EINVAL;
+
+ error = down_read_killable(&segment->rwsem);
+ if (error)
+ return error;
+
+ error = get_data_file_block(df, block->block_index, &existing_block);
+
+ up_read(&segment->rwsem);
+
+ if (error)
+ return error;
+ if (is_data_block_present(&existing_block))
+ /* Block is already present, nothing to do here */
+ return 0;
+
+ error = down_write_killable(&segment->rwsem);
+ if (error)
+ return error;
+
+ /* Recheck inside write lock */
+ error = get_data_file_block(df, block->block_index, &existing_block);
+ if (error)
+ goto out_up_write;
+
+ if (is_data_block_present(&existing_block))
+ goto out_up_write;
+
+ error = mutex_lock_interruptible(&bfc->bc_mutex);
+ if (error)
+ goto out_up_write;
+
+ error = incfs_write_data_block_to_backing_file(bfc,
+ range(data, block->data_len), block->block_index,
+ df->df_blockmap_off, flags);
+ if (error)
+ goto out_mutex_unlock;
+
+ if (atomic_inc_return(&df->df_data_blocks_written)
+ >= df->df_data_block_count)
+ *complete = true;
+
+out_mutex_unlock:
+ mutex_unlock(&bfc->bc_mutex);
+ if (!error)
+ notify_pending_reads(mi, segment, block->block_index);
+
+out_up_write:
+ up_write(&segment->rwsem);
+
+ if (error)
+ pr_debug("%d error: %d\n", block->block_index, error);
+ return error;
+}
+
+int incfs_read_file_signature(struct data_file *df, struct mem_range dst)
+{
+ struct backing_file_context *bfc = df->df_backing_file_context;
+ struct incfs_df_signature *sig;
+ int read_res = 0;
+
+ if (!dst.data)
+ return -EFAULT;
+
+ sig = df->df_signature;
+ if (!sig)
+ return 0;
+
+ if (dst.len < sig->sig_size)
+ return -E2BIG;
+
+ read_res = incfs_kread(bfc, dst.data, sig->sig_size, sig->sig_offset);
+
+ if (read_res < 0)
+ return read_res;
+
+ if (read_res != sig->sig_size)
+ return -EIO;
+
+ return read_res;
+}
+
+int incfs_process_new_hash_block(struct data_file *df,
+ struct incfs_fill_block *block, u8 *data)
+{
+ struct backing_file_context *bfc = NULL;
+ struct mount_info *mi = NULL;
+ struct mtree *hash_tree = NULL;
+ struct incfs_df_signature *sig = NULL;
+ loff_t hash_area_base = 0;
+ loff_t hash_area_size = 0;
+ int error = 0;
+
+ if (!df || !block)
+ return -EFAULT;
+
+ if (!(block->flags & INCFS_BLOCK_FLAGS_HASH))
+ return -EINVAL;
+
+ bfc = df->df_backing_file_context;
+ mi = df->df_mount_info;
+
+ if (!df)
+ return -ENOENT;
+
+ hash_tree = df->df_hash_tree;
+ sig = df->df_signature;
+ if (!hash_tree || !sig || sig->hash_offset == 0)
+ return -ENOTSUPP;
+
+ hash_area_base = sig->hash_offset;
+ hash_area_size = sig->hash_size;
+ if (hash_area_size < block->block_index * INCFS_DATA_FILE_BLOCK_SIZE
+ + block->data_len) {
+ /* Hash block goes beyond dedicated hash area of this file. */
+ return -ERANGE;
+ }
+
+ error = mutex_lock_interruptible(&bfc->bc_mutex);
+ if (!error) {
+ error = incfs_write_hash_block_to_backing_file(
+ bfc, range(data, block->data_len), block->block_index,
+ hash_area_base, df->df_blockmap_off, df->df_size);
+ mutex_unlock(&bfc->bc_mutex);
+ }
+ if (!error)
+ atomic_inc(&df->df_hash_blocks_written);
+
+ return error;
+}
+
+static int process_blockmap_md(struct incfs_blockmap *bm,
+ struct metadata_handler *handler)
+{
+ struct data_file *df = handler->context;
+ int error = 0;
+ loff_t base_off = le64_to_cpu(bm->m_base_offset);
+ u32 block_count = le32_to_cpu(bm->m_block_count);
+
+ if (!df)
+ return -EFAULT;
+
+ if (df->df_data_block_count > block_count)
+ return -EBADMSG;
+
+ df->df_total_block_count = block_count;
+ df->df_blockmap_off = base_off;
+ return error;
+}
+
+static int process_file_signature_md(struct incfs_file_signature *sg,
+ struct metadata_handler *handler)
+{
+ struct data_file *df = handler->context;
+ struct mtree *hash_tree = NULL;
+ int error = 0;
+ struct incfs_df_signature *signature =
+ kzalloc(sizeof(*signature), GFP_NOFS);
+ void *buf = NULL;
+ ssize_t read;
+
+ if (!signature)
+ return -ENOMEM;
+
+ if (!df || !df->df_backing_file_context ||
+ !df->df_backing_file_context->bc_file) {
+ error = -ENOENT;
+ goto out;
+ }
+
+ signature->hash_offset = le64_to_cpu(sg->sg_hash_tree_offset);
+ signature->hash_size = le32_to_cpu(sg->sg_hash_tree_size);
+ signature->sig_offset = le64_to_cpu(sg->sg_sig_offset);
+ signature->sig_size = le32_to_cpu(sg->sg_sig_size);
+
+ buf = kzalloc(signature->sig_size, GFP_NOFS);
+ if (!buf) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ read = incfs_kread(df->df_backing_file_context, buf,
+ signature->sig_size, signature->sig_offset);
+ if (read < 0) {
+ error = read;
+ goto out;
+ }
+
+ if (read != signature->sig_size) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ hash_tree = incfs_alloc_mtree(range(buf, signature->sig_size),
+ df->df_data_block_count);
+ if (IS_ERR(hash_tree)) {
+ error = PTR_ERR(hash_tree);
+ hash_tree = NULL;
+ goto out;
+ }
+ if (hash_tree->hash_tree_area_size != signature->hash_size) {
+ error = -EINVAL;
+ goto out;
+ }
+ if (signature->hash_size > 0 &&
+ handler->md_record_offset <= signature->hash_offset) {
+ error = -EINVAL;
+ goto out;
+ }
+ if (handler->md_record_offset <= signature->sig_offset) {
+ error = -EINVAL;
+ goto out;
+ }
+ df->df_hash_tree = hash_tree;
+ hash_tree = NULL;
+ df->df_signature = signature;
+ signature = NULL;
+out:
+ incfs_free_mtree(hash_tree);
+ kfree(signature);
+ kfree(buf);
+
+ return error;
+}
+
+static int process_status_md(struct incfs_status *is,
+ struct metadata_handler *handler)
+{
+ struct data_file *df = handler->context;
+
+ df->df_initial_data_blocks_written =
+ le32_to_cpu(is->is_data_blocks_written);
+ atomic_set(&df->df_data_blocks_written,
+ df->df_initial_data_blocks_written);
+
+ df->df_initial_hash_blocks_written =
+ le32_to_cpu(is->is_hash_blocks_written);
+ atomic_set(&df->df_hash_blocks_written,
+ df->df_initial_hash_blocks_written);
+
+ df->df_status_offset = handler->md_record_offset;
+ return 0;
+}
+
+static int process_file_verity_signature_md(
+ struct incfs_file_verity_signature *vs,
+ struct metadata_handler *handler)
+{
+ struct data_file *df = handler->context;
+ struct incfs_df_verity_signature *verity_signature;
+
+ if (!df)
+ return -EFAULT;
+
+ verity_signature = kzalloc(sizeof(*verity_signature), GFP_NOFS);
+ if (!verity_signature)
+ return -ENOMEM;
+
+ verity_signature->offset = le64_to_cpu(vs->vs_offset);
+ verity_signature->size = le32_to_cpu(vs->vs_size);
+ if (verity_signature->size > FS_VERITY_MAX_SIGNATURE_SIZE) {
+ kfree(verity_signature);
+ return -EFAULT;
+ }
+
+ df->df_verity_signature = verity_signature;
+ return 0;
+}
+
+static int incfs_scan_metadata_chain(struct data_file *df)
+{
+ struct metadata_handler *handler = NULL;
+ int result = 0;
+ int records_count = 0;
+ int error = 0;
+ struct backing_file_context *bfc = NULL;
+ int nondata_block_count;
+
+ if (!df || !df->df_backing_file_context)
+ return -EFAULT;
+
+ bfc = df->df_backing_file_context;
+
+ handler = kzalloc(sizeof(*handler), GFP_NOFS);
+ if (!handler)
+ return -ENOMEM;
+
+ handler->md_record_offset = df->df_metadata_off;
+ handler->context = df;
+ handler->handle_blockmap = process_blockmap_md;
+ handler->handle_signature = process_file_signature_md;
+ handler->handle_status = process_status_md;
+ handler->handle_verity_signature = process_file_verity_signature_md;
+
+ while (handler->md_record_offset > 0) {
+ error = incfs_read_next_metadata_record(bfc, handler);
+ if (error) {
+ pr_warn("incfs: Error during reading incfs-metadata record. Offset: %lld Record #%d Error code: %d\n",
+ handler->md_record_offset, records_count + 1,
+ -error);
+ break;
+ }
+ records_count++;
+ }
+ if (error) {
+ pr_warn("incfs: Error %d after reading %d incfs-metadata records.\n",
+ -error, records_count);
+ result = error;
+ } else
+ result = records_count;
+
+ nondata_block_count = df->df_total_block_count -
+ df->df_data_block_count;
+ if (df->df_hash_tree) {
+ int hash_block_count = get_blocks_count_for_size(
+ df->df_hash_tree->hash_tree_area_size);
+
+ /*
+ * Files that were created with a hash tree have the hash tree
+ * included in the block map, i.e. nondata_block_count ==
+ * hash_block_count. Files whose hash tree was added by
+ * FS_IOC_ENABLE_VERITY will still have the original block
+ * count, i.e. nondata_block_count == 0.
+ */
+ if (nondata_block_count != hash_block_count &&
+ nondata_block_count != 0)
+ result = -EINVAL;
+ } else if (nondata_block_count != 0) {
+ result = -EINVAL;
+ }
+
+ kfree(handler);
+ return result;
+}
+
+/*
+ * Quickly checks if there are pending reads with a serial number larger
+ * than a given one.
+ */
+bool incfs_fresh_pending_reads_exist(struct mount_info *mi, int last_number)
+{
+ bool result = false;
+
+ spin_lock(&mi->pending_read_lock);
+ result = (mi->mi_last_pending_read_number > last_number) &&
+ (mi->mi_pending_reads_count > 0);
+ spin_unlock(&mi->pending_read_lock);
+ return result;
+}
+
+int incfs_collect_pending_reads(struct mount_info *mi, int sn_lowerbound,
+ struct incfs_pending_read_info *reads,
+ struct incfs_pending_read_info2 *reads2,
+ int reads_size, int *new_max_sn)
+{
+ int reported_reads = 0;
+ struct pending_read *entry = NULL;
+
+ if (!mi)
+ return -EFAULT;
+
+ if (reads_size <= 0)
+ return 0;
+
+ if (!incfs_fresh_pending_reads_exist(mi, sn_lowerbound))
+ return 0;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(entry, &mi->mi_reads_list_head, mi_reads_list) {
+ if (entry->serial_number <= sn_lowerbound)
+ continue;
+
+ if (reads) {
+ reads[reported_reads].file_id = entry->file_id;
+ reads[reported_reads].block_index = entry->block_index;
+ reads[reported_reads].serial_number =
+ entry->serial_number;
+ reads[reported_reads].timestamp_us =
+ entry->timestamp_us;
+ }
+
+ if (reads2) {
+ reads2[reported_reads].file_id = entry->file_id;
+ reads2[reported_reads].block_index = entry->block_index;
+ reads2[reported_reads].serial_number =
+ entry->serial_number;
+ reads2[reported_reads].timestamp_us =
+ entry->timestamp_us;
+ reads2[reported_reads].uid = entry->uid;
+ }
+
+ if (entry->serial_number > *new_max_sn)
+ *new_max_sn = entry->serial_number;
+
+ reported_reads++;
+ if (reported_reads >= reads_size)
+ break;
+ }
+
+ rcu_read_unlock();
+
+ return reported_reads;
+}
+
+struct read_log_state incfs_get_log_state(struct mount_info *mi)
+{
+ struct read_log *log = &mi->mi_log;
+ struct read_log_state result;
+
+ spin_lock(&log->rl_lock);
+ result = log->rl_head;
+ spin_unlock(&log->rl_lock);
+ return result;
+}
+
+int incfs_get_uncollected_logs_count(struct mount_info *mi,
+ const struct read_log_state *state)
+{
+ struct read_log *log = &mi->mi_log;
+ u32 generation;
+ u64 head_no, tail_no;
+
+ spin_lock(&log->rl_lock);
+ tail_no = log->rl_tail.current_record_no;
+ head_no = log->rl_head.current_record_no;
+ generation = log->rl_head.generation_id;
+ spin_unlock(&log->rl_lock);
+
+ if (generation != state->generation_id)
+ return head_no - tail_no;
+ else
+ return head_no - max_t(u64, tail_no, state->current_record_no);
+}
+
+int incfs_collect_logged_reads(struct mount_info *mi,
+ struct read_log_state *state,
+ struct incfs_pending_read_info *reads,
+ struct incfs_pending_read_info2 *reads2,
+ int reads_size)
+{
+ int dst_idx;
+ struct read_log *log = &mi->mi_log;
+ struct read_log_state *head, *tail;
+
+ spin_lock(&log->rl_lock);
+ head = &log->rl_head;
+ tail = &log->rl_tail;
+
+ if (state->generation_id != head->generation_id) {
+ pr_debug("read ptr is wrong generation: %u/%u",
+ state->generation_id, head->generation_id);
+
+ *state = (struct read_log_state){
+ .generation_id = head->generation_id,
+ };
+ }
+
+ if (state->current_record_no < tail->current_record_no) {
+ pr_debug("read ptr is behind, moving: %u/%u -> %u/%u\n",
+ (u32)state->next_offset,
+ (u32)state->current_pass_no,
+ (u32)tail->next_offset, (u32)tail->current_pass_no);
+
+ *state = *tail;
+ }
+
+ for (dst_idx = 0; dst_idx < reads_size; dst_idx++) {
+ if (state->current_record_no == head->current_record_no)
+ break;
+
+ log_read_one_record(log, state);
+
+ if (reads)
+ reads[dst_idx] = (struct incfs_pending_read_info) {
+ .file_id = state->base_record.file_id,
+ .block_index = state->base_record.block_index,
+ .serial_number = state->current_record_no,
+ .timestamp_us =
+ state->base_record.absolute_ts_us,
+ };
+
+ if (reads2)
+ reads2[dst_idx] = (struct incfs_pending_read_info2) {
+ .file_id = state->base_record.file_id,
+ .block_index = state->base_record.block_index,
+ .serial_number = state->current_record_no,
+ .timestamp_us =
+ state->base_record.absolute_ts_us,
+ .uid = state->base_record.uid,
+ };
+ }
+
+ spin_unlock(&log->rl_lock);
+ return dst_idx;
+}
+
diff --git a/fs/incfs/data_mgmt.h b/fs/incfs/data_mgmt.h
new file mode 100644
index 0000000..555a621
--- /dev/null
+++ b/fs/incfs/data_mgmt.h
@@ -0,0 +1,538 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+#ifndef _INCFS_DATA_MGMT_H
+#define _INCFS_DATA_MGMT_H
+
+#include <linux/cred.h>
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/completion.h>
+#include <linux/wait.h>
+#include <linux/zstd.h>
+#include <linux/rwsem.h>
+
+#include <uapi/linux/incrementalfs.h>
+
+#include "internal.h"
+#include "pseudo_files.h"
+
+#define SEGMENTS_PER_FILE 3
+
+enum LOG_RECORD_TYPE {
+ FULL,
+ SAME_FILE,
+ SAME_FILE_CLOSE_BLOCK,
+ SAME_FILE_CLOSE_BLOCK_SHORT,
+ SAME_FILE_NEXT_BLOCK,
+ SAME_FILE_NEXT_BLOCK_SHORT,
+};
+
+struct full_record {
+ enum LOG_RECORD_TYPE type : 3; /* FULL */
+ u32 block_index : 29;
+ incfs_uuid_t file_id;
+ u64 absolute_ts_us;
+ uid_t uid;
+} __packed; /* 32 bytes */
+
+struct same_file {
+ enum LOG_RECORD_TYPE type : 3; /* SAME_FILE */
+ u32 block_index : 29;
+ uid_t uid;
+ u16 relative_ts_us; /* max 2^16 us ~= 64 ms */
+} __packed; /* 10 bytes */
+
+struct same_file_close_block {
+ enum LOG_RECORD_TYPE type : 3; /* SAME_FILE_CLOSE_BLOCK */
+ u16 relative_ts_us : 13; /* max 2^13 us ~= 8 ms */
+ s16 block_index_delta;
+} __packed; /* 4 bytes */
+
+struct same_file_close_block_short {
+ enum LOG_RECORD_TYPE type : 3; /* SAME_FILE_CLOSE_BLOCK_SHORT */
+ u8 relative_ts_tens_us : 5; /* max 2^5*10 us ~= 320 us */
+ s8 block_index_delta;
+} __packed; /* 2 bytes */
+
+struct same_file_next_block {
+ enum LOG_RECORD_TYPE type : 3; /* SAME_FILE_NEXT_BLOCK */
+ u16 relative_ts_us : 13; /* max 2^13 us ~= 8 ms */
+} __packed; /* 2 bytes */
+
+struct same_file_next_block_short {
+ enum LOG_RECORD_TYPE type : 3; /* SAME_FILE_NEXT_BLOCK_SHORT */
+ u8 relative_ts_tens_us : 5; /* max 2^5*10 us ~= 320 us */
+} __packed; /* 1 byte */
+
+union log_record {
+ struct full_record full_record;
+ struct same_file same_file;
+ struct same_file_close_block same_file_close_block;
+ struct same_file_close_block_short same_file_close_block_short;
+ struct same_file_next_block same_file_next_block;
+ struct same_file_next_block_short same_file_next_block_short;
+};
+
+struct read_log_state {
+ /* Log buffer generation id, incremented on configuration changes */
+ u32 generation_id;
+
+ /* Offset in rl_ring_buf to write into. */
+ u32 next_offset;
+
+ /* Current number of writer passes over rl_ring_buf */
+ u32 current_pass_no;
+
+ /* Current full_record to diff against */
+ struct full_record base_record;
+
+ /* Current record number counting from configuration change */
+ u64 current_record_no;
+};
+
+/* A ring buffer to save records about data blocks which were recently read. */
+struct read_log {
+ void *rl_ring_buf;
+
+ int rl_size;
+
+ struct read_log_state rl_head;
+
+ struct read_log_state rl_tail;
+
+ /* A lock to protect the above fields */
+ spinlock_t rl_lock;
+
+ /* A queue of waiters who want to be notified about reads */
+ wait_queue_head_t ml_notif_wq;
+
+ /* A work item to wake up those waiters without slowing down readers */
+ struct delayed_work ml_wakeup_work;
+};
+
+struct mount_options {
+ unsigned int read_timeout_ms;
+ unsigned int readahead_pages;
+ unsigned int read_log_pages;
+ unsigned int read_log_wakeup_count;
+ bool report_uid;
+ char *sysfs_name;
+};
+
+struct mount_info {
+ struct super_block *mi_sb;
+
+ struct path mi_backing_dir_path;
+
+ struct dentry *mi_index_dir;
+ /* For stacking mounts, if true, this indicates if the index dir needs
+ * to be freed for this SB otherwise it was created by lower level SB */
+ bool mi_index_free;
+
+ struct dentry *mi_incomplete_dir;
+ /* For stacking mounts, if true, this indicates if the incomplete dir
+ * needs to be freed for this SB. Similar to mi_index_free */
+ bool mi_incomplete_free;
+
+ const struct cred *mi_owner;
+
+ struct mount_options mi_options;
+
+ /* This mutex is to be taken before create, rename, delete */
+ struct mutex mi_dir_struct_mutex;
+
+ /*
+ * A queue of waiters who want to be notified about new pending reads.
+ */
+ wait_queue_head_t mi_pending_reads_notif_wq;
+
+ /*
+ * Protects - RCU safe:
+ * - reads_list_head
+ * - mi_pending_reads_count
+ * - mi_last_pending_read_number
+ * - data_file_segment.reads_list_head
+ */
+ spinlock_t pending_read_lock;
+
+ /* List of active pending_read objects */
+ struct list_head mi_reads_list_head;
+
+ /* Total number of items in reads_list_head */
+ int mi_pending_reads_count;
+
+ /*
+ * Last serial number that was assigned to a pending read.
+ * 0 means no pending reads have been seen yet.
+ */
+ int mi_last_pending_read_number;
+
+ /* Temporary buffer for read logger. */
+ struct read_log mi_log;
+
+ /* SELinux needs special xattrs on our pseudo files */
+ struct mem_range pseudo_file_xattr[PSEUDO_FILE_COUNT];
+
+ /* A queue of waiters who want to be notified about blocks_written */
+ wait_queue_head_t mi_blocks_written_notif_wq;
+
+ /* Number of blocks written since mount */
+ atomic_t mi_blocks_written;
+
+ /* Per UID read timeouts */
+ spinlock_t mi_per_uid_read_timeouts_lock;
+ struct incfs_per_uid_read_timeouts *mi_per_uid_read_timeouts;
+ int mi_per_uid_read_timeouts_size;
+
+ /* zstd workspace */
+ struct mutex mi_zstd_workspace_mutex;
+ void *mi_zstd_workspace;
+ ZSTD_DStream *mi_zstd_stream;
+ struct delayed_work mi_zstd_cleanup_work;
+
+ /* sysfs node */
+ struct incfs_sysfs_node *mi_sysfs_node;
+
+ /* Last error information */
+ struct mutex mi_le_mutex;
+ incfs_uuid_t mi_le_file_id;
+ u64 mi_le_time_us;
+ u32 mi_le_page;
+ u32 mi_le_errno;
+ uid_t mi_le_uid;
+
+ /* Number of reads timed out */
+ u32 mi_reads_failed_timed_out;
+
+ /* Number of reads failed because hash verification failed */
+ u32 mi_reads_failed_hash_verification;
+
+ /* Number of reads failed for another reason */
+ u32 mi_reads_failed_other;
+
+ /* Number of reads delayed because page had to be fetched */
+ u32 mi_reads_delayed_pending;
+
+ /* Total time waiting for pages to be fetched */
+ u64 mi_reads_delayed_pending_us;
+
+ /*
+ * Number of reads delayed because of per-uid min_time_us or
+ * min_pending_time_us settings
+ */
+ u32 mi_reads_delayed_min;
+
+ /* Total time waiting because of per-uid min_time_us or
+ * min_pending_time_us settings.
+ *
+ * Note that if a read is initially delayed because we have to wait for
+ * the page, then further delayed because of min_pending_time_us
+ * setting, this counter gets incremented by only the further delay
+ * time.
+ */
+ u64 mi_reads_delayed_min_us;
+};
+
+struct data_file_block {
+ loff_t db_backing_file_data_offset;
+
+ size_t db_stored_size;
+
+ enum incfs_compression_alg db_comp_alg;
+};
+
+struct pending_read {
+ incfs_uuid_t file_id;
+
+ s64 timestamp_us;
+
+ atomic_t done;
+
+ int block_index;
+
+ int serial_number;
+
+ uid_t uid;
+
+ struct list_head mi_reads_list;
+
+ struct list_head segment_reads_list;
+
+ struct rcu_head rcu;
+};
+
+struct data_file_segment {
+ wait_queue_head_t new_data_arrival_wq;
+
+ /* Protects reads and writes from the blockmap */
+ struct rw_semaphore rwsem;
+
+ /* List of active pending_read objects belonging to this segment */
+ /* Protected by mount_info.pending_reads_mutex */
+ struct list_head reads_list_head;
+};
+
+struct data_file {
+ struct backing_file_context *df_backing_file_context;
+
+ struct mount_info *df_mount_info;
+
+ incfs_uuid_t df_id;
+
+ /*
+ * Array of segments used to reduce lock contention for the file.
+ * Segment is chosen for a block depends on the block's index.
+ */
+ struct data_file_segment df_segments[SEGMENTS_PER_FILE];
+
+ /* Base offset of the first metadata record. */
+ loff_t df_metadata_off;
+
+ /* Base offset of the block map. */
+ loff_t df_blockmap_off;
+
+ /* File size in bytes */
+ loff_t df_size;
+
+ /* File header flags */
+ u32 df_header_flags;
+
+ /* File size in DATA_FILE_BLOCK_SIZE blocks */
+ int df_data_block_count;
+
+ /* Total number of blocks, data + hash */
+ int df_total_block_count;
+
+ /* For mapped files, the offset into the actual file */
+ loff_t df_mapped_offset;
+
+ /* Number of data blocks written to file */
+ atomic_t df_data_blocks_written;
+
+ /* Number of data blocks in the status block */
+ u32 df_initial_data_blocks_written;
+
+ /* Number of hash blocks written to file */
+ atomic_t df_hash_blocks_written;
+
+ /* Number of hash blocks in the status block */
+ u32 df_initial_hash_blocks_written;
+
+ /* Offset to status metadata header */
+ loff_t df_status_offset;
+
+ /*
+ * Mutex acquired while enabling verity. Note that df_hash_tree is set
+ * by enable verity.
+ *
+ * The backing file mutex bc_mutex may be taken while this mutex is
+ * held.
+ */
+ struct mutex df_enable_verity;
+
+ /*
+ * Set either at construction time or during enabling verity. In the
+ * latter case, set via smp_store_release, so use smp_load_acquire to
+ * read it.
+ */
+ struct mtree *df_hash_tree;
+
+ /* Guaranteed set if df_hash_tree is set. */
+ struct incfs_df_signature *df_signature;
+
+ /*
+ * The verity file digest, set when verity is enabled and the file has
+ * been opened
+ */
+ struct mem_range df_verity_file_digest;
+
+ struct incfs_df_verity_signature *df_verity_signature;
+};
+
+struct dir_file {
+ struct mount_info *mount_info;
+
+ struct file *backing_dir;
+};
+
+struct inode_info {
+ struct mount_info *n_mount_info; /* A mount, this file belongs to */
+
+ struct inode *n_backing_inode;
+
+ struct data_file *n_file;
+
+ struct inode n_vfs_inode;
+};
+
+struct dentry_info {
+ struct path backing_path;
+};
+
+enum FILL_PERMISSION {
+ CANT_FILL = 0,
+ CAN_FILL = 1,
+};
+
+struct incfs_file_data {
+ /* Does this file handle have INCFS_IOC_FILL_BLOCKS permission */
+ enum FILL_PERMISSION fd_fill_permission;
+
+ /* If INCFS_IOC_GET_FILLED_BLOCKS has been called, where are we */
+ int fd_get_block_pos;
+
+ /* And how many filled blocks are there up to that point */
+ int fd_filled_data_blocks;
+ int fd_filled_hash_blocks;
+};
+
+struct mount_info *incfs_alloc_mount_info(struct super_block *sb,
+ struct mount_options *options,
+ struct path *backing_dir_path);
+
+int incfs_realloc_mount_info(struct mount_info *mi,
+ struct mount_options *options);
+
+void incfs_free_mount_info(struct mount_info *mi);
+
+char *file_id_to_str(incfs_uuid_t id);
+struct dentry *incfs_lookup_dentry(struct dentry *parent, const char *name);
+struct data_file *incfs_open_data_file(struct mount_info *mi, struct file *bf);
+void incfs_free_data_file(struct data_file *df);
+
+struct dir_file *incfs_open_dir_file(struct mount_info *mi, struct file *bf);
+void incfs_free_dir_file(struct dir_file *dir);
+
+struct incfs_read_data_file_timeouts {
+ u32 min_time_us;
+ u32 min_pending_time_us;
+ u32 max_pending_time_us;
+};
+
+ssize_t incfs_read_data_file_block(struct mem_range dst, struct file *f,
+ int index, struct mem_range tmp,
+ struct incfs_read_data_file_timeouts *timeouts,
+ unsigned int *delayed_min_us);
+
+ssize_t incfs_read_merkle_tree_blocks(struct mem_range dst,
+ struct data_file *df, size_t offset);
+
+int incfs_get_filled_blocks(struct data_file *df,
+ struct incfs_file_data *fd,
+ struct incfs_get_filled_blocks_args *arg);
+
+int incfs_read_file_signature(struct data_file *df, struct mem_range dst);
+
+int incfs_process_new_data_block(struct data_file *df,
+ struct incfs_fill_block *block, u8 *data,
+ bool *complete);
+
+int incfs_process_new_hash_block(struct data_file *df,
+ struct incfs_fill_block *block, u8 *data);
+
+bool incfs_fresh_pending_reads_exist(struct mount_info *mi, int last_number);
+
+/*
+ * Collects pending reads and saves them into the array (reads/reads_size).
+ * Only reads with serial_number > sn_lowerbound are reported.
+ * Returns how many reads were saved into the array.
+ */
+int incfs_collect_pending_reads(struct mount_info *mi, int sn_lowerbound,
+ struct incfs_pending_read_info *reads,
+ struct incfs_pending_read_info2 *reads2,
+ int reads_size, int *new_max_sn);
+
+int incfs_collect_logged_reads(struct mount_info *mi,
+ struct read_log_state *start_state,
+ struct incfs_pending_read_info *reads,
+ struct incfs_pending_read_info2 *reads2,
+ int reads_size);
+struct read_log_state incfs_get_log_state(struct mount_info *mi);
+int incfs_get_uncollected_logs_count(struct mount_info *mi,
+ const struct read_log_state *state);
+
+static inline struct inode_info *get_incfs_node(struct inode *inode)
+{
+ if (!inode)
+ return NULL;
+
+ if (inode->i_sb->s_magic != INCFS_MAGIC_NUMBER) {
+ /* This inode doesn't belong to us. */
+ pr_warn_once("incfs: %s on an alien inode.", __func__);
+ return NULL;
+ }
+
+ return container_of(inode, struct inode_info, n_vfs_inode);
+}
+
+static inline struct data_file *get_incfs_data_file(struct file *f)
+{
+ struct inode_info *node = NULL;
+
+ if (!f)
+ return NULL;
+
+ if (!S_ISREG(f->f_inode->i_mode))
+ return NULL;
+
+ node = get_incfs_node(f->f_inode);
+ if (!node)
+ return NULL;
+
+ return node->n_file;
+}
+
+static inline struct dir_file *get_incfs_dir_file(struct file *f)
+{
+ if (!f)
+ return NULL;
+
+ if (!S_ISDIR(f->f_inode->i_mode))
+ return NULL;
+
+ return (struct dir_file *)f->private_data;
+}
+
+/*
+ * Make sure that inode_info.n_file is initialized and inode can be used
+ * for reading and writing data from/to the backing file.
+ */
+int make_inode_ready_for_data_ops(struct mount_info *mi,
+ struct inode *inode,
+ struct file *backing_file);
+
+static inline struct dentry_info *get_incfs_dentry(const struct dentry *d)
+{
+ if (!d)
+ return NULL;
+
+ return (struct dentry_info *)d->d_fsdata;
+}
+
+static inline void get_incfs_backing_path(const struct dentry *d,
+ struct path *path)
+{
+ struct dentry_info *di = get_incfs_dentry(d);
+
+ if (!di) {
+ *path = (struct path) {};
+ return;
+ }
+
+ *path = di->backing_path;
+ path_get(path);
+}
+
+static inline int get_blocks_count_for_size(u64 size)
+{
+ if (size == 0)
+ return 0;
+ return 1 + (size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+}
+
+#endif /* _INCFS_DATA_MGMT_H */
diff --git a/fs/incfs/format.c b/fs/incfs/format.c
new file mode 100644
index 0000000..00e09e8
--- /dev/null
+++ b/fs/incfs/format.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC
+ */
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/types.h>
+#include <linux/mutex.h>
+#include <linux/mm.h>
+#include <linux/falloc.h>
+#include <linux/slab.h>
+#include <linux/crc32.h>
+#include <linux/kernel.h>
+
+#include "format.h"
+#include "data_mgmt.h"
+
+struct backing_file_context *incfs_alloc_bfc(struct mount_info *mi,
+ struct file *backing_file)
+{
+ struct backing_file_context *result = NULL;
+
+ result = kzalloc(sizeof(*result), GFP_NOFS);
+ if (!result)
+ return ERR_PTR(-ENOMEM);
+
+ result->bc_file = get_file(backing_file);
+ result->bc_cred = mi->mi_owner;
+ mutex_init(&result->bc_mutex);
+ return result;
+}
+
+void incfs_free_bfc(struct backing_file_context *bfc)
+{
+ if (!bfc)
+ return;
+
+ if (bfc->bc_file)
+ fput(bfc->bc_file);
+
+ mutex_destroy(&bfc->bc_mutex);
+ kfree(bfc);
+}
+
+static loff_t incfs_get_end_offset(struct file *f)
+{
+ /*
+ * This function assumes that file size and the end-offset
+ * are the same. This is not always true.
+ */
+ return i_size_read(file_inode(f));
+}
+
+/*
+ * Truncate the tail of the file to the given length.
+ * Used to rollback partially successful multistep writes.
+ */
+static int truncate_backing_file(struct backing_file_context *bfc,
+ loff_t new_end)
+{
+ struct inode *inode = NULL;
+ struct dentry *dentry = NULL;
+ loff_t old_end = 0;
+ struct iattr attr;
+ int result = 0;
+
+ if (!bfc)
+ return -EFAULT;
+
+ LOCK_REQUIRED(bfc->bc_mutex);
+
+ if (!bfc->bc_file)
+ return -EFAULT;
+
+ old_end = incfs_get_end_offset(bfc->bc_file);
+ if (old_end == new_end)
+ return 0;
+ if (old_end < new_end)
+ return -EINVAL;
+
+ inode = bfc->bc_file->f_inode;
+ dentry = bfc->bc_file->f_path.dentry;
+
+ attr.ia_size = new_end;
+ attr.ia_valid = ATTR_SIZE;
+
+ inode_lock(inode);
+ result = notify_change(&nop_mnt_idmap, dentry, &attr, NULL);
+ inode_unlock(inode);
+
+ return result;
+}
+
+static int write_to_bf(struct backing_file_context *bfc, const void *buf,
+ size_t count, loff_t pos)
+{
+ ssize_t res = incfs_kwrite(bfc, buf, count, pos);
+
+ if (res < 0)
+ return res;
+ if (res != count)
+ return -EIO;
+ return 0;
+}
+
+static int append_zeros_no_fallocate(struct backing_file_context *bfc,
+ size_t file_size, size_t len)
+{
+ u8 buffer[256] = {};
+ size_t i;
+
+ for (i = 0; i < len; i += sizeof(buffer)) {
+ int to_write = len - i > sizeof(buffer)
+ ? sizeof(buffer) : len - i;
+ int err = write_to_bf(bfc, buffer, to_write, file_size + i);
+
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+/* Append a given number of zero bytes to the end of the backing file. */
+static int append_zeros(struct backing_file_context *bfc, size_t len)
+{
+ loff_t file_size = 0;
+ loff_t new_last_byte_offset = 0;
+ int result;
+
+ if (!bfc)
+ return -EFAULT;
+
+ if (len == 0)
+ return 0;
+
+ LOCK_REQUIRED(bfc->bc_mutex);
+
+ /*
+ * Allocate only one byte at the new desired end of the file.
+ * It will increase file size and create a zeroed area of
+ * a given size.
+ */
+ file_size = incfs_get_end_offset(bfc->bc_file);
+ new_last_byte_offset = file_size + len - 1;
+ result = vfs_fallocate(bfc->bc_file, 0, new_last_byte_offset, 1);
+ if (result != -EOPNOTSUPP)
+ return result;
+
+ return append_zeros_no_fallocate(bfc, file_size, len);
+}
+
+/*
+ * Append a given metadata record to the backing file and update a previous
+ * record to add the new record the the metadata list.
+ */
+static int append_md_to_backing_file(struct backing_file_context *bfc,
+ struct incfs_md_header *record)
+{
+ int result = 0;
+ loff_t record_offset;
+ loff_t file_pos;
+ __le64 new_md_offset;
+ size_t record_size;
+
+ if (!bfc || !record)
+ return -EFAULT;
+
+ if (bfc->bc_last_md_record_offset < 0)
+ return -EINVAL;
+
+ LOCK_REQUIRED(bfc->bc_mutex);
+
+ record_size = le16_to_cpu(record->h_record_size);
+ file_pos = incfs_get_end_offset(bfc->bc_file);
+ record->h_next_md_offset = 0;
+
+ /* Write the metadata record to the end of the backing file */
+ record_offset = file_pos;
+ new_md_offset = cpu_to_le64(record_offset);
+ result = write_to_bf(bfc, record, record_size, file_pos);
+ if (result)
+ return result;
+
+ /* Update next metadata offset in a previous record or a superblock. */
+ if (bfc->bc_last_md_record_offset) {
+ /*
+ * Find a place in the previous md record where new record's
+ * offset needs to be saved.
+ */
+ file_pos = bfc->bc_last_md_record_offset +
+ offsetof(struct incfs_md_header, h_next_md_offset);
+ } else {
+ /*
+ * No metadata yet, file a place to update in the
+ * file_header.
+ */
+ file_pos = offsetof(struct incfs_file_header,
+ fh_first_md_offset);
+ }
+ result = write_to_bf(bfc, &new_md_offset, sizeof(new_md_offset),
+ file_pos);
+ if (result)
+ return result;
+
+ bfc->bc_last_md_record_offset = record_offset;
+ return result;
+}
+
+/*
+ * Reserve 0-filled space for the blockmap body, and append
+ * incfs_blockmap metadata record pointing to it.
+ */
+int incfs_write_blockmap_to_backing_file(struct backing_file_context *bfc,
+ u32 block_count)
+{
+ struct incfs_blockmap blockmap = {};
+ int result = 0;
+ loff_t file_end = 0;
+ size_t map_size = block_count * sizeof(struct incfs_blockmap_entry);
+
+ if (!bfc)
+ return -EFAULT;
+
+ blockmap.m_header.h_md_entry_type = INCFS_MD_BLOCK_MAP;
+ blockmap.m_header.h_record_size = cpu_to_le16(sizeof(blockmap));
+ blockmap.m_header.h_next_md_offset = cpu_to_le64(0);
+ blockmap.m_block_count = cpu_to_le32(block_count);
+
+ LOCK_REQUIRED(bfc->bc_mutex);
+
+ /* Reserve 0-filled space for the blockmap body in the backing file. */
+ file_end = incfs_get_end_offset(bfc->bc_file);
+ result = append_zeros(bfc, map_size);
+ if (result)
+ return result;
+
+ /* Write blockmap metadata record pointing to the body written above. */
+ blockmap.m_base_offset = cpu_to_le64(file_end);
+ result = append_md_to_backing_file(bfc, &blockmap.m_header);
+ if (result)
+ /* Error, rollback file changes */
+ truncate_backing_file(bfc, file_end);
+
+ return result;
+}
+
+int incfs_write_signature_to_backing_file(struct backing_file_context *bfc,
+ struct mem_range sig, u32 tree_size,
+ loff_t *tree_offset, loff_t *sig_offset)
+{
+ struct incfs_file_signature sg = {};
+ int result = 0;
+ loff_t rollback_pos = 0;
+ loff_t tree_area_pos = 0;
+ size_t alignment = 0;
+
+ if (!bfc)
+ return -EFAULT;
+
+ LOCK_REQUIRED(bfc->bc_mutex);
+
+ rollback_pos = incfs_get_end_offset(bfc->bc_file);
+
+ sg.sg_header.h_md_entry_type = INCFS_MD_SIGNATURE;
+ sg.sg_header.h_record_size = cpu_to_le16(sizeof(sg));
+ sg.sg_header.h_next_md_offset = cpu_to_le64(0);
+ if (sig.data != NULL && sig.len > 0) {
+ sg.sg_sig_size = cpu_to_le32(sig.len);
+ sg.sg_sig_offset = cpu_to_le64(rollback_pos);
+
+ result = write_to_bf(bfc, sig.data, sig.len, rollback_pos);
+ if (result)
+ goto err;
+ }
+
+ tree_area_pos = incfs_get_end_offset(bfc->bc_file);
+ if (tree_size > 0) {
+ if (tree_size > 5 * INCFS_DATA_FILE_BLOCK_SIZE) {
+ /*
+ * If hash tree is big enough, it makes sense to
+ * align in the backing file for faster access.
+ */
+ loff_t offset = round_up(tree_area_pos, PAGE_SIZE);
+
+ alignment = offset - tree_area_pos;
+ tree_area_pos = offset;
+ }
+
+ /*
+ * If root hash is not the only hash in the tree.
+ * reserve 0-filled space for the tree.
+ */
+ result = append_zeros(bfc, tree_size + alignment);
+ if (result)
+ goto err;
+
+ sg.sg_hash_tree_size = cpu_to_le32(tree_size);
+ sg.sg_hash_tree_offset = cpu_to_le64(tree_area_pos);
+ }
+
+ /* Write a hash tree metadata record pointing to the hash tree above. */
+ result = append_md_to_backing_file(bfc, &sg.sg_header);
+err:
+ if (result)
+ /* Error, rollback file changes */
+ truncate_backing_file(bfc, rollback_pos);
+ else {
+ if (tree_offset)
+ *tree_offset = tree_area_pos;
+ if (sig_offset)
+ *sig_offset = rollback_pos;
+ }
+
+ return result;
+}
+
+static int write_new_status_to_backing_file(struct backing_file_context *bfc,
+ u32 data_blocks_written,
+ u32 hash_blocks_written)
+{
+ int result;
+ loff_t rollback_pos;
+ struct incfs_status is = {
+ .is_header = {
+ .h_md_entry_type = INCFS_MD_STATUS,
+ .h_record_size = cpu_to_le16(sizeof(is)),
+ },
+ .is_data_blocks_written = cpu_to_le32(data_blocks_written),
+ .is_hash_blocks_written = cpu_to_le32(hash_blocks_written),
+ };
+
+ LOCK_REQUIRED(bfc->bc_mutex);
+ rollback_pos = incfs_get_end_offset(bfc->bc_file);
+ result = append_md_to_backing_file(bfc, &is.is_header);
+ if (result)
+ truncate_backing_file(bfc, rollback_pos);
+
+ return result;
+}
+
+int incfs_write_status_to_backing_file(struct backing_file_context *bfc,
+ loff_t status_offset,
+ u32 data_blocks_written,
+ u32 hash_blocks_written)
+{
+ struct incfs_status is;
+ int result;
+
+ if (!bfc)
+ return -EFAULT;
+
+ if (status_offset == 0)
+ return write_new_status_to_backing_file(bfc,
+ data_blocks_written, hash_blocks_written);
+
+ result = incfs_kread(bfc, &is, sizeof(is), status_offset);
+ if (result != sizeof(is))
+ return -EIO;
+
+ is.is_data_blocks_written = cpu_to_le32(data_blocks_written);
+ is.is_hash_blocks_written = cpu_to_le32(hash_blocks_written);
+ result = incfs_kwrite(bfc, &is, sizeof(is), status_offset);
+ if (result != sizeof(is))
+ return -EIO;
+
+ return 0;
+}
+
+int incfs_write_verity_signature_to_backing_file(
+ struct backing_file_context *bfc, struct mem_range signature,
+ loff_t *offset)
+{
+ struct incfs_file_verity_signature vs = {};
+ int result;
+ loff_t pos;
+
+ /* No verity signature section is equivalent to an empty section */
+ if (signature.data == NULL || signature.len == 0)
+ return 0;
+
+ pos = incfs_get_end_offset(bfc->bc_file);
+
+ vs = (struct incfs_file_verity_signature) {
+ .vs_header = (struct incfs_md_header) {
+ .h_md_entry_type = INCFS_MD_VERITY_SIGNATURE,
+ .h_record_size = cpu_to_le16(sizeof(vs)),
+ .h_next_md_offset = cpu_to_le64(0),
+ },
+ .vs_size = cpu_to_le32(signature.len),
+ .vs_offset = cpu_to_le64(pos),
+ };
+
+ result = write_to_bf(bfc, signature.data, signature.len, pos);
+ if (result)
+ goto err;
+
+ result = append_md_to_backing_file(bfc, &vs.vs_header);
+ if (result)
+ goto err;
+
+ *offset = pos;
+err:
+ if (result)
+ /* Error, rollback file changes */
+ truncate_backing_file(bfc, pos);
+ return result;
+}
+
+/*
+ * Write a backing file header
+ * It should always be called only on empty file.
+ * fh.fh_first_md_offset is 0 for now, but will be updated
+ * once first metadata record is added.
+ */
+int incfs_write_fh_to_backing_file(struct backing_file_context *bfc,
+ incfs_uuid_t *uuid, u64 file_size)
+{
+ struct incfs_file_header fh = {};
+ loff_t file_pos = 0;
+
+ if (!bfc)
+ return -EFAULT;
+
+ fh.fh_magic = cpu_to_le64(INCFS_MAGIC_NUMBER);
+ fh.fh_version = cpu_to_le64(INCFS_FORMAT_CURRENT_VER);
+ fh.fh_header_size = cpu_to_le16(sizeof(fh));
+ fh.fh_first_md_offset = cpu_to_le64(0);
+ fh.fh_data_block_size = cpu_to_le16(INCFS_DATA_FILE_BLOCK_SIZE);
+
+ fh.fh_file_size = cpu_to_le64(file_size);
+ fh.fh_uuid = *uuid;
+
+ LOCK_REQUIRED(bfc->bc_mutex);
+
+ file_pos = incfs_get_end_offset(bfc->bc_file);
+ if (file_pos != 0)
+ return -EEXIST;
+
+ return write_to_bf(bfc, &fh, sizeof(fh), file_pos);
+}
+
+/*
+ * Write a backing file header for a mapping file
+ * It should always be called only on empty file.
+ */
+int incfs_write_mapping_fh_to_backing_file(struct backing_file_context *bfc,
+ incfs_uuid_t *uuid, u64 file_size, u64 offset)
+{
+ struct incfs_file_header fh = {};
+ loff_t file_pos = 0;
+
+ if (!bfc)
+ return -EFAULT;
+
+ fh.fh_magic = cpu_to_le64(INCFS_MAGIC_NUMBER);
+ fh.fh_version = cpu_to_le64(INCFS_FORMAT_CURRENT_VER);
+ fh.fh_header_size = cpu_to_le16(sizeof(fh));
+ fh.fh_original_offset = cpu_to_le64(offset);
+ fh.fh_data_block_size = cpu_to_le16(INCFS_DATA_FILE_BLOCK_SIZE);
+
+ fh.fh_mapped_file_size = cpu_to_le64(file_size);
+ fh.fh_original_uuid = *uuid;
+ fh.fh_flags = cpu_to_le32(INCFS_FILE_MAPPED);
+
+ LOCK_REQUIRED(bfc->bc_mutex);
+
+ file_pos = incfs_get_end_offset(bfc->bc_file);
+ if (file_pos != 0)
+ return -EEXIST;
+
+ return write_to_bf(bfc, &fh, sizeof(fh), file_pos);
+}
+
+/* Write a given data block and update file's blockmap to point it. */
+int incfs_write_data_block_to_backing_file(struct backing_file_context *bfc,
+ struct mem_range block, int block_index,
+ loff_t bm_base_off, u16 flags)
+{
+ struct incfs_blockmap_entry bm_entry = {};
+ int result = 0;
+ loff_t data_offset = 0;
+ loff_t bm_entry_off =
+ bm_base_off + sizeof(struct incfs_blockmap_entry) * block_index;
+
+ if (!bfc)
+ return -EFAULT;
+
+ if (block.len >= (1 << 16) || block_index < 0)
+ return -EINVAL;
+
+ LOCK_REQUIRED(bfc->bc_mutex);
+
+ data_offset = incfs_get_end_offset(bfc->bc_file);
+ if (data_offset <= bm_entry_off) {
+ /* Blockmap entry is beyond the file's end. It is not normal. */
+ return -EINVAL;
+ }
+
+ /* Write the block data at the end of the backing file. */
+ result = write_to_bf(bfc, block.data, block.len, data_offset);
+ if (result)
+ return result;
+
+ /* Update the blockmap to point to the newly written data. */
+ bm_entry.me_data_offset_lo = cpu_to_le32((u32)data_offset);
+ bm_entry.me_data_offset_hi = cpu_to_le16((u16)(data_offset >> 32));
+ bm_entry.me_data_size = cpu_to_le16((u16)block.len);
+ bm_entry.me_flags = cpu_to_le16(flags);
+
+ return write_to_bf(bfc, &bm_entry, sizeof(bm_entry),
+ bm_entry_off);
+}
+
+int incfs_write_hash_block_to_backing_file(struct backing_file_context *bfc,
+ struct mem_range block,
+ int block_index,
+ loff_t hash_area_off,
+ loff_t bm_base_off,
+ loff_t file_size)
+{
+ struct incfs_blockmap_entry bm_entry = {};
+ int result;
+ loff_t data_offset = 0;
+ loff_t file_end = 0;
+ loff_t bm_entry_off =
+ bm_base_off +
+ sizeof(struct incfs_blockmap_entry) *
+ (block_index + get_blocks_count_for_size(file_size));
+
+ if (!bfc)
+ return -EFAULT;
+
+ LOCK_REQUIRED(bfc->bc_mutex);
+
+ data_offset = hash_area_off + block_index * INCFS_DATA_FILE_BLOCK_SIZE;
+ file_end = incfs_get_end_offset(bfc->bc_file);
+ if (data_offset + block.len > file_end) {
+ /* Block is located beyond the file's end. It is not normal. */
+ return -EINVAL;
+ }
+
+ result = write_to_bf(bfc, block.data, block.len, data_offset);
+ if (result)
+ return result;
+
+ bm_entry.me_data_offset_lo = cpu_to_le32((u32)data_offset);
+ bm_entry.me_data_offset_hi = cpu_to_le16((u16)(data_offset >> 32));
+ bm_entry.me_data_size = cpu_to_le16(INCFS_DATA_FILE_BLOCK_SIZE);
+
+ return write_to_bf(bfc, &bm_entry, sizeof(bm_entry), bm_entry_off);
+}
+
+int incfs_read_blockmap_entry(struct backing_file_context *bfc, int block_index,
+ loff_t bm_base_off,
+ struct incfs_blockmap_entry *bm_entry)
+{
+ int error = incfs_read_blockmap_entries(bfc, bm_entry, block_index, 1,
+ bm_base_off);
+
+ if (error < 0)
+ return error;
+
+ if (error == 0)
+ return -EIO;
+
+ if (error != 1)
+ return -EFAULT;
+
+ return 0;
+}
+
+int incfs_read_blockmap_entries(struct backing_file_context *bfc,
+ struct incfs_blockmap_entry *entries,
+ int start_index, int blocks_number,
+ loff_t bm_base_off)
+{
+ loff_t bm_entry_off =
+ bm_base_off + sizeof(struct incfs_blockmap_entry) * start_index;
+ const size_t bytes_to_read = sizeof(struct incfs_blockmap_entry)
+ * blocks_number;
+ int result = 0;
+
+ if (!bfc || !entries)
+ return -EFAULT;
+
+ if (start_index < 0 || bm_base_off <= 0)
+ return -ENODATA;
+
+ result = incfs_kread(bfc, entries, bytes_to_read, bm_entry_off);
+ if (result < 0)
+ return result;
+ return result / sizeof(*entries);
+}
+
+int incfs_read_file_header(struct backing_file_context *bfc,
+ loff_t *first_md_off, incfs_uuid_t *uuid,
+ u64 *file_size, u32 *flags)
+{
+ ssize_t bytes_read = 0;
+ struct incfs_file_header fh = {};
+
+ if (!bfc || !first_md_off)
+ return -EFAULT;
+
+ bytes_read = incfs_kread(bfc, &fh, sizeof(fh), 0);
+ if (bytes_read < 0)
+ return bytes_read;
+
+ if (bytes_read < sizeof(fh))
+ return -EBADMSG;
+
+ if (le64_to_cpu(fh.fh_magic) != INCFS_MAGIC_NUMBER)
+ return -EILSEQ;
+
+ if (le64_to_cpu(fh.fh_version) > INCFS_FORMAT_CURRENT_VER)
+ return -EILSEQ;
+
+ if (le16_to_cpu(fh.fh_data_block_size) != INCFS_DATA_FILE_BLOCK_SIZE)
+ return -EILSEQ;
+
+ if (le16_to_cpu(fh.fh_header_size) != sizeof(fh))
+ return -EILSEQ;
+
+ if (first_md_off)
+ *first_md_off = le64_to_cpu(fh.fh_first_md_offset);
+ if (uuid)
+ *uuid = fh.fh_uuid;
+ if (file_size)
+ *file_size = le64_to_cpu(fh.fh_file_size);
+ if (flags)
+ *flags = le32_to_cpu(fh.fh_flags);
+ return 0;
+}
+
+/*
+ * Read through metadata records from the backing file one by one
+ * and call provided metadata handlers.
+ */
+int incfs_read_next_metadata_record(struct backing_file_context *bfc,
+ struct metadata_handler *handler)
+{
+ const ssize_t max_md_size = INCFS_MAX_METADATA_RECORD_SIZE;
+ ssize_t bytes_read = 0;
+ size_t md_record_size = 0;
+ loff_t next_record = 0;
+ int res = 0;
+ struct incfs_md_header *md_hdr = NULL;
+
+ if (!bfc || !handler)
+ return -EFAULT;
+
+ if (handler->md_record_offset == 0)
+ return -EPERM;
+
+ memset(&handler->md_buffer, 0, max_md_size);
+ bytes_read = incfs_kread(bfc, &handler->md_buffer, max_md_size,
+ handler->md_record_offset);
+ if (bytes_read < 0)
+ return bytes_read;
+ if (bytes_read < sizeof(*md_hdr))
+ return -EBADMSG;
+
+ md_hdr = &handler->md_buffer.md_header;
+ next_record = le64_to_cpu(md_hdr->h_next_md_offset);
+ md_record_size = le16_to_cpu(md_hdr->h_record_size);
+
+ if (md_record_size > max_md_size) {
+ pr_warn("incfs: The record is too large. Size: %zu",
+ md_record_size);
+ return -EBADMSG;
+ }
+
+ if (bytes_read < md_record_size) {
+ pr_warn("incfs: The record hasn't been fully read.");
+ return -EBADMSG;
+ }
+
+ if (next_record <= handler->md_record_offset && next_record != 0) {
+ pr_warn("incfs: Next record (%lld) points back in file.",
+ next_record);
+ return -EBADMSG;
+ }
+
+ switch (md_hdr->h_md_entry_type) {
+ case INCFS_MD_NONE:
+ break;
+ case INCFS_MD_BLOCK_MAP:
+ if (handler->handle_blockmap)
+ res = handler->handle_blockmap(
+ &handler->md_buffer.blockmap, handler);
+ break;
+ case INCFS_MD_FILE_ATTR:
+ /*
+ * File attrs no longer supported, ignore section for
+ * compatibility
+ */
+ break;
+ case INCFS_MD_SIGNATURE:
+ if (handler->handle_signature)
+ res = handler->handle_signature(
+ &handler->md_buffer.signature, handler);
+ break;
+ case INCFS_MD_STATUS:
+ if (handler->handle_status)
+ res = handler->handle_status(
+ &handler->md_buffer.status, handler);
+ break;
+ case INCFS_MD_VERITY_SIGNATURE:
+ if (handler->handle_verity_signature)
+ res = handler->handle_verity_signature(
+ &handler->md_buffer.verity_signature, handler);
+ break;
+ default:
+ res = -ENOTSUPP;
+ break;
+ }
+
+ if (!res) {
+ if (next_record == 0) {
+ /*
+ * Zero offset for the next record means that the last
+ * metadata record has just been processed.
+ */
+ bfc->bc_last_md_record_offset =
+ handler->md_record_offset;
+ }
+ handler->md_prev_record_offset = handler->md_record_offset;
+ handler->md_record_offset = next_record;
+ }
+ return res;
+}
+
+ssize_t incfs_kread(struct backing_file_context *bfc, void *buf, size_t size,
+ loff_t pos)
+{
+ const struct cred *old_cred = override_creds(bfc->bc_cred);
+ int ret = kernel_read(bfc->bc_file, buf, size, &pos);
+
+ revert_creds(old_cred);
+ return ret;
+}
+
+ssize_t incfs_kwrite(struct backing_file_context *bfc, const void *buf,
+ size_t size, loff_t pos)
+{
+ const struct cred *old_cred = override_creds(bfc->bc_cred);
+ int ret = kernel_write(bfc->bc_file, buf, size, &pos);
+
+ revert_creds(old_cred);
+ return ret;
+}
diff --git a/fs/incfs/format.h b/fs/incfs/format.h
new file mode 100644
index 0000000..d534e9f8
--- /dev/null
+++ b/fs/incfs/format.h
@@ -0,0 +1,413 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018 Google LLC
+ */
+
+/*
+ * Overview
+ * --------
+ * The backbone of the incremental-fs ondisk format is an append only linked
+ * list of metadata blocks. Each metadata block contains an offset of the next
+ * one. These blocks describe files and directories on the
+ * file system. They also represent actions of adding and removing file names
+ * (hard links).
+ *
+ * Every time incremental-fs instance is mounted, it reads through this list
+ * to recreate filesystem's state in memory. An offset of the first record in
+ * the metadata list is stored in the superblock at the beginning of the backing
+ * file.
+ *
+ * Most of the backing file is taken by data areas and blockmaps.
+ * Since data blocks can be compressed and have different sizes,
+ * single per-file data area can't be pre-allocated. That's why blockmaps are
+ * needed in order to find a location and size of each data block in
+ * the backing file. Each time a file is created, a corresponding block map is
+ * allocated to store future offsets of data blocks.
+ *
+ * Whenever a data block is given by data loader to incremental-fs:
+ * - A data area with the given block is appended to the end of
+ * the backing file.
+ * - A record in the blockmap for the given block index is updated to reflect
+ * its location, size, and compression algorithm.
+
+ * Metadata records
+ * ----------------
+ * incfs_blockmap - metadata record that specifies size and location
+ * of a blockmap area for a given file. This area
+ * contains an array of incfs_blockmap_entry-s.
+ * incfs_file_signature - metadata record that specifies where file signature
+ * and its hash tree can be found in the backing file.
+ *
+ * incfs_file_attr - metadata record that specifies where additional file
+ * attributes blob can be found.
+ *
+ * Metadata header
+ * ---------------
+ * incfs_md_header - header of a metadata record. It's always a part
+ * of other structures and served purpose of metadata
+ * bookkeeping.
+ *
+ * +-----------------------------------------------+ ^
+ * | incfs_md_header | |
+ * | 1. type of body(BLOCKMAP, FILE_ATTR..) | |
+ * | 2. size of the whole record header + body | |
+ * | 3. CRC the whole record header + body | |
+ * | 4. offset of the previous md record |]------+
+ * | 5. offset of the next md record (md link) |]---+
+ * +-----------------------------------------------+ |
+ * | Metadata record body with useful data | |
+ * +-----------------------------------------------+ |
+ * +--->
+ *
+ * Other ondisk structures
+ * -----------------------
+ * incfs_super_block - backing file header
+ * incfs_blockmap_entry - a record in a blockmap area that describes size
+ * and location of a data block.
+ * Data blocks dont have any particular structure, they are written to the
+ * backing file in a raw form as they come from a data loader.
+ *
+ * Backing file layout
+ * -------------------
+ *
+ *
+ * +-------------------------------------------+
+ * | incfs_file_header |]---+
+ * +-------------------------------------------+ |
+ * | metadata |<---+
+ * | incfs_file_signature |]---+
+ * +-------------------------------------------+ |
+ * ......................... |
+ * +-------------------------------------------+ | metadata
+ * +------->| blockmap area | | list links
+ * | | [incfs_blockmap_entry] | |
+ * | | [incfs_blockmap_entry] | |
+ * | | [incfs_blockmap_entry] | |
+ * | +--[| [incfs_blockmap_entry] | |
+ * | | | [incfs_blockmap_entry] | |
+ * | | | [incfs_blockmap_entry] | |
+ * | | +-------------------------------------------+ |
+ * | | ......................... |
+ * | | +-------------------------------------------+ |
+ * | | | metadata |<---+
+ * +----|--[| incfs_blockmap |]---+
+ * | +-------------------------------------------+ |
+ * | ......................... |
+ * | +-------------------------------------------+ |
+ * +-->| data block | |
+ * +-------------------------------------------+ |
+ * ......................... |
+ * +-------------------------------------------+ |
+ * | metadata |<---+
+ * | incfs_file_attr |
+ * +-------------------------------------------+
+ */
+#ifndef _INCFS_FORMAT_H
+#define _INCFS_FORMAT_H
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <uapi/linux/incrementalfs.h>
+
+#include "internal.h"
+
+#define INCFS_MAX_NAME_LEN 255
+#define INCFS_FORMAT_V1 1
+#define INCFS_FORMAT_CURRENT_VER INCFS_FORMAT_V1
+
+enum incfs_metadata_type {
+ INCFS_MD_NONE = 0,
+ INCFS_MD_BLOCK_MAP = 1,
+ INCFS_MD_FILE_ATTR = 2,
+ INCFS_MD_SIGNATURE = 3,
+ INCFS_MD_STATUS = 4,
+ INCFS_MD_VERITY_SIGNATURE = 5,
+};
+
+enum incfs_file_header_flags {
+ INCFS_FILE_MAPPED = 1 << 1,
+};
+
+/* Header included at the beginning of all metadata records on the disk. */
+struct incfs_md_header {
+ __u8 h_md_entry_type;
+
+ /*
+ * Size of the metadata record.
+ * (e.g. inode, dir entry etc) not just this struct.
+ */
+ __le16 h_record_size;
+
+ /*
+ * Was: CRC32 of the metadata record.
+ * (e.g. inode, dir entry etc) not just this struct.
+ */
+ __le32 h_unused1;
+
+ /* Offset of the next metadata entry if any */
+ __le64 h_next_md_offset;
+
+ /* Was: Offset of the previous metadata entry if any */
+ __le64 h_unused2;
+
+} __packed;
+
+/* Backing file header */
+struct incfs_file_header {
+ /* Magic number: INCFS_MAGIC_NUMBER */
+ __le64 fh_magic;
+
+ /* Format version: INCFS_FORMAT_CURRENT_VER */
+ __le64 fh_version;
+
+ /* sizeof(incfs_file_header) */
+ __le16 fh_header_size;
+
+ /* INCFS_DATA_FILE_BLOCK_SIZE */
+ __le16 fh_data_block_size;
+
+ /* File flags, from incfs_file_header_flags */
+ __le32 fh_flags;
+
+ union {
+ /* Standard incfs file */
+ struct {
+ /* Offset of the first metadata record */
+ __le64 fh_first_md_offset;
+
+ /* Full size of the file's content */
+ __le64 fh_file_size;
+
+ /* File uuid */
+ incfs_uuid_t fh_uuid;
+ };
+
+ /* Mapped file - INCFS_FILE_MAPPED set in fh_flags */
+ struct {
+ /* Offset in original file */
+ __le64 fh_original_offset;
+
+ /* Full size of the file's content */
+ __le64 fh_mapped_file_size;
+
+ /* Original file's uuid */
+ incfs_uuid_t fh_original_uuid;
+ };
+ };
+} __packed;
+
+enum incfs_block_map_entry_flags {
+ INCFS_BLOCK_COMPRESSED_LZ4 = 1,
+ INCFS_BLOCK_COMPRESSED_ZSTD = 2,
+
+ /* Reserve 3 bits for compression alg */
+ INCFS_BLOCK_COMPRESSED_MASK = 7,
+};
+
+/* Block map entry pointing to an actual location of the data block. */
+struct incfs_blockmap_entry {
+ /* Offset of the actual data block. Lower 32 bits */
+ __le32 me_data_offset_lo;
+
+ /* Offset of the actual data block. Higher 16 bits */
+ __le16 me_data_offset_hi;
+
+ /* How many bytes the data actually occupies in the backing file */
+ __le16 me_data_size;
+
+ /* Block flags from incfs_block_map_entry_flags */
+ __le16 me_flags;
+} __packed;
+
+/* Metadata record for locations of file blocks. Type = INCFS_MD_BLOCK_MAP */
+struct incfs_blockmap {
+ struct incfs_md_header m_header;
+
+ /* Base offset of the array of incfs_blockmap_entry */
+ __le64 m_base_offset;
+
+ /* Size of the map entry array in blocks */
+ __le32 m_block_count;
+} __packed;
+
+/*
+ * Metadata record for file signature. Type = INCFS_MD_SIGNATURE
+ *
+ * The signature stored here is the APK V4 signature data blob. See the
+ * definition of incfs_new_file_args::signature_info for an explanation of this
+ * blob. Specifically, it contains the root hash, but it does *not* contain
+ * anything that the kernel treats as a signature.
+ *
+ * When FS_IOC_ENABLE_VERITY is called on a file without this record, an APK V4
+ * signature blob and a hash tree are added to the file, and then this metadata
+ * record is created to record their locations.
+ */
+struct incfs_file_signature {
+ struct incfs_md_header sg_header;
+
+ __le32 sg_sig_size; /* The size of the signature. */
+
+ __le64 sg_sig_offset; /* Signature's offset in the backing file */
+
+ __le32 sg_hash_tree_size; /* The size of the hash tree. */
+
+ __le64 sg_hash_tree_offset; /* Hash tree offset in the backing file */
+} __packed;
+
+/* In memory version of above */
+struct incfs_df_signature {
+ u32 sig_size;
+ u64 sig_offset;
+ u32 hash_size;
+ u64 hash_offset;
+};
+
+struct incfs_status {
+ struct incfs_md_header is_header;
+
+ __le32 is_data_blocks_written; /* Number of data blocks written */
+
+ __le32 is_hash_blocks_written; /* Number of hash blocks written */
+
+ __le32 is_dummy[6]; /* Spare fields */
+} __packed;
+
+/*
+ * Metadata record for verity signature. Type = INCFS_MD_VERITY_SIGNATURE
+ *
+ * This record will only exist for verity-enabled files with signatures. Verity
+ * enabled files without signatures do not have this record.
+ *
+ * This is obsolete, as incfs no longer checks this type of signature.
+ */
+struct incfs_file_verity_signature {
+ struct incfs_md_header vs_header;
+
+ /* The size of the signature */
+ __le32 vs_size;
+
+ /* Signature's offset in the backing file */
+ __le64 vs_offset;
+} __packed;
+
+/* In memory version of above */
+struct incfs_df_verity_signature {
+ u32 size;
+ u64 offset;
+};
+
+/* State of the backing file. */
+struct backing_file_context {
+ /* Protects writes to bc_file */
+ struct mutex bc_mutex;
+
+ /* File object to read data from */
+ struct file *bc_file;
+
+ /*
+ * Offset of the last known metadata record in the backing file.
+ * 0 means there are no metadata records.
+ */
+ loff_t bc_last_md_record_offset;
+
+ /*
+ * Credentials to set before reads/writes
+ * Note that this is a pointer to the mount_info mi_owner field so
+ * there is no need to get/put the creds
+ */
+ const struct cred *bc_cred;
+
+ /*
+ * The file has a bad block, i.e. one that has failed checksumming.
+ */
+ bool bc_has_bad_block;
+};
+
+struct metadata_handler {
+ loff_t md_record_offset;
+ loff_t md_prev_record_offset;
+ void *context;
+
+ union {
+ struct incfs_md_header md_header;
+ struct incfs_blockmap blockmap;
+ struct incfs_file_signature signature;
+ struct incfs_status status;
+ struct incfs_file_verity_signature verity_signature;
+ } md_buffer;
+
+ int (*handle_blockmap)(struct incfs_blockmap *bm,
+ struct metadata_handler *handler);
+ int (*handle_signature)(struct incfs_file_signature *sig,
+ struct metadata_handler *handler);
+ int (*handle_status)(struct incfs_status *sig,
+ struct metadata_handler *handler);
+ int (*handle_verity_signature)(struct incfs_file_verity_signature *s,
+ struct metadata_handler *handler);
+};
+#define INCFS_MAX_METADATA_RECORD_SIZE \
+ sizeof_field(struct metadata_handler, md_buffer)
+
+/* Backing file context management */
+struct mount_info;
+struct backing_file_context *incfs_alloc_bfc(struct mount_info *mi,
+ struct file *backing_file);
+
+void incfs_free_bfc(struct backing_file_context *bfc);
+
+/* Writing stuff */
+int incfs_write_blockmap_to_backing_file(struct backing_file_context *bfc,
+ u32 block_count);
+
+int incfs_write_fh_to_backing_file(struct backing_file_context *bfc,
+ incfs_uuid_t *uuid, u64 file_size);
+
+int incfs_write_mapping_fh_to_backing_file(struct backing_file_context *bfc,
+ incfs_uuid_t *uuid, u64 file_size, u64 offset);
+
+int incfs_write_data_block_to_backing_file(struct backing_file_context *bfc,
+ struct mem_range block,
+ int block_index, loff_t bm_base_off,
+ u16 flags);
+
+int incfs_write_hash_block_to_backing_file(struct backing_file_context *bfc,
+ struct mem_range block,
+ int block_index,
+ loff_t hash_area_off,
+ loff_t bm_base_off,
+ loff_t file_size);
+
+int incfs_write_signature_to_backing_file(struct backing_file_context *bfc,
+ struct mem_range sig, u32 tree_size,
+ loff_t *tree_offset, loff_t *sig_offset);
+
+int incfs_write_status_to_backing_file(struct backing_file_context *bfc,
+ loff_t status_offset,
+ u32 data_blocks_written,
+ u32 hash_blocks_written);
+int incfs_write_verity_signature_to_backing_file(
+ struct backing_file_context *bfc, struct mem_range signature,
+ loff_t *offset);
+
+/* Reading stuff */
+int incfs_read_file_header(struct backing_file_context *bfc,
+ loff_t *first_md_off, incfs_uuid_t *uuid,
+ u64 *file_size, u32 *flags);
+
+int incfs_read_blockmap_entry(struct backing_file_context *bfc, int block_index,
+ loff_t bm_base_off,
+ struct incfs_blockmap_entry *bm_entry);
+
+int incfs_read_blockmap_entries(struct backing_file_context *bfc,
+ struct incfs_blockmap_entry *entries,
+ int start_index, int blocks_number,
+ loff_t bm_base_off);
+
+int incfs_read_next_metadata_record(struct backing_file_context *bfc,
+ struct metadata_handler *handler);
+
+ssize_t incfs_kread(struct backing_file_context *bfc, void *buf, size_t size,
+ loff_t pos);
+ssize_t incfs_kwrite(struct backing_file_context *bfc, const void *buf,
+ size_t size, loff_t pos);
+
+#endif /* _INCFS_FORMAT_H */
diff --git a/fs/incfs/integrity.c b/fs/incfs/integrity.c
new file mode 100644
index 0000000..db7eb63
--- /dev/null
+++ b/fs/incfs/integrity.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2019 Google LLC
+ */
+#include <crypto/sha2.h>
+#include <linux/err.h>
+#include <linux/version.h>
+
+#include "integrity.h"
+
+const struct incfs_hash_alg *
+incfs_get_hash_alg(enum incfs_hash_tree_algorithm id)
+{
+ static const struct incfs_hash_alg sha256 = {
+ .name = "sha256",
+ .digest_size = SHA256_DIGEST_SIZE,
+ .id = INCFS_HASH_TREE_SHA256
+ };
+
+ if (id == INCFS_HASH_TREE_SHA256) {
+ BUILD_BUG_ON(INCFS_MAX_HASH_SIZE < SHA256_DIGEST_SIZE);
+ return &sha256;
+ }
+
+ return ERR_PTR(-ENOENT);
+}
+
+struct signature_info {
+ u32 version;
+ enum incfs_hash_tree_algorithm hash_algorithm;
+ u8 log2_blocksize;
+ struct mem_range salt;
+ struct mem_range root_hash;
+};
+
+static bool read_u32(u8 **p, u8 *top, u32 *result)
+{
+ if (*p + sizeof(u32) > top)
+ return false;
+
+ *result = le32_to_cpu(*(__le32 *)*p);
+ *p += sizeof(u32);
+ return true;
+}
+
+static bool read_u8(u8 **p, u8 *top, u8 *result)
+{
+ if (*p + sizeof(u8) > top)
+ return false;
+
+ *result = *(u8 *)*p;
+ *p += sizeof(u8);
+ return true;
+}
+
+static bool read_mem_range(u8 **p, u8 *top, struct mem_range *range)
+{
+ u32 len;
+
+ if (!read_u32(p, top, &len) || *p + len > top)
+ return false;
+
+ range->len = len;
+ range->data = *p;
+ *p += len;
+ return true;
+}
+
+static int incfs_parse_signature(struct mem_range signature,
+ struct signature_info *si)
+{
+ u8 *p = signature.data;
+ u8 *top = signature.data + signature.len;
+ u32 hash_section_size;
+
+ if (signature.len > INCFS_MAX_SIGNATURE_SIZE)
+ return -EINVAL;
+
+ if (!read_u32(&p, top, &si->version) ||
+ si->version != INCFS_SIGNATURE_VERSION)
+ return -EINVAL;
+
+ if (!read_u32(&p, top, &hash_section_size) ||
+ p + hash_section_size > top)
+ return -EINVAL;
+ top = p + hash_section_size;
+
+ if (!read_u32(&p, top, &si->hash_algorithm) ||
+ si->hash_algorithm != INCFS_HASH_TREE_SHA256)
+ return -EINVAL;
+
+ if (!read_u8(&p, top, &si->log2_blocksize) || si->log2_blocksize != 12)
+ return -EINVAL;
+
+ if (!read_mem_range(&p, top, &si->salt))
+ return -EINVAL;
+
+ if (!read_mem_range(&p, top, &si->root_hash))
+ return -EINVAL;
+
+ if (p != top)
+ return -EINVAL;
+
+ return 0;
+}
+
+struct mtree *incfs_alloc_mtree(struct mem_range signature,
+ int data_block_count)
+{
+ int error;
+ struct signature_info si;
+ struct mtree *result = NULL;
+ const struct incfs_hash_alg *hash_alg = NULL;
+ int hash_per_block;
+ int lvl;
+ int total_blocks = 0;
+ int blocks_in_level[INCFS_MAX_MTREE_LEVELS];
+ int blocks = data_block_count;
+
+ if (data_block_count <= 0)
+ return ERR_PTR(-EINVAL);
+
+ error = incfs_parse_signature(signature, &si);
+ if (error)
+ return ERR_PTR(error);
+
+ hash_alg = incfs_get_hash_alg(si.hash_algorithm);
+ if (IS_ERR(hash_alg))
+ return ERR_PTR(PTR_ERR(hash_alg));
+
+ if (si.root_hash.len < hash_alg->digest_size)
+ return ERR_PTR(-EINVAL);
+
+ result = kzalloc(sizeof(*result), GFP_NOFS);
+ if (!result)
+ return ERR_PTR(-ENOMEM);
+
+ result->alg = hash_alg;
+ hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / result->alg->digest_size;
+
+ /* Calculating tree geometry. */
+ /* First pass: calculate how many blocks in each tree level. */
+ for (lvl = 0; blocks > 1; lvl++) {
+ if (lvl >= INCFS_MAX_MTREE_LEVELS) {
+ pr_err("incfs: too much data in mtree");
+ goto err;
+ }
+
+ blocks = (blocks + hash_per_block - 1) / hash_per_block;
+ blocks_in_level[lvl] = blocks;
+ total_blocks += blocks;
+ }
+ result->depth = lvl;
+ result->hash_tree_area_size = total_blocks * INCFS_DATA_FILE_BLOCK_SIZE;
+ if (result->hash_tree_area_size > INCFS_MAX_HASH_AREA_SIZE)
+ goto err;
+
+ blocks = 0;
+ /* Second pass: calculate offset of each level. 0th level goes last. */
+ for (lvl = 0; lvl < result->depth; lvl++) {
+ u32 suboffset;
+
+ blocks += blocks_in_level[lvl];
+ suboffset = (total_blocks - blocks)
+ * INCFS_DATA_FILE_BLOCK_SIZE;
+
+ result->hash_level_suboffset[lvl] = suboffset;
+ }
+
+ /* Root hash is stored separately from the rest of the tree. */
+ memcpy(result->root_hash, si.root_hash.data, hash_alg->digest_size);
+ return result;
+
+err:
+ kfree(result);
+ return ERR_PTR(-E2BIG);
+}
+
+void incfs_free_mtree(struct mtree *tree)
+{
+ kfree(tree);
+}
+
+int incfs_hash_buffer(const struct incfs_hash_alg *alg, const void *data,
+ size_t len, u8 *out)
+{
+ switch (alg->id) {
+ case INCFS_HASH_TREE_SHA256:
+ sha256(data, len, out);
+ return 0;
+ default:
+ return -ENOENT;
+ }
+}
+
+int incfs_hash_block(const struct incfs_hash_alg *alg, struct mem_range data,
+ struct mem_range digest)
+{
+ if (!alg || !data.data || !digest.data)
+ return -EFAULT;
+
+ if (alg->digest_size > digest.len)
+ return -EINVAL;
+
+ if (data.len < INCFS_DATA_FILE_BLOCK_SIZE) {
+ int err;
+ void *buf = kzalloc(INCFS_DATA_FILE_BLOCK_SIZE, GFP_NOFS);
+
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, data.data, data.len);
+ err = incfs_hash_buffer(alg, buf, INCFS_DATA_FILE_BLOCK_SIZE,
+ digest.data);
+ kfree(buf);
+ return err;
+ }
+ return incfs_hash_buffer(alg, data.data, data.len, digest.data);
+}
diff --git a/fs/incfs/integrity.h b/fs/incfs/integrity.h
new file mode 100644
index 0000000..ddccd00
--- /dev/null
+++ b/fs/incfs/integrity.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+#ifndef _INCFS_INTEGRITY_H
+#define _INCFS_INTEGRITY_H
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include <uapi/linux/incrementalfs.h>
+
+#include "internal.h"
+
+#define INCFS_MAX_MTREE_LEVELS 8
+#define INCFS_MAX_HASH_AREA_SIZE (1280 * 1024 * 1024)
+
+struct incfs_hash_alg {
+ const char *name;
+ int digest_size;
+ enum incfs_hash_tree_algorithm id;
+};
+
+/* Merkle tree structure. */
+struct mtree {
+ const struct incfs_hash_alg *alg;
+
+ u8 root_hash[INCFS_MAX_HASH_SIZE];
+
+ /* Offset of each hash level in the hash area. */
+ u32 hash_level_suboffset[INCFS_MAX_MTREE_LEVELS];
+
+ u32 hash_tree_area_size;
+
+ /* Number of levels in hash_level_suboffset */
+ int depth;
+};
+
+const struct incfs_hash_alg *
+incfs_get_hash_alg(enum incfs_hash_tree_algorithm id);
+
+struct mtree *incfs_alloc_mtree(struct mem_range signature,
+ int data_block_count);
+
+void incfs_free_mtree(struct mtree *tree);
+
+size_t incfs_get_mtree_depth(enum incfs_hash_tree_algorithm alg, loff_t size);
+
+size_t incfs_get_mtree_hash_count(enum incfs_hash_tree_algorithm alg,
+ loff_t size);
+
+int incfs_hash_buffer(const struct incfs_hash_alg *alg, const void *data,
+ size_t len, u8 *out);
+
+int incfs_hash_block(const struct incfs_hash_alg *alg, struct mem_range data,
+ struct mem_range digest);
+
+#endif /* _INCFS_INTEGRITY_H */
diff --git a/fs/incfs/internal.h b/fs/incfs/internal.h
new file mode 100644
index 0000000..c2df8bf
--- /dev/null
+++ b/fs/incfs/internal.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018 Google LLC
+ */
+#ifndef _INCFS_INTERNAL_H
+#define _INCFS_INTERNAL_H
+#include <linux/types.h>
+
+struct mem_range {
+ u8 *data;
+ size_t len;
+};
+
+static inline struct mem_range range(u8 *data, size_t len)
+{
+ return (struct mem_range){ .data = data, .len = len };
+}
+
+#define LOCK_REQUIRED(lock) WARN_ON_ONCE(!mutex_is_locked(&lock))
+
+#define EFSCORRUPTED EUCLEAN
+
+#endif /* _INCFS_INTERNAL_H */
diff --git a/fs/incfs/main.c b/fs/incfs/main.c
new file mode 100644
index 0000000..4e1ec76
--- /dev/null
+++ b/fs/incfs/main.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC
+ */
+#include <linux/fs.h>
+#include <linux/fs_parser.h>
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <uapi/linux/incrementalfs.h>
+
+#include "sysfs.h"
+#include "vfs.h"
+
+struct file_system_type incfs_fs_type = {
+ .owner = THIS_MODULE,
+ .name = INCFS_NAME,
+ .init_fs_context = incfs_init_fs_context,
+ .parameters = incfs_param_specs,
+ .kill_sb = incfs_kill_sb,
+ .fs_flags = 0
+};
+
+static int __init init_incfs_module(void)
+{
+ int err = 0;
+
+ err = incfs_init_sysfs();
+ if (err)
+ return err;
+
+ err = register_filesystem(&incfs_fs_type);
+ if (err)
+ incfs_cleanup_sysfs();
+
+ return err;
+}
+
+static void __exit cleanup_incfs_module(void)
+{
+ incfs_cleanup_sysfs();
+ unregister_filesystem(&incfs_fs_type);
+}
+
+module_init(init_incfs_module);
+module_exit(cleanup_incfs_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Eugene Zemtsov <ezemtsov@google.com>");
+MODULE_DESCRIPTION("Incremental File System");
diff --git a/fs/incfs/pseudo_files.c b/fs/incfs/pseudo_files.c
new file mode 100644
index 0000000..97cce1c
--- /dev/null
+++ b/fs/incfs/pseudo_files.c
@@ -0,0 +1,1395 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Google LLC
+ */
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/fs_parser.h>
+#include <linux/fsnotify.h>
+#include <linux/namei.h>
+#include <linux/poll.h>
+#include <linux/syscalls.h>
+#include <linux/fdtable.h>
+#include <linux/filelock.h>
+
+#include <uapi/linux/incrementalfs.h>
+
+#include "pseudo_files.h"
+
+#include "data_mgmt.h"
+#include "format.h"
+#include "integrity.h"
+#include "vfs.h"
+
+#define READ_WRITE_FILE_MODE 0666
+
+static bool is_pseudo_filename(struct mem_range name);
+
+/*******************************************************************************
+ * .pending_reads pseudo file definition
+ ******************************************************************************/
+#define INCFS_PENDING_READS_INODE 2
+static const char pending_reads_file_name[] = INCFS_PENDING_READS_FILENAME;
+
+/* State of an open .pending_reads file, unique for each file descriptor. */
+struct pending_reads_state {
+ /* A serial number of the last pending read obtained from this file. */
+ int last_pending_read_sn;
+};
+
+static ssize_t pending_reads_read(struct file *f, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct pending_reads_state *pr_state = f->private_data;
+ struct mount_info *mi = get_mount_info(file_superblock(f));
+ bool report_uid;
+ unsigned long page = 0;
+ struct incfs_pending_read_info *reads_buf = NULL;
+ struct incfs_pending_read_info2 *reads_buf2 = NULL;
+ size_t record_size;
+ size_t reads_to_collect;
+ int last_known_read_sn = READ_ONCE(pr_state->last_pending_read_sn);
+ int new_max_sn = last_known_read_sn;
+ int reads_collected = 0;
+ ssize_t result = 0;
+
+ if (!mi)
+ return -EFAULT;
+
+ report_uid = mi->mi_options.report_uid;
+ record_size = report_uid ? sizeof(*reads_buf2) : sizeof(*reads_buf);
+ reads_to_collect = len / record_size;
+
+ if (!incfs_fresh_pending_reads_exist(mi, last_known_read_sn))
+ return 0;
+
+ page = get_zeroed_page(GFP_NOFS);
+ if (!page)
+ return -ENOMEM;
+
+ if (report_uid)
+ reads_buf2 = (struct incfs_pending_read_info2 *) page;
+ else
+ reads_buf = (struct incfs_pending_read_info *) page;
+
+ reads_to_collect =
+ min_t(size_t, PAGE_SIZE / record_size, reads_to_collect);
+
+ reads_collected = incfs_collect_pending_reads(mi, last_known_read_sn,
+ reads_buf, reads_buf2, reads_to_collect,
+ &new_max_sn);
+
+ if (reads_collected < 0) {
+ result = reads_collected;
+ goto out;
+ }
+
+ /*
+ * Just to make sure that we don't accidentally copy more data
+ * to reads buffer than userspace can handle.
+ */
+ reads_collected = min_t(size_t, reads_collected, reads_to_collect);
+ result = reads_collected * record_size;
+
+ /* Copy reads info to the userspace buffer */
+ if (copy_to_user(buf, (void *)page, result)) {
+ result = -EFAULT;
+ goto out;
+ }
+
+ WRITE_ONCE(pr_state->last_pending_read_sn, new_max_sn);
+ *ppos = 0;
+
+out:
+ free_page(page);
+ return result;
+}
+
+static __poll_t pending_reads_poll(struct file *file, poll_table *wait)
+{
+ struct pending_reads_state *state = file->private_data;
+ struct mount_info *mi = get_mount_info(file_superblock(file));
+ __poll_t ret = 0;
+
+ poll_wait(file, &mi->mi_pending_reads_notif_wq, wait);
+ if (incfs_fresh_pending_reads_exist(mi,
+ state->last_pending_read_sn))
+ ret = EPOLLIN | EPOLLRDNORM;
+
+ return ret;
+}
+
+static int pending_reads_open(struct inode *inode, struct file *file)
+{
+ struct pending_reads_state *state = NULL;
+
+ state = kzalloc(sizeof(*state), GFP_NOFS);
+ if (!state)
+ return -ENOMEM;
+
+ file->private_data = state;
+ return 0;
+}
+
+static int pending_reads_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static long ioctl_permit_fill(struct file *f, void __user *arg)
+{
+ struct incfs_permit_fill __user *usr_permit_fill = arg;
+ struct incfs_permit_fill permit_fill;
+ long error = 0;
+ struct file *file = NULL;
+ struct incfs_file_data *fd;
+
+ if (copy_from_user(&permit_fill, usr_permit_fill, sizeof(permit_fill)))
+ return -EFAULT;
+
+ file = fget(permit_fill.file_descriptor);
+ if (IS_ERR_OR_NULL(file)) {
+ if (!file)
+ return -ENOENT;
+
+ return PTR_ERR(file);
+ }
+
+ if (file->f_op != &incfs_file_ops) {
+ error = -EPERM;
+ goto out;
+ }
+
+ if (file->f_inode->i_sb != f->f_inode->i_sb) {
+ error = -EPERM;
+ goto out;
+ }
+
+ fd = file->private_data;
+
+ switch (fd->fd_fill_permission) {
+ case CANT_FILL:
+ fd->fd_fill_permission = CAN_FILL;
+ break;
+
+ case CAN_FILL:
+ pr_debug("CAN_FILL already set");
+ break;
+
+ default:
+ pr_warn("Invalid file private data");
+ error = -EFAULT;
+ goto out;
+ }
+
+out:
+ fput(file);
+ return error;
+}
+
+static int chmod(struct dentry *dentry, umode_t mode)
+{
+ struct inode *inode = dentry->d_inode;
+ struct delegated_inode delegated_inode = { };
+ struct iattr newattrs;
+ int error;
+
+retry_deleg:
+ inode_lock(inode);
+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+ error = notify_change(&nop_mnt_idmap, dentry, &newattrs, &delegated_inode);
+ inode_unlock(inode);
+ if (is_delegated(&delegated_inode)) {
+ error = break_deleg_wait(&delegated_inode);
+ if (!error)
+ goto retry_deleg;
+ }
+ return error;
+}
+
+static bool incfs_equal_ranges(struct mem_range lhs, struct mem_range rhs)
+{
+ if (lhs.len != rhs.len)
+ return false;
+ return memcmp(lhs.data, rhs.data, lhs.len) == 0;
+}
+
+static int validate_name(char *file_name)
+{
+ struct mem_range name = range(file_name, strlen(file_name));
+ int i = 0;
+
+ if (name.len > INCFS_MAX_NAME_LEN)
+ return -ENAMETOOLONG;
+
+ if (is_pseudo_filename(name))
+ return -EINVAL;
+
+ for (i = 0; i < name.len; i++)
+ if (name.data[i] == '/')
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dir_relative_path_resolve(
+ struct mount_info *mi,
+ const char __user *relative_path,
+ struct path *result_path,
+ struct path *base_path)
+{
+ int dir_fd = get_unused_fd_flags(0);
+ struct file *dir_f = NULL;
+ int error = 0;
+
+ if (!base_path)
+ base_path = &mi->mi_backing_dir_path;
+
+ if (dir_fd < 0)
+ return dir_fd;
+
+ dir_f = dentry_open(base_path, O_RDONLY | O_NOATIME, current_cred());
+
+ if (IS_ERR(dir_f)) {
+ error = PTR_ERR(dir_f);
+ goto out;
+ }
+ fd_install(dir_fd, dir_f);
+
+ if (!relative_path) {
+ /* No relative path given, just return the base dir. */
+ *result_path = *base_path;
+ path_get(result_path);
+ goto out;
+ }
+
+ error = user_path_at(dir_fd, relative_path,
+ LOOKUP_FOLLOW | LOOKUP_DIRECTORY, result_path);
+
+out:
+ close_fd(dir_fd);
+ if (error)
+ pr_debug("Error: %d\n", error);
+ return error;
+}
+
+static struct mem_range incfs_copy_signature_info_from_user(u8 __user *original,
+ u64 size)
+{
+ u8 *result;
+
+ if (!original)
+ return range(NULL, 0);
+
+ if (size > INCFS_MAX_SIGNATURE_SIZE)
+ return range(ERR_PTR(-EFAULT), 0);
+
+ result = kzalloc(size, GFP_NOFS | __GFP_COMP);
+ if (!result)
+ return range(ERR_PTR(-ENOMEM), 0);
+
+ if (copy_from_user(result, original, size)) {
+ kfree(result);
+ return range(ERR_PTR(-EFAULT), 0);
+ }
+
+ return range(result, size);
+}
+
+static int init_new_file(struct mount_info *mi, struct dentry *dentry,
+ incfs_uuid_t *uuid, u64 size, struct mem_range attr,
+ u8 __user *user_signature_info, u64 signature_size)
+{
+ struct path path = {};
+ struct file *new_file;
+ int error = 0;
+ struct backing_file_context *bfc = NULL;
+ u32 block_count;
+ struct mem_range raw_signature = { NULL };
+ struct mtree *hash_tree = NULL;
+
+ if (!mi || !dentry || !uuid)
+ return -EFAULT;
+
+ /* Resize newly created file to its true size. */
+ path = (struct path) {
+ .mnt = mi->mi_backing_dir_path.mnt,
+ .dentry = dentry
+ };
+
+ new_file = dentry_open(&path, O_RDWR | O_NOATIME | O_LARGEFILE,
+ current_cred());
+
+ if (IS_ERR(new_file)) {
+ error = PTR_ERR(new_file);
+ goto out;
+ }
+
+ bfc = incfs_alloc_bfc(mi, new_file);
+ fput(new_file);
+ if (IS_ERR(bfc)) {
+ error = PTR_ERR(bfc);
+ bfc = NULL;
+ goto out;
+ }
+
+ mutex_lock(&bfc->bc_mutex);
+ error = incfs_write_fh_to_backing_file(bfc, uuid, size);
+ if (error)
+ goto out;
+
+ block_count = (u32)get_blocks_count_for_size(size);
+
+ if (user_signature_info) {
+ raw_signature = incfs_copy_signature_info_from_user(
+ user_signature_info, signature_size);
+
+ if (IS_ERR(raw_signature.data)) {
+ error = PTR_ERR(raw_signature.data);
+ raw_signature.data = NULL;
+ goto out;
+ }
+
+ hash_tree = incfs_alloc_mtree(raw_signature, block_count);
+ if (IS_ERR(hash_tree)) {
+ error = PTR_ERR(hash_tree);
+ hash_tree = NULL;
+ goto out;
+ }
+
+ error = incfs_write_signature_to_backing_file(bfc,
+ raw_signature, hash_tree->hash_tree_area_size,
+ NULL, NULL);
+ if (error)
+ goto out;
+
+ block_count += get_blocks_count_for_size(
+ hash_tree->hash_tree_area_size);
+ }
+
+ if (block_count)
+ error = incfs_write_blockmap_to_backing_file(bfc, block_count);
+
+ if (error)
+ goto out;
+
+out:
+ if (bfc) {
+ mutex_unlock(&bfc->bc_mutex);
+ incfs_free_bfc(bfc);
+ }
+ incfs_free_mtree(hash_tree);
+ kfree(raw_signature.data);
+
+ if (error)
+ pr_debug("incfs: %s error: %d\n", __func__, error);
+ return error;
+}
+
+static void notify_create(struct file *pending_reads_file,
+ const char __user *dir_name, const char *file_name,
+ const char *file_id_str, bool incomplete_file)
+{
+ struct mount_info *mi =
+ get_mount_info(file_superblock(pending_reads_file));
+ struct path base_path = {
+ .mnt = pending_reads_file->f_path.mnt,
+ .dentry = pending_reads_file->f_path.dentry->d_parent,
+ };
+ struct path dir_path = {};
+ struct dentry *file = NULL;
+ struct dentry *dir = NULL;
+ int error;
+
+ error = dir_relative_path_resolve(mi, dir_name, &dir_path, &base_path);
+ if (error)
+ goto out;
+
+ file = incfs_lookup_dentry(dir_path.dentry, file_name);
+ if (IS_ERR(file)) {
+ error = PTR_ERR(file);
+ file = NULL;
+ goto out;
+ }
+
+ fsnotify_create(d_inode(dir_path.dentry), file);
+
+ if (file_id_str) {
+ dir = incfs_lookup_dentry(base_path.dentry, INCFS_INDEX_NAME);
+ if (IS_ERR(dir)) {
+ error = PTR_ERR(dir);
+ dir = NULL;
+ goto out;
+ }
+
+ dput(file);
+ file = incfs_lookup_dentry(dir, file_id_str);
+ if (IS_ERR(file)) {
+ error = PTR_ERR(file);
+ file = NULL;
+ goto out;
+ }
+
+ fsnotify_create(d_inode(dir), file);
+
+ if (incomplete_file) {
+ dput(dir);
+ dir = incfs_lookup_dentry(base_path.dentry,
+ INCFS_INCOMPLETE_NAME);
+ if (IS_ERR(dir)) {
+ error = PTR_ERR(dir);
+ dir = NULL;
+ goto out;
+ }
+
+ dput(file);
+ file = incfs_lookup_dentry(dir, file_id_str);
+ if (IS_ERR(file)) {
+ error = PTR_ERR(file);
+ file = NULL;
+ goto out;
+ }
+
+ fsnotify_create(d_inode(dir), file);
+ }
+ }
+out:
+ if (error)
+ pr_warn("%s failed with error %d\n", __func__, error);
+
+ dput(dir);
+ dput(file);
+ path_put(&dir_path);
+}
+
+static long ioctl_create_file(struct file *file,
+ struct incfs_new_file_args __user *usr_args)
+{
+ struct mount_info *mi = get_mount_info(file_superblock(file));
+ struct incfs_new_file_args args;
+ char *file_id_str = NULL;
+ struct dentry *index_file_dentry = NULL;
+ struct dentry *named_file_dentry = NULL;
+ struct dentry *incomplete_file_dentry = NULL;
+ struct path parent_dir_path = {};
+ struct inode *index_dir_inode = NULL;
+ __le64 size_attr_value = 0;
+ char *file_name = NULL;
+ char *attr_value = NULL;
+ int error = 0;
+ bool locked = false;
+ bool index_linked = false;
+ bool name_linked = false;
+ bool incomplete_linked = false;
+
+ if (!mi || !mi->mi_index_dir || !mi->mi_incomplete_dir) {
+ error = -EFAULT;
+ goto out;
+ }
+
+ if (copy_from_user(&args, usr_args, sizeof(args)) > 0) {
+ error = -EFAULT;
+ goto out;
+ }
+
+ file_name = strndup_user(u64_to_user_ptr(args.file_name), PATH_MAX);
+ if (IS_ERR(file_name)) {
+ error = PTR_ERR(file_name);
+ file_name = NULL;
+ goto out;
+ }
+
+ error = validate_name(file_name);
+ if (error)
+ goto out;
+
+ file_id_str = file_id_to_str(args.file_id);
+ if (!file_id_str) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ error = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+ if (error)
+ goto out;
+ locked = true;
+
+ /* Find a directory to put the file into. */
+ error = dir_relative_path_resolve(mi,
+ u64_to_user_ptr(args.directory_path),
+ &parent_dir_path, NULL);
+ if (error)
+ goto out;
+
+ if (parent_dir_path.dentry == mi->mi_index_dir) {
+ /* Can't create a file directly inside .index */
+ error = -EBUSY;
+ goto out;
+ }
+
+ if (parent_dir_path.dentry == mi->mi_incomplete_dir) {
+ /* Can't create a file directly inside .incomplete */
+ error = -EBUSY;
+ goto out;
+ }
+
+ /* Look up a dentry in the parent dir. It should be negative. */
+ named_file_dentry = incfs_lookup_dentry(parent_dir_path.dentry,
+ file_name);
+ if (!named_file_dentry) {
+ error = -EFAULT;
+ goto out;
+ }
+ if (IS_ERR(named_file_dentry)) {
+ error = PTR_ERR(named_file_dentry);
+ named_file_dentry = NULL;
+ goto out;
+ }
+ if (d_really_is_positive(named_file_dentry)) {
+ /* File with this path already exists. */
+ error = -EEXIST;
+ goto out;
+ }
+
+ /* Look up a dentry in the incomplete dir. It should be negative. */
+ incomplete_file_dentry = incfs_lookup_dentry(mi->mi_incomplete_dir,
+ file_id_str);
+ if (!incomplete_file_dentry) {
+ error = -EFAULT;
+ goto out;
+ }
+ if (IS_ERR(incomplete_file_dentry)) {
+ error = PTR_ERR(incomplete_file_dentry);
+ incomplete_file_dentry = NULL;
+ goto out;
+ }
+ if (d_really_is_positive(incomplete_file_dentry)) {
+ /* File with this path already exists. */
+ error = -EEXIST;
+ goto out;
+ }
+
+ /* Look up a dentry in the .index dir. It should be negative. */
+ index_file_dentry = incfs_lookup_dentry(mi->mi_index_dir, file_id_str);
+ if (!index_file_dentry) {
+ error = -EFAULT;
+ goto out;
+ }
+ if (IS_ERR(index_file_dentry)) {
+ error = PTR_ERR(index_file_dentry);
+ index_file_dentry = NULL;
+ goto out;
+ }
+ if (d_really_is_positive(index_file_dentry)) {
+ /* File with this ID already exists in index. */
+ error = -EEXIST;
+ goto out;
+ }
+
+ /* Creating a file in the .index dir. */
+ index_dir_inode = d_inode(mi->mi_index_dir);
+ inode_lock_nested(index_dir_inode, I_MUTEX_PARENT);
+ error = vfs_create(&nop_mnt_idmap, index_file_dentry,
+ args.mode | 0222, NULL);
+ inode_unlock(index_dir_inode);
+
+ if (error)
+ goto out;
+ if (!d_really_is_positive(index_file_dentry)) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ error = chmod(index_file_dentry, args.mode | 0222);
+ if (error) {
+ pr_debug("incfs: chmod err: %d\n", error);
+ goto out;
+ }
+
+ /* Save the file's ID as an xattr for easy fetching in future. */
+ error = vfs_setxattr(&nop_mnt_idmap, index_file_dentry, INCFS_XATTR_ID_NAME,
+ file_id_str, strlen(file_id_str), XATTR_CREATE);
+ if (error) {
+ pr_debug("incfs: vfs_setxattr err:%d\n", error);
+ goto out;
+ }
+
+ /* Save the file's size as an xattr for easy fetching in future. */
+ size_attr_value = cpu_to_le64(args.size);
+ error = vfs_setxattr(&nop_mnt_idmap, index_file_dentry, INCFS_XATTR_SIZE_NAME,
+ (char *)&size_attr_value, sizeof(size_attr_value),
+ XATTR_CREATE);
+ if (error) {
+ pr_debug("incfs: vfs_setxattr err:%d\n", error);
+ goto out;
+ }
+
+ /* Save the file's attribute as an xattr */
+ if (args.file_attr_len && args.file_attr) {
+ if (args.file_attr_len > INCFS_MAX_FILE_ATTR_SIZE) {
+ error = -E2BIG;
+ goto out;
+ }
+
+ attr_value = kmalloc(args.file_attr_len, GFP_NOFS);
+ if (!attr_value) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(attr_value,
+ u64_to_user_ptr(args.file_attr),
+ args.file_attr_len) > 0) {
+ error = -EFAULT;
+ goto out;
+ }
+
+ error = vfs_setxattr(&nop_mnt_idmap, index_file_dentry,
+ INCFS_XATTR_METADATA_NAME,
+ attr_value, args.file_attr_len,
+ XATTR_CREATE);
+
+ if (error)
+ goto out;
+ }
+
+ /* Initializing a newly created file. */
+ error = init_new_file(mi, index_file_dentry, &args.file_id, args.size,
+ range(attr_value, args.file_attr_len),
+ u64_to_user_ptr(args.signature_info),
+ args.signature_size);
+ if (error)
+ goto out;
+ index_linked = true;
+
+ /* Linking a file with its real name from the requested dir. */
+ error = incfs_link(index_file_dentry, named_file_dentry);
+ if (error)
+ goto out;
+ name_linked = true;
+
+ if (args.size) {
+ /* Linking a file with its incomplete entry */
+ error = incfs_link(index_file_dentry, incomplete_file_dentry);
+ if (error)
+ goto out;
+ incomplete_linked = true;
+ }
+
+ notify_create(file, u64_to_user_ptr(args.directory_path), file_name,
+ file_id_str, args.size != 0);
+
+out:
+ if (error) {
+ pr_debug("incfs: %s err:%d\n", __func__, error);
+ if (index_linked)
+ incfs_unlink(index_file_dentry);
+ if (name_linked)
+ incfs_unlink(named_file_dentry);
+ if (incomplete_linked)
+ incfs_unlink(incomplete_file_dentry);
+ }
+
+ kfree(file_id_str);
+ kfree(file_name);
+ kfree(attr_value);
+ dput(named_file_dentry);
+ dput(index_file_dentry);
+ dput(incomplete_file_dentry);
+ path_put(&parent_dir_path);
+ if (locked)
+ mutex_unlock(&mi->mi_dir_struct_mutex);
+
+ return error;
+}
+
+static int init_new_mapped_file(struct mount_info *mi, struct dentry *dentry,
+ incfs_uuid_t *uuid, u64 size, u64 offset)
+{
+ struct path path = {};
+ struct file *new_file;
+ int error = 0;
+ struct backing_file_context *bfc = NULL;
+
+ if (!mi || !dentry || !uuid)
+ return -EFAULT;
+
+ /* Resize newly created file to its true size. */
+ path = (struct path) {
+ .mnt = mi->mi_backing_dir_path.mnt,
+ .dentry = dentry
+ };
+ new_file = dentry_open(&path, O_RDWR | O_NOATIME | O_LARGEFILE,
+ current_cred());
+
+ if (IS_ERR(new_file)) {
+ error = PTR_ERR(new_file);
+ goto out;
+ }
+
+ bfc = incfs_alloc_bfc(mi, new_file);
+ fput(new_file);
+ if (IS_ERR(bfc)) {
+ error = PTR_ERR(bfc);
+ bfc = NULL;
+ goto out;
+ }
+
+ mutex_lock(&bfc->bc_mutex);
+ error = incfs_write_mapping_fh_to_backing_file(bfc, uuid, size, offset);
+ if (error)
+ goto out;
+
+out:
+ if (bfc) {
+ mutex_unlock(&bfc->bc_mutex);
+ incfs_free_bfc(bfc);
+ }
+
+ if (error)
+ pr_debug("incfs: %s error: %d\n", __func__, error);
+ return error;
+}
+
+static long ioctl_create_mapped_file(struct file *file, void __user *arg)
+{
+ struct mount_info *mi = get_mount_info(file_superblock(file));
+ struct incfs_create_mapped_file_args __user *args_usr_ptr = arg;
+ struct incfs_create_mapped_file_args args = {};
+ char *file_name;
+ int error = 0;
+ struct path parent_dir_path = {};
+ char *source_file_name = NULL;
+ struct dentry *source_file_dentry = NULL;
+ u64 source_file_size;
+ struct dentry *file_dentry = NULL;
+ struct inode *parent_inode;
+ __le64 size_attr_value;
+
+ if (copy_from_user(&args, args_usr_ptr, sizeof(args)) > 0)
+ return -EINVAL;
+
+ file_name = strndup_user(u64_to_user_ptr(args.file_name), PATH_MAX);
+ if (IS_ERR(file_name)) {
+ error = PTR_ERR(file_name);
+ file_name = NULL;
+ goto out;
+ }
+
+ error = validate_name(file_name);
+ if (error)
+ goto out;
+
+ if (args.source_offset % INCFS_DATA_FILE_BLOCK_SIZE) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ /* Validate file mapping is in range */
+ source_file_name = file_id_to_str(args.source_file_id);
+ if (!source_file_name) {
+ pr_warn("Failed to alloc source_file_name\n");
+ error = -ENOMEM;
+ goto out;
+ }
+
+ source_file_dentry = incfs_lookup_dentry(mi->mi_index_dir,
+ source_file_name);
+ if (!source_file_dentry) {
+ pr_warn("Source file does not exist\n");
+ error = -EINVAL;
+ goto out;
+ }
+ if (IS_ERR(source_file_dentry)) {
+ pr_warn("Error opening source file\n");
+ error = PTR_ERR(source_file_dentry);
+ source_file_dentry = NULL;
+ goto out;
+ }
+ if (!d_really_is_positive(source_file_dentry)) {
+ pr_warn("Source file dentry negative\n");
+ error = -EINVAL;
+ goto out;
+ }
+
+ error = vfs_getxattr(&nop_mnt_idmap, source_file_dentry, INCFS_XATTR_SIZE_NAME,
+ (char *)&size_attr_value, sizeof(size_attr_value));
+ if (error < 0)
+ goto out;
+
+ if (error != sizeof(size_attr_value)) {
+ pr_warn("Mapped file has no size attr\n");
+ error = -EINVAL;
+ goto out;
+ }
+
+ source_file_size = le64_to_cpu(size_attr_value);
+ if (args.source_offset + args.size > source_file_size) {
+ pr_warn("Mapped file out of range\n");
+ error = -EINVAL;
+ goto out;
+ }
+
+ /* Find a directory to put the file into. */
+ error = dir_relative_path_resolve(mi,
+ u64_to_user_ptr(args.directory_path),
+ &parent_dir_path, NULL);
+ if (error)
+ goto out;
+
+ if (parent_dir_path.dentry == mi->mi_index_dir) {
+ /* Can't create a file directly inside .index */
+ error = -EBUSY;
+ goto out;
+ }
+
+ /* Look up a dentry in the parent dir. It should be negative. */
+ file_dentry = incfs_lookup_dentry(parent_dir_path.dentry,
+ file_name);
+ if (!file_dentry) {
+ error = -EFAULT;
+ goto out;
+ }
+ if (IS_ERR(file_dentry)) {
+ error = PTR_ERR(file_dentry);
+ file_dentry = NULL;
+ goto out;
+ }
+ if (d_really_is_positive(file_dentry)) {
+ error = -EEXIST;
+ goto out;
+ }
+
+ parent_inode = d_inode(parent_dir_path.dentry);
+ inode_lock_nested(parent_inode, I_MUTEX_PARENT);
+ error = vfs_create(&nop_mnt_idmap, file_dentry,
+ args.mode | 0222, NULL);
+ inode_unlock(parent_inode);
+ if (error)
+ goto out;
+
+ error = chmod(file_dentry, args.mode | 0222);
+ if (error) {
+ pr_debug("incfs: chmod err: %d\n", error);
+ goto delete_file;
+ }
+
+ /* Save the file's size as an xattr for easy fetching in future. */
+ size_attr_value = cpu_to_le64(args.size);
+ error = vfs_setxattr(&nop_mnt_idmap, file_dentry, INCFS_XATTR_SIZE_NAME,
+ (char *)&size_attr_value, sizeof(size_attr_value),
+ XATTR_CREATE);
+ if (error) {
+ pr_debug("incfs: vfs_setxattr err:%d\n", error);
+ goto delete_file;
+ }
+
+ error = init_new_mapped_file(mi, file_dentry, &args.source_file_id,
+ args.size, args.source_offset);
+ if (error)
+ goto delete_file;
+
+ notify_create(file, u64_to_user_ptr(args.directory_path), file_name,
+ NULL, false);
+
+ goto out;
+
+delete_file:
+ incfs_unlink(file_dentry);
+
+out:
+ dput(file_dentry);
+ dput(source_file_dentry);
+ path_put(&parent_dir_path);
+ kfree(file_name);
+ kfree(source_file_name);
+ return error;
+}
+
+static long ioctl_get_read_timeouts(struct mount_info *mi, void __user *arg)
+{
+ struct incfs_get_read_timeouts_args __user *args_usr_ptr = arg;
+ struct incfs_get_read_timeouts_args args = {};
+ int error = 0;
+ struct incfs_per_uid_read_timeouts *buffer;
+ int size;
+
+ if (copy_from_user(&args, args_usr_ptr, sizeof(args)))
+ return -EINVAL;
+
+ if (args.timeouts_array_size > INCFS_DATA_FILE_BLOCK_SIZE)
+ return -EINVAL;
+
+ buffer = kzalloc(args.timeouts_array_size, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ spin_lock(&mi->mi_per_uid_read_timeouts_lock);
+ size = mi->mi_per_uid_read_timeouts_size;
+ if (args.timeouts_array_size < size)
+ error = -E2BIG;
+ else if (size)
+ memcpy(buffer, mi->mi_per_uid_read_timeouts, size);
+ spin_unlock(&mi->mi_per_uid_read_timeouts_lock);
+
+ args.timeouts_array_size_out = size;
+ if (!error && size)
+ if (copy_to_user(u64_to_user_ptr(args.timeouts_array), buffer,
+ size))
+ error = -EFAULT;
+
+ if (!error || error == -E2BIG)
+ if (copy_to_user(args_usr_ptr, &args, sizeof(args)) > 0)
+ error = -EFAULT;
+
+ kfree(buffer);
+ return error;
+}
+
+static long ioctl_set_read_timeouts(struct mount_info *mi, void __user *arg)
+{
+ struct incfs_set_read_timeouts_args __user *args_usr_ptr = arg;
+ struct incfs_set_read_timeouts_args args = {};
+ int error = 0;
+ int size;
+ struct incfs_per_uid_read_timeouts *buffer = NULL, *tmp;
+ int i;
+
+ if (copy_from_user(&args, args_usr_ptr, sizeof(args)))
+ return -EINVAL;
+
+ size = args.timeouts_array_size;
+ if (size) {
+ if (size > INCFS_DATA_FILE_BLOCK_SIZE ||
+ size % sizeof(*buffer) != 0)
+ return -EINVAL;
+
+ buffer = kzalloc(size, GFP_NOFS);
+ if (!buffer)
+ return -ENOMEM;
+
+ if (copy_from_user(buffer, u64_to_user_ptr(args.timeouts_array),
+ size)) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < size / sizeof(*buffer); ++i) {
+ struct incfs_per_uid_read_timeouts *t = &buffer[i];
+
+ if (t->min_pending_time_us > t->max_pending_time_us) {
+ error = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ spin_lock(&mi->mi_per_uid_read_timeouts_lock);
+ mi->mi_per_uid_read_timeouts_size = size;
+ tmp = mi->mi_per_uid_read_timeouts;
+ mi->mi_per_uid_read_timeouts = buffer;
+ buffer = tmp;
+ spin_unlock(&mi->mi_per_uid_read_timeouts_lock);
+
+out:
+ kfree(buffer);
+ return error;
+}
+
+static long ioctl_get_last_read_error(struct mount_info *mi, void __user *arg)
+{
+ struct incfs_get_last_read_error_args __user *args_usr_ptr = arg;
+ struct incfs_get_last_read_error_args args = {};
+ int error;
+
+ error = mutex_lock_interruptible(&mi->mi_le_mutex);
+ if (error)
+ return error;
+
+ args.file_id_out = mi->mi_le_file_id;
+ args.time_us_out = mi->mi_le_time_us;
+ args.page_out = mi->mi_le_page;
+ args.errno_out = mi->mi_le_errno;
+ args.uid_out = mi->mi_le_uid;
+
+ mutex_unlock(&mi->mi_le_mutex);
+ if (copy_to_user(args_usr_ptr, &args, sizeof(args)) > 0)
+ error = -EFAULT;
+
+ return error;
+}
+
+static long pending_reads_dispatch_ioctl(struct file *f, unsigned int req,
+ unsigned long arg)
+{
+ struct mount_info *mi = get_mount_info(file_superblock(f));
+
+ switch (req) {
+ case INCFS_IOC_CREATE_FILE:
+ return ioctl_create_file(f, (void __user *)arg);
+ case INCFS_IOC_PERMIT_FILL:
+ return ioctl_permit_fill(f, (void __user *)arg);
+ case INCFS_IOC_CREATE_MAPPED_FILE:
+ return ioctl_create_mapped_file(f, (void __user *)arg);
+ case INCFS_IOC_GET_READ_TIMEOUTS:
+ return ioctl_get_read_timeouts(mi, (void __user *)arg);
+ case INCFS_IOC_SET_READ_TIMEOUTS:
+ return ioctl_set_read_timeouts(mi, (void __user *)arg);
+ case INCFS_IOC_GET_LAST_READ_ERROR:
+ return ioctl_get_last_read_error(mi, (void __user *)arg);
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct file_operations incfs_pending_reads_file_ops = {
+ .read = pending_reads_read,
+ .poll = pending_reads_poll,
+ .open = pending_reads_open,
+ .release = pending_reads_release,
+ .llseek = noop_llseek,
+ .unlocked_ioctl = pending_reads_dispatch_ioctl,
+ .compat_ioctl = pending_reads_dispatch_ioctl
+};
+
+/*******************************************************************************
+ * .log pseudo file definition
+ ******************************************************************************/
+#define INCFS_LOG_INODE 3
+static const char log_file_name[] = INCFS_LOG_FILENAME;
+
+/* State of an open .log file, unique for each file descriptor. */
+struct log_file_state {
+ struct read_log_state state;
+};
+
+static ssize_t log_read(struct file *f, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct log_file_state *log_state = f->private_data;
+ struct mount_info *mi = get_mount_info(file_superblock(f));
+ int total_reads_collected = 0;
+ int rl_size;
+ ssize_t result = 0;
+ bool report_uid;
+ void *page = 0;
+ struct incfs_pending_read_info *reads_buf = NULL;
+ struct incfs_pending_read_info2 *reads_buf2 = NULL;
+ size_t record_size;
+ ssize_t reads_to_collect;
+ ssize_t reads_per_page;
+
+ if (!mi)
+ return -EFAULT;
+
+ report_uid = mi->mi_options.report_uid;
+ record_size = report_uid ? sizeof(*reads_buf2) : sizeof(*reads_buf);
+ reads_to_collect = len / record_size;
+ reads_per_page = INCFS_DATA_FILE_BLOCK_SIZE / record_size;
+
+ rl_size = READ_ONCE(mi->mi_log.rl_size);
+ if (rl_size == 0)
+ return 0;
+
+ page = kzalloc(INCFS_DATA_FILE_BLOCK_SIZE, GFP_NOFS);
+ if (!page)
+ return -ENOMEM;
+
+ if (report_uid)
+ reads_buf2 = (struct incfs_pending_read_info2 *)page;
+ else
+ reads_buf = (struct incfs_pending_read_info *)page;
+
+ reads_to_collect = min_t(ssize_t, rl_size, reads_to_collect);
+ while (reads_to_collect > 0) {
+ struct read_log_state next_state;
+ int reads_collected;
+
+ memcpy(&next_state, &log_state->state, sizeof(next_state));
+ reads_collected = incfs_collect_logged_reads(
+ mi, &next_state, reads_buf, reads_buf2,
+ min_t(ssize_t, reads_to_collect, reads_per_page));
+ if (reads_collected <= 0) {
+ result = total_reads_collected ?
+ total_reads_collected * record_size :
+ reads_collected;
+ goto out;
+ }
+ if (copy_to_user(buf, page,
+ reads_collected * record_size)) {
+ result = total_reads_collected ?
+ total_reads_collected * record_size :
+ -EFAULT;
+ goto out;
+ }
+
+ memcpy(&log_state->state, &next_state, sizeof(next_state));
+ total_reads_collected += reads_collected;
+ buf += reads_collected * record_size;
+ reads_to_collect -= reads_collected;
+ }
+
+ result = total_reads_collected * record_size;
+ *ppos = 0;
+out:
+ kfree(page);
+ return result;
+}
+
+static __poll_t log_poll(struct file *file, poll_table *wait)
+{
+ struct log_file_state *log_state = file->private_data;
+ struct mount_info *mi = get_mount_info(file_superblock(file));
+ int count;
+ __poll_t ret = 0;
+
+ poll_wait(file, &mi->mi_log.ml_notif_wq, wait);
+ count = incfs_get_uncollected_logs_count(mi, &log_state->state);
+ if (count >= mi->mi_options.read_log_wakeup_count)
+ ret = EPOLLIN | EPOLLRDNORM;
+
+ return ret;
+}
+
+static int log_open(struct inode *inode, struct file *file)
+{
+ struct log_file_state *log_state = NULL;
+ struct mount_info *mi = get_mount_info(file_superblock(file));
+
+ log_state = kzalloc(sizeof(*log_state), GFP_NOFS);
+ if (!log_state)
+ return -ENOMEM;
+
+ log_state->state = incfs_get_log_state(mi);
+ file->private_data = log_state;
+ return 0;
+}
+
+static int log_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations incfs_log_file_ops = {
+ .read = log_read,
+ .poll = log_poll,
+ .open = log_open,
+ .release = log_release,
+ .llseek = noop_llseek,
+};
+
+/*******************************************************************************
+ * .blocks_written pseudo file definition
+ ******************************************************************************/
+#define INCFS_BLOCKS_WRITTEN_INODE 4
+static const char blocks_written_file_name[] = INCFS_BLOCKS_WRITTEN_FILENAME;
+
+/* State of an open .blocks_written file, unique for each file descriptor. */
+struct blocks_written_file_state {
+ unsigned long blocks_written;
+};
+
+static ssize_t blocks_written_read(struct file *f, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct mount_info *mi = get_mount_info(file_superblock(f));
+ struct blocks_written_file_state *state = f->private_data;
+ unsigned long blocks_written;
+ char string[21];
+ int result = 0;
+
+ if (!mi)
+ return -EFAULT;
+
+ blocks_written = atomic_read(&mi->mi_blocks_written);
+ if (state->blocks_written == blocks_written)
+ return 0;
+
+ result = snprintf(string, sizeof(string), "%lu", blocks_written);
+ if (result > len)
+ result = len;
+ if (copy_to_user(buf, string, result))
+ return -EFAULT;
+
+ state->blocks_written = blocks_written;
+ return result;
+}
+
+static __poll_t blocks_written_poll(struct file *f, poll_table *wait)
+{
+ struct mount_info *mi = get_mount_info(file_superblock(f));
+ struct blocks_written_file_state *state = f->private_data;
+ unsigned long blocks_written;
+
+ if (!mi)
+ return 0;
+
+ poll_wait(f, &mi->mi_blocks_written_notif_wq, wait);
+ blocks_written = atomic_read(&mi->mi_blocks_written);
+ if (state->blocks_written == blocks_written)
+ return 0;
+
+ return EPOLLIN | EPOLLRDNORM;
+}
+
+static int blocks_written_open(struct inode *inode, struct file *file)
+{
+ struct blocks_written_file_state *state =
+ kzalloc(sizeof(*state), GFP_NOFS);
+
+ if (!state)
+ return -ENOMEM;
+
+ state->blocks_written = -1;
+ file->private_data = state;
+ return 0;
+}
+
+static int blocks_written_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static const struct file_operations incfs_blocks_written_file_ops = {
+ .read = blocks_written_read,
+ .poll = blocks_written_poll,
+ .open = blocks_written_open,
+ .release = blocks_written_release,
+ .llseek = noop_llseek,
+};
+
+/*******************************************************************************
+ * Generic inode lookup functionality
+ ******************************************************************************/
+
+const struct mem_range incfs_pseudo_file_names[] = {
+ { .data = (u8 *)pending_reads_file_name,
+ .len = ARRAY_SIZE(pending_reads_file_name) - 1 },
+ { .data = (u8 *)log_file_name, .len = ARRAY_SIZE(log_file_name) - 1 },
+ { .data = (u8 *)blocks_written_file_name,
+ .len = ARRAY_SIZE(blocks_written_file_name) - 1 }
+};
+
+const unsigned long incfs_pseudo_file_inodes[] = { INCFS_PENDING_READS_INODE,
+ INCFS_LOG_INODE,
+ INCFS_BLOCKS_WRITTEN_INODE };
+
+static const struct file_operations *const pseudo_file_operations[] = {
+ &incfs_pending_reads_file_ops, &incfs_log_file_ops,
+ &incfs_blocks_written_file_ops
+};
+
+static bool is_pseudo_filename(struct mem_range name)
+{
+ int i = 0;
+
+ for (; i < ARRAY_SIZE(incfs_pseudo_file_names); ++i)
+ if (incfs_equal_ranges(incfs_pseudo_file_names[i], name))
+ return true;
+ return false;
+}
+
+static bool get_pseudo_inode(int ino, struct inode *inode)
+{
+ int i = 0;
+
+ for (; i < ARRAY_SIZE(incfs_pseudo_file_inodes); ++i)
+ if (ino == incfs_pseudo_file_inodes[i])
+ break;
+ if (i == ARRAY_SIZE(incfs_pseudo_file_inodes))
+ return false;
+
+ inode_set_mtime(inode, 0, 0);
+ inode_set_atime(inode, 0, 0);
+ inode_set_ctime(inode, 0, 0);
+ inode->i_size = 0;
+ inode->i_ino = ino;
+ inode->i_private = NULL;
+ inode_init_owner(&nop_mnt_idmap, inode, NULL, S_IFREG | READ_WRITE_FILE_MODE);
+ inode->i_op = &incfs_file_inode_ops;
+ inode->i_fop = pseudo_file_operations[i];
+ return true;
+}
+
+struct inode_search {
+ unsigned long ino;
+};
+
+static int inode_test(struct inode *inode, void *opaque)
+{
+ struct inode_search *search = opaque;
+
+ return inode->i_ino == search->ino;
+}
+
+static int inode_set(struct inode *inode, void *opaque)
+{
+ struct inode_search *search = opaque;
+
+ if (get_pseudo_inode(search->ino, inode))
+ return 0;
+
+ /* Unknown inode requested. */
+ return -EINVAL;
+}
+
+static struct inode *fetch_inode(struct super_block *sb, unsigned long ino)
+{
+ struct inode_search search = {
+ .ino = ino
+ };
+ struct inode *inode = iget5_locked(sb, search.ino, inode_test,
+ inode_set, &search);
+
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ if (inode_state_read_once(inode) & I_NEW)
+ unlock_new_inode(inode);
+
+ return inode;
+}
+
+int dir_lookup_pseudo_files(struct super_block *sb, struct dentry *dentry)
+{
+ struct mem_range name_range =
+ range((u8 *)dentry->d_name.name, dentry->d_name.len);
+ unsigned long ino;
+ struct inode *inode;
+ int i = 0;
+
+ for (; i < ARRAY_SIZE(incfs_pseudo_file_names); ++i)
+ if (incfs_equal_ranges(incfs_pseudo_file_names[i], name_range))
+ break;
+ if (i == ARRAY_SIZE(incfs_pseudo_file_names))
+ return -ENOENT;
+
+ ino = incfs_pseudo_file_inodes[i];
+
+ inode = fetch_inode(sb, ino);
+ if (IS_ERR(inode))
+ return PTR_ERR(inode);
+
+ d_add(dentry, inode);
+ return 0;
+}
+
+int emit_pseudo_files(struct dir_context *ctx)
+{
+ loff_t i = ctx->pos;
+
+ for (; i < ARRAY_SIZE(incfs_pseudo_file_names); ++i) {
+ if (!dir_emit(ctx, incfs_pseudo_file_names[i].data,
+ incfs_pseudo_file_names[i].len,
+ incfs_pseudo_file_inodes[i], DT_REG))
+ return -EINVAL;
+
+ ctx->pos++;
+ }
+ return 0;
+}
diff --git a/fs/incfs/pseudo_files.h b/fs/incfs/pseudo_files.h
new file mode 100644
index 0000000..1887218
--- /dev/null
+++ b/fs/incfs/pseudo_files.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020 Google LLC
+ */
+
+#ifndef _INCFS_PSEUDO_FILES_H
+#define _INCFS_PSEUDO_FILES_H
+
+#include "internal.h"
+
+#define PSEUDO_FILE_COUNT 3
+#define INCFS_START_INO_RANGE 10
+
+extern const struct mem_range incfs_pseudo_file_names[PSEUDO_FILE_COUNT];
+extern const unsigned long incfs_pseudo_file_inodes[PSEUDO_FILE_COUNT];
+
+int dir_lookup_pseudo_files(struct super_block *sb, struct dentry *dentry);
+int emit_pseudo_files(struct dir_context *ctx);
+
+#endif
diff --git a/fs/incfs/sysfs.c b/fs/incfs/sysfs.c
new file mode 100644
index 0000000..b4d5b3e4
--- /dev/null
+++ b/fs/incfs/sysfs.c
@@ -0,0 +1,208 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2021 Google LLC
+ */
+#include <linux/fs.h>
+#include <linux/fs_parser.h>
+#include <linux/kobject.h>
+
+#include <uapi/linux/incrementalfs.h>
+
+#include "sysfs.h"
+#include "data_mgmt.h"
+#include "vfs.h"
+
+/******************************************************************************
+ * Define sys/fs/incrementalfs & sys/fs/incrementalfs/features
+ *****************************************************************************/
+#define INCFS_NODE_FEATURES "features"
+#define INCFS_NODE_INSTANCES "instances"
+
+static struct kobject *sysfs_root;
+static struct kobject *features_node;
+static struct kobject *instances_node;
+
+#define DECLARE_FEATURE_FLAG(name) \
+ static ssize_t name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buff) \
+{ \
+ return sysfs_emit(buff, "supported\n"); \
+} \
+ \
+static struct kobj_attribute name##_attr = __ATTR_RO(name)
+
+DECLARE_FEATURE_FLAG(corefs);
+DECLARE_FEATURE_FLAG(zstd);
+DECLARE_FEATURE_FLAG(v2);
+DECLARE_FEATURE_FLAG(bugfix_throttling);
+DECLARE_FEATURE_FLAG(bugfix_inode_eviction);
+DECLARE_FEATURE_FLAG(bugfix_retry_page_fault);
+
+static struct attribute *attributes[] = {
+ &corefs_attr.attr,
+ &zstd_attr.attr,
+ &v2_attr.attr,
+ &bugfix_throttling_attr.attr,
+ &bugfix_inode_eviction_attr.attr,
+ &bugfix_retry_page_fault_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group attr_group = {
+ .attrs = attributes,
+};
+
+int __init incfs_init_sysfs(void)
+{
+ int res = -ENOMEM;
+
+ sysfs_root = kobject_create_and_add(INCFS_NAME, fs_kobj);
+ if (!sysfs_root)
+ return -ENOMEM;
+
+ instances_node = kobject_create_and_add(INCFS_NODE_INSTANCES,
+ sysfs_root);
+ if (!instances_node)
+ goto err_put_root;
+
+ features_node = kobject_create_and_add(INCFS_NODE_FEATURES,
+ sysfs_root);
+ if (!features_node)
+ goto err_put_instances;
+
+ res = sysfs_create_group(features_node, &attr_group);
+ if (res)
+ goto err_put_features;
+
+ return 0;
+
+err_put_features:
+ kobject_put(features_node);
+err_put_instances:
+ kobject_put(instances_node);
+err_put_root:
+ kobject_put(sysfs_root);
+
+ return res;
+}
+
+void incfs_cleanup_sysfs(void)
+{
+ if (features_node) {
+ sysfs_remove_group(features_node, &attr_group);
+ kobject_put(features_node);
+ }
+
+ kobject_put(instances_node);
+ kobject_put(sysfs_root);
+}
+
+/******************************************************************************
+ * Define sys/fs/incrementalfs/instances/<name>/
+ *****************************************************************************/
+#define __DECLARE_STATUS_FLAG(name) \
+static ssize_t name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buff) \
+{ \
+ struct incfs_sysfs_node *node = container_of(kobj, \
+ struct incfs_sysfs_node, isn_sysfs_node); \
+ \
+ return sysfs_emit(buff, "%d\n", node->isn_mi->mi_##name); \
+} \
+ \
+static struct kobj_attribute name##_attr = __ATTR_RO(name)
+
+#define __DECLARE_STATUS_FLAG64(name) \
+static ssize_t name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buff) \
+{ \
+ struct incfs_sysfs_node *node = container_of(kobj, \
+ struct incfs_sysfs_node, isn_sysfs_node); \
+ \
+ return sysfs_emit(buff, "%lld\n", node->isn_mi->mi_##name); \
+} \
+ \
+static struct kobj_attribute name##_attr = __ATTR_RO(name)
+
+__DECLARE_STATUS_FLAG(reads_failed_timed_out);
+__DECLARE_STATUS_FLAG(reads_failed_hash_verification);
+__DECLARE_STATUS_FLAG(reads_failed_other);
+__DECLARE_STATUS_FLAG(reads_delayed_pending);
+__DECLARE_STATUS_FLAG64(reads_delayed_pending_us);
+__DECLARE_STATUS_FLAG(reads_delayed_min);
+__DECLARE_STATUS_FLAG64(reads_delayed_min_us);
+
+static struct attribute *mount_attributes[] = {
+ &reads_failed_timed_out_attr.attr,
+ &reads_failed_hash_verification_attr.attr,
+ &reads_failed_other_attr.attr,
+ &reads_delayed_pending_attr.attr,
+ &reads_delayed_pending_us_attr.attr,
+ &reads_delayed_min_attr.attr,
+ &reads_delayed_min_us_attr.attr,
+ NULL,
+};
+
+static void incfs_sysfs_release(struct kobject *kobj)
+{
+ struct incfs_sysfs_node *node = container_of(kobj,
+ struct incfs_sysfs_node, isn_sysfs_node);
+
+ complete(&node->isn_completion);
+}
+
+static const struct attribute_group mount_attr_group = {
+ .attrs = mount_attributes,
+};
+
+static struct kobj_type incfs_kobj_node_ktype = {
+ .sysfs_ops = &kobj_sysfs_ops,
+ .release = &incfs_sysfs_release,
+};
+
+struct incfs_sysfs_node *incfs_add_sysfs_node(const char *name,
+ struct mount_info *mi)
+{
+ struct incfs_sysfs_node *node = NULL;
+ int error;
+
+ if (!name)
+ return NULL;
+
+ node = kzalloc(sizeof(*node), GFP_NOFS);
+ if (!node)
+ return ERR_PTR(-ENOMEM);
+
+ node->isn_mi = mi;
+
+ init_completion(&node->isn_completion);
+ kobject_init(&node->isn_sysfs_node, &incfs_kobj_node_ktype);
+ error = kobject_add(&node->isn_sysfs_node, instances_node, "%s", name);
+ if (error)
+ goto err;
+
+ error = sysfs_create_group(&node->isn_sysfs_node, &mount_attr_group);
+ if (error)
+ goto err;
+
+ return node;
+
+err:
+ /*
+ * Note kobject_put always calls release, so incfs_sysfs_release will
+ * free node
+ */
+ kobject_put(&node->isn_sysfs_node);
+ return ERR_PTR(error);
+}
+
+void incfs_free_sysfs_node(struct incfs_sysfs_node *node)
+{
+ if (!node)
+ return;
+
+ sysfs_remove_group(&node->isn_sysfs_node, &mount_attr_group);
+ kobject_put(&node->isn_sysfs_node);
+ wait_for_completion_interruptible(&node->isn_completion);
+ kfree(node);
+}
diff --git a/fs/incfs/sysfs.h b/fs/incfs/sysfs.h
new file mode 100644
index 0000000..65bf554
--- /dev/null
+++ b/fs/incfs/sysfs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2021 Google LLC
+ */
+#ifndef _INCFS_SYSFS_H
+#define _INCFS_SYSFS_H
+
+struct incfs_sysfs_node {
+ struct kobject isn_sysfs_node;
+
+ struct completion isn_completion;
+
+ struct mount_info *isn_mi;
+};
+
+int incfs_init_sysfs(void);
+void incfs_cleanup_sysfs(void);
+struct incfs_sysfs_node *incfs_add_sysfs_node(const char *name,
+ struct mount_info *mi);
+void incfs_free_sysfs_node(struct incfs_sysfs_node *node);
+
+#endif
diff --git a/fs/incfs/verity.c b/fs/incfs/verity.c
new file mode 100644
index 0000000..7581c7cb
--- /dev/null
+++ b/fs/incfs/verity.c
@@ -0,0 +1,813 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Google LLC
+ */
+
+/*
+ * fs-verity integration into incfs
+ *
+ * Since incfs has its own merkle tree implementation, most of fs/verity/ is not
+ * needed. incfs also only needs to support the case where
+ * CONFIG_FS_VERITY_BUILTIN_SIGNATURES=n. Therefore, the integration consists of
+ * the following modifications:
+ *
+ * 1. Add the (optional) verity signature to the incfs file format. (Not really
+ * needed anymore, but this is kept around since this is the behavior of
+ * fs/verity/ even when CONFIG_FS_VERITY_BUILTIN_SIGNATURES=n.)
+ * 2. Add a pointer to the digest of the fs-verity descriptor struct to the
+ * data_file struct that incfs attaches to each file inode.
+ * 3. Add the following ioclts:
+ * - FS_IOC_ENABLE_VERITY
+ * - FS_IOC_GETFLAGS
+ * - FS_IOC_MEASURE_VERITY
+ * 4. When FS_IOC_ENABLE_VERITY is called on a non-verity file, the
+ * fs-verity descriptor struct is populated and digested. Then the S_VERITY
+ * flag is set and the xattr incfs.verity is set. If the signature is
+ * non-NULL, an INCFS_MD_VERITY_SIGNATURE is added to the backing file
+ * containing the signature.
+ * 5. When a file with an incfs.verity xattr's inode is initialized, the
+ * inode’s S_VERITY flag is set.
+ * 6. When a file with the S_VERITY flag set on its inode is opened, the
+ * data_file is checked for its verity digest. If the file doesn’t have a
+ * digest, the file’s digest is calculated as above, checked, and set, or the
+ * open is denied if it is not valid.
+ * 7. FS_IOC_GETFLAGS simply returns the value of the S_VERITY flag
+ * 8. FS_IOC_MEASURE_VERITY simply returns the cached digest
+ * 9. The final complication is that if FS_IOC_ENABLE_VERITY is called on a file
+ * which doesn’t have a merkle tree, the merkle tree is calculated before the
+ * rest of the process is completed.
+ */
+
+#include <crypto/sha2.h>
+#include <linux/fs_parser.h>
+#include <linux/fsverity.h>
+#include <linux/mount.h>
+
+#include "verity.h"
+
+#include "data_mgmt.h"
+#include "format.h"
+#include "integrity.h"
+#include "vfs.h"
+
+#define FS_VERITY_MAX_SIGNATURE_SIZE 16128
+
+static int incfs_get_root_hash(struct file *filp, u8 *root_hash)
+{
+ struct data_file *df = get_incfs_data_file(filp);
+
+ if (!df)
+ return -EINVAL;
+
+ memcpy(root_hash, df->df_hash_tree->root_hash,
+ df->df_hash_tree->alg->digest_size);
+
+ return 0;
+}
+
+static int incfs_end_enable_verity(struct file *filp, u8 *sig, size_t sig_size)
+{
+ struct inode *inode = file_inode(filp);
+ struct mem_range signature = {
+ .data = sig,
+ .len = sig_size,
+ };
+ struct data_file *df = get_incfs_data_file(filp);
+ struct backing_file_context *bfc;
+ int error;
+ struct incfs_df_verity_signature *vs = NULL;
+ loff_t offset;
+
+ if (!df || !df->df_backing_file_context)
+ return -EFSCORRUPTED;
+
+ if (sig) {
+ vs = kzalloc(sizeof(*vs), GFP_NOFS);
+ if (!vs)
+ return -ENOMEM;
+ }
+
+ bfc = df->df_backing_file_context;
+ error = mutex_lock_interruptible(&bfc->bc_mutex);
+ if (error)
+ goto out;
+
+ error = incfs_write_verity_signature_to_backing_file(bfc, signature,
+ &offset);
+ mutex_unlock(&bfc->bc_mutex);
+ if (error)
+ goto out;
+
+ /*
+ * Set verity xattr so we can set S_VERITY without opening backing file
+ */
+ error = vfs_setxattr(&nop_mnt_idmap, bfc->bc_file->f_path.dentry,
+ INCFS_XATTR_VERITY_NAME, NULL, 0, XATTR_CREATE);
+ if (error) {
+ pr_warn("incfs: error setting verity xattr: %d\n", error);
+ goto out;
+ }
+
+ if (sig) {
+ *vs = (struct incfs_df_verity_signature) {
+ .size = signature.len,
+ .offset = offset,
+ };
+
+ df->df_verity_signature = vs;
+ vs = NULL;
+ }
+
+ inode_set_flags(inode, S_VERITY, S_VERITY);
+
+out:
+ kfree(vs);
+ return error;
+}
+
+static enum incfs_hash_tree_algorithm incfs_convert_fsverity_hash_alg(
+ int hash_alg)
+{
+ switch (hash_alg) {
+ case FS_VERITY_HASH_ALG_SHA256:
+ return INCFS_HASH_TREE_SHA256;
+ default:
+ return -EINVAL;
+ }
+}
+
+static struct mem_range incfs_get_verity_digest(struct inode *inode)
+{
+ struct inode_info *node = get_incfs_node(inode);
+ struct data_file *df;
+ struct mem_range verity_file_digest;
+
+ if (!node) {
+ pr_warn("Invalid inode\n");
+ return range(NULL, 0);
+ }
+
+ df = node->n_file;
+
+ /*
+ * Pairs with the cmpxchg_release() in incfs_set_verity_digest().
+ * I.e., another task may publish ->df_verity_file_digest concurrently,
+ * executing a RELEASE barrier. We need to use smp_load_acquire() here
+ * to safely ACQUIRE the memory the other task published.
+ */
+ verity_file_digest.data = smp_load_acquire(
+ &df->df_verity_file_digest.data);
+ verity_file_digest.len = df->df_verity_file_digest.len;
+ return verity_file_digest;
+}
+
+static void incfs_set_verity_digest(struct inode *inode,
+ struct mem_range verity_file_digest)
+{
+ struct inode_info *node = get_incfs_node(inode);
+ struct data_file *df;
+
+ if (!node) {
+ pr_warn("Invalid inode\n");
+ kfree(verity_file_digest.data);
+ return;
+ }
+
+ df = node->n_file;
+ df->df_verity_file_digest.len = verity_file_digest.len;
+
+ /*
+ * Multiple tasks may race to set ->df_verity_file_digest.data, so use
+ * cmpxchg_release(). This pairs with the smp_load_acquire() in
+ * incfs_get_verity_digest(). I.e., here we publish
+ * ->df_verity_file_digest.data, with a RELEASE barrier so that other
+ * tasks can ACQUIRE it.
+ */
+ if (cmpxchg_release(&df->df_verity_file_digest.data, NULL,
+ verity_file_digest.data) != NULL)
+ /* Lost the race, so free the file_digest we allocated. */
+ kfree(verity_file_digest.data);
+}
+
+/* Calculate the digest of the fsverity_descriptor. */
+static struct mem_range incfs_calc_verity_digest_from_desc(
+ const struct inode *inode,
+ struct fsverity_descriptor *desc)
+{
+ enum incfs_hash_tree_algorithm incfs_hash_alg;
+ struct mem_range verity_file_digest;
+ int err;
+ const struct incfs_hash_alg *hash_alg;
+
+ incfs_hash_alg = incfs_convert_fsverity_hash_alg(desc->hash_algorithm);
+ if (incfs_hash_alg < 0)
+ return range(ERR_PTR(incfs_hash_alg), 0);
+
+ hash_alg = incfs_get_hash_alg(incfs_hash_alg);
+ if (IS_ERR(hash_alg))
+ return range((u8 *)hash_alg, 0);
+
+ verity_file_digest = range(kzalloc(hash_alg->digest_size, GFP_KERNEL),
+ hash_alg->digest_size);
+ if (!verity_file_digest.data)
+ return range(ERR_PTR(-ENOMEM), 0);
+
+ err = incfs_hash_buffer(hash_alg, desc, sizeof(*desc),
+ verity_file_digest.data);
+ if (err) {
+ pr_err("Error %d computing file digest", err);
+ kfree(verity_file_digest.data);
+ return range(ERR_PTR(err), 0);
+ }
+ pr_debug("Computed file digest: %s:%*phN\n",
+ hash_alg->name, (int) verity_file_digest.len,
+ verity_file_digest.data);
+ return verity_file_digest;
+}
+
+static struct fsverity_descriptor *incfs_get_fsverity_descriptor(
+ struct file *filp, int hash_algorithm)
+{
+ struct inode *inode = file_inode(filp);
+ struct fsverity_descriptor *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+ int err;
+
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ *desc = (struct fsverity_descriptor) {
+ .version = 1,
+ .hash_algorithm = hash_algorithm,
+ .log_blocksize = ilog2(INCFS_DATA_FILE_BLOCK_SIZE),
+ .data_size = cpu_to_le64(inode->i_size),
+ };
+
+ err = incfs_get_root_hash(filp, desc->root_hash);
+ if (err) {
+ kfree(desc);
+ return ERR_PTR(err);
+ }
+
+ return desc;
+}
+
+static struct mem_range incfs_calc_verity_digest(
+ struct inode *inode, struct file *filp,
+ int hash_algorithm)
+{
+ struct fsverity_descriptor *desc = incfs_get_fsverity_descriptor(filp,
+ hash_algorithm);
+ struct mem_range verity_file_digest;
+
+ if (IS_ERR(desc))
+ return range((u8 *)desc, 0);
+ verity_file_digest = incfs_calc_verity_digest_from_desc(inode, desc);
+ kfree(desc);
+ return verity_file_digest;
+}
+
+static int incfs_build_merkle_tree(struct file *f, struct data_file *df,
+ struct backing_file_context *bfc,
+ struct mtree *hash_tree, loff_t hash_offset,
+ const struct incfs_hash_alg *alg,
+ struct mem_range hash)
+{
+ int error = 0;
+ int limit, lvl, i, result;
+ struct mem_range buf = {.len = INCFS_DATA_FILE_BLOCK_SIZE};
+ struct mem_range tmp = {.len = 2 * INCFS_DATA_FILE_BLOCK_SIZE};
+
+ buf.data = (u8 *)kzalloc(buf.len, GFP_NOFS);
+ tmp.data = (u8 *)kzalloc(tmp.len, GFP_NOFS);
+ if (!buf.data || !tmp.data) {
+ error = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * lvl - 1 is the level we are reading, lvl the level we are writing
+ * lvl == -1 means actual blocks
+ * lvl == hash_tree->depth means root hash
+ */
+ limit = df->df_data_block_count;
+ for (lvl = 0; lvl <= hash_tree->depth; lvl++) {
+ for (i = 0; i < limit; ++i) {
+ loff_t hash_level_offset;
+ struct mem_range partial_buf = buf;
+
+ if (lvl == 0)
+ result = incfs_read_data_file_block(partial_buf,
+ f, i, tmp, NULL, NULL);
+ else {
+ hash_level_offset = hash_offset +
+ hash_tree->hash_level_suboffset[lvl - 1];
+
+ result = incfs_kread(bfc, partial_buf.data,
+ partial_buf.len,
+ hash_level_offset + i *
+ INCFS_DATA_FILE_BLOCK_SIZE);
+ }
+
+ if (result < 0) {
+ error = result;
+ goto out;
+ }
+
+ partial_buf.len = result;
+ error = incfs_hash_block(alg, partial_buf, hash);
+ if (error)
+ goto out;
+
+ /*
+ * last level - only one hash to take and it is stored
+ * in the incfs signature record
+ */
+ if (lvl == hash_tree->depth)
+ break;
+
+ hash_level_offset = hash_offset +
+ hash_tree->hash_level_suboffset[lvl];
+
+ result = incfs_kwrite(bfc, hash.data, hash.len,
+ hash_level_offset + hash.len * i);
+
+ if (result < 0) {
+ error = result;
+ goto out;
+ }
+
+ if (result != hash.len) {
+ error = -EIO;
+ goto out;
+ }
+ }
+ limit = DIV_ROUND_UP(limit,
+ INCFS_DATA_FILE_BLOCK_SIZE / hash.len);
+ }
+
+out:
+ kfree(tmp.data);
+ kfree(buf.data);
+ return error;
+}
+
+/*
+ * incfs files have a signature record that is separate from the
+ * verity_signature record. The signature record does not actually contain a
+ * signature, rather it contains the size/offset of the hash tree, and a binary
+ * blob which contains the root hash and potentially a signature.
+ *
+ * If the file was created with a signature record, then this function simply
+ * returns.
+ *
+ * Otherwise it will create a signature record with a minimal binary blob as
+ * defined by the structure below, create space for the hash tree and then
+ * populate it using incfs_build_merkle_tree
+ */
+static int incfs_add_signature_record(struct file *f)
+{
+ /* See incfs_parse_signature */
+ struct {
+ __le32 version;
+ __le32 size_of_hash_info_section;
+ struct {
+ __le32 hash_algorithm;
+ u8 log2_blocksize;
+ __le32 salt_size;
+ u8 salt[0];
+ __le32 hash_size;
+ u8 root_hash[32];
+ } __packed hash_section;
+ __le32 size_of_signing_info_section;
+ u8 signing_info_section[0];
+ } __packed sig = {
+ .version = cpu_to_le32(INCFS_SIGNATURE_VERSION),
+ .size_of_hash_info_section =
+ cpu_to_le32(sizeof(sig.hash_section)),
+ .hash_section = {
+ .hash_algorithm = cpu_to_le32(INCFS_HASH_TREE_SHA256),
+ .log2_blocksize = ilog2(INCFS_DATA_FILE_BLOCK_SIZE),
+ .hash_size = cpu_to_le32(SHA256_DIGEST_SIZE),
+ },
+ };
+
+ struct data_file *df = get_incfs_data_file(f);
+ struct mtree *hash_tree = NULL;
+ struct backing_file_context *bfc;
+ int error;
+ loff_t hash_offset, sig_offset;
+ const struct incfs_hash_alg *alg =
+ incfs_get_hash_alg(INCFS_HASH_TREE_SHA256);
+ u8 hash_buf[INCFS_MAX_HASH_SIZE];
+ int hash_size = alg->digest_size;
+ struct mem_range hash = range(hash_buf, hash_size);
+ int result;
+ struct incfs_df_signature *signature = NULL;
+
+ if (!df)
+ return -EINVAL;
+
+ if (df->df_header_flags & INCFS_FILE_MAPPED)
+ return -EINVAL;
+
+ /* Already signed? */
+ if (df->df_signature && df->df_hash_tree)
+ return 0;
+
+ if (df->df_signature || df->df_hash_tree)
+ return -EFSCORRUPTED;
+
+ /* Add signature metadata record to file */
+ hash_tree = incfs_alloc_mtree(range((u8 *)&sig, sizeof(sig)),
+ df->df_data_block_count);
+ if (IS_ERR(hash_tree))
+ return PTR_ERR(hash_tree);
+
+ bfc = df->df_backing_file_context;
+ if (!bfc) {
+ error = -EFSCORRUPTED;
+ goto out;
+ }
+
+ error = mutex_lock_interruptible(&bfc->bc_mutex);
+ if (error)
+ goto out;
+
+ error = incfs_write_signature_to_backing_file(bfc,
+ range((u8 *)&sig, sizeof(sig)),
+ hash_tree->hash_tree_area_size,
+ &hash_offset, &sig_offset);
+ mutex_unlock(&bfc->bc_mutex);
+ if (error)
+ goto out;
+
+ /* Populate merkle tree */
+ error = incfs_build_merkle_tree(f, df, bfc, hash_tree, hash_offset, alg,
+ hash);
+ if (error)
+ goto out;
+
+ /* Update signature metadata record */
+ memcpy(sig.hash_section.root_hash, hash.data, alg->digest_size);
+ result = incfs_kwrite(bfc, &sig, sizeof(sig), sig_offset);
+ if (result < 0) {
+ error = result;
+ goto out;
+ }
+
+ if (result != sizeof(sig)) {
+ error = -EIO;
+ goto out;
+ }
+
+ /* Update in-memory records */
+ memcpy(hash_tree->root_hash, hash.data, alg->digest_size);
+ signature = kzalloc(sizeof(*signature), GFP_NOFS);
+ if (!signature) {
+ error = -ENOMEM;
+ goto out;
+ }
+ *signature = (struct incfs_df_signature) {
+ .hash_offset = hash_offset,
+ .hash_size = hash_tree->hash_tree_area_size,
+ .sig_offset = sig_offset,
+ .sig_size = sizeof(sig),
+ };
+ df->df_signature = signature;
+ signature = NULL;
+
+ /*
+ * Use memory barrier to prevent readpage seeing the hash tree until
+ * it's fully there
+ */
+ smp_store_release(&df->df_hash_tree, hash_tree);
+ hash_tree = NULL;
+
+out:
+ kfree(signature);
+ kfree(hash_tree);
+ return error;
+}
+
+static int incfs_enable_verity(struct file *filp,
+ const struct fsverity_enable_arg *arg)
+{
+ struct inode *inode = file_inode(filp);
+ struct data_file *df = get_incfs_data_file(filp);
+ u8 *signature = NULL;
+ struct mem_range verity_file_digest = range(NULL, 0);
+ int err;
+
+ if (!df)
+ return -EFSCORRUPTED;
+
+ err = mutex_lock_interruptible(&df->df_enable_verity);
+ if (err)
+ return err;
+
+ if (IS_VERITY(inode)) {
+ err = -EEXIST;
+ goto out;
+ }
+
+ err = incfs_add_signature_record(filp);
+ if (err)
+ goto out;
+
+ /* Get the signature if the user provided one */
+ if (arg->sig_size) {
+ signature = memdup_user(u64_to_user_ptr(arg->sig_ptr),
+ arg->sig_size);
+ if (IS_ERR(signature)) {
+ err = PTR_ERR(signature);
+ signature = NULL;
+ goto out;
+ }
+ }
+
+ verity_file_digest = incfs_calc_verity_digest(inode, filp,
+ arg->hash_algorithm);
+ if (IS_ERR(verity_file_digest.data)) {
+ err = PTR_ERR(verity_file_digest.data);
+ verity_file_digest.data = NULL;
+ goto out;
+ }
+
+ err = incfs_end_enable_verity(filp, signature, arg->sig_size);
+ if (err)
+ goto out;
+
+ /* Successfully enabled verity */
+ incfs_set_verity_digest(inode, verity_file_digest);
+ verity_file_digest.data = NULL;
+out:
+ mutex_unlock(&df->df_enable_verity);
+ kfree(signature);
+ kfree(verity_file_digest.data);
+ if (err)
+ pr_err("%s failed with err %d\n", __func__, err);
+ return err;
+}
+
+int incfs_ioctl_enable_verity(struct file *filp, const void __user *uarg)
+{
+ struct inode *inode = file_inode(filp);
+ struct fsverity_enable_arg arg;
+
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ if (arg.version != 1)
+ return -EINVAL;
+
+ if (arg.__reserved1 ||
+ memchr_inv(arg.__reserved2, 0, sizeof(arg.__reserved2)))
+ return -EINVAL;
+
+ if (arg.hash_algorithm != FS_VERITY_HASH_ALG_SHA256)
+ return -EINVAL;
+
+ if (arg.block_size != PAGE_SIZE)
+ return -EINVAL;
+
+ if (arg.salt_size)
+ return -EINVAL;
+
+ if (arg.sig_size > FS_VERITY_MAX_SIGNATURE_SIZE)
+ return -EMSGSIZE;
+
+ if (S_ISDIR(inode->i_mode))
+ return -EISDIR;
+
+ if (!S_ISREG(inode->i_mode))
+ return -EINVAL;
+
+ return incfs_enable_verity(filp, &arg);
+}
+
+static u8 *incfs_get_verity_signature(struct file *filp, size_t *sig_size)
+{
+ struct data_file *df = get_incfs_data_file(filp);
+ struct incfs_df_verity_signature *vs;
+ u8 *signature;
+ int res;
+
+ if (!df || !df->df_backing_file_context)
+ return ERR_PTR(-EFSCORRUPTED);
+
+ vs = df->df_verity_signature;
+ if (!vs) {
+ *sig_size = 0;
+ return NULL;
+ }
+
+ if (!vs->size) {
+ *sig_size = 0;
+ return ERR_PTR(-EFSCORRUPTED);
+ }
+
+ signature = kzalloc(vs->size, GFP_KERNEL);
+ if (!signature)
+ return ERR_PTR(-ENOMEM);
+
+ res = incfs_kread(df->df_backing_file_context,
+ signature, vs->size, vs->offset);
+
+ if (res < 0)
+ goto err_out;
+
+ if (res != vs->size) {
+ res = -EINVAL;
+ goto err_out;
+ }
+
+ *sig_size = vs->size;
+ return signature;
+
+err_out:
+ kfree(signature);
+ return ERR_PTR(res);
+}
+
+/* Ensure data_file->df_verity_file_digest is populated */
+static int ensure_verity_info(struct inode *inode, struct file *filp)
+{
+ struct mem_range verity_file_digest;
+
+ /* See if this file's verity file digest is already cached */
+ verity_file_digest = incfs_get_verity_digest(inode);
+ if (verity_file_digest.data)
+ return 0;
+
+ verity_file_digest = incfs_calc_verity_digest(inode, filp,
+ FS_VERITY_HASH_ALG_SHA256);
+ if (IS_ERR(verity_file_digest.data))
+ return PTR_ERR(verity_file_digest.data);
+
+ incfs_set_verity_digest(inode, verity_file_digest);
+ return 0;
+}
+
+/**
+ * incfs_fsverity_file_open() - prepare to open a file that may be
+ * verity-enabled
+ * @inode: the inode being opened
+ * @filp: the struct file being set up
+ *
+ * When opening a verity file, set up data_file->df_verity_file_digest if not
+ * already done. Note that incfs does not allow opening for writing, so there is
+ * no need for that check.
+ *
+ * Return: 0 on success, -errno on failure
+ */
+int incfs_fsverity_file_open(struct inode *inode, struct file *filp)
+{
+ if (IS_VERITY(inode))
+ return ensure_verity_info(inode, filp);
+
+ return 0;
+}
+
+int incfs_ioctl_measure_verity(struct file *filp, void __user *_uarg)
+{
+ struct inode *inode = file_inode(filp);
+ struct mem_range verity_file_digest = incfs_get_verity_digest(inode);
+ struct fsverity_digest __user *uarg = _uarg;
+ struct fsverity_digest arg;
+
+ if (!verity_file_digest.data || !verity_file_digest.len)
+ return -ENODATA; /* not a verity file */
+
+ /*
+ * The user specifies the digest_size their buffer has space for; we can
+ * return the digest if it fits in the available space. We write back
+ * the actual size, which may be shorter than the user-specified size.
+ */
+
+ if (get_user(arg.digest_size, &uarg->digest_size))
+ return -EFAULT;
+ if (arg.digest_size < verity_file_digest.len)
+ return -EOVERFLOW;
+
+ memset(&arg, 0, sizeof(arg));
+ arg.digest_algorithm = FS_VERITY_HASH_ALG_SHA256;
+ arg.digest_size = verity_file_digest.len;
+
+ if (copy_to_user(uarg, &arg, sizeof(arg)))
+ return -EFAULT;
+
+ if (copy_to_user(uarg->digest, verity_file_digest.data,
+ verity_file_digest.len))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int incfs_read_merkle_tree(struct file *filp, void __user *buf,
+ u64 start_offset, int length)
+{
+ struct mem_range tmp_buf;
+ size_t offset;
+ int retval = 0;
+ int err = 0;
+ struct data_file *df = get_incfs_data_file(filp);
+
+ if (!df)
+ return -EINVAL;
+
+ tmp_buf = (struct mem_range) {
+ .data = kzalloc(INCFS_DATA_FILE_BLOCK_SIZE, GFP_NOFS),
+ .len = INCFS_DATA_FILE_BLOCK_SIZE,
+ };
+ if (!tmp_buf.data)
+ return -ENOMEM;
+
+ for (offset = start_offset; offset < start_offset + length;
+ offset += tmp_buf.len) {
+ err = incfs_read_merkle_tree_blocks(tmp_buf, df, offset);
+
+ if (err < 0)
+ break;
+
+ if (err != tmp_buf.len)
+ break;
+
+ if (copy_to_user(buf, tmp_buf.data, tmp_buf.len))
+ break;
+
+ buf += tmp_buf.len;
+ retval += tmp_buf.len;
+ }
+
+ kfree(tmp_buf.data);
+ return retval ? retval : err;
+}
+
+static int incfs_read_descriptor(struct file *filp,
+ void __user *buf, u64 offset, int length)
+{
+ int err;
+ struct fsverity_descriptor *desc = incfs_get_fsverity_descriptor(filp,
+ FS_VERITY_HASH_ALG_SHA256);
+
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+ length = min_t(u64, length, sizeof(*desc));
+ err = copy_to_user(buf, desc, length);
+ kfree(desc);
+ return err ? err : length;
+}
+
+static int incfs_read_signature(struct file *filp,
+ void __user *buf, u64 offset, int length)
+{
+ size_t sig_size;
+ static u8 *signature;
+ int err;
+
+ signature = incfs_get_verity_signature(filp, &sig_size);
+ if (IS_ERR(signature))
+ return PTR_ERR(signature);
+
+ if (!signature)
+ return -ENODATA;
+
+ length = min_t(u64, length, sig_size);
+ err = copy_to_user(buf, signature, length);
+ kfree(signature);
+ return err ? err : length;
+}
+
+int incfs_ioctl_read_verity_metadata(struct file *filp,
+ const void __user *uarg)
+{
+ struct fsverity_read_metadata_arg arg;
+ int length;
+ void __user *buf;
+
+ if (copy_from_user(&arg, uarg, sizeof(arg)))
+ return -EFAULT;
+
+ if (arg.__reserved)
+ return -EINVAL;
+
+ /* offset + length must not overflow. */
+ if (arg.offset + arg.length < arg.offset)
+ return -EINVAL;
+
+ /* Ensure that the return value will fit in INT_MAX. */
+ length = min_t(u64, arg.length, INT_MAX);
+
+ buf = u64_to_user_ptr(arg.buf_ptr);
+
+ switch (arg.metadata_type) {
+ case FS_VERITY_METADATA_TYPE_MERKLE_TREE:
+ return incfs_read_merkle_tree(filp, buf, arg.offset, length);
+ case FS_VERITY_METADATA_TYPE_DESCRIPTOR:
+ return incfs_read_descriptor(filp, buf, arg.offset, length);
+ case FS_VERITY_METADATA_TYPE_SIGNATURE:
+ return incfs_read_signature(filp, buf, arg.offset, length);
+ default:
+ return -EINVAL;
+ }
+}
diff --git a/fs/incfs/verity.h b/fs/incfs/verity.h
new file mode 100644
index 0000000..8fcdbc8
--- /dev/null
+++ b/fs/incfs/verity.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2020 Google LLC
+ */
+
+#ifndef _INCFS_VERITY_H
+#define _INCFS_VERITY_H
+
+/* Arbitrary limit to bound the kmalloc() size. Can be changed. */
+#define FS_VERITY_MAX_SIGNATURE_SIZE 16128
+
+#ifdef CONFIG_FS_VERITY
+
+int incfs_ioctl_enable_verity(struct file *filp, const void __user *uarg);
+int incfs_ioctl_measure_verity(struct file *filp, void __user *_uarg);
+
+int incfs_fsverity_file_open(struct inode *inode, struct file *filp);
+int incfs_ioctl_read_verity_metadata(struct file *filp,
+ const void __user *uarg);
+
+#else /* !CONFIG_FS_VERITY */
+
+static inline int incfs_ioctl_enable_verity(struct file *filp,
+ const void __user *uarg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int incfs_ioctl_measure_verity(struct file *filp,
+ void __user *_uarg)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int incfs_fsverity_file_open(struct inode *inode,
+ struct file *filp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int incfs_ioctl_read_verity_metadata(struct file *filp,
+ const void __user *uarg)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* !CONFIG_FS_VERITY */
+
+#endif
diff --git a/fs/incfs/vfs.c b/fs/incfs/vfs.c
new file mode 100644
index 0000000..e1ffa14
--- /dev/null
+++ b/fs/incfs/vfs.c
@@ -0,0 +1,2039 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC
+ */
+
+#include <linux/blkdev.h>
+#include <linux/compat.h>
+#include <linux/delay.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/fs_parser.h>
+#include <linux/fs_stack.h>
+#include <linux/fsnotify.h>
+#include <linux/fsverity.h>
+#include <linux/mmap_lock.h>
+#include <linux/namei.h>
+#include <linux/pagemap.h>
+#include <linux/parser.h>
+#include <linux/seq_file.h>
+#include <linux/backing-dev-defs.h>
+
+#include <uapi/linux/incrementalfs.h>
+
+#include "vfs.h"
+
+#include "data_mgmt.h"
+#include "format.h"
+#include "internal.h"
+#include "pseudo_files.h"
+#include "sysfs.h"
+#include "verity.h"
+
+static int incfs_parse_param(struct fs_context *fc, struct fs_parameter *param);
+static int incfs_get_tree(struct fs_context *fc);
+static int incfs_reconfigure(struct fs_context *fc);
+static void incfs_fc_free(struct fs_context *fc);
+static int incfs_fc_dup(struct fs_context *fc, struct fs_context *src_fc);
+
+static int dentry_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *dentry, unsigned int flags);
+static void dentry_release(struct dentry *d);
+
+static int iterate_incfs_dir(struct file *file, struct dir_context *ctx);
+static struct dentry *dir_lookup(struct inode *dir_inode,
+ struct dentry *dentry, unsigned int flags);
+static struct dentry *dir_mkdir(struct mnt_idmap *idmap, struct inode *dir,
+ struct dentry *dentry, umode_t mode);
+static int dir_unlink(struct inode *dir, struct dentry *dentry);
+static int dir_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry);
+static int dir_rmdir(struct inode *dir, struct dentry *dentry);
+static int dir_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags);
+
+static int file_open(struct inode *inode, struct file *file);
+static int file_release(struct inode *inode, struct file *file);
+static int read_folio(struct file *f, struct folio *folio);
+static long dispatch_ioctl(struct file *f, unsigned int req, unsigned long arg);
+
+#ifdef CONFIG_COMPAT
+static long incfs_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+#endif
+
+static struct inode *incfs_alloc_inode(struct super_block *sb);
+static void incfs_free_inode(struct inode *inode);
+static void incfs_evict_inode(struct inode *inode);
+
+static int incfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *ia);
+static int incfs_getattr(struct mnt_idmap *idmap, const struct path *path,
+ struct kstat *stat, u32 request_mask,
+ unsigned int query_flags);
+static ssize_t incfs_getxattr(struct dentry *d, const char *name,
+ void *value, size_t size);
+static ssize_t incfs_setxattr(struct mnt_idmap *idmap, struct dentry *d,
+ const char *name, void *value, size_t size,
+ int flags);
+static ssize_t incfs_listxattr(struct dentry *d, char *list, size_t size);
+
+static int incfs_show_options(struct seq_file *, struct dentry *);
+
+static const struct super_operations incfs_super_ops = {
+ .statfs = simple_statfs,
+ .alloc_inode = incfs_alloc_inode,
+ .destroy_inode = incfs_free_inode,
+ .evict_inode = incfs_evict_inode,
+ .show_options = incfs_show_options
+};
+
+static const struct fs_context_operations incfs_context_ops = {
+ .parse_param = incfs_parse_param,
+ .get_tree = incfs_get_tree,
+ .reconfigure = incfs_reconfigure,
+ .free = incfs_fc_free,
+ .dup = incfs_fc_dup,
+};
+
+static int dir_rename_wrap(struct mnt_idmap *idmap, struct inode *old_dir,
+ struct dentry *old_dentry, struct inode *new_dir,
+ struct dentry *new_dentry, unsigned int flags)
+{
+ return dir_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
+}
+
+static const struct inode_operations incfs_dir_inode_ops = {
+ .lookup = dir_lookup,
+ .mkdir = dir_mkdir,
+ .rename = dir_rename_wrap,
+ .unlink = dir_unlink,
+ .link = dir_link,
+ .rmdir = dir_rmdir,
+ .setattr = incfs_setattr,
+};
+
+WRAP_DIR_ITER(iterate_incfs_dir) // FIXME!
+static const struct file_operations incfs_dir_fops = {
+ .llseek = generic_file_llseek,
+ .read = generic_read_dir,
+ .iterate_shared = shared_iterate_incfs_dir,
+ .open = file_open,
+ .release = file_release,
+};
+
+static const struct dentry_operations incfs_dentry_ops = {
+ .d_revalidate = dentry_revalidate,
+ .d_release = dentry_release
+};
+
+static const struct address_space_operations incfs_address_space_ops = {
+ .read_folio = read_folio,
+ /* .readpages = readpages */
+};
+
+static vm_fault_t incfs_fault(struct vm_fault *vmf)
+{
+ struct file *file = vmf->vma->vm_file;
+ struct data_file *df = get_incfs_data_file(file);
+ struct backing_file_context *bfc = df ? df->df_backing_file_context : NULL;
+
+ /*
+ * This is something of a kludge
+ * We want to retry if the read from the underlying file is interrupted,
+ * but not if the read fails because the stored data is corrupt since the
+ * latter causes an infinite loop.
+ *
+ * However, whether we wish to retry must be set before we call
+ * filemap_fault, *and* there is no way of getting the read error code out
+ * of filemap_fault.
+ *
+ * So unless there is a robust solution to both the above problems, we can
+ * solve the actual issues we have encoutered by retrying unless there is
+ * known corruption in the backing file. This does mean that we won't retry
+ * with a corrupt backing file if a (good) read is interrupted, but we
+ * don't really handle corruption well anyway at this time.
+ */
+ if (bfc && bfc->bc_has_bad_block)
+ vmf->flags &= ~FAULT_FLAG_ALLOW_RETRY;
+ return filemap_fault(vmf);
+}
+
+static const struct vm_operations_struct incfs_file_vm_ops = {
+ .fault = incfs_fault,
+ .map_pages = filemap_map_pages,
+ .page_mkwrite = filemap_page_mkwrite,
+};
+
+/* This is used for a general mmap of a disk file */
+
+static int incfs_file_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct address_space *mapping = file->f_mapping;
+
+ if (!mapping->a_ops->read_folio)
+ return -ENOEXEC;
+ file_accessed(file);
+ vma->vm_ops = &incfs_file_vm_ops;
+ return 0;
+}
+
+const struct file_operations incfs_file_ops = {
+ .open = file_open,
+ .release = file_release,
+ .read_iter = generic_file_read_iter,
+ .mmap = incfs_file_mmap,
+ .splice_read = filemap_splice_read,
+ .llseek = generic_file_llseek,
+ .unlocked_ioctl = dispatch_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = incfs_compat_ioctl,
+#endif
+};
+
+const struct inode_operations incfs_file_inode_ops = {
+ .setattr = incfs_setattr,
+ .getattr = incfs_getattr,
+ .listxattr = incfs_listxattr
+};
+
+static int incfs_handler_getxattr(const struct xattr_handler *xh,
+ struct dentry *d, struct inode *inode,
+ const char *name, void *buffer, size_t size)
+{
+ return incfs_getxattr(d, name, buffer, size);
+}
+
+static int incfs_handler_setxattr(const struct xattr_handler *xh,
+ struct mnt_idmap *idmap,
+ struct dentry *d, struct inode *inode,
+ const char *name, const void *buffer,
+ size_t size, int flags)
+{
+ return incfs_setxattr(idmap, d, name, (void *)buffer, size, flags);
+}
+
+static const struct xattr_handler incfs_xattr_handler = {
+ .prefix = "", /* AKA all attributes */
+ .get = incfs_handler_getxattr,
+ .set = incfs_handler_setxattr,
+};
+
+static const struct xattr_handler *incfs_xattr_ops[] = {
+ &incfs_xattr_handler,
+ NULL,
+};
+
+struct inode_search {
+ unsigned long ino;
+
+ struct dentry *backing_dentry;
+
+ size_t size;
+
+ bool verity;
+};
+
+
+/* Read file size from the attribute. Quicker than reading the header */
+static u64 read_size_attr(struct dentry *backing_dentry)
+{
+ __le64 attr_value;
+ ssize_t bytes_read;
+
+ bytes_read = vfs_getxattr(&nop_mnt_idmap, backing_dentry, INCFS_XATTR_SIZE_NAME,
+ (char *)&attr_value, sizeof(attr_value));
+
+ if (bytes_read != sizeof(attr_value))
+ return 0;
+
+ return le64_to_cpu(attr_value);
+}
+
+/* Read verity flag from the attribute. Quicker than reading the header */
+static bool read_verity_attr(struct dentry *backing_dentry)
+{
+ return vfs_getxattr(&nop_mnt_idmap, backing_dentry, INCFS_XATTR_VERITY_NAME, NULL, 0)
+ >= 0;
+}
+
+static int inode_test(struct inode *inode, void *opaque)
+{
+ struct inode_search *search = opaque;
+ struct inode_info *node = get_incfs_node(inode);
+ struct inode *backing_inode = d_inode(search->backing_dentry);
+
+ if (!node)
+ return 0;
+
+ return node->n_backing_inode == backing_inode &&
+ inode->i_ino == search->ino;
+}
+
+static int inode_set(struct inode *inode, void *opaque)
+{
+ struct inode_search *search = opaque;
+ struct inode_info *node = get_incfs_node(inode);
+ struct dentry *backing_dentry = search->backing_dentry;
+ struct inode *backing_inode = d_inode(backing_dentry);
+
+ fsstack_copy_attr_all(inode, backing_inode);
+ if (S_ISREG(inode->i_mode)) {
+ u64 size = search->size;
+
+ inode->i_size = size;
+ inode->i_blocks = get_blocks_count_for_size(size);
+ inode->i_mapping->a_ops = &incfs_address_space_ops;
+ inode->i_op = &incfs_file_inode_ops;
+ inode->i_fop = &incfs_file_ops;
+ inode->i_mode &= ~0222;
+ if (search->verity)
+ inode_set_flags(inode, S_VERITY, S_VERITY);
+ } else if (S_ISDIR(inode->i_mode)) {
+ inode->i_size = 0;
+ inode->i_blocks = 1;
+ inode->i_mapping->a_ops = &incfs_address_space_ops;
+ inode->i_op = &incfs_dir_inode_ops;
+ inode->i_fop = &incfs_dir_fops;
+ } else {
+ pr_warn_once("incfs: Unexpected inode type\n");
+ return -EBADF;
+ }
+
+ ihold(backing_inode);
+ node->n_backing_inode = backing_inode;
+ node->n_mount_info = get_mount_info(inode->i_sb);
+ inode_set_ctime_to_ts(inode, inode_get_ctime(backing_inode));
+ inode_set_mtime_to_ts(inode, inode_get_mtime(backing_inode));
+ inode_set_atime_to_ts(inode, inode_get_atime(backing_inode));
+ inode->i_ino = backing_inode->i_ino;
+ if (backing_inode->i_ino < INCFS_START_INO_RANGE) {
+ pr_warn("incfs: ino conflict with backing FS %ld\n",
+ backing_inode->i_ino);
+ }
+
+ return 0;
+}
+
+static struct inode *fetch_regular_inode(struct super_block *sb,
+ struct dentry *backing_dentry)
+{
+ struct inode *backing_inode = d_inode(backing_dentry);
+ struct inode_search search = {
+ .ino = backing_inode->i_ino,
+ .backing_dentry = backing_dentry,
+ .size = read_size_attr(backing_dentry),
+ .verity = read_verity_attr(backing_dentry),
+ };
+ struct inode *inode = iget5_locked(sb, search.ino, inode_test,
+ inode_set, &search);
+
+ if (!inode)
+ return ERR_PTR(-ENOMEM);
+
+ if (inode_state_read_once(inode) & I_NEW)
+ unlock_new_inode(inode);
+
+ return inode;
+}
+
+static int iterate_incfs_dir(struct file *file, struct dir_context *ctx)
+{
+ struct dir_file *dir = get_incfs_dir_file(file);
+ int error = 0;
+ struct mount_info *mi = get_mount_info(file_superblock(file));
+ bool root;
+
+ if (!dir) {
+ error = -EBADF;
+ goto out;
+ }
+
+ root = dir->backing_dir->f_inode
+ == d_inode(mi->mi_backing_dir_path.dentry);
+
+ if (root) {
+ error = emit_pseudo_files(ctx);
+ if (error)
+ goto out;
+ }
+
+ ctx->pos -= PSEUDO_FILE_COUNT;
+ error = iterate_dir(dir->backing_dir, ctx);
+ ctx->pos += PSEUDO_FILE_COUNT;
+ file->f_pos = dir->backing_dir->f_pos;
+out:
+ if (error)
+ pr_warn("incfs: %s %s %d\n", __func__,
+ file->f_path.dentry->d_name.name, error);
+ return error;
+}
+
+static int incfs_init_dentry(struct dentry *dentry, struct path *path)
+{
+ struct dentry_info *d_info = NULL;
+
+ if (!dentry || !path)
+ return -EFAULT;
+
+ d_info = kzalloc(sizeof(*d_info), GFP_NOFS);
+ if (!d_info)
+ return -ENOMEM;
+
+ d_info->backing_path = *path;
+ path_get(path);
+
+ dentry->d_fsdata = d_info;
+ return 0;
+}
+
+static struct dentry *open_or_create_special_dir(struct dentry *backing_dir,
+ const char *name,
+ bool *created)
+{
+ struct dentry *index_dentry;
+ struct inode *backing_inode = d_inode(backing_dir);
+
+ index_dentry = incfs_lookup_dentry(backing_dir, name);
+ if (!index_dentry) {
+ return ERR_PTR(-EINVAL);
+ } else if (IS_ERR(index_dentry)) {
+ return index_dentry;
+ } else if (d_really_is_positive(index_dentry)) {
+ /* Index already exists. */
+ *created = false;
+ return index_dentry;
+ }
+
+ /* Index needs to be created. */
+ inode_lock_nested(backing_inode, I_MUTEX_PARENT);
+ index_dentry = vfs_mkdir(&nop_mnt_idmap, backing_inode, index_dentry,
+ 0777, NULL);
+ inode_unlock(backing_inode);
+
+ if (IS_ERR(index_dentry)) {
+ dput(index_dentry);
+ return ERR_CAST(index_dentry);
+ }
+
+ if (!d_really_is_positive(index_dentry) ||
+ unlikely(d_unhashed(index_dentry))) {
+ dput(index_dentry);
+ return ERR_PTR(-EINVAL);
+ }
+
+ *created = true;
+ return index_dentry;
+}
+
+static int read_single_page_timeouts(struct data_file *df, struct file *f,
+ int block_index, struct mem_range range,
+ struct mem_range tmp,
+ unsigned int *delayed_min_us)
+{
+ struct mount_info *mi = df->df_mount_info;
+ struct incfs_read_data_file_timeouts timeouts = {
+ .max_pending_time_us = U32_MAX,
+ };
+ int uid = current_uid().val;
+ int i;
+
+ spin_lock(&mi->mi_per_uid_read_timeouts_lock);
+ for (i = 0; i < mi->mi_per_uid_read_timeouts_size /
+ sizeof(*mi->mi_per_uid_read_timeouts); ++i) {
+ struct incfs_per_uid_read_timeouts *t =
+ &mi->mi_per_uid_read_timeouts[i];
+
+ if(t->uid == uid) {
+ timeouts.min_time_us = t->min_time_us;
+ timeouts.min_pending_time_us = t->min_pending_time_us;
+ timeouts.max_pending_time_us = t->max_pending_time_us;
+ break;
+ }
+ }
+ spin_unlock(&mi->mi_per_uid_read_timeouts_lock);
+ if (timeouts.max_pending_time_us == U32_MAX) {
+ u64 read_timeout_us = (u64)mi->mi_options.read_timeout_ms *
+ 1000;
+
+ timeouts.max_pending_time_us = read_timeout_us <= U32_MAX ?
+ read_timeout_us : U32_MAX;
+ }
+
+ return incfs_read_data_file_block(range, f, block_index, tmp,
+ &timeouts, delayed_min_us);
+}
+
+static int usleep_interruptible(u32 us)
+{
+ /* See:
+ * https://www.kernel.org/doc/Documentation/timers/timers-howto.txt
+ * for explanation
+ */
+ if (us < 10) {
+ udelay(us);
+ return 0;
+ } else if (us < 20000) {
+ usleep_range(us, us + us / 10);
+ return 0;
+ } else
+ return msleep_interruptible(us / 1000);
+}
+
+static int read_folio(struct file *f, struct folio *folio)
+{
+ struct page *page = &folio->page;
+ loff_t offset = 0;
+ loff_t size = 0;
+ ssize_t total_read = 0;
+ struct data_file *df = get_incfs_data_file(f);
+ int result = 0;
+ void *page_start;
+ int block_index;
+ unsigned int delayed_min_us = 0;
+ struct mem_range tmp = {
+ .len = 2 * INCFS_DATA_FILE_BLOCK_SIZE
+ };
+
+ if (!df) {
+ SetPageError(page);
+ unlock_page(page);
+ return -EBADF;
+ }
+
+ page_start = kmap(page);
+ offset = page_offset(page);
+ block_index = (offset + df->df_mapped_offset) /
+ INCFS_DATA_FILE_BLOCK_SIZE;
+ size = df->df_size;
+
+ tmp.data = kzalloc(tmp.len, GFP_NOFS);
+ if (!tmp.data) {
+ result = -ENOMEM;
+ goto err;
+ }
+
+ while (offset + total_read < size) {
+ ssize_t bytes_to_read = min_t(loff_t,
+ size - offset - total_read,
+ INCFS_DATA_FILE_BLOCK_SIZE);
+
+ result = read_single_page_timeouts(df, f, block_index,
+ range(page_start + total_read, bytes_to_read),
+ tmp, &delayed_min_us);
+ if (result < 0)
+ break;
+
+ total_read += result;
+ block_index++;
+
+ if (result < INCFS_DATA_FILE_BLOCK_SIZE)
+ break;
+ if (total_read == PAGE_SIZE)
+ break;
+ }
+ kfree(tmp.data);
+err:
+ if (result < 0)
+ total_read = 0;
+ else
+ result = 0;
+ if (total_read < PAGE_SIZE)
+ memzero_page(page, total_read, PAGE_SIZE - total_read);
+
+ if (result == -EBADMSG) {
+ struct backing_file_context *bfc = df ? df->df_backing_file_context : NULL;
+
+ if (bfc)
+ bfc->bc_has_bad_block = 1;
+ }
+
+ if (result == 0)
+ SetPageUptodate(page);
+ else
+ SetPageError(page);
+
+ flush_dcache_page(page);
+ kunmap(page);
+ unlock_page(page);
+ if (delayed_min_us)
+ usleep_interruptible(delayed_min_us);
+ return result;
+}
+
+int incfs_link(struct dentry *what, struct dentry *where)
+{
+ struct dentry *parent_dentry = dget_parent(where);
+ struct inode *pinode = d_inode(parent_dentry);
+ int error = 0;
+
+ inode_lock_nested(pinode, I_MUTEX_PARENT);
+ error = vfs_link(what, &nop_mnt_idmap, pinode, where, NULL);
+ inode_unlock(pinode);
+
+ dput(parent_dentry);
+ return error;
+}
+
+int incfs_unlink(struct dentry *dentry)
+{
+ struct dentry *parent_dentry = dget_parent(dentry);
+ struct inode *pinode = d_inode(parent_dentry);
+ int error = 0;
+
+ inode_lock_nested(pinode, I_MUTEX_PARENT);
+ error = vfs_unlink(&nop_mnt_idmap, pinode, dentry, NULL);
+ inode_unlock(pinode);
+
+ dput(parent_dentry);
+ return error;
+}
+
+static int incfs_rmdir(struct dentry *dentry)
+{
+ struct dentry *parent_dentry = dget_parent(dentry);
+ struct inode *pinode = d_inode(parent_dentry);
+ int error = 0;
+
+ inode_lock_nested(pinode, I_MUTEX_PARENT);
+ error = vfs_rmdir(&nop_mnt_idmap, pinode, dentry, NULL);
+ inode_unlock(pinode);
+
+ dput(parent_dentry);
+ return error;
+}
+
+static void notify_unlink(struct dentry *dentry, const char *file_id_str,
+ const char *special_directory)
+{
+ struct dentry *root = dentry;
+ struct dentry *file = NULL;
+ struct dentry *dir = NULL;
+ int error = 0;
+ bool take_lock = root->d_parent != root->d_parent->d_parent;
+
+ while (root != root->d_parent)
+ root = root->d_parent;
+
+ if (take_lock)
+ dir = incfs_lookup_dentry(root, special_directory);
+ else
+ dir = lookup_noperm(&QSTR(special_directory), root);
+
+ if (IS_ERR(dir)) {
+ error = PTR_ERR(dir);
+ goto out;
+ }
+ if (d_is_negative(dir)) {
+ error = -ENOENT;
+ goto out;
+ }
+
+ file = incfs_lookup_dentry(dir, file_id_str);
+ if (IS_ERR(file)) {
+ error = PTR_ERR(file);
+ goto out;
+ }
+ if (d_is_negative(file)) {
+ error = -ENOENT;
+ goto out;
+ }
+
+ fsnotify_unlink(d_inode(dir), file);
+ d_delete(file);
+
+out:
+ if (error)
+ pr_warn("%s failed with error %d\n", __func__, error);
+
+ dput(dir);
+ dput(file);
+}
+
+static void handle_file_completed(struct file *f, struct data_file *df)
+{
+ struct backing_file_context *bfc;
+ struct mount_info *mi = df->df_mount_info;
+ char *file_id_str = NULL;
+ struct dentry *incomplete_file_dentry = NULL;
+ const struct cred *old_cred = override_creds(mi->mi_owner);
+ int error;
+
+ /* Truncate file to remove any preallocated space */
+ bfc = df->df_backing_file_context;
+ if (bfc) {
+ struct file *f = bfc->bc_file;
+
+ if (f) {
+ loff_t size = i_size_read(file_inode(f));
+
+ error = vfs_truncate(&f->f_path, size);
+ if (error)
+ /* No useful action on failure */
+ pr_warn("incfs: Failed to truncate complete file: %d\n",
+ error);
+ }
+ }
+
+ /* This is best effort - there is no useful action to take on failure */
+ file_id_str = file_id_to_str(df->df_id);
+ if (!file_id_str)
+ goto out;
+
+ incomplete_file_dentry = incfs_lookup_dentry(
+ df->df_mount_info->mi_incomplete_dir,
+ file_id_str);
+ if (!incomplete_file_dentry || IS_ERR(incomplete_file_dentry)) {
+ incomplete_file_dentry = NULL;
+ goto out;
+ }
+
+ if (!d_really_is_positive(incomplete_file_dentry))
+ goto out;
+
+ vfs_fsync(df->df_backing_file_context->bc_file, 0);
+ error = incfs_unlink(incomplete_file_dentry);
+ if (error) {
+ pr_warn("incfs: Deleting incomplete file failed: %d\n", error);
+ goto out;
+ }
+
+ notify_unlink(f->f_path.dentry, file_id_str, INCFS_INCOMPLETE_NAME);
+
+out:
+ dput(incomplete_file_dentry);
+ kfree(file_id_str);
+ revert_creds(old_cred);
+}
+
+static long ioctl_fill_blocks(struct file *f, void __user *arg)
+{
+ struct incfs_fill_blocks __user *usr_fill_blocks = arg;
+ struct incfs_fill_blocks fill_blocks;
+ struct incfs_fill_block __user *usr_fill_block_array;
+ struct data_file *df = get_incfs_data_file(f);
+ struct incfs_file_data *fd = f->private_data;
+ const ssize_t data_buf_size = 2 * INCFS_DATA_FILE_BLOCK_SIZE;
+ u8 *data_buf = NULL;
+ ssize_t error = 0;
+ int i = 0;
+ bool complete = false;
+
+ if (!df)
+ return -EBADF;
+
+ if (!fd || fd->fd_fill_permission != CAN_FILL)
+ return -EPERM;
+
+ if (copy_from_user(&fill_blocks, usr_fill_blocks, sizeof(fill_blocks)))
+ return -EFAULT;
+
+ usr_fill_block_array = u64_to_user_ptr(fill_blocks.fill_blocks);
+ data_buf = (u8 *)kzalloc(data_buf_size, GFP_NOFS);
+ if (!data_buf)
+ return -ENOMEM;
+
+ for (i = 0; i < fill_blocks.count; i++) {
+ struct incfs_fill_block fill_block = {};
+
+ if (copy_from_user(&fill_block, &usr_fill_block_array[i],
+ sizeof(fill_block)) > 0) {
+ error = -EFAULT;
+ break;
+ }
+
+ if (fill_block.data_len > data_buf_size) {
+ error = -E2BIG;
+ break;
+ }
+
+ if (copy_from_user(data_buf, u64_to_user_ptr(fill_block.data),
+ fill_block.data_len) > 0) {
+ error = -EFAULT;
+ break;
+ }
+ fill_block.data = 0; /* To make sure nobody uses it. */
+ if (fill_block.flags & INCFS_BLOCK_FLAGS_HASH) {
+ error = incfs_process_new_hash_block(df, &fill_block,
+ data_buf);
+ } else {
+ error = incfs_process_new_data_block(df, &fill_block,
+ data_buf, &complete);
+ }
+ if (error)
+ break;
+ }
+
+ kfree(data_buf);
+
+ if (complete)
+ handle_file_completed(f, df);
+
+ /*
+ * Only report the error if no records were processed, otherwise
+ * just return how many were processed successfully.
+ */
+ if (i == 0)
+ return error;
+
+ return i;
+}
+
+static long ioctl_read_file_signature(struct file *f, void __user *arg)
+{
+ struct incfs_get_file_sig_args __user *args_usr_ptr = arg;
+ struct incfs_get_file_sig_args args = {};
+ u8 *sig_buffer = NULL;
+ size_t sig_buf_size = 0;
+ int error = 0;
+ int read_result = 0;
+ struct data_file *df = get_incfs_data_file(f);
+
+ if (!df)
+ return -EINVAL;
+
+ if (copy_from_user(&args, args_usr_ptr, sizeof(args)) > 0)
+ return -EINVAL;
+
+ sig_buf_size = args.file_signature_buf_size;
+ if (sig_buf_size > INCFS_MAX_SIGNATURE_SIZE)
+ return -E2BIG;
+
+ sig_buffer = kzalloc(sig_buf_size, GFP_NOFS | __GFP_COMP);
+ if (!sig_buffer)
+ return -ENOMEM;
+
+ read_result = incfs_read_file_signature(df,
+ range(sig_buffer, sig_buf_size));
+
+ if (read_result < 0) {
+ error = read_result;
+ goto out;
+ }
+
+ if (copy_to_user(u64_to_user_ptr(args.file_signature), sig_buffer,
+ read_result)) {
+ error = -EFAULT;
+ goto out;
+ }
+
+ args.file_signature_len_out = read_result;
+ if (copy_to_user(args_usr_ptr, &args, sizeof(args)))
+ error = -EFAULT;
+
+out:
+ kfree(sig_buffer);
+
+ return error;
+}
+
+static long ioctl_get_filled_blocks(struct file *f, void __user *arg)
+{
+ struct incfs_get_filled_blocks_args __user *args_usr_ptr = arg;
+ struct incfs_get_filled_blocks_args args = {};
+ struct data_file *df = get_incfs_data_file(f);
+ struct incfs_file_data *fd = f->private_data;
+ int error;
+
+ if (!df || !fd)
+ return -EINVAL;
+
+ if (fd->fd_fill_permission != CAN_FILL)
+ return -EPERM;
+
+ if (copy_from_user(&args, args_usr_ptr, sizeof(args)) > 0)
+ return -EINVAL;
+
+ error = incfs_get_filled_blocks(df, fd, &args);
+
+ if (copy_to_user(args_usr_ptr, &args, sizeof(args)))
+ return -EFAULT;
+
+ return error;
+}
+
+static long ioctl_get_block_count(struct file *f, void __user *arg)
+{
+ struct incfs_get_block_count_args __user *args_usr_ptr = arg;
+ struct incfs_get_block_count_args args = {};
+ struct data_file *df = get_incfs_data_file(f);
+
+ if (!df)
+ return -EINVAL;
+
+ args.total_data_blocks_out = df->df_data_block_count;
+ args.filled_data_blocks_out = atomic_read(&df->df_data_blocks_written);
+ args.total_hash_blocks_out = df->df_total_block_count -
+ df->df_data_block_count;
+ args.filled_hash_blocks_out = atomic_read(&df->df_hash_blocks_written);
+
+ if (copy_to_user(args_usr_ptr, &args, sizeof(args)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static int incfs_ioctl_get_flags(struct file *f, void __user *arg)
+{
+ u32 flags = IS_VERITY(file_inode(f)) ? FS_VERITY_FL : 0;
+
+ return put_user(flags, (int __user *) arg);
+}
+
+static long dispatch_ioctl(struct file *f, unsigned int req, unsigned long arg)
+{
+ switch (req) {
+ case INCFS_IOC_FILL_BLOCKS:
+ return ioctl_fill_blocks(f, (void __user *)arg);
+ case INCFS_IOC_READ_FILE_SIGNATURE:
+ return ioctl_read_file_signature(f, (void __user *)arg);
+ case INCFS_IOC_GET_FILLED_BLOCKS:
+ return ioctl_get_filled_blocks(f, (void __user *)arg);
+ case INCFS_IOC_GET_BLOCK_COUNT:
+ return ioctl_get_block_count(f, (void __user *)arg);
+ case FS_IOC_ENABLE_VERITY:
+ return incfs_ioctl_enable_verity(f, (const void __user *)arg);
+ case FS_IOC_GETFLAGS:
+ return incfs_ioctl_get_flags(f, (void __user *) arg);
+ case FS_IOC_MEASURE_VERITY:
+ return incfs_ioctl_measure_verity(f, (void __user *)arg);
+ case FS_IOC_READ_VERITY_METADATA:
+ return incfs_ioctl_read_verity_metadata(f, (void __user *)arg);
+ default:
+ return -EINVAL;
+ }
+}
+
+#ifdef CONFIG_COMPAT
+static long incfs_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case FS_IOC32_GETFLAGS:
+ cmd = FS_IOC_GETFLAGS;
+ break;
+ case INCFS_IOC_FILL_BLOCKS:
+ case INCFS_IOC_READ_FILE_SIGNATURE:
+ case INCFS_IOC_GET_FILLED_BLOCKS:
+ case INCFS_IOC_GET_BLOCK_COUNT:
+ case FS_IOC_ENABLE_VERITY:
+ case FS_IOC_MEASURE_VERITY:
+ case FS_IOC_READ_VERITY_METADATA:
+ break;
+ default:
+ return -ENOIOCTLCMD;
+ }
+ return dispatch_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
+static struct dentry *dir_lookup(struct inode *dir_inode, struct dentry *dentry,
+ unsigned int flags)
+{
+ struct mount_info *mi = get_mount_info(dir_inode->i_sb);
+ struct dentry *dir_dentry = NULL;
+ struct dentry *backing_dentry = NULL;
+ struct path dir_backing_path = {};
+ struct inode_info *dir_info = get_incfs_node(dir_inode);
+ int err = 0;
+
+ if (!mi || !dir_info || !dir_info->n_backing_inode)
+ return ERR_PTR(-EBADF);
+
+ if (d_inode(mi->mi_backing_dir_path.dentry) ==
+ dir_info->n_backing_inode) {
+ /* We do lookup in the FS root. Show pseudo files. */
+ err = dir_lookup_pseudo_files(dir_inode->i_sb, dentry);
+ if (err != -ENOENT)
+ goto out;
+ err = 0;
+ }
+
+ dir_dentry = dget_parent(dentry);
+ get_incfs_backing_path(dir_dentry, &dir_backing_path);
+ backing_dentry = incfs_lookup_dentry(dir_backing_path.dentry,
+ dentry->d_name.name);
+
+ if (!backing_dentry || IS_ERR(backing_dentry)) {
+ err = IS_ERR(backing_dentry)
+ ? PTR_ERR(backing_dentry)
+ : -EFAULT;
+ backing_dentry = NULL;
+ goto out;
+ } else {
+ struct inode *inode = NULL;
+ struct path backing_path = {
+ .mnt = dir_backing_path.mnt,
+ .dentry = backing_dentry
+ };
+
+ err = incfs_init_dentry(dentry, &backing_path);
+ if (err)
+ goto out;
+
+ if (!d_really_is_positive(backing_dentry)) {
+ /*
+ * No such entry found in the backing dir.
+ * Create a negative entry.
+ */
+ d_add(dentry, NULL);
+ err = 0;
+ goto out;
+ }
+
+ if (d_inode(backing_dentry)->i_sb !=
+ dir_info->n_backing_inode->i_sb) {
+ /*
+ * Somehow after the path lookup we ended up in a
+ * different fs mount. If we keep going it's going
+ * to end badly.
+ */
+ err = -EXDEV;
+ goto out;
+ }
+
+ inode = fetch_regular_inode(dir_inode->i_sb, backing_dentry);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto out;
+ }
+
+ d_add(dentry, inode);
+ }
+
+out:
+ dput(dir_dentry);
+ dput(backing_dentry);
+ path_put(&dir_backing_path);
+ if (err)
+ pr_debug("incfs: %s %s %d\n", __func__,
+ dentry->d_name.name, err);
+ return ERR_PTR(err);
+}
+
+static struct dentry *dir_mkdir(struct mnt_idmap *idmap, struct inode *dir, struct dentry *dentry, umode_t mode)
+{
+ struct mount_info *mi = get_mount_info(dir->i_sb);
+ struct inode_info *dir_node = get_incfs_node(dir);
+ struct dentry *backing_dentry = NULL;
+ struct path backing_path = {};
+ int err = 0;
+
+
+ if (!mi || !dir_node || !dir_node->n_backing_inode)
+ return ERR_PTR(-EBADF);
+
+ err = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+ if (err)
+ return ERR_PTR(err);
+
+ get_incfs_backing_path(dentry, &backing_path);
+ backing_dentry = backing_path.dentry;
+
+ if (!backing_dentry) {
+ err = -EBADF;
+ goto path_err;
+ }
+
+ if (backing_dentry->d_parent == mi->mi_index_dir) {
+ /* Can't create a subdir inside .index */
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (backing_dentry->d_parent == mi->mi_incomplete_dir) {
+ /* Can't create a subdir inside .incomplete */
+ err = -EBUSY;
+ goto out;
+ }
+ inode_lock_nested(dir_node->n_backing_inode, I_MUTEX_PARENT);
+ backing_dentry = vfs_mkdir(idmap, dir_node->n_backing_inode,
+ backing_dentry, mode | 0222, NULL);
+ inode_unlock(dir_node->n_backing_inode);
+ if (!IS_ERR(backing_dentry)) {
+ struct inode *inode = NULL;
+
+ if (d_really_is_negative(backing_dentry) ||
+ unlikely(d_unhashed(backing_dentry))) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ inode = fetch_regular_inode(dir->i_sb, backing_dentry);
+ if (IS_ERR(inode)) {
+ err = PTR_ERR(inode);
+ goto out;
+ }
+ d_instantiate(dentry, inode);
+ }
+
+out:
+ if (d_really_is_negative(dentry))
+ d_drop(dentry);
+ path_put(&backing_path);
+
+path_err:
+ mutex_unlock(&mi->mi_dir_struct_mutex);
+ if (err)
+ pr_debug("incfs: %s err:%d\n", __func__, err);
+ return ERR_PTR(err);
+}
+
+/*
+ * Delete file referenced by backing_dentry and if appropriate its hardlink
+ * from .index and .incomplete
+ */
+static int file_delete(struct mount_info *mi, struct dentry *dentry,
+ struct dentry *backing_dentry, int nlink)
+{
+ struct dentry *index_file_dentry = NULL;
+ struct dentry *incomplete_file_dentry = NULL;
+ /* 2 chars per byte of file ID + 1 char for \0 */
+ char file_id_str[2 * sizeof(incfs_uuid_t) + 1] = {0};
+ ssize_t uuid_size = 0;
+ int error = 0;
+
+ WARN_ON(!mutex_is_locked(&mi->mi_dir_struct_mutex));
+
+ if (nlink > 3)
+ goto just_unlink;
+
+ uuid_size = vfs_getxattr(&nop_mnt_idmap, backing_dentry, INCFS_XATTR_ID_NAME,
+ file_id_str, 2 * sizeof(incfs_uuid_t));
+ if (uuid_size < 0) {
+ error = uuid_size;
+ goto out;
+ }
+
+ if (uuid_size != 2 * sizeof(incfs_uuid_t)) {
+ error = -EBADMSG;
+ goto out;
+ }
+
+ index_file_dentry = incfs_lookup_dentry(mi->mi_index_dir, file_id_str);
+ if (IS_ERR(index_file_dentry)) {
+ error = PTR_ERR(index_file_dentry);
+ index_file_dentry = NULL;
+ goto out;
+ }
+
+ if (d_really_is_positive(index_file_dentry) && nlink > 0)
+ nlink--;
+
+ if (nlink > 2)
+ goto just_unlink;
+
+ incomplete_file_dentry = incfs_lookup_dentry(mi->mi_incomplete_dir,
+ file_id_str);
+ if (IS_ERR(incomplete_file_dentry)) {
+ error = PTR_ERR(incomplete_file_dentry);
+ incomplete_file_dentry = NULL;
+ goto out;
+ }
+
+ if (d_really_is_positive(incomplete_file_dentry) && nlink > 0)
+ nlink--;
+
+ if (nlink > 1)
+ goto just_unlink;
+
+ if (d_really_is_positive(index_file_dentry)) {
+ error = incfs_unlink(index_file_dentry);
+ if (error)
+ goto out;
+ notify_unlink(dentry, file_id_str, INCFS_INDEX_NAME);
+ }
+
+ if (d_really_is_positive(incomplete_file_dentry)) {
+ error = incfs_unlink(incomplete_file_dentry);
+ if (error)
+ goto out;
+ notify_unlink(dentry, file_id_str, INCFS_INCOMPLETE_NAME);
+ }
+
+just_unlink:
+ error = incfs_unlink(backing_dentry);
+
+out:
+ dput(index_file_dentry);
+ dput(incomplete_file_dentry);
+ if (error)
+ pr_debug("incfs: delete_file_from_index err:%d\n", error);
+ return error;
+}
+
+static int dir_unlink(struct inode *dir, struct dentry *dentry)
+{
+ struct mount_info *mi = get_mount_info(dir->i_sb);
+ struct path backing_path = {};
+ struct kstat stat;
+ int err = 0;
+
+ if (!mi)
+ return -EBADF;
+
+ err = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+ if (err)
+ return err;
+
+ get_incfs_backing_path(dentry, &backing_path);
+ if (!backing_path.dentry) {
+ err = -EBADF;
+ goto path_err;
+ }
+
+ if (backing_path.dentry->d_parent == mi->mi_index_dir) {
+ /* Direct unlink from .index are not allowed. */
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (backing_path.dentry->d_parent == mi->mi_incomplete_dir) {
+ /* Direct unlink from .incomplete are not allowed. */
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = vfs_getattr(&backing_path, &stat, STATX_NLINK,
+ AT_STATX_SYNC_AS_STAT);
+ if (err)
+ goto out;
+
+ err = file_delete(mi, dentry, backing_path.dentry, stat.nlink);
+
+ d_drop(dentry);
+out:
+ path_put(&backing_path);
+path_err:
+ if (err)
+ pr_debug("incfs: %s err:%d\n", __func__, err);
+ mutex_unlock(&mi->mi_dir_struct_mutex);
+ return err;
+}
+
+static int dir_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry)
+{
+ struct mount_info *mi = get_mount_info(dir->i_sb);
+ struct path backing_old_path = {};
+ struct path backing_new_path = {};
+ int error = 0;
+
+ if (!mi)
+ return -EBADF;
+
+ error = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+ if (error)
+ return error;
+
+ get_incfs_backing_path(old_dentry, &backing_old_path);
+ get_incfs_backing_path(new_dentry, &backing_new_path);
+
+ if (backing_new_path.dentry->d_parent == mi->mi_index_dir) {
+ /* Can't link to .index */
+ error = -EBUSY;
+ goto out;
+ }
+
+ if (backing_new_path.dentry->d_parent == mi->mi_incomplete_dir) {
+ /* Can't link to .incomplete */
+ error = -EBUSY;
+ goto out;
+ }
+
+ error = incfs_link(backing_old_path.dentry, backing_new_path.dentry);
+ if (!error) {
+ struct inode *inode = NULL;
+ struct dentry *bdentry = backing_new_path.dentry;
+
+ if (d_really_is_negative(bdentry)) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ inode = fetch_regular_inode(dir->i_sb, bdentry);
+ if (IS_ERR(inode)) {
+ error = PTR_ERR(inode);
+ goto out;
+ }
+ d_instantiate(new_dentry, inode);
+ }
+
+out:
+ path_put(&backing_old_path);
+ path_put(&backing_new_path);
+ if (error)
+ pr_debug("incfs: %s err:%d\n", __func__, error);
+ mutex_unlock(&mi->mi_dir_struct_mutex);
+ return error;
+}
+
+static int dir_rmdir(struct inode *dir, struct dentry *dentry)
+{
+ struct mount_info *mi = get_mount_info(dir->i_sb);
+ struct path backing_path = {};
+ int err = 0;
+
+ if (!mi)
+ return -EBADF;
+
+ err = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+ if (err)
+ return err;
+
+ get_incfs_backing_path(dentry, &backing_path);
+ if (!backing_path.dentry) {
+ err = -EBADF;
+ goto path_err;
+ }
+
+ if (backing_path.dentry == mi->mi_index_dir) {
+ /* Can't delete .index */
+ err = -EBUSY;
+ goto out;
+ }
+
+ if (backing_path.dentry == mi->mi_incomplete_dir) {
+ /* Can't delete .incomplete */
+ err = -EBUSY;
+ goto out;
+ }
+
+ err = incfs_rmdir(backing_path.dentry);
+ if (!err)
+ d_drop(dentry);
+out:
+ path_put(&backing_path);
+
+path_err:
+ if (err)
+ pr_debug("incfs: %s err:%d\n", __func__, err);
+ mutex_unlock(&mi->mi_dir_struct_mutex);
+ return err;
+}
+
+static int dir_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ struct mount_info *mi = get_mount_info(old_dir->i_sb);
+ struct dentry *backing_old_dentry;
+ struct dentry *backing_new_dentry;
+ struct dentry *backing_old_dir_dentry;
+ struct dentry *backing_new_dir_dentry;
+ struct inode *target_inode;
+ struct dentry *trap;
+ struct renamedata rd = {};
+ int error = 0;
+
+ error = mutex_lock_interruptible(&mi->mi_dir_struct_mutex);
+ if (error)
+ return error;
+
+ backing_old_dentry = get_incfs_dentry(old_dentry)->backing_path.dentry;
+
+ if (!backing_old_dentry || backing_old_dentry == mi->mi_index_dir ||
+ backing_old_dentry == mi->mi_incomplete_dir) {
+ /* Renaming .index or .incomplete not allowed */
+ error = -EBUSY;
+ goto exit;
+ }
+
+ backing_new_dentry = get_incfs_dentry(new_dentry)->backing_path.dentry;
+ dget(backing_old_dentry);
+ dget(backing_new_dentry);
+
+ backing_old_dir_dentry = dget_parent(backing_old_dentry);
+ backing_new_dir_dentry = dget_parent(backing_new_dentry);
+ target_inode = d_inode(new_dentry);
+
+ if (backing_old_dir_dentry == mi->mi_index_dir ||
+ backing_old_dir_dentry == mi->mi_incomplete_dir) {
+ /* Direct moves from .index or .incomplete are not allowed. */
+ error = -EBUSY;
+ goto out;
+ }
+
+ trap = lock_rename(backing_old_dir_dentry, backing_new_dir_dentry);
+
+ if (trap == backing_old_dentry) {
+ error = -EINVAL;
+ goto unlock_out;
+ }
+ if (trap == backing_new_dentry) {
+ error = -ENOTEMPTY;
+ goto unlock_out;
+ }
+
+ rd.old_parent = backing_old_dir_dentry;
+ rd.old_dentry = backing_old_dentry;
+ rd.new_parent = backing_new_dir_dentry;
+ rd.new_dentry = backing_new_dentry;
+ rd.flags = flags;
+ rd.mnt_idmap = &nop_mnt_idmap;
+ rd.delegated_inode = NULL;
+
+ error = vfs_rename(&rd);
+ if (error)
+ goto unlock_out;
+ if (target_inode)
+ fsstack_copy_attr_all(target_inode,
+ get_incfs_node(target_inode)->n_backing_inode);
+ fsstack_copy_attr_all(new_dir, d_inode(backing_new_dir_dentry));
+ if (new_dir != old_dir)
+ fsstack_copy_attr_all(old_dir, d_inode(backing_old_dir_dentry));
+
+unlock_out:
+ unlock_rename(backing_old_dir_dentry, backing_new_dir_dentry);
+
+out:
+ dput(backing_new_dir_dentry);
+ dput(backing_old_dir_dentry);
+ dput(backing_new_dentry);
+ dput(backing_old_dentry);
+
+exit:
+ mutex_unlock(&mi->mi_dir_struct_mutex);
+ if (error)
+ pr_debug("incfs: %s err:%d\n", __func__, error);
+ return error;
+}
+
+
+static int file_open(struct inode *inode, struct file *file)
+{
+ struct mount_info *mi = get_mount_info(inode->i_sb);
+ struct file *backing_file = NULL;
+ struct path backing_path = {};
+ int err = 0;
+ int flags = O_NOATIME | O_LARGEFILE |
+ (S_ISDIR(inode->i_mode) ? O_RDONLY : O_RDWR);
+ const struct cred *old_cred;
+
+ WARN_ON(file->private_data);
+
+ if (!mi)
+ return -EBADF;
+
+ get_incfs_backing_path(file->f_path.dentry, &backing_path);
+ if (!backing_path.dentry)
+ return -EBADF;
+
+ old_cred = override_creds(mi->mi_owner);
+ backing_file = dentry_open(&backing_path, flags, current_cred());
+ revert_creds(old_cred);
+ path_put(&backing_path);
+
+ if (IS_ERR(backing_file)) {
+ err = PTR_ERR(backing_file);
+ backing_file = NULL;
+ goto out;
+ }
+
+ if (S_ISREG(inode->i_mode)) {
+ struct incfs_file_data *fd = kzalloc(sizeof(*fd), GFP_NOFS);
+
+ if (!fd) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ *fd = (struct incfs_file_data) {
+ .fd_fill_permission = CANT_FILL,
+ };
+ file->private_data = fd;
+
+ err = make_inode_ready_for_data_ops(mi, inode, backing_file);
+ if (err)
+ goto out;
+
+ err = incfs_fsverity_file_open(inode, file);
+ if (err)
+ goto out;
+ } else if (S_ISDIR(inode->i_mode)) {
+ struct dir_file *dir = NULL;
+
+ dir = incfs_open_dir_file(mi, backing_file);
+ if (IS_ERR(dir))
+ err = PTR_ERR(dir);
+ else
+ file->private_data = dir;
+ } else
+ err = -EBADF;
+
+out:
+ if (err) {
+ pr_debug("name:%s err: %d\n",
+ file->f_path.dentry->d_name.name, err);
+ if (S_ISREG(inode->i_mode))
+ kfree(file->private_data);
+ else if (S_ISDIR(inode->i_mode))
+ incfs_free_dir_file(file->private_data);
+
+ file->private_data = NULL;
+ }
+
+ if (backing_file)
+ fput(backing_file);
+ return err;
+}
+
+static int file_release(struct inode *inode, struct file *file)
+{
+ if (S_ISREG(inode->i_mode)) {
+ kfree(file->private_data);
+ file->private_data = NULL;
+ } else if (S_ISDIR(inode->i_mode)) {
+ struct dir_file *dir = get_incfs_dir_file(file);
+
+ incfs_free_dir_file(dir);
+ }
+
+ return 0;
+}
+
+static int dentry_revalidate(struct inode *dir, const struct qstr *name,
+ struct dentry *d, unsigned int flags)
+{
+ struct path backing_path = {};
+ struct inode_info *info = get_incfs_node(d_inode(d));
+ struct inode *binode = (info == NULL) ? NULL : info->n_backing_inode;
+ struct dentry *backing_dentry = NULL;
+ int result = 0;
+
+ if (flags & LOOKUP_RCU)
+ return -ECHILD;
+
+ get_incfs_backing_path(d, &backing_path);
+ backing_dentry = backing_path.dentry;
+ if (!backing_dentry)
+ goto out;
+
+ if (d_inode(backing_dentry) != binode) {
+ /*
+ * Backing inodes obtained via dentry and inode don't match.
+ * It indicates that most likely backing dir has changed
+ * directly bypassing Incremental FS interface.
+ */
+ goto out;
+ }
+
+ if (backing_dentry->d_flags & DCACHE_OP_REVALIDATE) {
+ struct inode_info *dir_info = get_incfs_node(dir);
+ struct inode *backing_dir = dir_info ? dir_info->n_backing_inode : NULL;
+ struct name_snapshot n;
+
+ if (!backing_dir)
+ goto out;
+ take_dentry_name_snapshot(&n, backing_dentry);
+ result = backing_dentry->d_op->d_revalidate(backing_dir,
+ &n.name, backing_dentry, flags);
+ release_dentry_name_snapshot(&n);
+ } else
+ result = 1;
+
+out:
+ path_put(&backing_path);
+ return result;
+}
+
+static void dentry_release(struct dentry *d)
+{
+ struct dentry_info *di = get_incfs_dentry(d);
+
+ if (di)
+ path_put(&di->backing_path);
+ kfree(d->d_fsdata);
+ d->d_fsdata = NULL;
+}
+
+static struct inode *incfs_alloc_inode(struct super_block *sb)
+{
+ struct inode_info *node = kzalloc(sizeof(*node), GFP_NOFS);
+
+ /* TODO: add a slab-based cache here. */
+ if (!node)
+ return NULL;
+ inode_init_once(&node->n_vfs_inode);
+ return &node->n_vfs_inode;
+}
+
+static void incfs_free_inode(struct inode *inode)
+{
+ struct inode_info *node = get_incfs_node(inode);
+
+ kfree(node);
+}
+
+static void incfs_evict_inode(struct inode *inode)
+{
+ struct inode_info *node = get_incfs_node(inode);
+
+ if (node) {
+ if (node->n_backing_inode) {
+ iput(node->n_backing_inode);
+ node->n_backing_inode = NULL;
+ }
+ if (node->n_file) {
+ incfs_free_data_file(node->n_file);
+ node->n_file = NULL;
+ }
+ }
+
+ truncate_inode_pages(&inode->i_data, 0);
+ clear_inode(inode);
+}
+
+static int incfs_setattr(struct mnt_idmap *idmap, struct dentry *dentry,
+ struct iattr *ia)
+{
+ struct dentry_info *di = get_incfs_dentry(dentry);
+ struct dentry *backing_dentry;
+ struct inode *backing_inode;
+ int error;
+
+ if (ia->ia_valid & ATTR_SIZE)
+ return -EINVAL;
+
+ if ((ia->ia_valid & (ATTR_KILL_SUID|ATTR_KILL_SGID)) &&
+ (ia->ia_valid & ATTR_MODE))
+ return -EINVAL;
+
+ if (!di)
+ return -EINVAL;
+ backing_dentry = di->backing_path.dentry;
+ if (!backing_dentry)
+ return -EINVAL;
+
+ backing_inode = d_inode(backing_dentry);
+
+ /* incfs files are readonly, but the backing files must be writeable */
+ if (S_ISREG(backing_inode->i_mode)) {
+ if ((ia->ia_valid & ATTR_MODE) && (ia->ia_mode & 0222))
+ return -EINVAL;
+
+ ia->ia_mode |= 0222;
+ }
+
+ inode_lock(d_inode(backing_dentry));
+ error = notify_change(idmap, backing_dentry, ia, NULL);
+ inode_unlock(d_inode(backing_dentry));
+
+ if (error)
+ return error;
+
+ if (S_ISREG(backing_inode->i_mode))
+ ia->ia_mode &= ~0222;
+
+ return simple_setattr(idmap, dentry, ia);
+}
+
+
+static int incfs_getattr(struct mnt_idmap *idmap, const struct path *path,
+ struct kstat *stat, u32 request_mask,
+ unsigned int query_flags)
+{
+ struct inode *inode = d_inode(path->dentry);
+
+ generic_fillattr(idmap, request_mask, inode, stat);
+
+ if (inode->i_ino < INCFS_START_INO_RANGE)
+ return 0;
+
+ stat->attributes &= ~STATX_ATTR_VERITY;
+ if (IS_VERITY(inode))
+ stat->attributes |= STATX_ATTR_VERITY;
+ stat->attributes_mask |= STATX_ATTR_VERITY;
+
+ if (request_mask & STATX_BLOCKS) {
+ struct kstat backing_kstat;
+ struct dentry_info *di = get_incfs_dentry(path->dentry);
+ int error = 0;
+ struct path *backing_path;
+
+ if (!di)
+ return -EFSCORRUPTED;
+ backing_path = &di->backing_path;
+ error = vfs_getattr(backing_path, &backing_kstat, STATX_BLOCKS,
+ AT_STATX_SYNC_AS_STAT);
+ if (error)
+ return error;
+
+ stat->blocks = backing_kstat.blocks;
+ }
+
+ return 0;
+}
+
+static ssize_t incfs_getxattr(struct dentry *d, const char *name,
+ void *value, size_t size)
+{
+ struct dentry_info *di = get_incfs_dentry(d);
+ struct mount_info *mi = get_mount_info(d->d_sb);
+ char *stored_value;
+ size_t stored_size;
+ int i;
+
+ if (di && di->backing_path.dentry)
+ return vfs_getxattr(&nop_mnt_idmap, di->backing_path.dentry, name, value, size);
+
+ if (strcmp(name, "security.selinux"))
+ return -ENODATA;
+
+ for (i = 0; i < PSEUDO_FILE_COUNT; ++i)
+ if (!strcmp(d->d_iname, incfs_pseudo_file_names[i].data))
+ break;
+ if (i == PSEUDO_FILE_COUNT)
+ return -ENODATA;
+
+ stored_value = mi->pseudo_file_xattr[i].data;
+ stored_size = mi->pseudo_file_xattr[i].len;
+ if (!stored_value)
+ return -ENODATA;
+
+ if (stored_size > size)
+ return -E2BIG;
+
+ memcpy(value, stored_value, stored_size);
+ return stored_size;
+}
+
+
+static ssize_t incfs_setxattr(struct mnt_idmap *idmap, struct dentry *d,
+ const char *name, void *value, size_t size,
+ int flags)
+{
+ struct dentry_info *di = get_incfs_dentry(d);
+ struct mount_info *mi = get_mount_info(d->d_sb);
+ u8 **stored_value;
+ size_t *stored_size;
+ int i;
+
+ if (di && di->backing_path.dentry)
+ return vfs_setxattr(idmap, di->backing_path.dentry, name, value,
+ size, flags);
+
+ if (strcmp(name, "security.selinux"))
+ return -ENODATA;
+
+ if (size > INCFS_MAX_FILE_ATTR_SIZE)
+ return -E2BIG;
+
+ for (i = 0; i < PSEUDO_FILE_COUNT; ++i)
+ if (!strcmp(d->d_iname, incfs_pseudo_file_names[i].data))
+ break;
+ if (i == PSEUDO_FILE_COUNT)
+ return -ENODATA;
+
+ stored_value = &mi->pseudo_file_xattr[i].data;
+ stored_size = &mi->pseudo_file_xattr[i].len;
+ kfree (*stored_value);
+ *stored_value = kzalloc(size, GFP_NOFS);
+ if (!*stored_value)
+ return -ENOMEM;
+
+ memcpy(*stored_value, value, size);
+ *stored_size = size;
+ return 0;
+}
+
+static ssize_t incfs_listxattr(struct dentry *d, char *list, size_t size)
+{
+ struct dentry_info *di = get_incfs_dentry(d);
+
+ if (!di || !di->backing_path.dentry)
+ return -ENODATA;
+
+ return vfs_listxattr(di->backing_path.dentry, list, size);
+}
+
+int incfs_init_fs_context(struct fs_context *fc)
+{
+ struct mount_options *ctx = kzalloc(sizeof(struct mount_options), GFP_KERNEL);
+
+ if (!ctx)
+ return -ENOMEM;
+
+ *ctx = (struct mount_options) {
+ .read_timeout_ms = 1000, /* Default: 1s */
+ .readahead_pages = 10,
+ .read_log_pages = 2,
+ .read_log_wakeup_count = 10,
+ };
+
+ fc->fs_private = ctx;
+ fc->ops = &incfs_context_ops;
+
+ /* i_version is always enabled now */
+ fc->sb_flags |= SB_I_VERSION;
+ return 0;
+}
+
+enum {
+ Opt_read_timeout,
+ Opt_readahead_pages,
+ Opt_rlog_pages,
+ Opt_rlog_wakeup_cnt,
+ Opt_report_uid,
+ Opt_sysfs_name,
+};
+
+const struct fs_parameter_spec incfs_param_specs[] = {
+ fsparam_u32("read_timeout_ms", Opt_read_timeout),
+ fsparam_u32("readahead", Opt_readahead_pages),
+ fsparam_u32("rlog_pages", Opt_rlog_pages),
+ fsparam_u32("rlog_wakeup_cnt", Opt_rlog_wakeup_cnt),
+ fsparam_flag("report_uid", Opt_report_uid),
+ fsparam_file_or_string("sysfs_name", Opt_sysfs_name),
+ {}
+};
+
+static int incfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
+{
+ struct mount_options *ctx = fc->fs_private;
+ struct fs_parse_result result;
+ int token = fs_parse(fc, incfs_param_specs, param, &result);
+
+ if (token < 0)
+ return token;
+
+ switch (token) {
+ case Opt_read_timeout:
+ if (result.uint_32 > 3600000)
+ return -EINVAL;
+ ctx->read_timeout_ms = result.uint_32;
+ return 0;
+ case Opt_readahead_pages:
+ ctx->readahead_pages = result.uint_32;
+ return 0;
+ case Opt_rlog_pages:
+ ctx->read_log_pages = result.uint_32;
+ return 0;
+ case Opt_rlog_wakeup_cnt:
+ ctx->read_log_wakeup_count = result.uint_32;
+ return 0;
+ case Opt_report_uid:
+ ctx->report_uid = true;
+ return 0;
+ case Opt_sysfs_name:
+ swap(ctx->sysfs_name, param->string);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int incfs_reconfigure(struct fs_context *fc)
+{
+ struct super_block *sb = fc->root->d_sb;
+ struct mount_options *ctx = fc->fs_private;
+ struct mount_info *mi = get_mount_info(sb);
+
+ pr_debug("incfs: %s\n", __func__);
+ sync_filesystem(sb);
+
+ if (ctx->report_uid != mi->mi_options.report_uid) {
+ pr_err("incfs: Can't change report_uid mount option on remount\n");
+ return -EOPNOTSUPP;
+ }
+
+ return incfs_realloc_mount_info(mi, ctx);
+}
+
+static void incfs_fc_free(struct fs_context *fc)
+{
+ struct mount_options *ctx = fc->fs_private;
+
+ if (!ctx)
+ return;
+ kfree(ctx->sysfs_name);
+ kfree(ctx);
+}
+
+static int incfs_fc_dup(struct fs_context *fc, struct fs_context *src_fc)
+{
+ struct mount_options *src_ctx = src_fc->fs_private;
+ struct mount_options *ctx;
+ int err;
+
+ if (!src_ctx)
+ return -EINVAL;
+
+ ctx = kmemdup(src_ctx, sizeof(struct mount_options), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (ctx->sysfs_name) {
+ ctx->sysfs_name = kmemdup(ctx->sysfs_name, strlen(ctx->sysfs_name) + 1, GFP_KERNEL);
+ if (!ctx->sysfs_name) {
+ err = -ENOMEM;
+ goto free_ctx;
+ }
+ }
+
+ fc->fs_private = ctx;
+ return 0;
+
+free_ctx:
+ kfree(ctx);
+ return err;
+}
+
+static int incfs_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ struct mount_options *ctx = fc->fs_private;
+ struct mount_info *mi = NULL;
+ struct path backing_dir_path = {};
+ struct dentry *index_dir = NULL;
+ struct dentry *incomplete_dir = NULL;
+ struct super_block *src_fs_sb = NULL;
+ struct inode *root_inode = NULL;
+ bool dir_created = false;
+ int error = 0;
+
+ sb->s_op = &incfs_super_ops;
+ set_default_d_op(sb, &incfs_dentry_ops);
+ sb->s_flags |= S_NOATIME;
+ sb->s_magic = INCFS_MAGIC_NUMBER;
+ sb->s_time_gran = 1;
+ sb->s_blocksize = INCFS_DATA_FILE_BLOCK_SIZE;
+ sb->s_blocksize_bits = blksize_bits(sb->s_blocksize);
+ sb->s_xattr = incfs_xattr_ops;
+ sb->s_bdi->ra_pages = ctx->readahead_pages;
+
+ error = kern_path(fc->source, LOOKUP_FOLLOW | LOOKUP_DIRECTORY,
+ &backing_dir_path);
+ if (error || backing_dir_path.dentry == NULL ||
+ !d_really_is_positive(backing_dir_path.dentry)) {
+ pr_err("incfs: Error accessing: %s.\n", fc->source);
+ goto err_deactivate;
+ }
+ src_fs_sb = backing_dir_path.dentry->d_sb;
+ sb->s_maxbytes = src_fs_sb->s_maxbytes;
+ sb->s_stack_depth = src_fs_sb->s_stack_depth + 1;
+
+ if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
+ error = -EINVAL;
+ goto err_put_path;
+ }
+
+ mi = incfs_alloc_mount_info(sb, ctx, &backing_dir_path);
+ if (IS_ERR_OR_NULL(mi)) {
+ error = PTR_ERR(mi);
+ pr_err("incfs: Error allocating mount info. %d\n", error);
+ goto err_put_path;
+ }
+ fc->s_fs_info = mi;
+
+ sb->s_fs_info = mi;
+ mi->mi_backing_dir_path = backing_dir_path;
+ index_dir = open_or_create_special_dir(backing_dir_path.dentry,
+ INCFS_INDEX_NAME, &dir_created);
+ if (IS_ERR_OR_NULL(index_dir)) {
+ error = PTR_ERR(index_dir);
+ pr_err("incfs: Can't find or create .index dir in %s\n",
+ fc->source);
+ /* No need to null index_dir since we don't put it */
+ goto err_put_path;
+ }
+
+ mi->mi_index_dir = index_dir;
+ mi->mi_index_free = dir_created;
+
+ incomplete_dir = open_or_create_special_dir(backing_dir_path.dentry,
+ INCFS_INCOMPLETE_NAME,
+ &dir_created);
+ if (IS_ERR_OR_NULL(incomplete_dir)) {
+ error = PTR_ERR(incomplete_dir);
+ pr_err("incfs: Can't find or create .incomplete dir in %s\n",
+ fc->source);
+ /* No need to null incomplete_dir since we don't put it */
+ goto err_put_path;
+ }
+ mi->mi_incomplete_dir = incomplete_dir;
+ mi->mi_incomplete_free = dir_created;
+
+ root_inode = fetch_regular_inode(sb, backing_dir_path.dentry);
+ if (IS_ERR(root_inode)) {
+ error = PTR_ERR(root_inode);
+ goto err_put_path;
+ }
+
+ sb->s_root = d_make_root(root_inode);
+ if (!sb->s_root) {
+ error = -ENOMEM;
+ goto err_put_path;
+ }
+ error = incfs_init_dentry(sb->s_root, &backing_dir_path);
+ if (error)
+ goto err_put_path;
+
+ path_put(&backing_dir_path);
+ return 0;
+
+err_put_path:
+ path_put(&backing_dir_path);
+err_deactivate:
+ deactivate_locked_super(sb);
+ pr_err("incfs: mount failed %d\n", error);
+ return error;
+}
+
+static int incfs_get_tree(struct fs_context *fc)
+{
+ return get_tree_nodev(fc, incfs_fill_super);
+}
+
+void incfs_kill_sb(struct super_block *sb)
+{
+ struct mount_info *mi = sb->s_fs_info;
+ struct inode *dinode = NULL;
+
+ pr_debug("incfs: incfs_kill_sb\n");
+
+ /*
+ * We must kill the super before freeing mi, since killing the super
+ * triggers inode eviction, which triggers the final update of the
+ * backing file, which uses certain information for mi
+ */
+ kill_anon_super(sb);
+
+ if (mi) {
+ if (mi->mi_backing_dir_path.dentry)
+ dinode = d_inode(mi->mi_backing_dir_path.dentry);
+
+ if (dinode) {
+ if (mi->mi_index_dir && mi->mi_index_free)
+ vfs_rmdir(&nop_mnt_idmap, dinode,
+ mi->mi_index_dir, NULL);
+
+ if (mi->mi_incomplete_dir && mi->mi_incomplete_free)
+ vfs_rmdir(&nop_mnt_idmap, dinode,
+ mi->mi_incomplete_dir, NULL);
+ }
+
+ incfs_free_mount_info(mi);
+ sb->s_fs_info = NULL;
+ }
+}
+
+static int incfs_show_options(struct seq_file *m, struct dentry *root)
+{
+ struct mount_info *mi = get_mount_info(root->d_sb);
+
+ seq_printf(m, ",read_timeout_ms=%u", mi->mi_options.read_timeout_ms);
+ seq_printf(m, ",readahead=%u", mi->mi_options.readahead_pages);
+ if (mi->mi_options.read_log_pages != 0) {
+ seq_printf(m, ",rlog_pages=%u", mi->mi_options.read_log_pages);
+ seq_printf(m, ",rlog_wakeup_cnt=%u",
+ mi->mi_options.read_log_wakeup_count);
+ }
+ if (mi->mi_options.report_uid)
+ seq_puts(m, ",report_uid");
+
+ if (mi->mi_sysfs_node)
+ seq_printf(m, ",sysfs_name=%s",
+ kobject_name(&mi->mi_sysfs_node->isn_sysfs_node));
+ return 0;
+}
diff --git a/fs/incfs/vfs.h b/fs/incfs/vfs.h
new file mode 100644
index 0000000..06c4171
--- /dev/null
+++ b/fs/incfs/vfs.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2018 Google LLC
+ */
+
+#ifndef _INCFS_VFS_H
+#define _INCFS_VFS_H
+
+extern const struct file_operations incfs_file_ops;
+extern const struct inode_operations incfs_file_inode_ops;
+
+void incfs_kill_sb(struct super_block *sb);
+int incfs_init_fs_context(struct fs_context *fc);
+
+extern const struct fs_parameter_spec incfs_param_specs[];
+
+int incfs_link(struct dentry *what, struct dentry *where);
+int incfs_unlink(struct dentry *dentry);
+
+static inline struct mount_info *get_mount_info(struct super_block *sb)
+{
+ struct mount_info *result = sb->s_fs_info;
+
+ WARN_ON(!result);
+ return result;
+}
+
+static inline struct super_block *file_superblock(struct file *f)
+{
+ struct inode *inode = file_inode(f);
+
+ return inode->i_sb;
+}
+
+#endif
diff --git a/fs/kernel_read_file.c b/fs/kernel_read_file.c
index de32c95..d839216 100644
--- a/fs/kernel_read_file.c
+++ b/fs/kernel_read_file.c
@@ -183,3 +183,13 @@ ssize_t kernel_read_file_from_fd(int fd, loff_t offset, void **buf,
return kernel_read_file(fd_file(f), offset, buf, buf_size, file_size, id);
}
EXPORT_SYMBOL_GPL(kernel_read_file_from_fd);
+
+ssize_t read_comp_algo_dictionary(void **dict, const char *dict_path)
+{
+ return kernel_read_file_from_path(dict_path, 0,
+ dict,
+ INT_MAX,
+ NULL,
+ READING_POLICY);
+}
+EXPORT_SYMBOL_GPL(read_comp_algo_dictionary);
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 5e1845f..caed143 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -732,6 +732,8 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
struct fsnotify_group *group;
struct inode *inode;
struct path path;
+ struct path alteredpath;
+ struct path *canonical_path = &path;
int ret;
unsigned flags = 0;
@@ -773,13 +775,23 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,
if (ret)
return ret;
+ /* support stacked filesystems */
+ if (path.dentry && path.dentry->d_op) {
+ if (path.dentry->d_op->d_canonical_path) {
+ path.dentry->d_op->d_canonical_path(&path,
+ &alteredpath);
+ canonical_path = &alteredpath;
+ path_put(&path);
+ }
+ }
+
/* inode held in place by reference to path; group by fget on fd */
- inode = path.dentry->d_inode;
+ inode = canonical_path->dentry->d_inode;
group = fd_file(f)->private_data;
/* create/update an inode mark */
ret = inotify_update_watch(group, inode, mask);
- path_put(&path);
+ path_put(canonical_path);
return ret;
}
diff --git a/fs/open.c b/fs/open.c
index 91f1139..c40c504 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -35,6 +35,7 @@
#include <linux/filelock.h>
#include "internal.h"
+#include <trace/hooks/syscall_check.h>
int do_truncate(struct mnt_idmap *idmap, struct dentry *dentry,
loff_t length, unsigned int time_attrs, struct file *filp)
@@ -922,6 +923,7 @@ static int do_dentry_open(struct file *f,
error = -ENODEV;
goto cleanup_all;
}
+ trace_android_vh_check_file_open(f);
error = security_file_open(f);
if (unlikely(error))
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 4c863d1..72ec55b 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -97,6 +97,7 @@
#include <linux/resctrl.h>
#include <linux/cn_proc.h>
#include <linux/ksm.h>
+#include <linux/cpufreq_times.h>
#include <uapi/linux/lsm.h>
#include <trace/events/oom.h>
#include "internal.h"
@@ -3408,6 +3409,9 @@ static const struct pid_entry tgid_base_stuff[] = {
#ifdef CONFIG_LIVEPATCH
ONE("patch_state", S_IRUSR, proc_pid_patch_state),
#endif
+#ifdef CONFIG_CPU_FREQ_TIMES
+ ONE("time_in_state", 0444, proc_time_in_state_show),
+#endif
#ifdef CONFIG_KSTACK_ERASE_METRICS
ONE("stack_depth", S_IRUGO, proc_stack_depth),
#endif
@@ -3755,6 +3759,9 @@ static const struct pid_entry tid_base_stuff[] = {
ONE("ksm_merging_pages", S_IRUSR, proc_pid_ksm_merging_pages),
ONE("ksm_stat", S_IRUSR, proc_pid_ksm_stat),
#endif
+#ifdef CONFIG_CPU_FREQ_TIMES
+ ONE("time_in_state", 0444, proc_time_in_state_show),
+#endif
};
static int proc_tid_base_readdir(struct file *file, struct dir_context *ctx)
diff --git a/fs/proc/page.c b/fs/proc/page.c
index f9b2c2c..e2b9422 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -216,6 +216,7 @@ u64 stable_page_flags(const struct page *page)
#endif
u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);
+ u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
u |= kpf_copy_bit(k, KPF_WRITEBACK, PG_writeback);
diff --git a/fs/select.c b/fs/select.c
index e0244db..70fb876 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -11,7 +11,7 @@
* parameter to reflect time remaining.
*
* 24 January 2000
- * Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
+ * Changed sys_poll()/do_poll() to use PAGE_SIZE chunk-based allocation
* of fds to overcome nfds < 16390 descriptors limit (Tigran Aivazian).
*/
@@ -644,7 +644,7 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
/*
* We need 6 bitmaps (in/out/ex for both incoming and outgoing),
* since we used fdset we need to allocate memory in units of
- * long-words.
+ * long-words.
*/
size = FDS_BYTES(n);
bits = stack_fds;
diff --git a/gki/aarch64/afdo/README.md b/gki/aarch64/afdo/README.md
new file mode 100644
index 0000000..f162e42
--- /dev/null
+++ b/gki/aarch64/afdo/README.md
@@ -0,0 +1,81 @@
+# AutoFDO profiles for Android common kernels
+
+This directory contains AutoFDO profiles for Android common kernels. These profiles are used to
+optimize kernel builds, improving performance for specific architectures and kernel versions.
+
+## Profile Availability
+
+The AutoFDO profile (kernel.afdo) for vmlinux is updated regularly for the following kernel
+branches:
+
+* [android15-6.6](https://android.googlesource.com/kernel/common/+/refs/heads/android15-6.6/android/gki/aarch64/afdo)
+* [android16-6.12](https://android.googlesource.com/kernel/common/+/refs/heads/android16-6.12/gki/aarch64/afdo/)
+
+
+## Performance improvements
+
+When applying these AutoFDO profiles to the android15-6.6 and android16-6.12 kernels, we observed
+the following performance improvements during testing on a Pixel 6 device.
+
+
+| Benchmark | Improvement |
+| --------------------- | ----------- |
+| Boot time | 2-3% |
+| Cold App launch time | 3-4% |
+| Binder-rpc | 8-9% |
+| Binder-addints | 12-25% |
+| Hwbinder | 12-18% |
+| Bionic (syscall_mmap) | 6% |
+
+
+## Steps to reproduce the profile
+
+A kernel profile is generated by running app crawling and app launching for top 100 apps from Google
+Play Store. While running, we collect ETM data for the kernel, which records executed instruction
+stream. Finally, we merge and convert ETM data to one AutoFDO profile.
+
+### 1. Build a kernel image and flash it on an Android device
+ * The source code and test device used to generate each profile are described above.
+ * We use a Pixel device. But using other real devices should get a similar profile.
+
+### 2. Run app crawling and app launching for top 100 apps
+ * Add a gmail account on the test device. Because app crawler can use the account to automatically
+ login some of the apps.
+ * We run [App Crawler](https://developer.android.com/studio/test/other-testing-tools/app-crawler)
+ for one app for 3 minutes, and run it twice.
+ * We run app launching for one app for 3 seconds, and run it 15 times. After each running, the
+ app is killed and cache is cleared. So we get profile for cold app startups.
+
+### 3. Generate the AutoFDO profile
+
+First, collect performance data using simpleperf with Coresight ETM or ARM ETE while running app
+crawling and app launching scenarios. Following is an example recording kernel activity for 180
+seconds. For a complete guide, refer to [Record ETM data for the kernel](https://android.googlesource.com/platform/system/extras/+/refs/heads/android16-release/simpleperf/doc/collect_etm_data_for_autofdo.md#A-complete-example_kernel).
+
+```sh
+(device) / $ simpleperf record -e cs-etm:k -a --duration 180 -z -o perf.data
+```
+
+Next, convert the ETM data to an AutoFDO profile using `simpleperf inject` and `create_llvm_prof`
+on the host machine. For a complete guide, refer to [Convert ETM data to AutoFDO Profile on Host](https://android.googlesource.com/platform/system/extras/+/refs/heads/android16-release/simpleperf/doc/collect_etm_data_for_autofdo.md#A-complete-example_kernel).
+
+Note that `create_llvm_prof` recently enabled the symbol list by default, which can cause Clang
+to de-optimize any kernel function not listed in the profile. This is usually not we want in the
+kernel, unless the profile cover all critical paths. So we recommend adding `--prof_sym_list=false`
+to avoid de-optimization. This also prevents the following build error from section mismatch.
+
+```
+WARNING: modpost: vmlinux: section mismatch in reference: list_add+0x0 (section: .text.hot.list_add) -> dir_list (section: .init.data)
+```
+
+We use the following command to perform the conversion.
+
+```sh
+# Convert the AutoFDO profile to the LLVM profile format:
+# The --prof_sym_list=false flag is important for kernel profiles. Without it, clang
+# assumes any function not listed in the profile is cold. This can lead to unwanted
+# deoptimizations, even when -fprofile-sample-accurate is not enabled.
+(host) $ create_llvm_prof --profiler=text --binary={vmlinux_dir}/vmlinux \
+ --profile=kernel.autofdo --format=extbinary --use_fs_discriminator \
+ --out=kernel.afdo --prof_sym_list=false
+```
diff --git a/include/OWNERS b/include/OWNERS
new file mode 100644
index 0000000..3875be2
--- /dev/null
+++ b/include/OWNERS
@@ -0,0 +1,2 @@
+per-file crypto/**=file:/crypto/OWNERS
+per-file net/**=file:/net/OWNERS
diff --git a/include/asm-generic/TEST_MAPPING b/include/asm-generic/TEST_MAPPING
new file mode 100644
index 0000000..22f1de1
--- /dev/null
+++ b/include/asm-generic/TEST_MAPPING
@@ -0,0 +1,337 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "CtsJobSchedulerTestCases",
+ "options": [
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testConnectivityConstraintExecutes_withMobile"
+ }
+ ]
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.IncomingCallTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/include/crypto/TEST_MAPPING b/include/crypto/TEST_MAPPING
new file mode 100644
index 0000000..047b1ca
--- /dev/null
+++ b/include/crypto/TEST_MAPPING
@@ -0,0 +1,329 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.NullBindingCallScreeningServiceTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/include/drm/TEST_MAPPING b/include/drm/TEST_MAPPING
new file mode 100644
index 0000000..8d7ddcf
--- /dev/null
+++ b/include/drm/TEST_MAPPING
@@ -0,0 +1,320 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsWifiBroadcastsHostTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.PhoneAccountSuggestionServiceTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/include/kunit/run-in-irq-context.h b/include/kunit/run-in-irq-context.h
index c89b1b1..bfe60d6 100644
--- a/include/kunit/run-in-irq-context.h
+++ b/include/kunit/run-in-irq-context.h
@@ -12,16 +12,16 @@
#include <linux/hrtimer.h>
#include <linux/workqueue.h>
-#define KUNIT_IRQ_TEST_HRTIMER_INTERVAL us_to_ktime(5)
-
struct kunit_irq_test_state {
bool (*func)(void *test_specific_state);
void *test_specific_state;
bool task_func_reported_failure;
bool hardirq_func_reported_failure;
bool softirq_func_reported_failure;
+ atomic_t task_func_calls;
atomic_t hardirq_func_calls;
atomic_t softirq_func_calls;
+ ktime_t interval;
struct hrtimer timer;
struct work_struct bh_work;
};
@@ -30,14 +30,25 @@ static enum hrtimer_restart kunit_irq_test_timer_func(struct hrtimer *timer)
{
struct kunit_irq_test_state *state =
container_of(timer, typeof(*state), timer);
+ int task_calls, hardirq_calls, softirq_calls;
WARN_ON_ONCE(!in_hardirq());
- atomic_inc(&state->hardirq_func_calls);
+ task_calls = atomic_read(&state->task_func_calls);
+ hardirq_calls = atomic_inc_return(&state->hardirq_func_calls);
+ softirq_calls = atomic_read(&state->softirq_func_calls);
+
+ /*
+ * If the timer is firing too often for the softirq or task to ever have
+ * a chance to run, increase the timer interval. This is needed on very
+ * slow systems.
+ */
+ if (hardirq_calls >= 20 && (softirq_calls == 0 || task_calls == 0))
+ state->interval = ktime_add_ns(state->interval, 250);
if (!state->func(state->test_specific_state))
state->hardirq_func_reported_failure = true;
- hrtimer_forward_now(&state->timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL);
+ hrtimer_forward_now(&state->timer, state->interval);
queue_work(system_bh_wq, &state->bh_work);
return HRTIMER_RESTART;
}
@@ -86,10 +97,14 @@ static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *),
struct kunit_irq_test_state state = {
.func = func,
.test_specific_state = test_specific_state,
+ /*
+ * Start with a 5us timer interval. If the system can't keep
+ * up, kunit_irq_test_timer_func() will increase it.
+ */
+ .interval = us_to_ktime(5),
};
unsigned long end_jiffies;
- int hardirq_calls, softirq_calls;
- bool allctx = false;
+ int task_calls, hardirq_calls, softirq_calls;
/*
* Set up a hrtimer (the way we access hardirq context) and a work
@@ -104,21 +119,18 @@ static inline void kunit_run_irq_test(struct kunit *test, bool (*func)(void *),
* and hardirq), or 1 second, whichever comes first.
*/
end_jiffies = jiffies + HZ;
- hrtimer_start(&state.timer, KUNIT_IRQ_TEST_HRTIMER_INTERVAL,
- HRTIMER_MODE_REL_HARD);
- for (int task_calls = 0, calls = 0;
- ((calls < max_iterations) || !allctx) &&
- !time_after(jiffies, end_jiffies);
- task_calls++) {
+ hrtimer_start(&state.timer, state.interval, HRTIMER_MODE_REL_HARD);
+ do {
if (!func(test_specific_state))
state.task_func_reported_failure = true;
+ task_calls = atomic_inc_return(&state.task_func_calls);
hardirq_calls = atomic_read(&state.hardirq_func_calls);
softirq_calls = atomic_read(&state.softirq_func_calls);
- calls = task_calls + hardirq_calls + softirq_calls;
- allctx = (task_calls > 0) && (hardirq_calls > 0) &&
- (softirq_calls > 0);
- }
+ } while ((task_calls + hardirq_calls + softirq_calls < max_iterations ||
+ (task_calls == 0 || hardirq_calls == 0 ||
+ softirq_calls == 0)) &&
+ !time_after(jiffies, end_jiffies));
/* Cancel the timer and work. */
hrtimer_cancel(&state.timer);
diff --git a/include/linux/OWNERS b/include/linux/OWNERS
new file mode 100644
index 0000000..68b6ded
--- /dev/null
+++ b/include/linux/OWNERS
@@ -0,0 +1,4 @@
+per-file bio.h=file:/block/OWNERS
+per-file blk*.h=file:/block/OWNERS
+per-file f2fs**=file:/fs/f2fs/OWNERS
+per-file net**=file:/net/OWNERS
diff --git a/include/linux/TEST_MAPPING b/include/linux/TEST_MAPPING
new file mode 100644
index 0000000..47d239a
--- /dev/null
+++ b/include/linux/TEST_MAPPING
@@ -0,0 +1,357 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsWifiBroadcastsHostTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "CtsJobSchedulerTestCases",
+ "options": [
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testCellularConstraintExecutedAndStopped"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testConnectivityConstraintExecutes_transitionNetworks"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testConnectivityConstraintExecutes_withMobile"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testEJMeteredConstraintFails_withMobile_DataSaverOn"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testMeteredConstraintFails_withMobile_DataSaverOn"
+ }
+ ]
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.ExtendedInCallServiceTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/include/linux/android_kabi.h b/include/linux/android_kabi.h
new file mode 100644
index 0000000..caa0569
--- /dev/null
+++ b/include/linux/android_kabi.h
@@ -0,0 +1,209 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * android_kabi.h - Android kernel abi abstraction header
+ *
+ * Copyright (C) 2020-2025 Google, Inc.
+ *
+ * Heavily influenced by rh_kabi.h which came from the RHEL/CENTOS kernel and
+ * was:
+ * Copyright (c) 2014 Don Zickus
+ * Copyright (c) 2015-2018 Jiri Benc
+ * Copyright (c) 2015 Sabrina Dubroca, Hannes Frederic Sowa
+ * Copyright (c) 2016-2018 Prarit Bhargava
+ * Copyright (c) 2017 Paolo Abeni, Larry Woodman
+ *
+ * These macros are to be used to try to help alleviate future kernel abi
+ * changes that will occur as LTS and other kernel patches are merged into the
+ * tree during a period in which the kernel abi is wishing to not be disturbed.
+ *
+ * There are two times these macros should be used:
+ * - Before the kernel abi is "frozen"
+ * Padding can be added to various kernel structures that have in the past
+ * been known to change over time. That will give "room" in the structure
+ * that can then be used when fields are added so that the structure size
+ * will not change.
+ *
+ * - After the kernel abi is "frozen"
+ * If a structure's field is changed to a type that is identical in size to
+ * the previous type, it can be changed with a union macro
+ * If a field is added to a structure, the padding fields can be used to add
+ * the new field in a "safe" way.
+ */
+
+#ifndef _ANDROID_KABI_H
+#define _ANDROID_KABI_H
+
+#ifdef CONFIG_ANDROID_KABI_RESERVE
+
+#include <linux/args.h>
+#include <linux/compiler.h>
+#include <linux/compiler_attributes.h>
+#include <linux/stringify.h>
+
+/*
+ * Worker macros, don't use these, use the ones without a leading '_'
+ */
+
+#if defined(BUILD_VDSO) || defined(__DISABLE_EXPORTS)
+#define __ANDROID_KABI_RULE(hint, target, value)
+#else
+#define __ANDROID_KABI_RULE(hint, target, value) \
+ static const char CONCATENATE(__gendwarfksyms_rule_, \
+ __COUNTER__)[] __used __aligned(1) \
+ __section(".discard.gendwarfksyms.kabi_rules") = \
+ "1\0" #hint "\0" target "\0" value
+#endif
+
+#define _ANDROID_KABI_RULE(hint, target, value) \
+ __ANDROID_KABI_RULE(hint, #target, #value)
+
+#define _ANDROID_KABI_NORMAL_SIZE_ALIGN(_orig, _new) \
+ union { \
+ _Static_assert( \
+ sizeof(struct { _new; }) <= \
+ sizeof(struct { _orig; }), \
+ FILE_LINE ": " __stringify(_new) \
+ " is larger than " __stringify(_orig)); \
+ _Static_assert( \
+ __alignof__(struct { _new; }) <= \
+ __alignof__(struct { _orig; }), \
+ FILE_LINE ": " __stringify(_orig) \
+ " is not aligned the same as " \
+ __stringify(_new)); \
+ }
+
+#define _ANDROID_KABI_REPLACE(_orig, _new) \
+ union { \
+ _new; \
+ struct { \
+ _orig; \
+ }; \
+ _ANDROID_KABI_NORMAL_SIZE_ALIGN(_orig, _new); \
+ }
+
+
+/*
+ * Macros to use _before_ the ABI is frozen
+ */
+
+/*
+ * ANDROID_KABI_RESERVE
+ * Reserve some "padding" in a structure for use by LTS backports.
+ * This normally placed at the end of a structure.
+ * number: the "number" of the padding variable in the structure. Start with
+ * 1 and go up.
+ *
+ *
+ * ANDROID_BACKPORT_RESERVE
+ * Similar to ANDROID_KABI_RESERVE, but this is for planned feature backports
+ * (not for LTS).
+ */
+#define ANDROID_KABI_RESERVE(number) u64 __kabi_reserved##number
+#define ANDROID_BACKPORT_RESERVE(number) u64 __kabi_reserved_backport##number
+
+/*
+ * Macros to use _after_ the ABI is frozen
+ */
+
+/*
+ * ANDROID_KABI_DECLONLY(fqn)
+ * Treat the struct/union/enum fqn as a declaration, i.e. even if
+ * a definition is available, don't expand the contents.
+ */
+#define ANDROID_KABI_DECLONLY(fqn) _ANDROID_KABI_RULE(declonly, fqn, /**/)
+
+/*
+ * ANDROID_KABI_ENUMERATOR_IGNORE(fqn, field)
+ * When expanding enum fqn, skip the provided field. This makes it
+ * possible to hide added enum fields from versioning.
+ */
+#define ANDROID_KABI_ENUMERATOR_IGNORE(fqn, field) \
+ _ANDROID_KABI_RULE(enumerator_ignore, fqn field, /**/)
+
+/*
+ * ANDROID_KABI_ENUMERATOR_VALUE(fqn, field, value)
+ * When expanding enum fqn, use the provided value for the
+ * specified field. This makes it possible to override enumerator
+ * values when calculating versions.
+ */
+#define ANDROID_KABI_ENUMERATOR_VALUE(fqn, field, value) \
+ _ANDROID_KABI_RULE(enumerator_value, fqn field, value)
+
+/*
+ * ANDROID_KABI_BYTE_SIZE(fqn, value)
+ * Set the byte_size attribute for the struct/union/enum fqn to
+ * value bytes.
+ */
+#define ANDROID_KABI_BYTE_SIZE(fqn, value) \
+ _ANDROID_KABI_RULE(byte_size, fqn, value)
+
+/*
+ * ANDROID_KABI_TYPE_STRING(type, str)
+ * For the given type, override the type string used in symtypes
+ * output and version calculation with str.
+ */
+#define ANDROID_KABI_TYPE_STRING(type, str) \
+ __ANDROID_KABI_RULE(type_string, type, str)
+
+/*
+ * ANDROID_KABI_IGNORE
+ * Add a new field that's ignored in versioning.
+ */
+#define ANDROID_KABI_IGNORE(n, _new) \
+ union { \
+ _new; \
+ unsigned char __kabi_ignored##n; \
+ }
+
+/*
+ * ANDROID_KABI_REPLACE
+ * Replace a field with a compatible new field.
+ */
+#define ANDROID_KABI_REPLACE(_oldtype, _oldname, _new) \
+ _ANDROID_KABI_REPLACE(_oldtype __kabi_renamed##_oldname, struct { _new; })
+
+/*
+ * ANDROID_KABI_USE(number, _new)
+ * Use a previous padding entry that was defined with ANDROID_KABI_RESERVE
+ * number: the previous "number" of the padding variable
+ * _new: the variable to use now instead of the padding variable
+ */
+#define ANDROID_KABI_USE(number, _new) \
+ _ANDROID_KABI_REPLACE(ANDROID_KABI_RESERVE(number), _new)
+
+/*
+ * ANDROID_KABI_USE2(number, _new1, _new2)
+ * Use a previous padding entry that was defined with ANDROID_KABI_RESERVE for
+ * two new variables that fit into 64 bits. This is good for when you do not
+ * want to "burn" a 64bit padding variable for a smaller variable size if not
+ * needed.
+ */
+#define ANDROID_KABI_USE2(number, _new1, _new2) \
+ _ANDROID_KABI_REPLACE(ANDROID_KABI_RESERVE(number), struct{ _new1; _new2; })
+
+/*
+ * ANDROID_BACKPORT_USE(number, _new)
+ * Use a previous padding entry that was defined with ANDROID_BACKPORT_RESERVE
+ * number: the previous "number" of the padding variable
+ * _new: the variable to use now instead of the padding variable
+ */
+#define ANDROID_BACKPORT_USE(number, _new) \
+ _ANDROID_KABI_REPLACE(ANDROID_BACKPORT_RESERVE(number), _new)
+
+#else /* CONFIG_ANDROID_KABI_RESERVE */
+
+#define ANDROID_KABI_RESERVE(number)
+#define ANDROID_BACKPORT_RESERVE(number)
+#define ANDROID_KABI_DECLONLY(fqn)
+#define ANDROID_KABI_ENUMERATOR_IGNORE(fqn, field)
+#define ANDROID_KABI_ENUMERATOR_VALUE(fqn, field, value)
+#define ANDROID_KABI_BYTE_SIZE(fqn, value)
+#define ANDROID_KABI_TYPE_STRING(type, str)
+#define ANDROID_KABI_IGNORE(n, _new) _new
+#define ANDROID_KABI_REPLACE(_oldtype, _oldname, _new) _new
+#define ANDROID_KABI_USE(number, _new) _new
+#define ANDROID_KABI_USE2(number, _new1, _new2) _new1; _new2
+
+#endif /* CONFIG_ANDROID_KABI_RESERVE */
+
+#endif /* _ANDROID_KABI_H */
diff --git a/include/linux/android_vendor.h b/include/linux/android_vendor.h
new file mode 100644
index 0000000..af3014c
--- /dev/null
+++ b/include/linux/android_vendor.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * android_vendor.h - Android vendor data
+ *
+ * Copyright 2020 Google LLC
+ *
+ * These macros are to be used to reserve space in kernel data structures
+ * for use by vendor modules.
+ *
+ * These macros should be used before the kernel abi is "frozen".
+ * Fields can be added to various kernel structures that need space
+ * for functionality implemented in vendor modules. The use of
+ * these fields is vendor specific.
+ */
+#ifndef _ANDROID_VENDOR_H
+#define _ANDROID_VENDOR_H
+
+/*
+ * ANDROID_VENDOR_DATA
+ * Reserve some "padding" in a structure for potential future use.
+ * This normally placed at the end of a structure.
+ * number: the "number" of the padding variable in the structure. Start with
+ * 1 and go up.
+ *
+ * ANDROID_VENDOR_DATA_ARRAY
+ * Same as ANDROID_VENDOR_DATA but allocates an array of u64 with
+ * the specified size
+ */
+#ifdef CONFIG_ANDROID_VENDOR_OEM_DATA
+#define ANDROID_VENDOR_DATA(n) u64 android_vendor_data##n
+#define ANDROID_VENDOR_DATA_ARRAY(n, s) u64 android_vendor_data##n[s]
+
+#define ANDROID_OEM_DATA(n) u64 android_oem_data##n
+#define ANDROID_OEM_DATA_ARRAY(n, s) u64 android_oem_data##n[s]
+
+#define android_init_vendor_data(p, n) \
+ memset(&p->android_vendor_data##n, 0, sizeof(p->android_vendor_data##n))
+#define android_init_oem_data(p, n) \
+ memset(&p->android_oem_data##n, 0, sizeof(p->android_oem_data##n))
+#else
+#define ANDROID_VENDOR_DATA(n)
+#define ANDROID_VENDOR_DATA_ARRAY(n, s)
+#define ANDROID_OEM_DATA(n)
+#define ANDROID_OEM_DATA_ARRAY(n, s)
+
+#define android_init_vendor_data(p, n)
+#define android_init_oem_data(p, n)
+#endif
+
+#endif /* _ANDROID_VENDOR_H */
diff --git a/include/linux/blk-crypto.h b/include/linux/blk-crypto.h
index f7c3cb4..1b348c2 100644
--- a/include/linux/blk-crypto.h
+++ b/include/linux/blk-crypto.h
@@ -181,6 +181,9 @@ static inline struct bio_crypt_ctx *bio_crypt_ctx(struct bio *bio)
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
+static inline void bio_clone_skip_dm_default_key(struct bio *dst,
+ const struct bio *src);
+
bool __blk_crypto_submit_bio(struct bio *bio);
/**
@@ -218,9 +221,42 @@ int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask);
static inline int bio_crypt_clone(struct bio *dst, struct bio *src,
gfp_t gfp_mask)
{
+ bio_clone_skip_dm_default_key(dst, src);
if (bio_has_crypt_ctx(src))
return __bio_crypt_clone(dst, src, gfp_mask);
return 0;
}
+#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
+static inline void bio_set_skip_dm_default_key(struct bio *bio)
+{
+ bio->bi_skip_dm_default_key = true;
+}
+
+static inline bool bio_should_skip_dm_default_key(const struct bio *bio)
+{
+ return bio->bi_skip_dm_default_key;
+}
+
+static inline void bio_clone_skip_dm_default_key(struct bio *dst,
+ const struct bio *src)
+{
+ dst->bi_skip_dm_default_key = src->bi_skip_dm_default_key;
+}
+#else /* CONFIG_DM_DEFAULT_KEY */
+static inline void bio_set_skip_dm_default_key(struct bio *bio)
+{
+}
+
+static inline bool bio_should_skip_dm_default_key(const struct bio *bio)
+{
+ return false;
+}
+
+static inline void bio_clone_skip_dm_default_key(struct bio *dst,
+ const struct bio *src)
+{
+}
+#endif /* !CONFIG_DM_DEFAULT_KEY */
+
#endif /* __LINUX_BLK_CRYPTO_H */
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 8808ee7..3e6fc62 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -261,6 +261,9 @@ struct bio {
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
struct bio_crypt_ctx *bi_crypt_context;
+#if IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
+ bool bi_skip_dm_default_key;
+#endif
#endif
#if defined(CONFIG_BLK_DEV_INTEGRITY)
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 630705a..143d47e 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -32,6 +32,7 @@
#define CLK_OPS_PARENT_ENABLE BIT(12)
/* duty cycle call may be forwarded to the parent clock */
#define CLK_DUTY_CYCLE_PARENT BIT(13)
+#define CLK_DONT_HOLD_STATE BIT(14) /* Don't hold state */
struct clk;
struct clk_hw;
@@ -219,6 +220,13 @@ struct clk_duty {
* directory is provided as an argument. Called with
* prepare_lock held. Returns 0 on success, -EERROR otherwise.
*
+ * @pre_rate_change: Optional callback for a clock to fulfill its rate
+ * change requirements before any rate change has occurred in
+ * its clock tree. Returns 0 on success, -EERROR otherwise.
+ *
+ * @post_rate_change: Optional callback for a clock to clean up any
+ * requirements that were needed while the clock and its tree
+ * was changing states. Returns 0 on success, -EERROR otherwise.
*
* The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow
* implementations to split any work between atomic (enable) and sleepable
@@ -266,6 +274,12 @@ struct clk_ops {
int (*init)(struct clk_hw *hw);
void (*terminate)(struct clk_hw *hw);
void (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
+ int (*pre_rate_change)(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long new_rate);
+ int (*post_rate_change)(struct clk_hw *hw,
+ unsigned long old_rate,
+ unsigned long rate);
};
/**
@@ -1356,6 +1370,7 @@ int __must_check of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
void clk_unregister(struct clk *clk);
void clk_hw_unregister(struct clk_hw *hw);
+void clk_sync_state(struct device *dev);
/* helper functions */
const char *__clk_get_name(const struct clk *clk);
diff --git a/include/linux/cpufreq_times.h b/include/linux/cpufreq_times.h
new file mode 100644
index 0000000..38272a5
--- /dev/null
+++ b/include/linux/cpufreq_times.h
@@ -0,0 +1,42 @@
+/* drivers/cpufreq/cpufreq_times.c
+ *
+ * Copyright (C) 2018 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_CPUFREQ_TIMES_H
+#define _LINUX_CPUFREQ_TIMES_H
+
+#include <linux/cpufreq.h>
+#include <linux/pid.h>
+
+#ifdef CONFIG_CPU_FREQ_TIMES
+void cpufreq_task_times_init(struct task_struct *p);
+void cpufreq_task_times_alloc(struct task_struct *p);
+void cpufreq_task_times_exit(struct task_struct *p);
+int proc_time_in_state_show(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *p);
+void cpufreq_acct_update_power(struct task_struct *p, u64 cputime);
+void cpufreq_times_create_policy(struct cpufreq_policy *policy);
+void cpufreq_times_record_transition(struct cpufreq_policy *policy,
+ unsigned int new_freq);
+#else
+static inline void cpufreq_task_times_init(struct task_struct *p) {}
+static inline void cpufreq_task_times_alloc(struct task_struct *p) {}
+static inline void cpufreq_task_times_exit(struct task_struct *p) {}
+static inline void cpufreq_acct_update_power(struct task_struct *p,
+ u64 cputime) {}
+static inline void cpufreq_times_create_policy(struct cpufreq_policy *policy) {}
+static inline void cpufreq_times_record_transition(
+ struct cpufreq_policy *policy, unsigned int new_freq) {}
+#endif /* CONFIG_CPU_FREQ_TIMES */
+#endif /* _LINUX_CPUFREQ_TIMES_H */
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 898c60d..22e3c518 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -166,6 +166,7 @@ struct dentry_operations {
struct dentry *(*d_real)(struct dentry *, enum d_real_type type);
bool (*d_unalias_trylock)(const struct dentry *);
void (*d_unalias_unlock)(const struct dentry *);
+ void (*d_canonical_path)(const struct path *, struct path *);
} ____cacheline_aligned;
/*
@@ -280,7 +281,7 @@ extern int path_has_submounts(const struct path *);
* This adds the entry to the hash queues.
*/
extern void d_rehash(struct dentry *);
-
+
extern void d_add(struct dentry *, struct inode *);
/* used for rename() and baskets */
diff --git a/include/linux/device/TEST_MAPPING b/include/linux/device/TEST_MAPPING
new file mode 100644
index 0000000..aada857
--- /dev/null
+++ b/include/linux/device/TEST_MAPPING
@@ -0,0 +1,245 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.PhoneAccountTest"
+ }
+ ]
+ }
+ ]
+}
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
index 99c3c83..63de5f0 100644
--- a/include/linux/device/bus.h
+++ b/include/linux/device/bus.h
@@ -35,6 +35,8 @@ struct fwnode_handle;
* otherwise. It may also return error code if determining that
* the driver supports the device is not possible. In case of
* -EPROBE_DEFER it will queue the device for deferred probing.
+ * Note: This callback may be invoked with or without the device
+ * lock held.
* @uevent: Called when a device is added, removed, or a few other things
* that generate uevents to add the environment variables.
* @probe: Called when a new device or driver add to this bus, and callback
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 133b9e6..9edee77 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -222,6 +222,41 @@ struct dma_buf_ops {
int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
/**
+ * @begin_cpu_access_partial:
+ *
+ * This is called from dma_buf_begin_cpu_access_partial() and allows the
+ * exporter to ensure that the memory specified in the range is
+ * available for cpu access - the exporter might need to allocate or
+ * swap-in and pin the backing storage.
+ * The exporter also needs to ensure that cpu access is
+ * coherent for the access direction. The direction can be used by the
+ * exporter to optimize the cache flushing, i.e. access with a different
+ * direction (read instead of write) might return stale or even bogus
+ * data (e.g. when the exporter needs to copy the data to temporary
+ * storage).
+ *
+ * This callback is optional.
+ *
+ * FIXME: This is both called through the DMA_BUF_IOCTL_SYNC command
+ * from userspace (where storage shouldn't be pinned to avoid handing
+ * de-factor mlock rights to userspace) and for the kernel-internal
+ * users of the various kmap interfaces, where the backing storage must
+ * be pinned to guarantee that the atomic kmap calls can succeed. Since
+ * there's no in-kernel users of the kmap interfaces yet this isn't a
+ * real problem.
+ *
+ * Returns:
+ *
+ * 0 on success or a negative error code on failure. This can for
+ * example fail when the backing storage can't be allocated. Can also
+ * return -ERESTARTSYS or -EINTR when the call has been interrupted and
+ * needs to be restarted.
+ */
+ int (*begin_cpu_access_partial)(struct dma_buf *dmabuf,
+ enum dma_data_direction,
+ unsigned int offset, unsigned int len);
+
+ /**
* @end_cpu_access:
*
* This is called from dma_buf_end_cpu_access() when the importer is
@@ -239,6 +274,28 @@ struct dma_buf_ops {
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
/**
+ * @end_cpu_access_partial:
+ *
+ * This is called from dma_buf_end_cpu_access_partial() when the
+ * importer is done accessing the CPU. The exporter can use to limit
+ * cache flushing to only the range specefied and to unpin any
+ * resources pinned in @begin_cpu_access_umapped.
+ * The result of any dma_buf kmap calls after end_cpu_access_partial is
+ * undefined.
+ *
+ * This callback is optional.
+ *
+ * Returns:
+ *
+ * 0 on success or a negative error code on failure. Can return
+ * -ERESTARTSYS or -EINTR when the call has been interrupted and needs
+ * to be restarted.
+ */
+ int (*end_cpu_access_partial)(struct dma_buf *dmabuf,
+ enum dma_data_direction,
+ unsigned int offset, unsigned int len);
+
+ /**
* @mmap:
*
* This callback is used by the dma_buf_mmap() function
@@ -277,6 +334,20 @@ struct dma_buf_ops {
int (*vmap)(struct dma_buf *dmabuf, struct iosys_map *map);
void (*vunmap)(struct dma_buf *dmabuf, struct iosys_map *map);
+
+ /**
+ * @get_flags:
+ *
+ * This is called by dma_buf_get_flags and is used to get the buffer's
+ * flags.
+ * This callback is optional.
+ *
+ * Returns:
+ *
+ * 0 on success or a negative error code on failure. On success flags
+ * will be populated with the buffer's flags.
+ */
+ int (*get_flags)(struct dma_buf *dmabuf, unsigned long *flags);
};
/**
@@ -476,6 +547,8 @@ struct dma_buf_attach_ops {
* @importer_ops: importer operations for this attachment, if provided
* dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
* @importer_priv: importer specific attachment data.
+ * @dma_map_attrs: DMA attributes to be used when the exporter maps the buffer
+ * through dma_buf_map_attachment.
*
* This structure holds the attachment information between the dma_buf buffer
* and its user device(s). The list contains one attachment struct per device
@@ -494,6 +567,7 @@ struct dma_buf_attachment {
const struct dma_buf_attach_ops *importer_ops;
void *importer_priv;
void *priv;
+ unsigned long dma_map_attrs;
};
/**
@@ -581,8 +655,14 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
void dma_buf_move_notify(struct dma_buf *dma_buf);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
+int dma_buf_begin_cpu_access_partial(struct dma_buf *dma_buf,
+ enum dma_data_direction dir,
+ unsigned int offset, unsigned int len);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
+int dma_buf_end_cpu_access_partial(struct dma_buf *dma_buf,
+ enum dma_data_direction dir,
+ unsigned int offset, unsigned int len);
struct sg_table *
dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
enum dma_data_direction direction);
@@ -596,6 +676,7 @@ int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map);
void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map);
+int dma_buf_get_flags(struct dma_buf *dmabuf, unsigned long *flags);
struct dma_buf *dma_buf_iter_begin(void);
struct dma_buf *dma_buf_iter_next(struct dma_buf *dmbuf);
#endif /* __DMA_BUF_H__ */
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
index 648328a..b5fcc20 100644
--- a/include/linux/dma-heap.h
+++ b/include/linux/dma-heap.h
@@ -42,10 +42,72 @@ struct dma_heap_export_info {
void *dma_heap_get_drvdata(struct dma_heap *heap);
+/**
+ * dma_heap_get_dev() - get device struct for the heap
+ * @heap: DMA-Heap to retrieve device struct from
+ *
+ * Returns:
+ * The device struct for the heap.
+ */
+struct device *dma_heap_get_dev(struct dma_heap *heap);
+
+/**
+ * dma_heap_get_name() - get heap name
+ * @heap: DMA-Heap to retrieve private data for
+ *
+ * Returns:
+ * The char* for the heap name.
+ */
const char *dma_heap_get_name(struct dma_heap *heap);
struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
+/**
+ * dma_heap_put - drops a reference to a dmabuf heaps, potentially freeing it
+ * @heap: heap pointer
+ */
+void dma_heap_put(struct dma_heap *heap);
+
+/**
+ * dma_heap_find - Returns the registered dma_heap with the specified name
+ * @name: Name of the heap to find
+ *
+ * NOTE: dma_heaps returned from this function MUST be released
+ * using dma_heap_put() when the user is done.
+ */
+struct dma_heap *dma_heap_find(const char *name);
+
+/**
+ * dma_heap_buffer_alloc - Allocate dma-buf from a dma_heap
+ * @heap: dma_heap to allocate from
+ * @len: size to allocate
+ * @fd_flags: flags to set on returned dma-buf fd
+ * @heap_flags: flags to pass to the dma heap
+ *
+ * This is for internal dma-buf allocations only.
+ */
+struct dma_buf *dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
+ u32 fd_flags,
+ u64 heap_flags);
+
+/** dma_heap_buffer_free - Free dma_buf allocated by dma_heap_buffer_alloc
+ * @dma_buf: dma_buf to free
+ *
+ * This is really only a simple wrapper to dma_buf_put()
+ */
+void dma_heap_buffer_free(struct dma_buf *);
+
+/**
+ * dma_heap_bufferfd_alloc - Allocate dma-buf fd from a dma_heap
+ * @heap: dma_heap to allocate from
+ * @len: size to allocate
+ * @fd_flags: flags to set on returned dma-buf fd
+ * @heap_flags: flags to pass to the dma heap
+ */
+int dma_heap_bufferfd_alloc(struct dma_heap *heap, size_t len,
+ u32 fd_flags,
+ u64 heap_flags);
+
extern bool mem_accounting;
#endif /* _DMA_HEAPS_H */
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index 516aba5..0a1fc258 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -924,6 +924,20 @@ static inline u64 fscrypt_limit_io_blocks(const struct inode *inode, u64 lblk,
}
#endif /* !CONFIG_FS_ENCRYPTION_INLINE_CRYPT */
+#if IS_ENABLED(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_DM_DEFAULT_KEY)
+static inline bool
+fscrypt_inode_should_skip_dm_default_key(const struct inode *inode)
+{
+ return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
+}
+#else
+static inline bool
+fscrypt_inode_should_skip_dm_default_key(const struct inode *inode)
+{
+ return false;
+}
+#endif
+
/**
* fscrypt_inode_uses_inline_crypto() - test whether an inode uses inline
* encryption
diff --git a/include/linux/hid.h b/include/linux/hid.h
index dce862c..3664df9 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -512,9 +512,9 @@ struct hid_usage {
__s8 wheel_factor; /* 120/resolution_multiplier */
__u16 code; /* input driver code */
__u8 type; /* input driver type */
- __s16 hat_min; /* hat switch fun */
- __s16 hat_max; /* ditto */
- __s16 hat_dir; /* ditto */
+ __s8 hat_min; /* hat switch fun */
+ __s8 hat_max; /* ditto */
+ __s8 hat_dir; /* ditto */
__s16 wheel_accumulated; /* hi-res wheel */
};
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 6cd26ff..68b7e0f 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -529,6 +529,17 @@ DECLARE_STATIC_KEY_FALSE(force_irqthreads_key);
#define set_softirq_pending(x) (__this_cpu_write(local_softirq_pending_ref, (x)))
#define or_softirq_pending(x) (__this_cpu_or(local_softirq_pending_ref, (x)))
+/**
+ * __cpu_softirq_pending() - Checks to see if softirq is pending on a cpu
+ *
+ * This helper is inherently racy, as we're accessing per-cpu data w/o locks.
+ * But peeking at the flag can still be useful when deciding where to place a
+ * task.
+ */
+static inline u32 __cpu_softirq_pending(int cpu)
+{
+ return (u32)per_cpu(local_softirq_pending_ref, cpu);
+}
#endif /* local_softirq_pending */
/* Some architectures might implement lazy enabling/disabling of
@@ -577,6 +588,12 @@ enum
#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(TIMER_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ) |\
BIT(HRTIMER_SOFTIRQ) | BIT(RCU_SOFTIRQ))
+/* Softirq's where the handling might be long: */
+#define LONG_SOFTIRQ_MASK (BIT(NET_TX_SOFTIRQ) | \
+ BIT(NET_RX_SOFTIRQ) | \
+ BIT(BLOCK_SOFTIRQ) | \
+ BIT(IRQ_POLL_SOFTIRQ) | \
+ BIT(TASKLET_SOFTIRQ))
/* map softirq index to softirq name. update 'softirq_to_name' in
* kernel/softirq.c when adding a new softirq.
@@ -660,6 +677,10 @@ static inline unsigned int local_timers_pending(void)
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
+#ifdef CONFIG_RT_SOFTIRQ_AWARE_SCHED
+DECLARE_PER_CPU(u32, active_softirqs);
+#endif
+
static inline struct task_struct *this_cpu_ksoftirqd(void)
{
return this_cpu_read(ksoftirqd);
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 443053a..7b06d7a 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -54,6 +54,7 @@ struct ipv6_devconf {
__s32 accept_ra_rt_info_max_plen;
#endif
#endif
+ __s32 accept_ra_rt_table;
__s32 accept_source_route;
__s32 accept_ra_from_local;
#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
diff --git a/include/linux/kernel_read_file.h b/include/linux/kernel_read_file.h
index d613a7b..d90ae77 100644
--- a/include/linux/kernel_read_file.h
+++ b/include/linux/kernel_read_file.h
@@ -53,4 +53,6 @@ ssize_t kernel_read_file_from_fd(int fd, loff_t offset,
size_t *file_size,
enum kernel_read_file_id id);
+ssize_t read_comp_algo_dictionary(void **dict, const char *dict_path);
+
#endif /* _LINUX_KERNEL_READ_FILE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index 14f391b..1725e60 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -413,6 +413,7 @@ struct module {
struct module_attribute *modinfo_attrs;
const char *version;
const char *srcversion;
+ const char *scmversion;
struct kobject *holders_dir;
/* Exported symbols */
@@ -438,10 +439,12 @@ struct module {
const u32 *gpl_crcs;
bool using_gplonly_symbols;
-#ifdef CONFIG_MODULE_SIG
- /* Signature was verified. */
+ /*
+ * Signature was verified. Unconditionally compiled in Android to
+ * preserve ABI compatibility between kernels without module
+ * signing enabled and signed modules.
+ */
bool sig_ok;
-#endif
bool async_probe_requested;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d4e6e00..64226ef 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2239,9 +2239,9 @@ struct net_device {
atomic_t carrier_up_count;
atomic_t carrier_down_count;
-#ifdef CONFIG_WIRELESS_EXT
+ /* Android KMI hack to allow vendors to have their own wifi changes in modules */
const struct iw_handler_def *wireless_handlers;
-#endif
+ struct iw_public_data *wireless_data;
const struct ethtool_ops *ethtool_ops;
#ifdef CONFIG_NET_L3_MASTER_DEV
const struct l3mdev_ops *l3mdev_ops;
@@ -2318,9 +2318,8 @@ struct net_device {
#if IS_ENABLED(CONFIG_AX25)
struct ax25_dev __rcu *ax25_ptr;
#endif
-#if IS_ENABLED(CONFIG_CFG80211)
+ /* Android KMI hack to allow vendors to have their own wifi changes in modules */
struct wireless_dev *ieee80211_ptr;
-#endif
#if IS_ENABLED(CONFIG_IEEE802154) || IS_ENABLED(CONFIG_6LOWPAN)
struct wpan_dev *ieee802154_ptr;
#endif
diff --git a/include/linux/netfilter/xt_quota2.h b/include/linux/netfilter/xt_quota2.h
new file mode 100644
index 0000000..a391871
--- /dev/null
+++ b/include/linux/netfilter/xt_quota2.h
@@ -0,0 +1,26 @@
+#ifndef _XT_QUOTA_H
+#define _XT_QUOTA_H
+#include <linux/types.h>
+
+enum xt_quota_flags {
+ XT_QUOTA_INVERT = 1 << 0,
+ XT_QUOTA_GROW = 1 << 1,
+ XT_QUOTA_PACKET = 1 << 2,
+ XT_QUOTA_NO_CHANGE = 1 << 3,
+ XT_QUOTA_MASK = 0x0F,
+};
+
+struct xt_quota_counter;
+
+struct xt_quota_mtinfo2 {
+ char name[15];
+ u_int8_t flags;
+
+ /* Comparison-invariant */
+ aligned_u64 quota;
+
+ /* Used internally by the kernel */
+ struct xt_quota_counter *master __attribute__((aligned(8)));
+};
+
+#endif /* _XT_QUOTA_H */
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 7b02bc1..0ffb5c6 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -109,6 +109,8 @@ extern int unregister_oom_notifier(struct notifier_block *nb);
extern bool oom_killer_disable(signed long timeout);
extern void oom_killer_enable(void);
+extern void dump_tasks(struct oom_control *oc);
+
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
#endif /* _INCLUDE_LINUX_OOM_H */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f7a0e4a..f380f3f 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -66,6 +66,8 @@
* PG_referenced, PG_reclaim are used for page reclaim for anonymous and
* file-backed pagecache (see mm/vmscan.c).
*
+ * PG_error is set to indicate that an I/O error occurred on this page.
+ *
* PG_arch_1 is an architecture specific page state bit. The generic code
* guarantees that this bit is cleared for a page when it first is entered into
* the page cache.
@@ -101,6 +103,7 @@ enum pageflags {
PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_active,
PG_workingset,
+ PG_error,
PG_owner_priv_1, /* Owner use. If pagecache, fs may use */
PG_owner_2, /* Owner use. If pagecache, fs may use */
PG_arch_1,
@@ -189,7 +192,7 @@ enum pageflags {
*/
/* At least one page in this folio has the hwpoison flag set */
- PG_has_hwpoisoned = PG_active,
+ PG_has_hwpoisoned = PG_error,
PG_large_rmappable = PG_workingset, /* anon or file-backed */
PG_partially_mapped = PG_reclaim, /* was identified to be partially mapped */
};
@@ -550,6 +553,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE)
+PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE)
FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE)
__FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE)
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 3328357..8fdc090 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -3,11 +3,14 @@
#define __LINUX_PAGE_OWNER_H
#include <linux/jump_label.h>
+#include <linux/stackdepot.h>
#ifdef CONFIG_PAGE_OWNER
extern struct static_key_false page_owner_inited;
extern struct page_ext_operations page_owner_ops;
+extern depot_stack_handle_t get_page_owner_handle(struct page_ext *page_ext,
+ unsigned long pfn);
extern void __reset_page_owner(struct page *page, unsigned short order);
extern void __set_page_owner(struct page *page,
unsigned short order, gfp_t gfp_mask);
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 360ffdf2..f24e62d 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -52,6 +52,12 @@ enum power_supply_charge_type {
POWER_SUPPLY_CHARGE_TYPE_CUSTOM, /* use CHARGE_CONTROL_* props */
POWER_SUPPLY_CHARGE_TYPE_LONGLIFE, /* slow speed, longer life */
POWER_SUPPLY_CHARGE_TYPE_BYPASS, /* bypassing the charger */
+
+ /*
+ * force to 50 to minimize the chances of userspace binary
+ * incompatibility on newer upstream kernels
+ */
+ POWER_SUPPLY_CHARGE_TYPE_TAPER_EXT = 50, /* charging in CV phase */
};
enum {
@@ -811,6 +817,10 @@ static inline struct power_supply *power_supply_get_by_name(const char *name)
#endif
extern struct power_supply *power_supply_get_by_reference(struct fwnode_handle *fwnode,
const char *property);
+extern int power_supply_get_by_reference_array(struct fwnode_handle *fwnode,
+ const char *property,
+ struct power_supply **psy,
+ ssize_t size);
extern struct power_supply *devm_power_supply_get_by_reference(
struct device *dev, const char *property);
diff --git a/include/linux/profile.h b/include/linux/profile.h
index 3f53cdb..0ca3ee5 100644
--- a/include/linux/profile.h
+++ b/include/linux/profile.h
@@ -24,6 +24,11 @@ static inline int create_proc_profile(void)
}
#endif
+enum profile_type {
+ PROFILE_TASK_EXIT,
+ PROFILE_MUNMAP
+};
+
#ifdef CONFIG_PROFILING
extern int prof_on __read_mostly;
@@ -54,6 +59,15 @@ static inline void profile_hit(int type, void *ip)
struct task_struct;
struct mm_struct;
+/* task is in do_exit() */
+void profile_task_exit(struct task_struct * task);
+
+/* sys_munmap */
+void profile_munmap(unsigned long addr);
+
+int profile_event_register(enum profile_type, struct notifier_block * n);
+int profile_event_unregister(enum profile_type, struct notifier_block * n);
+
#else
#define prof_on 0
@@ -78,6 +92,18 @@ static inline void profile_hit(int type, void *ip)
return;
}
+static inline int profile_event_register(enum profile_type t, struct notifier_block * n)
+{
+ return -ENOSYS;
+}
+
+static inline int profile_event_unregister(enum profile_type t, struct notifier_block * n)
+{
+ return -ENOSYS;
+}
+
+#define profile_task_exit(a) do { } while (0)
+#define profile_munmap(a) do { } while (0)
#endif /* CONFIG_PROFILING */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 074ad4e..31f7919 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -48,6 +48,7 @@
#include <linux/uidgid_types.h>
#include <linux/tracepoint-defs.h>
#include <linux/unwind_deferred_types.h>
+#include <linux/android_vendor.h>
#include <asm/kmap_size.h>
#include <linux/time64.h>
#ifndef COMPILE_OFFSETS
@@ -345,6 +346,7 @@ extern int __must_check io_schedule_prepare(void);
extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
+extern int select_fallback_rq(int cpu, struct task_struct *p);
/* wrapper functions to trace from this header file */
DECLARE_TRACEPOINT(sched_set_state_tp);
@@ -1110,6 +1112,10 @@ struct task_struct {
u64 stimescaled;
#endif
u64 gtime;
+#ifdef CONFIG_CPU_FREQ_TIMES
+ u64 *time_in_state;
+ unsigned int max_state;
+#endif
struct prev_cputime prev_cputime;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
struct vtime vtime;
@@ -1226,6 +1232,7 @@ struct task_struct {
raw_spinlock_t pi_lock;
struct wake_q_node wake_q;
+ int wake_q_count;
#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task: */
@@ -1601,6 +1608,8 @@ struct task_struct {
struct callback_head mce_kill_me;
int mce_count;
#endif
+ ANDROID_VENDOR_DATA_ARRAY(1, 6);
+ ANDROID_OEM_DATA_ARRAY(1, 6);
#ifdef CONFIG_KRETPROBES
struct llist_head kretprobe_instances;
diff --git a/include/linux/sched/TEST_MAPPING b/include/linux/sched/TEST_MAPPING
new file mode 100644
index 0000000..2e408eb
--- /dev/null
+++ b/include/linux/sched/TEST_MAPPING
@@ -0,0 +1,229 @@
+{
+ "imports": [
+ {
+ "path": "frameworks/base/packages/PackageInstaller"
+ },
+ {
+ "path": "frameworks/base/core/java/android/content/pm"
+ },
+ {
+ "path": "frameworks/base/services/core/java/com/android/server"
+ },
+ {
+ "path": "frameworks/base/core/java/com/android/internal/app"
+ },
+ {
+ "path": "frameworks/base/apex/jobscheduler/service/java/com/android/server/job"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsSilentUpdateHostTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsJobSchedulerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsSuspendAppsTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 765bbc3..5159dec 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -38,6 +38,7 @@
struct wake_q_head {
struct wake_q_node *first;
struct wake_q_node **lastp;
+ int count;
};
#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
@@ -52,6 +53,7 @@ static inline void wake_q_init(struct wake_q_head *head)
{
head->first = WAKE_Q_TAIL;
head->lastp = &head->first;
+ head->count = 0;
}
static inline bool wake_q_empty(struct wake_q_head *head)
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index b02876f..1c4aae3 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -40,6 +40,62 @@ typedef int __bitwise suspend_state_t;
#define PM_SUSPEND_MIN PM_SUSPEND_TO_IDLE
#define PM_SUSPEND_MAX ((__force suspend_state_t) 4)
+enum suspend_stat_step {
+ SUSPEND_WORKING = 0,
+ SUSPEND_FREEZE,
+ SUSPEND_PREPARE,
+ SUSPEND_SUSPEND,
+ SUSPEND_SUSPEND_LATE,
+ SUSPEND_SUSPEND_NOIRQ,
+ SUSPEND_RESUME_NOIRQ,
+ SUSPEND_RESUME_EARLY,
+ SUSPEND_RESUME
+};
+
+#define SUSPEND_NR_STEPS SUSPEND_RESUME
+
+struct suspend_stats {
+ unsigned int step_failures[SUSPEND_NR_STEPS];
+ unsigned int success;
+ unsigned int fail;
+#define REC_FAILED_NUM 2
+ int last_failed_dev;
+ char failed_devs[REC_FAILED_NUM][40];
+ int last_failed_errno;
+ int errno[REC_FAILED_NUM];
+ int last_failed_step;
+ u64 last_hw_sleep;
+ u64 total_hw_sleep;
+ u64 max_hw_sleep;
+ enum suspend_stat_step failed_steps[REC_FAILED_NUM];
+};
+
+extern struct suspend_stats suspend_stats;
+
+static inline void dpm_save_failed_dev(const char *name)
+{
+ strscpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
+ name,
+ sizeof(suspend_stats.failed_devs[0]));
+ suspend_stats.last_failed_dev++;
+ suspend_stats.last_failed_dev %= REC_FAILED_NUM;
+}
+
+static inline void dpm_save_failed_errno(int err)
+{
+ suspend_stats.errno[suspend_stats.last_failed_errno] = err;
+ suspend_stats.last_failed_errno++;
+ suspend_stats.last_failed_errno %= REC_FAILED_NUM;
+}
+
+static inline void dpm_save_failed_step(enum suspend_stat_step step)
+{
+ suspend_stats.step_failures[step-1]++;
+ suspend_stats.failed_steps[suspend_stats.last_failed_step] = step;
+ suspend_stats.last_failed_step++;
+ suspend_stats.last_failed_step %= REC_FAILED_NUM;
+}
+
/**
* struct platform_suspend_ops - Callbacks for managing platform dependent
* system sleep states.
@@ -479,6 +535,7 @@ extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
extern void pm_wakep_autosleep_enabled(bool set);
extern void pm_print_active_wakeup_sources(void);
+extern void pm_get_active_wakeup_sources(char *pending_sources, size_t max);
extern unsigned int lock_system_sleep(void);
extern void unlock_system_sleep(unsigned int);
@@ -589,19 +646,4 @@ static inline void queue_up_suspend_work(void) {}
#endif /* !CONFIG_PM_AUTOSLEEP */
-enum suspend_stat_step {
- SUSPEND_WORKING = 0,
- SUSPEND_FREEZE,
- SUSPEND_PREPARE,
- SUSPEND_SUSPEND,
- SUSPEND_SUSPEND_LATE,
- SUSPEND_SUSPEND_NOIRQ,
- SUSPEND_RESUME_NOIRQ,
- SUSPEND_RESUME_EARLY,
- SUSPEND_RESUME
-};
-
-void dpm_save_failed_dev(const char *name);
-void dpm_save_failed_step(enum suspend_stat_step step);
-
#endif /* _LINUX_SUSPEND_H */
diff --git a/include/linux/usb/android_configfs_uevent.h b/include/linux/usb/android_configfs_uevent.h
new file mode 100644
index 0000000..07b82dda
--- /dev/null
+++ b/include/linux/usb/android_configfs_uevent.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2024 Google LLC
+ */
+#ifndef _ANDROID_USB_CONFIGFS_UEVENT_H
+#define _ANDROID_USB_CONFIGFS_UEVENT_H
+
+#ifdef CONFIG_ANDROID_USB_CONFIGFS_UEVENT
+#include <linux/device.h>
+#include <linux/workqueue.h>
+#include <linux/mutex.h>
+
+struct android_uevent_opts {
+ struct device *dev;
+ int device_id;
+ bool connected;
+ bool configured;
+ bool sw_connected;
+ struct work_struct work;
+ struct ida function_ida;
+};
+
+/**
+ * android_create_function_device - creates a device within the android_usb
+ * class with a new minor number.
+ * @name: the name for the device which is to be created
+ * @drvdata: the data to be added to the device for callbacks, can be NULL
+ * @groups: NULL-terminated list of attribute groups to be created, can be NULL
+ *
+ * This should be called by function drivers which wish to register a device
+ * within the android_usb class.
+ *
+ * Returns: a pointer to the newly created device upon success, or an ERR_PTR
+ * for the encountered error.
+ */
+struct device *android_create_function_device(char *name, void *drvdata,
+ const struct attribute_group **groups);
+
+/**
+ * android_remove_function_device - destroys a device which was created by
+ * calling android_create_function_device, and performs any necessary cleanup.
+ * @dev: the device to be destroyed
+ */
+void android_remove_function_device(struct device *dev);
+#else
+
+struct android_uevent_opts {};
+
+static inline struct device *android_create_function_device(char *name)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void android_remove_function_device(struct device *dev)
+{
+}
+#endif /* CONFIG_ANDROID_USB_CONFIGFS_UEVENT */
+#endif /* _ANDROID_USB_CONFIGFS_UEVENT_H */
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index c18041f..1e9b141 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -28,6 +28,7 @@
#include <linux/usb/webusb.h>
#include <linux/log2.h>
#include <linux/configfs.h>
+#include <linux/usb/android_configfs_uevent.h>
/*
* USB function drivers should return USB_GADGET_DELAYED_STATUS if they
@@ -499,6 +500,8 @@ struct usb_composite_dev {
/* protects deactivations and delayed_status counts*/
spinlock_t lock;
+ struct android_uevent_opts android_opts;
+
/* public: */
unsigned int setup_pending:1;
unsigned int os_desc_pending:1;
diff --git a/include/linux/wakeup_reason.h b/include/linux/wakeup_reason.h
new file mode 100644
index 0000000..54f5caa
--- /dev/null
+++ b/include/linux/wakeup_reason.h
@@ -0,0 +1,37 @@
+/*
+ * include/linux/wakeup_reason.h
+ *
+ * Logs the reason which caused the kernel to resume
+ * from the suspend mode.
+ *
+ * Copyright (C) 2014 Google, Inc.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_WAKEUP_REASON_H
+#define _LINUX_WAKEUP_REASON_H
+
+#define MAX_SUSPEND_ABORT_LEN 256
+
+#ifdef CONFIG_SUSPEND
+void log_irq_wakeup_reason(int irq);
+void log_threaded_irq_wakeup_reason(int irq, int parent_irq);
+void log_suspend_abort_reason(const char *fmt, ...);
+void log_abnormal_wakeup_reason(const char *fmt, ...);
+void clear_wakeup_reasons(void);
+#else
+static inline void log_irq_wakeup_reason(int irq) { }
+static inline void log_threaded_irq_wakeup_reason(int irq, int parent_irq) { }
+static inline void log_suspend_abort_reason(const char *fmt, ...) { }
+static inline void log_abnormal_wakeup_reason(const char *fmt, ...) { }
+static inline void clear_wakeup_reasons(void) { }
+#endif
+
+#endif /* _LINUX_WAKEUP_REASON_H */
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 4424d48..f64fdf7 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -20,7 +20,7 @@
#include <media/media-request.h>
#include <media/frame_vector.h>
-#define VB2_MAX_FRAME (32)
+#define VB2_MAX_FRAME (64)
#define VB2_MAX_PLANES (8)
/**
diff --git a/include/net/TEST_MAPPING b/include/net/TEST_MAPPING
new file mode 100644
index 0000000..b1c299e
--- /dev/null
+++ b/include/net/TEST_MAPPING
@@ -0,0 +1,357 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsWifiBroadcastsHostTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "CtsJobSchedulerTestCases",
+ "options": [
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testCellularConstraintExecutedAndStopped"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testConnectivityConstraintExecutes_transitionNetworks"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testConnectivityConstraintExecutes_withMobile"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testEJMeteredConstraintFails_withMobile_DataSaverOn"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testMeteredConstraintFails_withMobile_DataSaverOn"
+ }
+ ]
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.CallRedirectionServiceTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 9e96776..16ef929 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -304,6 +304,18 @@ static inline bool ipv6_is_mld(struct sk_buff *skb, int nexthdr, int offset)
void addrconf_prefix_rcv(struct net_device *dev,
u8 *opt, int len, bool sllao);
+/* Determines into what table to put autoconf PIO/RIO/default routes
+ * learned on this device.
+ *
+ * - If 0, use the same table for every device. This puts routes into
+ * one of RT_TABLE_{PREFIX,INFO,DFLT} depending on the type of route
+ * (but note that these three are currently all equal to
+ * RT6_TABLE_MAIN).
+ * - If > 0, use the specified table.
+ * - If < 0, put routes into table dev->ifindex + (-rt_table).
+ */
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table);
+
/*
* anycast prototypes (anycast.c)
*/
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index fc01de1..d9912c1 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -9662,9 +9662,7 @@ int cfg80211_register_netdevice(struct net_device *dev);
*/
static inline void cfg80211_unregister_netdevice(struct net_device *dev)
{
-#if IS_ENABLED(CONFIG_CFG80211)
cfg80211_unregister_wdev(dev->ieee80211_ptr);
-#endif
}
/**
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 10d3edd..24da735 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1795,9 +1795,7 @@ static inline int xfrm6_tunnel_check_size(struct sk_buff *skb)
}
#endif
-#if IS_ENABLED(CONFIG_NET_PKTGEN)
int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
-#endif
void xfrm_local_error(struct sk_buff *skb, int mtu);
int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
diff --git a/include/trace/TEST_MAPPING b/include/trace/TEST_MAPPING
new file mode 100644
index 0000000..61a5065
--- /dev/null
+++ b/include/trace/TEST_MAPPING
@@ -0,0 +1,331 @@
+{
+ "imports": [
+ {
+ "path": "packages/services/Telecomm"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsWifiBroadcastsHostTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.PhoneAccountTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/include/trace/events/OWNERS b/include/trace/events/OWNERS
new file mode 100644
index 0000000..a63dbf4
--- /dev/null
+++ b/include/trace/events/OWNERS
@@ -0,0 +1 @@
+per-file f2fs**=file:/fs/f2fs/OWNERS
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index a6e5a44..ee2f4ab 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -142,6 +142,7 @@ TRACE_DEFINE_ENUM(___GFP_LAST_BIT);
#define __def_pageflag_names \
DEF_PAGEFLAG_NAME(locked), \
DEF_PAGEFLAG_NAME(waiters), \
+ DEF_PAGEFLAG_NAME(error), \
DEF_PAGEFLAG_NAME(referenced), \
DEF_PAGEFLAG_NAME(uptodate), \
DEF_PAGEFLAG_NAME(dirty), \
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 7b2645b..f9785c6 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -547,6 +547,30 @@ DEFINE_EVENT_SCHEDSTAT(sched_stat_template, sched_stat_blocked,
TP_ARGS(tsk, delay));
/*
+ * Tracepoint for recording the cause of uninterruptible sleep.
+ */
+TRACE_EVENT(sched_blocked_reason,
+
+ TP_PROTO(struct task_struct *tsk),
+
+ TP_ARGS(tsk),
+
+ TP_STRUCT__entry(
+ __field( pid_t, pid )
+ __field( void*, caller )
+ __field( bool, io_wait )
+ ),
+
+ TP_fast_assign(
+ __entry->pid = tsk->pid;
+ __entry->caller = (void *)__get_wchan(tsk);
+ __entry->io_wait = tsk->in_iowait;
+ ),
+
+ TP_printk("pid=%d iowait=%d caller=%pS", __entry->pid, __entry->io_wait, __entry->caller)
+);
+
+/*
* Tracepoint for accounting runtime (time the task is executing
* on a CPU).
*/
diff --git a/include/trace/hooks/avc.h b/include/trace/hooks/avc.h
new file mode 100644
index 0000000..5100dde7
--- /dev/null
+++ b/include/trace/hooks/avc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM avc
+
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_AVC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_AVC_H
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * Following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook and extend functionality
+ */
+struct avc_node;
+DECLARE_RESTRICTED_HOOK(android_rvh_selinux_avc_insert,
+ TP_PROTO(const struct avc_node *node),
+ TP_ARGS(node), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_selinux_avc_node_delete,
+ TP_PROTO(const struct avc_node *node),
+ TP_ARGS(node), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_selinux_avc_node_replace,
+ TP_PROTO(const struct avc_node *old, const struct avc_node *new),
+ TP_ARGS(old, new), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_selinux_avc_lookup,
+ TP_PROTO(const struct avc_node *node, u32 ssid, u32 tsid, u16 tclass),
+ TP_ARGS(node, ssid, tsid, tclass), 1);
+
+#endif /* _TRACE_HOOK_AVC_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/cgroup.h b/include/trace/hooks/cgroup.h
new file mode 100644
index 0000000..abb4b37
--- /dev/null
+++ b/include/trace/hooks/cgroup.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cgroup
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_CGROUP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_CGROUP_H
+#include <trace/hooks/vendor_hooks.h>
+
+struct cgroup_taskset;
+struct cgroup_subsys;
+struct cgroup_subsys_state;
+DECLARE_HOOK(android_vh_cgroup_attach,
+ TP_PROTO(struct cgroup_subsys *ss, struct cgroup_taskset *tset),
+ TP_ARGS(ss, tset));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_cpu_cgroup_attach,
+ TP_PROTO(struct cgroup_taskset *tset),
+ TP_ARGS(tset), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_cpu_cgroup_online,
+ TP_PROTO(struct cgroup_subsys_state *css),
+ TP_ARGS(css), 1);
+#endif
+
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/cpufreq.h b/include/trace/hooks/cpufreq.h
new file mode 100644
index 0000000..1f148ed
--- /dev/null
+++ b/include/trace/hooks/cpufreq.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpufreq
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_CPUFREQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_CPUFREQ_H
+
+#include <trace/hooks/vendor_hooks.h>
+
+struct cpufreq_policy;
+
+DECLARE_RESTRICTED_HOOK(android_rvh_show_max_freq,
+ TP_PROTO(struct cpufreq_policy *policy, unsigned int *max_freq),
+ TP_ARGS(policy, max_freq), 1);
+
+DECLARE_HOOK(android_vh_cpufreq_online,
+ TP_PROTO(struct cpufreq_policy *policy),
+ TP_ARGS(policy));
+
+#endif /* _TRACE_HOOK_CPUFREQ_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/cpuidle.h b/include/trace/hooks/cpuidle.h
new file mode 100644
index 0000000..b1ee27e
--- /dev/null
+++ b/include/trace/hooks/cpuidle.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpuidle
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_CPUIDLE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_CPUIDLE_H
+
+#include <trace/hooks/vendor_hooks.h>
+
+struct cpuidle_device;
+
+DECLARE_HOOK(android_vh_cpu_idle_enter,
+ TP_PROTO(int *state, struct cpuidle_device *dev),
+ TP_ARGS(state, dev))
+DECLARE_HOOK(android_vh_cpu_idle_exit,
+ TP_PROTO(int state, struct cpuidle_device *dev),
+ TP_ARGS(state, dev))
+
+#endif /* _TRACE_HOOK_CPUIDLE_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+
diff --git a/include/trace/hooks/cpuidle_psci.h b/include/trace/hooks/cpuidle_psci.h
new file mode 100644
index 0000000..eef0032
--- /dev/null
+++ b/include/trace/hooks/cpuidle_psci.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cpuidle_psci
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_CPUIDLE_PSCI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_CPUIDLE_PSCI_H
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * Following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook and extend functionality
+ */
+
+struct cpuidle_device;
+DECLARE_HOOK(android_vh_cpuidle_psci_enter,
+ TP_PROTO(struct cpuidle_device *dev, bool s2idle),
+ TP_ARGS(dev, s2idle));
+
+DECLARE_HOOK(android_vh_cpuidle_psci_exit,
+ TP_PROTO(struct cpuidle_device *dev, bool s2idle),
+ TP_ARGS(dev, s2idle));
+
+#endif /* _TRACE_HOOK_CPUIDLE_PSCI_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/debug.h b/include/trace/hooks/debug.h
new file mode 100644
index 0000000..5a20141
--- /dev/null
+++ b/include/trace/hooks/debug.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM debug
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_DEBUG_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_DEBUG_H
+
+#include <trace/hooks/vendor_hooks.h>
+
+struct pt_regs;
+
+DECLARE_HOOK(android_vh_ipi_stop,
+ TP_PROTO(struct pt_regs *regs),
+ TP_ARGS(regs))
+
+#endif /* _TRACE_HOOK_DEBUG_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/epoch.h b/include/trace/hooks/epoch.h
new file mode 100644
index 0000000..ccee2d8
--- /dev/null
+++ b/include/trace/hooks/epoch.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM epoch
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_EPOCH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_EPOCH_H
+
+#include <trace/hooks/vendor_hooks.h>
+
+DECLARE_HOOK(android_vh_show_suspend_epoch_val,
+ TP_PROTO(u64 suspend_ns, u64 suspend_cycles),
+ TP_ARGS(suspend_ns, suspend_cycles));
+
+DECLARE_HOOK(android_vh_show_resume_epoch_val,
+ TP_PROTO(u64 resume_cycles),
+ TP_ARGS(resume_cycles));
+
+#endif /* _TRACE_HOOK_EPOCH_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/fpsimd.h b/include/trace/hooks/fpsimd.h
new file mode 100644
index 0000000..1033718
--- /dev/null
+++ b/include/trace/hooks/fpsimd.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fpsimd
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_FPSIMD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_FPSIMD_H
+
+#include <trace/hooks/vendor_hooks.h>
+
+struct task_struct;
+
+DECLARE_HOOK(android_vh_is_fpsimd_save,
+ TP_PROTO(struct task_struct *prev, struct task_struct *next),
+ TP_ARGS(prev, next))
+
+#endif /* _TRACE_HOOK_FPSIMD_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/gic.h b/include/trace/hooks/gic.h
new file mode 100644
index 0000000..daa11cd
--- /dev/null
+++ b/include/trace/hooks/gic.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gic
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_GIC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_GIC_H
+
+#include <trace/hooks/vendor_hooks.h>
+
+struct irq_data;
+DECLARE_HOOK(android_vh_gic_set_affinity,
+ TP_PROTO(struct irq_data *d, const struct cpumask *mask_val,
+ bool force, u8 *gic_cpu_map, void __iomem *reg),
+ TP_ARGS(d, mask_val, force, gic_cpu_map, reg));
+
+#endif /* _TRACE_HOOK_GIC_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/gic_v3.h b/include/trace/hooks/gic_v3.h
new file mode 100644
index 0000000..54368d6
--- /dev/null
+++ b/include/trace/hooks/gic_v3.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM gic_v3
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_GIC_V3_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_GIC_V3_H
+
+#include <trace/hooks/vendor_hooks.h>
+
+/*
+ * Following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook and extend functionality
+ */
+struct irq_data;
+struct cpumask;
+DECLARE_RESTRICTED_HOOK(android_rvh_gic_v3_set_affinity,
+ TP_PROTO(struct irq_data *d, const struct cpumask *mask_val,
+ u64 *affinity, bool force, void __iomem *base,
+ void __iomem *rbase, u64 redist_stride),
+ TP_ARGS(d, mask_val, affinity, force, base, rbase, redist_stride),
+ 1);
+
+#endif /* _TRACE_HOOK_GIC_V3_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/iommu.h b/include/trace/hooks/iommu.h
new file mode 100644
index 0000000..d965a23
--- /dev/null
+++ b/include/trace/hooks/iommu.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM iommu
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_IOMMU_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_IOMMU_H
+
+#include <trace/hooks/vendor_hooks.h>
+
+DECLARE_RESTRICTED_HOOK(android_rvh_iommu_setup_dma_ops,
+ TP_PROTO(struct device *dev),
+ TP_ARGS(dev), 1);
+
+struct iova_domain;
+
+DECLARE_HOOK(android_vh_iommu_iovad_alloc_iova,
+ TP_PROTO(struct device *dev, struct iova_domain *iovad, dma_addr_t iova, size_t size),
+ TP_ARGS(dev, iovad, iova, size));
+
+DECLARE_HOOK(android_vh_iommu_iovad_free_iova,
+ TP_PROTO(struct iova_domain *iovad, dma_addr_t iova, size_t size),
+ TP_ARGS(iovad, iova, size));
+
+#endif /* _TRACE_HOOK_IOMMU_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/mm.h b/include/trace/hooks/mm.h
new file mode 100644
index 0000000..b3912a9
--- /dev/null
+++ b/include/trace/hooks/mm.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mm
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_MM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_MM_H
+
+#include <trace/hooks/vendor_hooks.h>
+/*
+
+DECLARE_RESTRICTED_HOOK(android_rvh_set_skip_swapcache_flags,
+ TP_PROTO(gfp_t *flags),
+ TP_ARGS(flags), 1);
+DECLARE_RESTRICTED_HOOK(android_rvh_set_gfp_zone_flags,
+ TP_PROTO(gfp_t *flags),
+ TP_ARGS(flags), 1);
+DECLARE_RESTRICTED_HOOK(android_rvh_set_readahead_gfp_mask,
+ TP_PROTO(gfp_t *flags),
+ TP_ARGS(flags), 1);
+
+*/
+#endif /* _TRACE_HOOK_MM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/mpam.h b/include/trace/hooks/mpam.h
new file mode 100644
index 0000000..50f5a68
--- /dev/null
+++ b/include/trace/hooks/mpam.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mpam
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_MPAM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_MPAM_H
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * Following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook and extend functionality
+ */
+struct task_struct;
+DECLARE_HOOK(android_vh_mpam_set,
+ TP_PROTO(struct task_struct *prev, struct task_struct *next),
+ TP_ARGS(prev, next));
+
+#endif /* _TRACE_HOOK_MPAM_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/net.h b/include/trace/hooks/net.h
new file mode 100644
index 0000000..381649b
--- /dev/null
+++ b/include/trace/hooks/net.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM net
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_NET_VH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_NET_VH_H
+#include <trace/hooks/vendor_hooks.h>
+
+struct packet_type;
+struct list_head;
+DECLARE_HOOK(android_vh_ptype_head,
+ TP_PROTO(const struct packet_type *pt, struct list_head *vendor_pt),
+ TP_ARGS(pt, vendor_pt));
+
+/* macro versions of hooks are no longer required */
+
+#endif /* _TRACE_HOOK_NET_VH_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/pm_domain.h b/include/trace/hooks/pm_domain.h
new file mode 100644
index 0000000..2a530d1
--- /dev/null
+++ b/include/trace/hooks/pm_domain.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pm_domain
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_PM_DOMAIN_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_PM_DOMAIN_H
+
+#include <trace/hooks/vendor_hooks.h>
+
+struct generic_pm_domain;
+DECLARE_HOOK(android_vh_allow_domain_state,
+ TP_PROTO(struct generic_pm_domain *genpd, uint32_t idx, bool *allow),
+ TP_ARGS(genpd, idx, allow))
+
+#endif /* _TRACE_HOOK_PM_DOMAIN_H */
+
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/printk.h b/include/trace/hooks/printk.h
new file mode 100644
index 0000000..b3e9598
--- /dev/null
+++ b/include/trace/hooks/printk.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM printk
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_PRINTK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_PRINTK_H
+
+#include <trace/hooks/vendor_hooks.h>
+
+DECLARE_HOOK(android_vh_printk_hotplug,
+ TP_PROTO(int *flag),
+ TP_ARGS(flag));
+
+DECLARE_HOOK(android_vh_printk_caller_id,
+ TP_PROTO(u32 *caller_id),
+ TP_ARGS(caller_id));
+DECLARE_HOOK(android_vh_printk_caller,
+ TP_PROTO(char *caller, size_t size, u32 id, int *ret),
+ TP_ARGS(caller, size, id, ret));
+DECLARE_HOOK(android_vh_printk_ext_header,
+ TP_PROTO(char *caller, size_t size, u32 id, int *ret),
+ TP_ARGS(caller, size, id, ret));
+
+#endif /* _TRACE_HOOK_PRINTK_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/reboot.h b/include/trace/hooks/reboot.h
new file mode 100644
index 0000000..e9c7d68
--- /dev/null
+++ b/include/trace/hooks/reboot.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM reboot
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_REBOOT_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_REBOOT_H
+
+#include <trace/hooks/vendor_hooks.h>
+
+DECLARE_RESTRICTED_HOOK(android_rvh_hw_protection_shutdown,
+ TP_PROTO(const char *reason),
+ TP_ARGS(reason), 1);
+
+#endif /* _TRACE_HOOK_REBOOT_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/remoteproc.h b/include/trace/hooks/remoteproc.h
new file mode 100644
index 0000000..55fae70
--- /dev/null
+++ b/include/trace/hooks/remoteproc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM remoteproc
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_RPROC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_RPROC_H
+
+#include <trace/hooks/vendor_hooks.h>
+
+struct rproc;
+
+/* When recovery succeeds */
+DECLARE_HOOK(android_vh_rproc_recovery,
+ TP_PROTO(struct rproc *rproc),
+ TP_ARGS(rproc));
+
+/* When recovery mode is enabled or disabled by sysfs */
+DECLARE_HOOK(android_vh_rproc_recovery_set,
+ TP_PROTO(struct rproc *rproc),
+ TP_ARGS(rproc));
+
+#endif /* _TRACE_HOOK_RPROC_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/sched.h b/include/trace/hooks/sched.h
new file mode 100644
index 0000000..8e45f71
--- /dev/null
+++ b/include/trace/hooks/sched.h
@@ -0,0 +1,352 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sched
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_SCHED_H
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * Following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook and extend functionality
+ */
+struct task_struct;
+DECLARE_RESTRICTED_HOOK(android_rvh_select_task_rq_fair,
+ TP_PROTO(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags, int *new_cpu),
+ TP_ARGS(p, prev_cpu, sd_flag, wake_flags, new_cpu), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_select_task_rq_rt,
+ TP_PROTO(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags, int *new_cpu),
+ TP_ARGS(p, prev_cpu, sd_flag, wake_flags, new_cpu), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_select_fallback_rq,
+ TP_PROTO(int cpu, struct task_struct *p, int *new_cpu),
+ TP_ARGS(cpu, p, new_cpu), 1);
+
+struct rq;
+DECLARE_HOOK(android_vh_scheduler_tick,
+ TP_PROTO(struct rq *rq),
+ TP_ARGS(rq));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_enqueue_task,
+ TP_PROTO(struct rq *rq, struct task_struct *p, int flags),
+ TP_ARGS(rq, p, flags), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_dequeue_task,
+ TP_PROTO(struct rq *rq, struct task_struct *p, int flags),
+ TP_ARGS(rq, p, flags), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_can_migrate_task,
+ TP_PROTO(struct task_struct *p, int dst_cpu, int *can_migrate),
+ TP_ARGS(p, dst_cpu, can_migrate), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_find_lowest_rq,
+ TP_PROTO(struct task_struct *p, struct cpumask *local_cpu_mask,
+ int ret, int *lowest_cpu),
+ TP_ARGS(p, local_cpu_mask, ret, lowest_cpu), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_prepare_prio_fork,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_finish_prio_fork,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_rtmutex_prepare_setprio,
+ TP_PROTO(struct task_struct *p, struct task_struct *pi_task),
+ TP_ARGS(p, pi_task), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_rto_next_cpu,
+ TP_PROTO(int rto_cpu, struct cpumask *rto_mask, int *cpu),
+ TP_ARGS(rto_cpu, rto_mask, cpu), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_is_cpu_allowed,
+ TP_PROTO(struct task_struct *p, int cpu, bool *allowed),
+ TP_ARGS(p, cpu, allowed), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_get_nohz_timer_target,
+ TP_PROTO(int *cpu, bool *done),
+ TP_ARGS(cpu, done), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_set_user_nice_locked,
+ TP_PROTO(struct task_struct *p, long *nice, bool *allowed),
+ TP_ARGS(p, nice, allowed), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_setscheduler,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_replace_next_task_fair,
+ TP_PROTO(struct rq *rq, struct task_struct **p, struct task_struct *prev),
+ TP_ARGS(rq, p, prev), 1);
+
+struct sched_group;
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_balance_find_src_group,
+ TP_PROTO(struct sched_group *busiest, struct rq *dst_rq, int *out_balance),
+ TP_ARGS(busiest, dst_rq, out_balance), 1);
+
+DECLARE_HOOK(android_vh_dump_throttled_rt_tasks,
+ TP_PROTO(int cpu, u64 clock, ktime_t rt_period, u64 rt_runtime,
+ s64 rt_period_timer_expires),
+ TP_ARGS(cpu, clock, rt_period, rt_runtime, rt_period_timer_expires));
+
+DECLARE_HOOK(android_vh_jiffies_update,
+ TP_PROTO(void *unused),
+ TP_ARGS(unused));
+
+struct rq_flags;
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_newidle_balance,
+ TP_PROTO(struct rq *this_rq, struct rq_flags *rf,
+ int *pulled_task, int *done),
+ TP_ARGS(this_rq, rf, pulled_task, done), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_nohz_balancer_kick,
+ TP_PROTO(struct rq *rq, unsigned int *flags, int *done),
+ TP_ARGS(rq, flags, done), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_rebalance_domains,
+ TP_PROTO(struct rq *rq, int *continue_balancing),
+ TP_ARGS(rq, continue_balancing), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_find_busiest_queue,
+ TP_PROTO(int dst_cpu, struct sched_group *group,
+ struct cpumask *env_cpus, struct rq **busiest,
+ int *done),
+ TP_ARGS(dst_cpu, group, env_cpus, busiest, done), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_migrate_queued_task,
+ TP_PROTO(struct rq *rq, struct rq_flags *rf,
+ struct task_struct *p, int new_cpu,
+ int *detached),
+ TP_ARGS(rq, rf, p, new_cpu, detached), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_cpu_overutilized,
+ TP_PROTO(int cpu, int *overutilized),
+ TP_ARGS(cpu, overutilized), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_setaffinity,
+ TP_PROTO(struct task_struct *p, const struct cpumask *in_mask, int *retval),
+ TP_ARGS(p, in_mask, retval), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_getaffinity,
+ TP_PROTO(struct task_struct *p, struct cpumask *in_mask),
+ TP_ARGS(p, in_mask), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_set_task_cpu,
+ TP_PROTO(struct task_struct *p, unsigned int new_cpu),
+ TP_ARGS(p, new_cpu), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_try_to_wake_up,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_try_to_wake_up_success,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_fork,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_wake_up_new_task,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_new_task_stats,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_flush_task,
+ TP_PROTO(struct task_struct *prev),
+ TP_ARGS(prev), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_tick_entry,
+ TP_PROTO(struct rq *rq),
+ TP_ARGS(rq), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_schedule,
+ TP_PROTO(struct task_struct *prev, struct task_struct *next, struct rq *rq),
+ TP_ARGS(prev, next, rq), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_cpu_starting,
+ TP_PROTO(int cpu),
+ TP_ARGS(cpu), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_cpu_dying,
+ TP_PROTO(int cpu),
+ TP_ARGS(cpu), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_account_irq,
+ TP_PROTO(struct task_struct *curr, int cpu, s64 delta, bool start),
+ TP_ARGS(curr, cpu, delta, start), 1);
+
+struct sched_entity;
+DECLARE_RESTRICTED_HOOK(android_rvh_place_entity,
+ TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial, u64 *vruntime),
+ TP_ARGS(cfs_rq, se, initial, vruntime), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_build_perf_domains,
+ TP_PROTO(bool *eas_check),
+ TP_ARGS(eas_check), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_cpu_capacity,
+ TP_PROTO(int cpu, unsigned long *capacity),
+ TP_ARGS(cpu, capacity), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_misfit_status,
+ TP_PROTO(struct task_struct *p, struct rq *rq, bool *need_update),
+ TP_ARGS(p, rq, need_update), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_fork_init,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_ttwu_cond,
+ TP_PROTO(int cpu, bool *cond),
+ TP_ARGS(cpu, cond), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_schedule_bug,
+ TP_PROTO(void *unused),
+ TP_ARGS(unused), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_exec,
+ TP_PROTO(bool *cond),
+ TP_ARGS(cond), 1);
+
+DECLARE_HOOK(android_vh_build_sched_domains,
+ TP_PROTO(bool has_asym),
+ TP_ARGS(has_asym));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_check_preempt_tick,
+ TP_PROTO(struct task_struct *p, unsigned long *ideal_runtime, bool *skip_preempt,
+ unsigned long delta_exec, struct cfs_rq *cfs_rq, struct sched_entity *curr,
+ unsigned int granularity),
+ TP_ARGS(p, ideal_runtime, skip_preempt, delta_exec, cfs_rq, curr, granularity), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_sched_balance_rt,
+ TP_PROTO(struct rq *rq, struct task_struct *p, int *done),
+ TP_ARGS(rq, p, done), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_check_preempt_wakeup_fair,
+ TP_PROTO(struct rq *rq, struct task_struct *p, bool *preempt, bool *nopreempt,
+ int wake_flags, struct sched_entity *se, struct sched_entity *pse),
+ TP_ARGS(rq, p, preempt, nopreempt, wake_flags, se, pse), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_set_cpus_allowed_by_task,
+ TP_PROTO(const struct cpumask *cpu_valid_mask, const struct cpumask *new_mask,
+ struct task_struct *p, unsigned int *dest_cpu),
+ TP_ARGS(cpu_valid_mask, new_mask, p, dest_cpu), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_do_sched_yield,
+ TP_PROTO(struct rq *rq),
+ TP_ARGS(rq), 1);
+
+DECLARE_HOOK(android_vh_free_task,
+ TP_PROTO(struct task_struct *p),
+ TP_ARGS(p));
+
+enum uclamp_id;
+struct uclamp_se;
+DECLARE_RESTRICTED_HOOK(android_rvh_uclamp_eff_get,
+ TP_PROTO(struct task_struct *p, enum uclamp_id clamp_id,
+ struct uclamp_se *uclamp_max, struct uclamp_se *uclamp_eff, int *ret),
+ TP_ARGS(p, clamp_id, uclamp_max, uclamp_eff, ret), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_after_enqueue_task,
+ TP_PROTO(struct rq *rq, struct task_struct *p, int flags),
+ TP_ARGS(rq, p, flags), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_after_dequeue_task,
+ TP_PROTO(struct rq *rq, struct task_struct *p, int flags, bool *dequeue_task_result),
+ TP_ARGS(rq, p, flags, dequeue_task_result), 1);
+
+struct cfs_rq;
+struct sched_entity;
+struct rq_flags;
+DECLARE_RESTRICTED_HOOK(android_rvh_enqueue_entity,
+ TP_PROTO(struct cfs_rq *cfs, struct sched_entity *se),
+ TP_ARGS(cfs, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_dequeue_entity,
+ TP_PROTO(struct cfs_rq *cfs, struct sched_entity *se),
+ TP_ARGS(cfs, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_entity_tick,
+ TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se),
+ TP_ARGS(cfs_rq, se), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_enqueue_task_fair,
+ TP_PROTO(struct rq *rq, struct task_struct *p, int flags),
+ TP_ARGS(rq, p, flags), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_dequeue_task_fair,
+ TP_PROTO(struct rq *rq, struct task_struct *p, int flags),
+ TP_ARGS(rq, p, flags), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_util_est_update,
+ TP_PROTO(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep, int *ret),
+ TP_ARGS(cfs_rq, p, task_sleep, ret), 1);
+
+DECLARE_HOOK(android_vh_setscheduler_uclamp,
+ TP_PROTO(struct task_struct *tsk, int clamp_id, unsigned int value),
+ TP_ARGS(tsk, clamp_id, value));
+
+DECLARE_HOOK(android_vh_update_topology_flags_workfn,
+ TP_PROTO(void *unused),
+ TP_ARGS(unused));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_thermal_stats,
+ TP_PROTO(int cpu),
+ TP_ARGS(cpu), 1);
+
+DECLARE_HOOK(android_vh_do_wake_up_sync,
+ TP_PROTO(struct wait_queue_head *wq_head, int *done, struct sock *sk),
+ TP_ARGS(wq_head, done, sk));
+
+DECLARE_HOOK(android_vh_set_wake_flags,
+ TP_PROTO(int *wake_flags, unsigned int *mode),
+ TP_ARGS(wake_flags, mode));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_find_new_ilb,
+ TP_PROTO(struct cpumask *nohz_idle_cpus_mask, int *new_ilb),
+ TP_ARGS(nohz_idle_cpus_mask, new_ilb), 1);
+
+DECLARE_HOOK(android_vh_dup_task_struct,
+ TP_PROTO(struct task_struct *tsk, struct task_struct *orig),
+ TP_ARGS(tsk, orig));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_rq_clock_pelt,
+ TP_PROTO(struct rq *rq, s64 delta, int *ret),
+ TP_ARGS(rq, delta, ret), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_load_avg_blocked_se,
+ TP_PROTO(u64 now, struct sched_entity *se, int *ret),
+ TP_ARGS(now, se, ret), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_load_avg_se,
+ TP_PROTO(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se, int *ret),
+ TP_ARGS(now, cfs_rq, se, ret), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_load_avg_cfs_rq,
+ TP_PROTO(u64 now, struct cfs_rq *cfs_rq, int *ret),
+ TP_ARGS(now, cfs_rq, ret), 1);
+
+DECLARE_RESTRICTED_HOOK(android_rvh_update_rt_rq_load_avg_internal,
+ TP_PROTO(u64 now, struct rq *rq, int running, int *ret),
+ TP_ARGS(now, rq, running, ret), 1);
+
+struct sched_dl_entity;
+DECLARE_HOOK(android_vh_dump_dl_server,
+ TP_PROTO(struct sched_dl_entity *dl_se, struct task_struct *p),
+ TP_ARGS(dl_se, p));
+
+struct affinity_context;
+DECLARE_RESTRICTED_HOOK(android_rvh_set_cpus_allowed_ptr,
+ TP_PROTO(struct task_struct *p, struct affinity_context *ctx, bool *skip_user_ptr),
+ TP_ARGS(p, ctx, skip_user_ptr), 1);
+
+/* macro versions of hooks are no longer required */
+
+#endif /* _TRACE_HOOK_SCHED_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/selinux.h b/include/trace/hooks/selinux.h
new file mode 100644
index 0000000..0b65631b
--- /dev/null
+++ b/include/trace/hooks/selinux.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM selinux
+
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_SELINUX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_SELINUX_H
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * Following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook and extend functionality
+ */
+struct selinux_state;
+DECLARE_RESTRICTED_HOOK(android_rvh_selinux_is_initialized,
+ TP_PROTO(const struct selinux_state *state),
+ TP_ARGS(state), 1);
+
+#endif /* _TRACE_HOOK_SELINUX_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/signal.h b/include/trace/hooks/signal.h
new file mode 100644
index 0000000..c1051ee
--- /dev/null
+++ b/include/trace/hooks/signal.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM signal
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_SIGNAL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_SIGNAL_H
+#include <trace/hooks/vendor_hooks.h>
+
+struct task_struct;
+DECLARE_HOOK(android_vh_do_send_sig_info,
+ TP_PROTO(int sig, struct task_struct *killer, struct task_struct *dst),
+ TP_ARGS(sig, killer, dst));
+#endif /* _TRACE_HOOK_SIGNAL_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/sys.h b/include/trace/hooks/sys.h
new file mode 100644
index 0000000..e2d5d6d
--- /dev/null
+++ b/include/trace/hooks/sys.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sys
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_SYS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_SYS_H
+#include <trace/hooks/vendor_hooks.h>
+
+struct task_struct;
+DECLARE_HOOK(android_vh_syscall_prctl_finished,
+ TP_PROTO(int option, struct task_struct *task),
+ TP_ARGS(option, task));
+#endif
+
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/syscall_check.h b/include/trace/hooks/syscall_check.h
new file mode 100644
index 0000000..56d8267
--- /dev/null
+++ b/include/trace/hooks/syscall_check.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM syscall_check
+
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_SYSCALL_CHECK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_SYSCALL_CHECK_H
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * Following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook and extend functionality
+ */
+struct file;
+union bpf_attr;
+DECLARE_HOOK(android_vh_check_mmap_file,
+ TP_PROTO(const struct file *file, unsigned long prot,
+ unsigned long flag, unsigned long ret),
+ TP_ARGS(file, prot, flag, ret));
+
+DECLARE_HOOK(android_vh_check_file_open,
+ TP_PROTO(const struct file *file),
+ TP_ARGS(file));
+
+DECLARE_HOOK(android_vh_check_bpf_syscall,
+ TP_PROTO(int cmd, const union bpf_attr *attr, unsigned int size),
+ TP_ARGS(cmd, attr, size));
+
+#endif /* _TRACE_HOOK_SYSCALL_CHECK_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/sysrqcrash.h b/include/trace/hooks/sysrqcrash.h
new file mode 100644
index 0000000..92e7bc7
--- /dev/null
+++ b/include/trace/hooks/sysrqcrash.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sysrqcrash
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_SYSRQCRASH_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_SYSRQCRASH_H
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * Following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook and extend functionality
+ */
+DECLARE_HOOK(android_vh_sysrq_crash,
+ TP_PROTO(void *data),
+ TP_ARGS(data));
+
+#endif /* _TRACE_HOOK_SYSRQCRASH_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/timer.h b/include/trace/hooks/timer.h
new file mode 100644
index 0000000..67ef865
--- /dev/null
+++ b/include/trace/hooks/timer.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM timer
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_TIMER_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_TIMER_H
+
+#include <trace/hooks/vendor_hooks.h>
+
+DECLARE_HOOK(android_vh_timer_calc_index,
+ TP_PROTO(unsigned int lvl, unsigned long *expires),
+ TP_ARGS(lvl, expires));
+
+#endif /* _TRACE_HOOK_TIMER_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/ufshcd.h b/include/trace/hooks/ufshcd.h
new file mode 100644
index 0000000..234613f
--- /dev/null
+++ b/include/trace/hooks/ufshcd.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM ufshcd
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH trace/hooks
+#if !defined(_TRACE_HOOK_UFSHCD_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_UFSHCD_H
+#include <trace/hooks/vendor_hooks.h>
+/*
+ * Following tracepoints are not exported in tracefs and provide a
+ * mechanism for vendor modules to hook and extend functionality
+ */
+struct ufs_hba;
+struct scsi_cmnd;
+
+DECLARE_HOOK(android_vh_ufs_fill_prdt,
+ TP_PROTO(struct ufs_hba *hba, struct scsi_cmnd *cmd,
+ unsigned int segments, int *err),
+ TP_ARGS(hba, cmd, segments, err));
+
+DECLARE_RESTRICTED_HOOK(android_rvh_ufs_reprogram_all_keys,
+ TP_PROTO(struct ufs_hba *hba, int *err),
+ TP_ARGS(hba, err), 1);
+
+DECLARE_HOOK(android_vh_ufs_prepare_command,
+ TP_PROTO(struct ufs_hba *hba, struct scsi_cmnd *cmd, int *err),
+ TP_ARGS(hba, cmd, err));
+
+DECLARE_HOOK(android_vh_ufs_update_sysfs,
+ TP_PROTO(struct ufs_hba *hba),
+ TP_ARGS(hba));
+
+DECLARE_HOOK(android_vh_ufs_send_command,
+ TP_PROTO(struct ufs_hba *hba, struct scsi_cmnd *cmd),
+ TP_ARGS(hba, cmd));
+
+DECLARE_HOOK(android_vh_ufs_compl_command,
+ TP_PROTO(struct ufs_hba *hba, struct scsi_cmnd *cmd),
+ TP_ARGS(hba, cmd));
+
+struct uic_command;
+DECLARE_HOOK(android_vh_ufs_send_uic_command,
+ TP_PROTO(struct ufs_hba *hba, const struct uic_command *ucmd,
+ int str_t),
+ TP_ARGS(hba, ucmd, str_t));
+
+DECLARE_HOOK(android_vh_ufs_send_tm_command,
+ TP_PROTO(struct ufs_hba *hba, int tag, int str_t),
+ TP_ARGS(hba, tag, str_t));
+
+DECLARE_HOOK(android_vh_ufs_check_int_errors,
+ TP_PROTO(struct ufs_hba *hba, bool queue_eh_work),
+ TP_ARGS(hba, queue_eh_work));
+
+#endif /* _TRACE_HOOK_UFSHCD_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/hooks/vendor_hooks.h b/include/trace/hooks/vendor_hooks.h
new file mode 100644
index 0000000..cbac87b
--- /dev/null
+++ b/include/trace/hooks/vendor_hooks.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Note: we intentionally omit include file ifdef protection
+ * This is due to the way trace events work. If a file includes two
+ * trace event headers under one "CREATE_TRACE_POINTS" the first include
+ * will override the DECLARE_RESTRICTED_HOOK and break the second include.
+ */
+
+#ifndef __GENKSYMS__
+#include <linux/tracepoint.h>
+#endif
+
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_ANDROID_VENDOR_HOOKS)
+
+#define DECLARE_HOOK DECLARE_TRACE_EVENT
+
+int android_rvh_probe_register(struct tracepoint *tp, void *probe, void *data);
+
+#ifdef TRACE_HEADER_MULTI_READ
+
+#define __DEFINE_HOOK_EXT(_name, _ext, proto, args) \
+ static const char __tpstrtab_##_name[] \
+ __section("__tracepoints_strings") = #_name; \
+ extern struct static_call_key STATIC_CALL_KEY(tp_func_##_name); \
+ int __traceiter_##_name(void *__data, proto); \
+ struct tracepoint __tracepoint_##_name __used \
+ __section("__tracepoints") = { \
+ .name = __tpstrtab_##_name, \
+ .key = STATIC_KEY_FALSE_INIT, \
+ .static_call_key = &STATIC_CALL_KEY(tp_func_##_name), \
+ .static_call_tramp = STATIC_CALL_TRAMP_ADDR(tp_func_##_name), \
+ .iterator = &__traceiter_##_name, \
+ .funcs = NULL, \
+ .ext = _ext, \
+ }; \
+ __TRACEPOINT_ENTRY(_name); \
+ int __traceiter_##_name(void *__data, proto) \
+ { \
+ struct tracepoint_func *it_func_ptr; \
+ void *it_func; \
+ \
+ it_func_ptr = (&__tracepoint_##_name)->funcs; \
+ it_func = (it_func_ptr)->func; \
+ do { \
+ __data = (it_func_ptr)->data; \
+ ((void(*)(void *, proto))(it_func))(__data, args); \
+ it_func = READ_ONCE((++it_func_ptr)->func); \
+ } while (it_func); \
+ return 0; \
+ } \
+ DEFINE_STATIC_CALL(tp_func_##_name, __traceiter_##_name);
+
+#undef DECLARE_RESTRICTED_HOOK
+
+#define DEFINE_HOOK_FN(_name, _reg, _unreg, _proto, _args) \
+ static struct tracepoint_ext __tracepoint_ext_##_name = { \
+ .regfunc = _reg, \
+ .unregfunc = _unreg, \
+ }; \
+ __DEFINE_HOOK_EXT(_name, &__tracepoint_ext_##_name, PARAMS(_proto), PARAMS(_args));
+#define DECLARE_RESTRICTED_HOOK(name, proto, args, cond) \
+ __DEFINE_HOOK_EXT(name, NULL, PARAMS(proto), PARAMS(args));
+
+
+/* prevent additional recursion */
+#undef TRACE_HEADER_MULTI_READ
+#else /* TRACE_HEADER_MULTI_READ */
+
+#ifdef CONFIG_HAVE_STATIC_CALL
+#define __DO_RESTRICTED_HOOK_CALL(name, args) \
+ do { \
+ struct tracepoint_func *it_func_ptr; \
+ void *__data; \
+ it_func_ptr = (&__tracepoint_##name)->funcs; \
+ if (it_func_ptr) { \
+ __data = (it_func_ptr)->data; \
+ static_call(tp_func_##name)(__data, args); \
+ } \
+ } while (0)
+#else
+#define __DO_RESTRICTED_HOOK_CALL(name, args) __traceiter_##name(NULL, args)
+#endif
+
+#define DO_RESTRICTED_HOOK(name, args, cond) \
+ do { \
+ if (!(cond)) \
+ return; \
+ \
+ __DO_RESTRICTED_HOOK_CALL(name, TP_ARGS(args)); \
+ } while (0)
+
+#define __DECLARE_RESTRICTED_HOOK(name, proto, args, cond, data_proto) \
+ extern int __traceiter_##name(data_proto); \
+ DECLARE_STATIC_CALL(tp_func_##name, __traceiter_##name); \
+ extern struct tracepoint __tracepoint_##name; \
+ static inline void trace_##name(proto) \
+ { \
+ if (static_branch_unlikely(&__tracepoint_##name.key)) \
+ DO_RESTRICTED_HOOK(name, \
+ TP_ARGS(args), \
+ TP_CONDITION(cond)); \
+ } \
+ static inline bool \
+ trace_##name##_enabled(void) \
+ { \
+ return static_branch_unlikely(&__tracepoint_##name.key);\
+ } \
+ static inline int \
+ register_trace_##name(void (*probe)(data_proto), void *data) \
+ { \
+ return android_rvh_probe_register(&__tracepoint_##name, \
+ (void *)probe, data); \
+ } \
+ /* vendor hooks cannot be unregistered */ \
+
+#undef DECLARE_RESTRICTED_HOOK
+#define DECLARE_RESTRICTED_HOOK(name, proto, args, cond) \
+ __DECLARE_RESTRICTED_HOOK(name, PARAMS(proto), PARAMS(args), \
+ cond, \
+ PARAMS(void *__data, proto))
+
+#endif /* TRACE_HEADER_MULTI_READ */
+
+#else /* !CONFIG_TRACEPOINTS || !CONFIG_ANDROID_VENDOR_HOOKS */
+/* suppress trace hooks */
+#define DECLARE_HOOK DECLARE_EVENT_NOP
+#define DECLARE_RESTRICTED_HOOK(name, proto, args, cond) \
+ DECLARE_EVENT_NOP(name, PARAMS(proto), PARAMS(args))
+#endif
diff --git a/include/trace/hooks/vmscan.h b/include/trace/hooks/vmscan.h
new file mode 100644
index 0000000..83116f7
--- /dev/null
+++ b/include/trace/hooks/vmscan.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vmscan
+
+#define TRACE_INCLUDE_PATH trace/hooks
+
+#if !defined(_TRACE_HOOK_VMSCAN_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HOOK_VMSCAN_H
+
+#include <trace/hooks/vendor_hooks.h>
+
+DECLARE_RESTRICTED_HOOK(android_rvh_set_balance_anon_file_reclaim,
+ TP_PROTO(bool *balance_anon_file_reclaim),
+ TP_ARGS(balance_anon_file_reclaim), 1);
+#endif /* _TRACE_HOOK_VMSCAN_H */
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/uapi/linux/OWNERS b/include/uapi/linux/OWNERS
new file mode 100644
index 0000000..1ee1fe2
--- /dev/null
+++ b/include/uapi/linux/OWNERS
@@ -0,0 +1,2 @@
+per-file f2fs**=file:/fs/f2fs/OWNERS
+per-file net**=file:/net/OWNERS
diff --git a/include/uapi/linux/TEST_MAPPING b/include/uapi/linux/TEST_MAPPING
new file mode 100644
index 0000000..4b1552f
--- /dev/null
+++ b/include/uapi/linux/TEST_MAPPING
@@ -0,0 +1,309 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.CallTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 701cad3..4f2c2f1 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -38,11 +38,59 @@ enum {
BINDER_TYPE_PTR = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
};
+/**
+ * enum flat_binder_object_shifts: shift values for flat_binder_object_flags
+ * @FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT: shift for getting scheduler policy.
+ *
+ */
+enum flat_binder_object_shifts {
+ FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT = 9,
+};
+
+/**
+ * enum flat_binder_object_flags - flags for use in flat_binder_object.flags
+ */
enum flat_binder_object_flags {
+ /**
+ * @FLAT_BINDER_FLAG_PRIORITY_MASK: bit-mask for min scheduler priority
+ *
+ * These bits can be used to set the minimum scheduler priority
+ * at which transactions into this node should run. Valid values
+ * in these bits depend on the scheduler policy encoded in
+ * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK.
+ *
+ * For SCHED_NORMAL/SCHED_BATCH, the valid range is between [-20..19]
+ * For SCHED_FIFO/SCHED_RR, the value can run between [1..99]
+ */
FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
+ /**
+ * @FLAT_BINDER_FLAG_ACCEPTS_FDS: whether the node accepts fds.
+ */
FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
/**
+ * @FLAT_BINDER_FLAG_SCHED_POLICY_MASK: bit-mask for scheduling policy
+ *
+ * These two bits can be used to set the min scheduling policy at which
+ * transactions on this node should run. These match the UAPI
+ * scheduler policy values, eg:
+ * 00b: SCHED_NORMAL
+ * 01b: SCHED_FIFO
+ * 10b: SCHED_RR
+ * 11b: SCHED_BATCH
+ */
+ FLAT_BINDER_FLAG_SCHED_POLICY_MASK =
+ 3U << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT,
+
+ /**
+ * @FLAT_BINDER_FLAG_INHERIT_RT: whether the node inherits RT policy
+ *
+ * Only when set, calls into this node will inherit a real-time
+ * scheduling policy from the caller (for synchronous transactions).
+ */
+ FLAT_BINDER_FLAG_INHERIT_RT = 0x800,
+
+ /**
* @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts
*
* Only when set, causes senders to include their security
diff --git a/include/uapi/linux/dm-user.h b/include/uapi/linux/dm-user.h
new file mode 100644
index 0000000..6d8f535b
--- /dev/null
+++ b/include/uapi/linux/dm-user.h
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: LGPL-2.0+ WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2020 Google, Inc
+ * Copyright (C) 2020 Palmer Dabbelt <palmerdabbelt@google.com>
+ */
+
+#ifndef _LINUX_DM_USER_H
+#define _LINUX_DM_USER_H
+
+#include <linux/types.h>
+
+/*
+ * dm-user proxies device mapper ops between the kernel and userspace. It's
+ * essentially just an RPC mechanism: all kernel calls create a request,
+ * userspace handles that with a response. Userspace obtains requests via
+ * read() and provides responses via write().
+ *
+ * See Documentation/block/dm-user.rst for more information.
+ */
+
+#define DM_USER_REQ_MAP_READ 0
+#define DM_USER_REQ_MAP_WRITE 1
+#define DM_USER_REQ_MAP_FLUSH 2
+#define DM_USER_REQ_MAP_DISCARD 3
+#define DM_USER_REQ_MAP_SECURE_ERASE 4
+#define DM_USER_REQ_MAP_WRITE_SAME 5
+#define DM_USER_REQ_MAP_WRITE_ZEROES 6
+#define DM_USER_REQ_MAP_ZONE_OPEN 7
+#define DM_USER_REQ_MAP_ZONE_CLOSE 8
+#define DM_USER_REQ_MAP_ZONE_FINISH 9
+#define DM_USER_REQ_MAP_ZONE_APPEND 10
+#define DM_USER_REQ_MAP_ZONE_RESET 11
+#define DM_USER_REQ_MAP_ZONE_RESET_ALL 12
+
+#define DM_USER_REQ_MAP_FLAG_FAILFAST_DEV 0x00001
+#define DM_USER_REQ_MAP_FLAG_FAILFAST_TRANSPORT 0x00002
+#define DM_USER_REQ_MAP_FLAG_FAILFAST_DRIVER 0x00004
+#define DM_USER_REQ_MAP_FLAG_SYNC 0x00008
+#define DM_USER_REQ_MAP_FLAG_META 0x00010
+#define DM_USER_REQ_MAP_FLAG_PRIO 0x00020
+#define DM_USER_REQ_MAP_FLAG_NOMERGE 0x00040
+#define DM_USER_REQ_MAP_FLAG_IDLE 0x00080
+#define DM_USER_REQ_MAP_FLAG_INTEGRITY 0x00100
+#define DM_USER_REQ_MAP_FLAG_FUA 0x00200
+#define DM_USER_REQ_MAP_FLAG_PREFLUSH 0x00400
+#define DM_USER_REQ_MAP_FLAG_RAHEAD 0x00800
+#define DM_USER_REQ_MAP_FLAG_BACKGROUND 0x01000
+#define DM_USER_REQ_MAP_FLAG_NOWAIT 0x02000
+#define DM_USER_REQ_MAP_FLAG_CGROUP_PUNT 0x04000
+#define DM_USER_REQ_MAP_FLAG_NOUNMAP 0x08000
+#define DM_USER_REQ_MAP_FLAG_HIPRI 0x10000
+#define DM_USER_REQ_MAP_FLAG_DRV 0x20000
+#define DM_USER_REQ_MAP_FLAG_SWAP 0x40000
+
+#define DM_USER_RESP_SUCCESS 0
+#define DM_USER_RESP_ERROR 1
+#define DM_USER_RESP_UNSUPPORTED 2
+
+struct dm_user_message {
+ __u64 seq;
+ __u64 type;
+ __u64 flags;
+ __u64 sector;
+ __u64 len;
+ __u8 buf[];
+};
+
+#endif
diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h
index 3aff99f..37fedfd 100644
--- a/include/uapi/linux/fscrypt.h
+++ b/include/uapi/linux/fscrypt.h
@@ -130,7 +130,10 @@ struct fscrypt_add_key_arg {
__u32 key_id;
#define FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED 0x00000001
__u32 flags;
- __u32 __reserved[7];
+ __u32 __reserved[6];
+ /* N.B.: "temporary" flag, not reserved upstream */
+#define __FSCRYPT_ADD_KEY_FLAG_HW_WRAPPED 0x00000001
+ __u32 __flags;
__u8 raw[];
};
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index c13e1f9..0ce5f47 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -663,6 +663,7 @@ enum fuse_opcode {
FUSE_TMPFILE = 51,
FUSE_STATX = 52,
FUSE_COPY_FILE_RANGE_64 = 53,
+ FUSE_CANONICAL_PATH = 2016,
/* CUSE specific operations */
CUSE_INIT = 4096,
diff --git a/include/uapi/linux/icmp.h b/include/uapi/linux/icmp.h
index 163c099..d3242d5 100644
--- a/include/uapi/linux/icmp.h
+++ b/include/uapi/linux/icmp.h
@@ -97,7 +97,11 @@ struct icmphdr {
} echo;
__be32 gateway;
struct {
+#ifdef __BIONIC__
+ __be16 __linux_unused;
+#else
__be16 __unused;
+#endif
__be16 mtu;
} frag;
__u8 reserved[4];
diff --git a/include/uapi/linux/incrementalfs.h b/include/uapi/linux/incrementalfs.h
new file mode 100644
index 0000000..f8338af
--- /dev/null
+++ b/include/uapi/linux/incrementalfs.h
@@ -0,0 +1,590 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Userspace interface for Incremental FS.
+ *
+ * Incremental FS is special-purpose Linux virtual file system that allows
+ * execution of a program while its binary and resource files are still being
+ * lazily downloaded over the network, USB etc.
+ *
+ * Copyright 2019 Google LLC
+ */
+#ifndef _UAPI_LINUX_INCREMENTALFS_H
+#define _UAPI_LINUX_INCREMENTALFS_H
+
+#include <linux/limits.h>
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/xattr.h>
+
+/* ===== constants ===== */
+#define INCFS_NAME "incremental-fs"
+
+/*
+ * Magic number used in file header and in memory superblock
+ * Note that it is a 5 byte unsigned long. Thus on 32 bit kernels, it is
+ * truncated to a 4 byte number
+ */
+#define INCFS_MAGIC_NUMBER (0x5346434e49ul & ULONG_MAX)
+
+#define INCFS_DATA_FILE_BLOCK_SIZE 4096
+#define INCFS_HEADER_VER 1
+
+/* TODO: This value is assumed in incfs_copy_signature_info_from_user to be the
+ * actual signature length. Set back to 64 when fixed.
+ */
+#define INCFS_MAX_HASH_SIZE 32
+#define INCFS_MAX_FILE_ATTR_SIZE 512
+
+#define INCFS_INDEX_NAME ".index"
+#define INCFS_INCOMPLETE_NAME ".incomplete"
+#define INCFS_PENDING_READS_FILENAME ".pending_reads"
+#define INCFS_LOG_FILENAME ".log"
+#define INCFS_BLOCKS_WRITTEN_FILENAME ".blocks_written"
+#define INCFS_XATTR_ID_NAME (XATTR_USER_PREFIX "incfs.id")
+#define INCFS_XATTR_SIZE_NAME (XATTR_USER_PREFIX "incfs.size")
+#define INCFS_XATTR_METADATA_NAME (XATTR_USER_PREFIX "incfs.metadata")
+#define INCFS_XATTR_VERITY_NAME (XATTR_USER_PREFIX "incfs.verity")
+
+#define INCFS_MAX_SIGNATURE_SIZE 8096
+#define INCFS_SIGNATURE_VERSION 2
+#define INCFS_SIGNATURE_SECTIONS 2
+
+#define INCFS_IOCTL_BASE_CODE 'g'
+
+/* ===== ioctl requests on the command dir ===== */
+
+/*
+ * Create a new file
+ * May only be called on .pending_reads file
+ */
+#define INCFS_IOC_CREATE_FILE \
+ _IOWR(INCFS_IOCTL_BASE_CODE, 30, struct incfs_new_file_args)
+
+/* Read file signature */
+#define INCFS_IOC_READ_FILE_SIGNATURE \
+ _IOR(INCFS_IOCTL_BASE_CODE, 31, struct incfs_get_file_sig_args)
+
+/*
+ * Fill in one or more data block. This may only be called on a handle
+ * passed as a parameter to INCFS_IOC_PERMIT_FILLING
+ *
+ * Returns number of blocks filled in, or error if none were
+ */
+#define INCFS_IOC_FILL_BLOCKS \
+ _IOR(INCFS_IOCTL_BASE_CODE, 32, struct incfs_fill_blocks)
+
+/*
+ * Permit INCFS_IOC_FILL_BLOCKS on the given file descriptor
+ * May only be called on .pending_reads file
+ *
+ * Returns 0 on success or error
+ */
+#define INCFS_IOC_PERMIT_FILL \
+ _IOW(INCFS_IOCTL_BASE_CODE, 33, struct incfs_permit_fill)
+
+/*
+ * Fills buffer with ranges of populated blocks
+ *
+ * Returns 0 if all ranges written
+ * error otherwise
+ *
+ * Either way, range_buffer_size_out is set to the number
+ * of bytes written. Should be set to 0 by caller. The ranges
+ * filled are valid, but if an error was returned there might
+ * be more ranges to come.
+ *
+ * Ranges are ranges of filled blocks:
+ *
+ * 1 2 7 9
+ *
+ * means blocks 1, 2, 7, 8, 9 are filled, 0, 3, 4, 5, 6 and 10 on
+ * are not
+ *
+ * If hashing is enabled for the file, the hash blocks are simply
+ * treated as though they immediately followed the data blocks.
+ */
+#define INCFS_IOC_GET_FILLED_BLOCKS \
+ _IOR(INCFS_IOCTL_BASE_CODE, 34, struct incfs_get_filled_blocks_args)
+
+/*
+ * Creates a new mapped file
+ * May only be called on .pending_reads file
+ */
+#define INCFS_IOC_CREATE_MAPPED_FILE \
+ _IOWR(INCFS_IOCTL_BASE_CODE, 35, struct incfs_create_mapped_file_args)
+
+/*
+ * Get number of blocks, total and filled
+ * May only be called on .pending_reads file
+ */
+#define INCFS_IOC_GET_BLOCK_COUNT \
+ _IOR(INCFS_IOCTL_BASE_CODE, 36, struct incfs_get_block_count_args)
+
+/*
+ * Get per UID read timeouts
+ * May only be called on .pending_reads file
+ */
+#define INCFS_IOC_GET_READ_TIMEOUTS \
+ _IOR(INCFS_IOCTL_BASE_CODE, 37, struct incfs_get_read_timeouts_args)
+
+/*
+ * Set per UID read timeouts
+ * May only be called on .pending_reads file
+ */
+#define INCFS_IOC_SET_READ_TIMEOUTS \
+ _IOW(INCFS_IOCTL_BASE_CODE, 38, struct incfs_set_read_timeouts_args)
+
+/*
+ * Get last read error
+ * May only be called on .pending_reads file
+ */
+#define INCFS_IOC_GET_LAST_READ_ERROR \
+ _IOW(INCFS_IOCTL_BASE_CODE, 39, struct incfs_get_last_read_error_args)
+
+/* ===== sysfs feature flags ===== */
+/*
+ * Each flag is represented by a file in /sys/fs/incremental-fs/features
+ * If the file exists the feature is supported
+ * Also the file contents will be the line "supported"
+ */
+
+/*
+ * Basic flag stating that the core incfs file system is available
+ */
+#define INCFS_FEATURE_FLAG_COREFS "corefs"
+
+/*
+ * zstd compression support
+ */
+#define INCFS_FEATURE_FLAG_ZSTD "zstd"
+
+/*
+ * v2 feature set support. Covers:
+ * INCFS_IOC_CREATE_MAPPED_FILE
+ * INCFS_IOC_GET_BLOCK_COUNT
+ * INCFS_IOC_GET_READ_TIMEOUTS/INCFS_IOC_SET_READ_TIMEOUTS
+ * .blocks_written status file
+ * .incomplete folder
+ * report_uid mount option
+ */
+#define INCFS_FEATURE_FLAG_V2 "v2"
+
+enum incfs_compression_alg {
+ COMPRESSION_NONE = 0,
+ COMPRESSION_LZ4 = 1,
+ COMPRESSION_ZSTD = 2,
+};
+
+enum incfs_block_flags {
+ INCFS_BLOCK_FLAGS_NONE = 0,
+ INCFS_BLOCK_FLAGS_HASH = 1,
+};
+
+typedef struct {
+ __u8 bytes[16];
+} incfs_uuid_t __attribute__((aligned (8)));
+
+/*
+ * Description of a pending read. A pending read - a read call by
+ * a userspace program for which the filesystem currently doesn't have data.
+ *
+ * Reads from .pending_reads and .log return an array of these structure
+ */
+struct incfs_pending_read_info {
+ /* Id of a file that is being read from. */
+ incfs_uuid_t file_id;
+
+ /* A number of microseconds since system boot to the read. */
+ __aligned_u64 timestamp_us;
+
+ /* Index of a file block that is being read. */
+ __u32 block_index;
+
+ /* A serial number of this pending read. */
+ __u32 serial_number;
+};
+
+/*
+ * Description of a pending read. A pending read - a read call by
+ * a userspace program for which the filesystem currently doesn't have data.
+ *
+ * This version of incfs_pending_read_info is used whenever the file system is
+ * mounted with the report_uid flag
+ */
+struct incfs_pending_read_info2 {
+ /* Id of a file that is being read from. */
+ incfs_uuid_t file_id;
+
+ /* A number of microseconds since system boot to the read. */
+ __aligned_u64 timestamp_us;
+
+ /* Index of a file block that is being read. */
+ __u32 block_index;
+
+ /* A serial number of this pending read. */
+ __u32 serial_number;
+
+ /* The UID of the reading process */
+ __u32 uid;
+
+ __u32 reserved;
+};
+
+/*
+ * Description of a data or hash block to add to a data file.
+ */
+struct incfs_fill_block {
+ /* Index of a data block. */
+ __u32 block_index;
+
+ /* Length of data */
+ __u32 data_len;
+
+ /*
+ * A pointer to an actual data for the block.
+ *
+ * Equivalent to: __u8 *data;
+ */
+ __aligned_u64 data;
+
+ /*
+ * Compression algorithm used to compress the data block.
+ * Values from enum incfs_compression_alg.
+ */
+ __u8 compression;
+
+ /* Values from enum incfs_block_flags */
+ __u8 flags;
+
+ __u16 reserved1;
+
+ __u32 reserved2;
+
+ __aligned_u64 reserved3;
+};
+
+/*
+ * Description of a number of blocks to add to a data file
+ *
+ * Argument for INCFS_IOC_FILL_BLOCKS
+ */
+struct incfs_fill_blocks {
+ /* Number of blocks */
+ __u64 count;
+
+ /* A pointer to an array of incfs_fill_block structs */
+ __aligned_u64 fill_blocks;
+};
+
+/*
+ * Permit INCFS_IOC_FILL_BLOCKS on the given file descriptor
+ * May only be called on .pending_reads file
+ *
+ * Argument for INCFS_IOC_PERMIT_FILL
+ */
+struct incfs_permit_fill {
+ /* File to permit fills on */
+ __u32 file_descriptor;
+};
+
+enum incfs_hash_tree_algorithm {
+ INCFS_HASH_TREE_NONE = 0,
+ INCFS_HASH_TREE_SHA256 = 1
+};
+
+/*
+ * Create a new file or directory.
+ */
+struct incfs_new_file_args {
+ /* Id of a file to create. */
+ incfs_uuid_t file_id;
+
+ /*
+ * Total size of the new file. Ignored if S_ISDIR(mode).
+ */
+ __aligned_u64 size;
+
+ /*
+ * File mode. Permissions and dir flag.
+ */
+ __u16 mode;
+
+ __u16 reserved1;
+
+ __u32 reserved2;
+
+ /*
+ * A pointer to a null-terminated relative path to the file's parent
+ * dir.
+ * Max length: PATH_MAX
+ *
+ * Equivalent to: char *directory_path;
+ */
+ __aligned_u64 directory_path;
+
+ /*
+ * A pointer to a null-terminated file's name.
+ * Max length: PATH_MAX
+ *
+ * Equivalent to: char *file_name;
+ */
+ __aligned_u64 file_name;
+
+ /*
+ * A pointer to a file attribute to be set on creation.
+ *
+ * Equivalent to: u8 *file_attr;
+ */
+ __aligned_u64 file_attr;
+
+ /*
+ * Length of the data buffer specfied by file_attr.
+ * Max value: INCFS_MAX_FILE_ATTR_SIZE
+ */
+ __u32 file_attr_len;
+
+ __u32 reserved4;
+
+ /*
+ * Points to an APK V4 Signature data blob
+ * Signature must have two sections
+ * Format is:
+ * u32 version
+ * u32 size_of_hash_info_section
+ * u8 hash_info_section[]
+ * u32 size_of_signing_info_section
+ * u8 signing_info_section[]
+ *
+ * Note that incfs does not care about what is in signing_info_section
+ *
+ * hash_info_section has following format:
+ * u32 hash_algorithm; // Must be SHA256 == 1
+ * u8 log2_blocksize; // Must be 12 for 4096 byte blocks
+ * u32 salt_size;
+ * u8 salt[];
+ * u32 hash_size;
+ * u8 root_hash[];
+ */
+ __aligned_u64 signature_info;
+
+ /* Size of signature_info */
+ __aligned_u64 signature_size;
+
+ __aligned_u64 reserved6;
+};
+
+/*
+ * Request a digital signature blob for a given file.
+ * Argument for INCFS_IOC_READ_FILE_SIGNATURE ioctl
+ */
+struct incfs_get_file_sig_args {
+ /*
+ * A pointer to the data buffer to save an signature blob to.
+ *
+ * Equivalent to: u8 *file_signature;
+ */
+ __aligned_u64 file_signature;
+
+ /* Size of the buffer at file_signature. */
+ __u32 file_signature_buf_size;
+
+ /*
+ * Number of bytes save file_signature buffer.
+ * It is set after ioctl done.
+ */
+ __u32 file_signature_len_out;
+};
+
+struct incfs_filled_range {
+ __u32 begin;
+ __u32 end;
+};
+
+/*
+ * Request ranges of filled blocks
+ * Argument for INCFS_IOC_GET_FILLED_BLOCKS
+ */
+struct incfs_get_filled_blocks_args {
+ /*
+ * A buffer to populate with ranges of filled blocks
+ *
+ * Equivalent to struct incfs_filled_ranges *range_buffer
+ */
+ __aligned_u64 range_buffer;
+
+ /* Size of range_buffer */
+ __u32 range_buffer_size;
+
+ /* Start index to read from */
+ __u32 start_index;
+
+ /*
+ * End index to read to. 0 means read to end. This is a range,
+ * so incfs will read from start_index to end_index - 1
+ */
+ __u32 end_index;
+
+ /* Actual number of blocks in file */
+ __u32 total_blocks_out;
+
+ /* The number of data blocks in file */
+ __u32 data_blocks_out;
+
+ /* Number of bytes written to range buffer */
+ __u32 range_buffer_size_out;
+
+ /* Sector scanned up to, if the call was interrupted */
+ __u32 index_out;
+};
+
+/*
+ * Create a new mapped file
+ * Argument for INCFS_IOC_CREATE_MAPPED_FILE
+ */
+struct incfs_create_mapped_file_args {
+ /*
+ * Total size of the new file.
+ */
+ __aligned_u64 size;
+
+ /*
+ * File mode. Permissions and dir flag.
+ */
+ __u16 mode;
+
+ __u16 reserved1;
+
+ __u32 reserved2;
+
+ /*
+ * A pointer to a null-terminated relative path to the incfs mount
+ * point
+ * Max length: PATH_MAX
+ *
+ * Equivalent to: char *directory_path;
+ */
+ __aligned_u64 directory_path;
+
+ /*
+ * A pointer to a null-terminated file name.
+ * Max length: PATH_MAX
+ *
+ * Equivalent to: char *file_name;
+ */
+ __aligned_u64 file_name;
+
+ /* Id of source file to map. */
+ incfs_uuid_t source_file_id;
+
+ /*
+ * Offset in source file to start mapping. Must be a multiple of
+ * INCFS_DATA_FILE_BLOCK_SIZE
+ */
+ __aligned_u64 source_offset;
+};
+
+/*
+ * Get information about the blocks in this file
+ * Argument for INCFS_IOC_GET_BLOCK_COUNT
+ */
+struct incfs_get_block_count_args {
+ /* Total number of data blocks in the file */
+ __u32 total_data_blocks_out;
+
+ /* Number of filled data blocks in the file */
+ __u32 filled_data_blocks_out;
+
+ /* Total number of hash blocks in the file */
+ __u32 total_hash_blocks_out;
+
+ /* Number of filled hash blocks in the file */
+ __u32 filled_hash_blocks_out;
+};
+
+/* Description of timeouts for one UID */
+struct incfs_per_uid_read_timeouts {
+ /* UID to apply these timeouts to */
+ __u32 uid;
+
+ /*
+ * Min time in microseconds to read any block. Note that this doesn't
+ * apply to reads which are satisfied from the page cache.
+ */
+ __u32 min_time_us;
+
+ /*
+ * Min time in microseconds to satisfy a pending read. Any pending read
+ * which is filled before this time will be delayed so that the total
+ * read time >= this value.
+ */
+ __u32 min_pending_time_us;
+
+ /*
+ * Max time in microseconds to satisfy a pending read before the read
+ * times out. If set to U32_MAX, defaults to mount options
+ * read_timeout_ms * 1000. Must be >= min_pending_time_us
+ */
+ __u32 max_pending_time_us;
+};
+
+/*
+ * Get the read timeouts array
+ * Argument for INCFS_IOC_GET_READ_TIMEOUTS
+ */
+struct incfs_get_read_timeouts_args {
+ /*
+ * A pointer to a buffer to fill with the current timeouts
+ *
+ * Equivalent to struct incfs_per_uid_read_timeouts *
+ */
+ __aligned_u64 timeouts_array;
+
+ /* Size of above buffer in bytes */
+ __u32 timeouts_array_size;
+
+ /* Size used in bytes, or size needed if -ENOMEM returned */
+ __u32 timeouts_array_size_out;
+};
+
+/*
+ * Set the read timeouts array
+ * Arguments for INCFS_IOC_SET_READ_TIMEOUTS
+ */
+struct incfs_set_read_timeouts_args {
+ /*
+ * A pointer to an array containing the new timeouts
+ * This will replace any existing timeouts
+ *
+ * Equivalent to struct incfs_per_uid_read_timeouts *
+ */
+ __aligned_u64 timeouts_array;
+
+ /* Size of above array in bytes. Must be < 256 */
+ __u32 timeouts_array_size;
+};
+
+/*
+ * Get last read error struct
+ * Arguments for INCFS_IOC_GET_LAST_READ_ERROR
+ */
+struct incfs_get_last_read_error_args {
+ /* File id of last file that had a read error */
+ incfs_uuid_t file_id_out;
+
+ /* Time of last read error, in us, from CLOCK_MONOTONIC */
+ __u64 time_us_out;
+
+ /* Index of page that was being read at last read error */
+ __u32 page_out;
+
+ /* errno of last read error */
+ __u32 errno_out;
+
+ /* uid of last read error */
+ __u32 uid_out;
+
+ __u32 reserved1;
+ __u64 reserved2;
+};
+
+#endif /* _UAPI_LINUX_INCREMENTALFS_H */
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 4bdb6a1..3528168 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -643,6 +643,10 @@
#define KEY_EPRIVACY_SCREEN_ON 0x252
#define KEY_EPRIVACY_SCREEN_OFF 0x253
+#define KEY_ACTION_ON_SELECTION 0x254 /* AL Action on Selection (HUTRR119) */
+#define KEY_CONTEXTUAL_INSERT 0x255 /* AL Contextual Insertion (HUTRR119) */
+#define KEY_CONTEXTUAL_QUERY 0x256 /* AL Contextual Query (HUTRR119) */
+
#define KEY_KBDINPUTASSIST_PREV 0x260
#define KEY_KBDINPUTASSIST_NEXT 0x261
#define KEY_KBDINPUTASSIST_PREVGROUP 0x262
diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h
index ff80322..6f2f272 100644
--- a/include/uapi/linux/kernel-page-flags.h
+++ b/include/uapi/linux/kernel-page-flags.h
@@ -7,7 +7,7 @@
*/
#define KPF_LOCKED 0
-#define KPF_ERROR 1 /* Now unused */
+#define KPF_ERROR 1
#define KPF_REFERENCED 2
#define KPF_UPTODATE 3
#define KPF_DIRTY 4
diff --git a/include/uapi/linux/netfilter/xt_IDLETIMER.h b/include/uapi/linux/netfilter/xt_IDLETIMER.h
index 7bfb31a..104ac32 100644
--- a/include/uapi/linux/netfilter/xt_IDLETIMER.h
+++ b/include/uapi/linux/netfilter/xt_IDLETIMER.h
@@ -33,7 +33,7 @@ struct idletimer_tg_info_v1 {
char label[MAX_IDLETIMER_LABEL_SIZE];
- __u8 send_nl_msg; /* unused: for compatibility with Android */
+ __u8 send_nl_msg;
__u8 timer_type;
/* for kernel module internal use only */
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index eda4492..cc88051 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -70,7 +70,7 @@
* Common stuff for both V4L1 and V4L2
* Moved from videodev.h
*/
-#define VIDEO_MAX_FRAME 32
+#define VIDEO_MAX_FRAME 64
#define VIDEO_MAX_PLANES 8
/*
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 8563b66..57498a4 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -1424,6 +1424,11 @@ static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
}
+int ufshcd_query_flag_retry(struct ufs_hba *hba,
+ enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res);
+
+int ufshcd_bkops_ctrl(struct ufs_hba *hba);
+
void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit);
void ufshcd_fixup_dev_quirks(struct ufs_hba *hba,
const struct ufs_dev_quirk *fixups);
diff --git a/include/vdso/TEST_MAPPING b/include/vdso/TEST_MAPPING
new file mode 100644
index 0000000..efff780
--- /dev/null
+++ b/include/vdso/TEST_MAPPING
@@ -0,0 +1,318 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.PhoneAccountTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/init/Kconfig b/init/Kconfig
index b55deae..72c7375 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -242,7 +242,7 @@
config WERROR
bool "Compile the kernel with warnings as errors"
- default COMPILE_TEST
+ default y
help
A kernel build should not cause any compiler warnings, and this
enables the '-Werror' (for C) and '-Dwarnings' (for Rust) flags
@@ -1458,6 +1458,16 @@
desktop applications. Task group autogeneration is currently based
upon task session.
+config RT_SOFTIRQ_AWARE_SCHED
+ bool "Improve RT scheduling during long softirq execution"
+ depends on SMP && !PREEMPT_RT
+ default n
+ help
+ Enable an optimization which tries to avoid placing RT tasks on CPUs
+ occupied by nonpreemptible tasks, such as a long softirq or CPUs
+ which may soon block preemptions, such as a CPU running a ksoftirq
+ thread which handles slow softirqs.
+
config RELAY
bool "Kernel->user space relay support (formerly relayfs)"
select IRQ_WORK
@@ -2283,3 +2293,5 @@
# <asm/syscall_wrapper.h>.
config ARCH_HAS_SYSCALL_WRAPPER
def_bool n
+
+source "init/Kconfig.gki"
diff --git a/init/Kconfig.gki b/init/Kconfig.gki
new file mode 100644
index 0000000..d399d6d
--- /dev/null
+++ b/init/Kconfig.gki
@@ -0,0 +1,335 @@
+config GKI_HIDDEN_DRM_CONFIGS
+ bool "Hidden DRM configs needed for GKI"
+ select AUXILIARY_BUS if (X86)
+ select DRM_KMS_HELPER if (HAS_IOMEM && DRM)
+ select DRM_GEM_SHMEM_HELPER if (DRM)
+ select DRM_MIPI_DSI
+ select DRM_PRIVACY_SCREEN if (X86)
+ select DRM_TTM if (HAS_IOMEM && DRM)
+ select DRM_BRIDGE_CONNECTOR if (DRM_DISPLAY_HELPER)
+ select DRM_DISPLAY_HDCP_HELPER if (DRM_DISPLAY_HELPER)
+ select DRM_DISPLAY_HDMI_HELPER if (DRM_DISPLAY_HELPER)
+ select HMM_MIRROR if (DRM)
+ select VIDEOMODE_HELPERS
+ select WANT_DEV_COREDUMP
+ select INTERVAL_TREE
+ help
+ Dummy config option used to enable hidden DRM configs.
+ These are normally selected implicitly when including a
+ DRM module, but for GKI, the modules are built out-of-tree.
+
+config GKI_HIDDEN_MCP251XFD_CONFIGS
+ bool "Hidden MCP251XFD configs needed for GKI"
+ select CAN_RX_OFFLOAD
+ help
+ Dummy config option used to enable hidden MCP251XFD configs.
+ These are normally selected implicitly when including a
+ MCP251XFD module, but for GKI, the modules are built out-of-tree.
+
+config GKI_HIDDEN_REGMAP_CONFIGS
+ bool "Hidden Regmap configs needed for GKI"
+ select REGMAP_IRQ
+ select REGMAP_MMIO
+ select REGMAP_SPMI
+ select SPMI
+ help
+ Dummy config option used to enable hidden regmap configs.
+ These are normally selected implicitly when a module
+ that relies on it is configured.
+
+config GKI_HIDDEN_CRYPTO_CONFIGS
+ bool "Hidden CRYPTO configs needed for GKI"
+ select CRYPTO_ENGINE
+ help
+ Dummy config option used to enable hidden CRYPTO configs.
+ These are normally selected implicitly when a module
+ that relies on it is configured.
+
+config GKI_HIDDEN_SND_CONFIGS
+ bool "Hidden SND configs needed for GKI"
+ select SND_COMPRESS_ACCEL
+ select SND_VMASTER
+ select SND_PCM_ELD
+ select SND_JACK
+ select SND_JACK_INPUT_DEV
+ select SND_INTEL_NHLT if (ACPI)
+ help
+ Dummy config option used to enable hidden SND configs.
+ These are normally selected implicitly when a module
+ that relies on it is configured.
+
+config GKI_HIDDEN_SND_SOC_CONFIGS
+ bool "Hidden SND_SOC configs needed for GKI"
+ select SND_SOC_GENERIC_DMAENGINE_PCM if (SND_SOC && SND)
+ select SND_PCM_IEC958
+ select SND_SOC_COMPRESS if (SND_SOC && SND)
+ select SND_SOC_TOPOLOGY if (SND_SOC && SND)
+ select DMADEVICES
+ select DMA_VIRTUAL_CHANNELS
+ help
+ Dummy config option used to enable hidden SND_SOC configs.
+ These are normally selected implicitly when a module
+ that relies on it is configured.
+
+config GKI_HIDDEN_UFS_CONFIGS
+ bool "Hidden UFS configs needed for GKI"
+ select SCSI_UFS_VARIABLE_SG_ENTRY_SIZE if SCSI_UFS_CRYPTO
+ help
+ Dummy config option used to enable hidden UFS configs.
+ These are normally selected implicitly when a module
+ that relies on it is configured.
+
+config GKI_HIDDEN_MMC_CONFIGS
+ bool "Hidden MMC configs needed for GKI"
+ select MMC_SDHCI_IO_ACCESSORS if (MMC_SDHCI)
+ help
+ Dummy config option used to enable hidden MMC configs.
+ These are normally selected implicitly when a module
+ that relies on it is configured.
+
+config GKI_HIDDEN_GPIO_CONFIGS
+ bool "Hidden GPIO configs needed for GKI"
+ select PINCTRL_SINGLE if (PINCTRL && OF && HAS_IOMEM)
+ select GPIO_PL061 if (HAS_IOMEM && ARM_AMBA && GPIOLIB)
+ select GPIO_SWNODE_UNDEFINED if (X86)
+ select GPIOLIB_IRQCHIP if (GPIOLIB && X86)
+ help
+ Dummy config option used to enable hidden GPIO configs.
+ These are normally selected implicitly when a module
+ that relies on it is configured.
+
+config GKI_HIDDEN_X86_CONFIGS
+ bool "Hidden X86 configs needed for GKI"
+ select ACPI_THERMAL_LIB if (X86 && ACPI)
+ select CHECK_SIGNATURE if (X86 && DMI)
+ select INTEL_SCU if (X86)
+ select INTEL_TCC if (X86)
+ select P2SB if (X86)
+ help
+ Dummy config option used to enable hidden X86 configs.
+ These are normally selected implicitly when a module
+ that relies on it is configured.
+
+# If this file is included on a 32-bit allmodconfig build, the select for
+# IOMMU_IO_PGTABLE_LPAE will trigger an "unmet direct dependency" warning
+# because this option is incompatible with GENERIC_ATOMIC64, which is
+# required by CPU_V6 and implied by ARCH_BCM2835. ARCH_BCM2835 is enabled
+# in the ARM defconfig (multi_v7_defconfig) and implied by many BCM2835
+# drivers, so it is lower impact to disable IOMMU_IO_PGTABLE_LPAE here
+config GKI_HIDDEN_QCOM_CONFIGS
+ bool "Hidden QCOM configs needed for GKI"
+ select QCOM_SMEM_STATE
+ select QCOM_GDSC if (ARCH_QCOM)
+ select IOMMU_IO_PGTABLE_LPAE if (ARCH_QCOM && 64BIT)
+ select INTERCONNECT_QCOM if (ARCH_QCOM)
+ select AUXILIARY_BUS if (ARCH_QCOM)
+
+ help
+ Dummy config option used to enable hidden QCOM configs.
+ These are normally selected implicitly when a module
+ that relies on it is configured.
+
+config GKI_HIDDEN_MEDIA_CONFIGS
+ bool "Hidden Media configs needed for GKI"
+ select VIDEOBUF2_CORE
+ select V4L2_MEM2MEM_DEV
+ select MEDIA_CONTROLLER
+ select MEDIA_CONTROLLER_REQUEST_API
+ select MEDIA_SUPPORT
+ select FRAME_VECTOR
+ select CEC_CORE
+ select CEC_NOTIFIER
+ select CEC_PIN
+ select VIDEOBUF2_DMA_CONTIG
+ select VIDEOBUF2_DMA_SG
+ select VIDEO_V4L2_SUBDEV_API
+ help
+ Dummy config option used to enable hidden media configs.
+ These are normally selected implicitly when a module
+ that relies on it is configured.
+
+config GKI_HIDDEN_VIRTUAL_CONFIGS
+ bool "Hidden Virtual configs needed for GKI"
+ select HVC_DRIVER
+ select DIMLIB
+ help
+ Dummy config option used to enable hidden virtual device configs.
+ These are normally selected implicitly when a module
+ that relies on it is configured.
+
+# LEGACY_WEXT_ALLCONFIG Discussed upstream, soundly rejected as a unique
+# problem for GKI to solve. It should be noted that these extensions are
+# in-effect deprecated and generally unsupported and we should pressure
+# the SOC vendors to drop any modules that require these extensions.
+config GKI_LEGACY_WEXT_ALLCONFIG
+ bool "Hidden wireless extension configs needed for GKI"
+ select WIRELESS_EXT
+ select WEXT_CORE
+ select WEXT_PROC
+ select WEXT_SPY
+ select WEXT_PRIV
+ help
+ Dummy config option used to enable all the hidden legacy wireless
+ extensions to the core wireless network functionality used by
+ add-in modules.
+
+ If you are not building a kernel to be used for a variety of
+ out-of-kernel built wireless modules, say N here.
+
+config GKI_HIDDEN_USB_CONFIGS
+ bool "Hidden USB configurations needed for GKI"
+ select USB_PHY
+ help
+ Dummy config option used to enable all USB related hidden configs.
+ These configurations are usually only selected by another config
+ option or a combination of them.
+
+ If you are not building a kernel to be used for a variety of
+ out-of-kernel build USB drivers, say N here.
+
+config GKI_HIDDEN_SOC_BUS_CONFIGS
+ bool "Hidden SoC bus configuration needed for GKI"
+ select SOC_BUS
+ help
+ Dummy config option used to enable SOC_BUS hidden Kconfig.
+ The configuration is required for SoCs to register themselves to the bus.
+
+ If you are not building a kernel to be used for a variety of SoCs and
+ out-of-tree drivers, say N here.
+
+config GKI_HIDDEN_RPMSG_CONFIGS
+ bool "Hidden RPMSG configuration needed for GKI"
+ select RPMSG
+ help
+ Dummy config option used to enable the hidden RPMSG config.
+ This configuration is usually only selected by another config
+ option or a combination of them.
+
+ If you are not building a kernel to be used for a variety of
+ out-of-kernel build RPMSG drivers, say N here.
+
+config GKI_HIDDEN_GPU_CONFIGS
+ bool "Hidden GPU configuration needed for GKI"
+ select TRACE_GPU_MEM
+ help
+ Dummy config option used to enable the hidden GPU config.
+ These are normally selected implicitly when a module
+ that relies on it is configured.
+
+config GKI_HIDDEN_IRQ_CONFIGS
+ bool "Hidden IRQ configuration needed for GKI"
+ select GENERIC_IRQ_CHIP
+ select IRQ_DOMAIN_HIERARCHY
+ select IRQ_FASTEOI_HIERARCHY_HANDLERS
+ help
+ Dummy config option used to enable GENERIC_IRQ_CHIP hidden
+ config, required by various SoC platforms. This is usually
+ selected by ARCH_*.
+
+config GKI_HIDDEN_HYPERVISOR_CONFIGS
+ bool "Hidden hypervisor configuration needed for GKI"
+ select SYS_HYPERVISOR
+ help
+ Dummy config option used to enable the SYS_HYPERVISOR hidden
+ config, required by various SoC platforms. This is usually
+ selected by XEN or S390.
+
+config GKI_HIDDEN_NET_CONFIGS
+ bool "Hidden networking configuration needed for GKI"
+ select PAGE_POOL
+ select NET_PTP_CLASSIFY
+ select NET_DEVLINK
+ help
+ Dummy config option used to enable the networking hidden
+ config, required by various SoC platforms.
+
+config GKI_HIDDEN_PHY_CONFIGS
+ bool "Hidden PHY configuration needed for GKI"
+ select GENERIC_PHY_MIPI_DPHY
+ help
+ Dummy config option used to enable the hidden PHY configs,
+ required by various SoC platforms.
+
+config GKI_HIDDEN_MM_CONFIGS
+ bool "Hidden MM configuration needed for GKI"
+ select PAGE_REPORTING
+ select BALLOON_COMPACTION
+ select MEMORY_BALLOON
+ help
+ Dummy config option used to enable hidden MM configs,
+ currently required for VIRTIO_BALLOON
+
+config GKI_HIDDEN_ETHERNET_CONFIGS
+ bool "Hidden Ethernet configuration needed for GKI"
+ select PHYLINK
+ help
+ Dummy config option used to enable the hidden Ethernet PHYLINK
+ configs, required by various ethernet devices.
+
+config GKI_HIDDEN_IOMMU_CONFIGS
+ bool "Hidden IOMMU configuration needed for GKI"
+ select IOMMU_SVA if (ARM64 || X86)
+ help
+ Dummy config used to enable hidden IOMMU configs. These are
+ normally selected implicitly when a module that relies on it is
+ configured.
+
+config GKI_HIDDEN_DMA_CONFIGS
+ bool "Hidden DMA configuration needed for GKI"
+ select ASYNC_TX_ENABLE_CHANNEL_SWITCH
+ select DMA_ENGINE_RAID
+ help
+ Dummy config option used to enable the hidden DMA configs,
+ required by various SoC platforms.
+
+config GKI_HIDDEN_XFRM_OFFLOAD_CONFIGS
+ bool "Hidden IPsec offload configuration needed for GKI"
+ select XFRM_OFFLOAD
+ help
+ Dummy config option used to enable the IPsec offload hidden
+ config, required by various SoC platforms.
+
+# Atrocities needed for
+# a) building GKI modules in separate tree, or
+# b) building drivers that are not modularizable
+#
+# All of these should be reworked into an upstream solution
+# if possible.
+#
+config GKI_HACKS_TO_FIX
+ bool "GKI Dummy config options"
+ select GKI_HIDDEN_CRYPTO_CONFIGS
+ select GKI_HIDDEN_DRM_CONFIGS
+ select GKI_HIDDEN_MCP251XFD_CONFIGS
+ select GKI_HIDDEN_REGMAP_CONFIGS
+ select GKI_HIDDEN_SND_CONFIGS
+ select GKI_HIDDEN_SND_SOC_CONFIGS
+ select GKI_HIDDEN_UFS_CONFIGS
+ select GKI_HIDDEN_MMC_CONFIGS
+ select GKI_HIDDEN_GPIO_CONFIGS
+ select GKI_HIDDEN_X86_CONFIGS
+ select GKI_HIDDEN_QCOM_CONFIGS
+ select GKI_LEGACY_WEXT_ALLCONFIG
+ select GKI_HIDDEN_MEDIA_CONFIGS
+ select GKI_HIDDEN_VIRTUAL_CONFIGS
+ select GKI_HIDDEN_USB_CONFIGS
+ select GKI_HIDDEN_SOC_BUS_CONFIGS
+ select GKI_HIDDEN_RPMSG_CONFIGS
+ select GKI_HIDDEN_GPU_CONFIGS
+ select GKI_HIDDEN_IRQ_CONFIGS
+ select GKI_HIDDEN_HYPERVISOR_CONFIGS
+ select GKI_HIDDEN_NET_CONFIGS
+ select GKI_HIDDEN_PHY_CONFIGS
+ select GKI_HIDDEN_MM_CONFIGS
+ select GKI_HIDDEN_ETHERNET_CONFIGS
+ select GKI_HIDDEN_DMA_CONFIGS
+ select GKI_HIDDEN_IOMMU_CONFIGS
+ select GKI_HIDDEN_XFRM_OFFLOAD_CONFIGS
+ select MIN_HEAP
+
+ help
+ Dummy config option used to enable core functionality used by
+ modules that may not be selectable in this config.
+
+ Unless you are building a GKI kernel to be used with modules
+ built from a different config, say N here.
diff --git a/io_uring/TEST_MAPPING b/io_uring/TEST_MAPPING
new file mode 100644
index 0000000..aada857
--- /dev/null
+++ b/io_uring/TEST_MAPPING
@@ -0,0 +1,245 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.PhoneAccountTest"
+ }
+ ]
+ }
+ ]
+}
diff --git a/kernel/bpf/TEST_MAPPING b/kernel/bpf/TEST_MAPPING
new file mode 100644
index 0000000..1c8bacc
--- /dev/null
+++ b/kernel/bpf/TEST_MAPPING
@@ -0,0 +1,329 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.ExtendedInCallServiceTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 274039e3..36c67788 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -46,6 +46,8 @@
#include <net/netkit.h>
#include <net/tcx.h>
+#include <trace/hooks/syscall_check.h>
+
#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
(map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
(map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
@@ -6201,6 +6203,8 @@ static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size)
if (copy_from_bpfptr(&attr, uattr, size) != 0)
return -EFAULT;
+ trace_android_vh_check_bpf_syscall(cmd, &attr, size);
+
err = security_bpf(cmd, &attr, size, uattr.is_kernel);
if (err < 0)
return err;
diff --git a/kernel/cgroup/TEST_MAPPING b/kernel/cgroup/TEST_MAPPING
new file mode 100644
index 0000000..1c8bacc
--- /dev/null
+++ b/kernel/cgroup/TEST_MAPPING
@@ -0,0 +1,329 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.ExtendedInCallServiceTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c
index a4337c9..4e25b74 100644
--- a/kernel/cgroup/cgroup-v1.c
+++ b/kernel/cgroup/cgroup-v1.c
@@ -523,7 +523,8 @@ static ssize_t __cgroup1_procs_write(struct kernfs_open_file *of,
tcred = get_task_cred(task);
if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
!uid_eq(cred->euid, tcred->uid) &&
- !uid_eq(cred->euid, tcred->suid))
+ !uid_eq(cred->euid, tcred->suid) &&
+ !ns_capable(tcred->user_ns, CAP_SYS_NICE))
ret = -EACCES;
put_cred(tcred);
if (ret)
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index c22cda7..a49799d 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -62,6 +62,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/cgroup.h>
+#undef CREATE_TRACE_POINTS
+
+#include <trace/hooks/cgroup.h>
#define CGROUP_FILE_NAME_MAX (MAX_CGROUP_TYPE_NAMELEN + \
MAX_CFTYPE_NAME + 2)
@@ -2632,6 +2635,7 @@ struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
return cgroup_taskset_next(tset, dst_cssp);
}
+EXPORT_SYMBOL_GPL(cgroup_taskset_first);
/**
* cgroup_taskset_next - iterate to the next task in taskset
@@ -2678,6 +2682,7 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
return NULL;
}
+EXPORT_SYMBOL_GPL(cgroup_taskset_next);
/**
* cgroup_migrate_execute - migrate a taskset
@@ -2748,6 +2753,7 @@ static int cgroup_migrate_execute(struct cgroup_mgctx *mgctx)
do_each_subsys_mask(ss, ssid, mgctx->ss_mask) {
if (ss->attach) {
tset->ssid = ssid;
+ trace_android_vh_cgroup_attach(ss, tset);
ss->attach(tset);
}
} while_each_subsys_mask();
@@ -4786,6 +4792,7 @@ struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
return next;
return NULL;
}
+EXPORT_SYMBOL_GPL(css_next_child);
/**
* css_next_descendant_pre - find the next descendant for pre-order walk
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 9faf343..95b1b7a 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -988,6 +988,7 @@ void rebuild_sched_domains(void)
rebuild_sched_domains_cpuslocked();
cpus_read_unlock();
}
+EXPORT_SYMBOL_GPL(rebuild_sched_domains);
void cpuset_reset_sched_domains(void)
{
diff --git a/kernel/dma/TEST_MAPPING b/kernel/dma/TEST_MAPPING
new file mode 100644
index 0000000..1f4ee84
--- /dev/null
+++ b/kernel/dma/TEST_MAPPING
@@ -0,0 +1,277 @@
+{
+ "imports": [
+ {
+ "path": "packages/modules/Connectivity"
+ },
+ {
+ "path": "packages/services/Telecomm"
+ },
+ {
+ "path": "system/netd"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsUsbManagerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.PhoneAccountTest"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ }
+ ]
+}
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index c56004d..5434896 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -54,6 +54,7 @@
#endif
struct cma *dma_contiguous_default_area;
+EXPORT_SYMBOL_GPL(dma_contiguous_default_area);
/*
* Default global CMA area size can be defined in kernel's .config.
diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c
index 86f87e4..39a2298 100644
--- a/kernel/dma/debug.c
+++ b/kernel/dma/debug.c
@@ -1174,10 +1174,11 @@ static void check_sync(struct device *dev,
dir2name[entry->direction],
dir2name[ref->direction]);
+ /* sg list count can be less than map count when partial cache sync */
if (ref->sg_call_ents && ref->type == dma_debug_sg &&
- ref->sg_call_ents != entry->sg_call_ents) {
+ ref->sg_call_ents > entry->sg_call_ents) {
err_printk(ref->dev, entry, "device driver syncs "
- "DMA sg list with different entry count "
+ "DMA sg list count larger than map count "
"[map count=%d] [sync count=%d]\n",
entry->sg_call_ents, ref->sg_call_ents);
}
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index d8e6f1d..c8806e8 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -792,6 +792,7 @@ struct io_tlb_pool *__swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
rcu_read_unlock();
return pool;
}
+EXPORT_SYMBOL_GPL(__swiotlb_find_pool);
/**
* swiotlb_del_pool() - remove an IO TLB pool from a device
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1f5699b..d4dbc10 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4955,6 +4955,7 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
return ret;
}
+EXPORT_SYMBOL_GPL(perf_event_read_local);
static int perf_event_read(struct perf_event *event, bool group)
{
diff --git a/kernel/exit.c b/kernel/exit.c
index 8a87021..958d8cf 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -901,6 +901,7 @@ void __noreturn do_exit(long code)
WARN_ON(irqs_disabled());
WARN_ON(tsk->plug);
+ profile_task_exit(tsk);
kcov_task_exit(tsk);
kmsan_task_exit(tsk);
diff --git a/kernel/fork.c b/kernel/fork.c
index 65113a3..6fa5d71 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -105,6 +105,7 @@
#include <linux/rseq.h>
#include <uapi/linux/pidfd.h>
#include <linux/pidfs.h>
+#include <linux/cpufreq_times.h>
#include <linux/tick.h>
#include <linux/unwind_deferred.h>
#include <linux/pgalloc.h>
@@ -124,6 +125,8 @@
#include <kunit/visibility.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/sched.h>
/*
* Minimum number of threads to boot the kernel
*/
@@ -134,6 +137,8 @@
*/
#define MAX_THREADS FUTEX_TID_MASK
+EXPORT_TRACEPOINT_SYMBOL_GPL(task_newtask);
+
/*
* Protected counters by write_lock_irq(&tasklist_lock)
*/
@@ -154,6 +159,7 @@ static const char * const resident_page_types[] = {
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
+EXPORT_SYMBOL_GPL(tasklist_lock);
#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
@@ -531,9 +537,11 @@ void free_task(struct task_struct *tsk)
#ifdef CONFIG_SECCOMP
WARN_ON_ONCE(tsk->seccomp.filter);
#endif
+ cpufreq_task_times_exit(tsk);
release_user_cpus_ptr(tsk);
scs_release(tsk);
+ trace_android_vh_free_task(tsk);
#ifndef CONFIG_THREAD_INFO_IN_TASK
/*
* The task is finally done with both the stack and thread_info,
@@ -1001,6 +1009,10 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
tsk->mm_cid.cid = MM_CID_UNSET;
tsk->mm_cid.active = 0;
#endif
+ android_init_vendor_data(tsk, 1);
+ android_init_oem_data(tsk, 1);
+
+ trace_android_vh_dup_task_struct(tsk, orig);
return tsk;
free_stack:
@@ -2064,6 +2076,8 @@ __latent_entropy struct task_struct *copy_process(
if (args->io_thread)
p->flags |= PF_IO_WORKER;
+ cpufreq_task_times_init(p);
+
if (args->name)
strscpy_pad(p->comm, args->name, sizeof(p->comm));
@@ -2657,6 +2671,8 @@ pid_t kernel_clone(struct kernel_clone_args *args)
if (IS_ERR(p))
return PTR_ERR(p);
+ cpufreq_task_times_alloc(p);
+
/*
* Do this prior waking up the new thread - the thread pointer
* might get invalid after that point, if the thread exits quickly.
diff --git a/kernel/gen_kheaders.sh b/kernel/gen_kheaders.sh
index 896a503..34a4cfa 100755
--- a/kernel/gen_kheaders.sh
+++ b/kernel/gen_kheaders.sh
@@ -30,8 +30,8 @@
mkdir "${tmpdir}"
# shellcheck disable=SC2154 # srctree is passed as an env variable
-sed "s:^${srctree}/::" "${srclist}" | ${TAR} -c -f - -C "${srctree}" -T - | ${TAR} -xf - -C "${tmpdir}"
-${TAR} -c -f - -T "${objlist}" | ${TAR} -xf - -C "${tmpdir}"
+sed "s:^${srctree}/::" "${srclist}" | ${TAR} -c --dereference -f - -C "${srctree}" -T - | ${TAR} -xf - -C "${tmpdir}"
+${TAR} -c --dereference -f - -T "${objlist}" | ${TAR} -xf - -C "${tmpdir}"
# Remove comments except SDPX lines
# Use a temporary file to store directory contents to prevent find/xargs from
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 6147a07..87e363f 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -14,6 +14,7 @@
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>
+#include <linux/wakeup_reason.h>
#include <trace/events/irq.h>
@@ -483,8 +484,22 @@ static bool irq_can_handle_pm(struct irq_desc *desc)
* If the interrupt is not in progress and is not an armed
* wakeup interrupt, proceed.
*/
- if (!irqd_has_set(irqd, IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED))
+ if (!irqd_has_set(irqd, IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED)) {
+#ifdef CONFIG_PM_SLEEP
+ if (unlikely(desc->no_suspend_depth &&
+ irqd_is_wakeup_set(&desc->irq_data))) {
+ unsigned int irq = irq_desc_get_irq(desc);
+ const char *name = "(unnamed)";
+
+ if (desc->action && desc->action->name)
+ name = desc->action->name;
+
+ log_abnormal_wakeup_reason("misconfigured IRQ %u %s",
+ irq, name);
+ }
+#endif
return true;
+ }
/*
* If the interrupt is an armed wakeup source, mark it pending
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 7173b8b..209098b 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -416,9 +416,7 @@ struct irq_desc *irq_to_desc(unsigned int irq)
{
return mtree_load(&sparse_irqs, irq);
}
-#ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE
EXPORT_SYMBOL_GPL(irq_to_desc);
-#endif
void irq_lock_sparse(void)
{
@@ -998,6 +996,7 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
return desc && desc->kstat_irqs ? per_cpu(desc->kstat_irqs->cnt, cpu) : 0;
}
+EXPORT_SYMBOL_GPL(kstat_irqs_cpu);
static unsigned int kstat_irqs_desc(struct irq_desc *desc, const struct cpumask *cpumask)
{
@@ -1081,3 +1080,4 @@ void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class
}
EXPORT_SYMBOL_GPL(__irq_set_lockdep_class);
#endif
+EXPORT_SYMBOL_GPL(kstat_irqs_usr);
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 73f7e1fd..a42ae75 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -180,6 +180,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
return true;
#endif /* CONFIG_SMP */
}
+EXPORT_SYMBOL_GPL(irq_work_queue_on);
bool irq_work_needs_cpu(void)
{
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 20451b6..aa9475c 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -619,6 +619,7 @@ void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
WARN_ON_ONCE(kthread->started);
}
+EXPORT_SYMBOL_GPL(kthread_bind_mask);
/**
* kthread_bind - bind a just-created kthread to a cpu.
@@ -680,6 +681,7 @@ void kthread_set_per_cpu(struct task_struct *k, int cpu)
kthread->cpu = cpu;
set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
}
+EXPORT_SYMBOL_GPL(kthread_set_per_cpu);
bool kthread_is_per_cpu(struct task_struct *p)
{
diff --git a/kernel/module/Kconfig b/kernel/module/Kconfig
index be74917..419def5 100644
--- a/kernel/module/Kconfig
+++ b/kernel/module/Kconfig
@@ -248,6 +248,20 @@
the version). With this option, such a "srcversion" field
will be created for all modules. If unsure, say N.
+config MODULE_SCMVERSION
+ bool "SCM version for modules"
+ depends on LOCALVERSION_AUTO
+ help
+ This enables the module attribute "scmversion" which can be used
+ by developers to identify the SCM version of a given module, e.g.
+ git sha1 or hg sha1. The SCM version can be queried by modinfo or
+ via the sysfs node: /sys/modules/MODULENAME/scmversion. This is
+ useful when the kernel or kernel modules are updated separately
+ since that causes the vermagic of the kernel and the module to
+ differ.
+
+ If unsure, say N.
+
config MODULE_SIG
bool "Module signature verification"
select MODULE_SIG_FORMAT
@@ -277,6 +291,19 @@
Reject unsigned modules or signed modules for which we don't have a
key. Without this, such modules will simply taint the kernel.
+config MODULE_SIG_PROTECT
+ bool "Android GKI module protection"
+ depends on MODULE_SIG && !MODULE_SIG_FORCE
+ help
+ Enables Android GKI symbol and export protection support.
+
+ This modifies the behavior of the MODULE_SIG_FORCE as follows:
+ - Allows Android GKI Modules signed using MODULE_SIG_ALL during build.
+ - Allows other modules to load if they don't violate the access to
+ Android GKI protected symbols and do not export the symbols already
+ exported by the Android GKI modules. Loading will fail and return
+ -EACCES (Permission denied) if symbol access conditions are not met.
+
config MODULE_SIG_ALL
bool "Automatically sign all modules"
default y
diff --git a/kernel/module/Makefile b/kernel/module/Makefile
index 50ffcc4..45706b0 100644
--- a/kernel/module/Makefile
+++ b/kernel/module/Makefile
@@ -13,6 +13,7 @@
obj-$(CONFIG_MODULE_DEBUG_AUTOLOAD_DUPS) += dups.o
obj-$(CONFIG_MODULE_DECOMPRESS) += decompress.o
obj-$(CONFIG_MODULE_SIG) += signing.o
+obj-$(CONFIG_MODULE_SIG_PROTECT) += gki_module.o
obj-$(CONFIG_LIVEPATCH) += livepatch.o
obj-$(CONFIG_MODULES_TREE_LOOKUP) += tree_lookup.o
obj-$(CONFIG_DEBUG_KMEMLEAK) += debug_kmemleak.o
@@ -23,3 +24,38 @@
obj-$(CONFIG_MODVERSIONS) += version.o
obj-$(CONFIG_MODULE_UNLOAD_TAINT_TRACKING) += tracking.o
obj-$(CONFIG_MODULE_STATS) += stats.o
+
+#
+# ANDROID: GKI: Generate headerfiles required for gki_module.o
+#
+# Dependencies on generated files need to be listed explicitly
+$(obj)/gki_module.o: include/generated/gki_module_protected_exports.h \
+ include/generated/gki_module_unprotected.h
+
+ifneq ($(CONFIG_UNUSED_KSYMS_WHITELIST),)
+ALL_KMI_SYMBOLS := $(CONFIG_UNUSED_KSYMS_WHITELIST)
+else
+ALL_KMI_SYMBOLS := include/config/abi_gki_kmi_symbols
+$(ALL_KMI_SYMBOLS):
+ : > $@
+endif
+
+include/generated/gki_module_unprotected.h: $(ALL_KMI_SYMBOLS) \
+ $(srctree)/scripts/gen_gki_modules_headers.sh
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/gen_gki_modules_headers.sh $@ \
+ "$(srctree)" \
+ $(ALL_KMI_SYMBOLS)
+
+# ABI protected exports list file specific to ARCH if exists else empty
+ABI_PROTECTED_EXPORTS_FILE :=
+ifeq ($(ARCH),arm64)
+ ABI_PROTECTED_EXPORTS_FILE := $(wildcard $(srctree)/android/abi_gki_protected_exports_aarch64)
+else
+ ABI_PROTECTED_EXPORTS_FILE := $(wildcard $(srctree)/android/abi_gki_protected_exports_$(ARCH))
+endif
+
+include/generated/gki_module_protected_exports.h: $(ABI_PROTECTED_EXPORTS_FILE) \
+ $(srctree)/scripts/gen_gki_modules_headers.sh
+ $(Q)$(CONFIG_SHELL) $(srctree)/scripts/gen_gki_modules_headers.sh $@ \
+ "$(srctree)" \
+ $(ABI_PROTECTED_EXPORTS_FILE)
diff --git a/kernel/module/gki_module.c b/kernel/module/gki_module.c
new file mode 100644
index 0000000..ec9116f
--- /dev/null
+++ b/kernel/module/gki_module.c
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2022 Google LLC
+ * Author: ramjiyani@google.com (Ramji Jiyani)
+ */
+
+#include <linux/bsearch.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/string.h>
+
+#include "internal.h"
+
+/*
+ * Build time generated header files
+ *
+ * gki_module_protected_exports.h -- Symbols protected from _export_ by unsigned modules
+ * gki_module_unprotected.h -- Symbols allowed to _access_ by unsigned modules
+ */
+#include <generated/gki_module_protected_exports.h>
+#include <generated/gki_module_unprotected.h>
+
+#define MAX_STRCMP_LEN (max(MAX_UNPROTECTED_NAME_LEN, MAX_PROTECTED_EXPORTS_NAME_LEN))
+
+/* bsearch() comparision callback */
+static int gki_cmp_name(const void *sym, const void *protected_sym)
+{
+ return strncmp(sym, protected_sym, MAX_STRCMP_LEN);
+}
+
+/**
+ * gki_is_module_protected_export - Is a symbol exported from a protected GKI module?
+ *
+ * @name: Symbol being checked against exported symbols from protected GKI modules
+ */
+bool gki_is_module_protected_export(const char *name)
+{
+ if (NR_UNPROTECTED_SYMBOLS) {
+ return bsearch(name, gki_protected_exports_symbols, NR_PROTECTED_EXPORTS_SYMBOLS,
+ MAX_PROTECTED_EXPORTS_NAME_LEN, gki_cmp_name) != NULL;
+ } else {
+ /*
+ * If there are no symbols in unprotected list; We don't need to
+ * protect exports as there is no KMI enforcement.
+ * Treat everything exportable in this case.
+ */
+ return false;
+ }
+}
+
+/**
+ * gki_is_module_unprotected_symbol - Is a symbol unprotected for unsigned module?
+ *
+ * @name: Symbol being checked in list of unprotected symbols
+ */
+bool gki_is_module_unprotected_symbol(const char *name)
+{
+ if (NR_UNPROTECTED_SYMBOLS) {
+ return bsearch(name, gki_unprotected_symbols, NR_UNPROTECTED_SYMBOLS,
+ MAX_UNPROTECTED_NAME_LEN, gki_cmp_name) != NULL;
+ } else {
+ /*
+ * If there are no symbols in unprotected list;
+ * there isn't a KMI enforcement for the kernel.
+ * Treat everything accessible in this case.
+ */
+ return true;
+ }
+}
diff --git a/kernel/module/internal.h b/kernel/module/internal.h
index 6182025..b4c4689 100644
--- a/kernel/module/internal.h
+++ b/kernel/module/internal.h
@@ -423,3 +423,17 @@ static inline int same_magic(const char *amagic, const char *bmagic, bool has_cr
return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */
+
+#ifdef CONFIG_MODULE_SIG_PROTECT
+extern bool gki_is_module_unprotected_symbol(const char *name);
+extern bool gki_is_module_protected_export(const char *name);
+#else
+static inline bool gki_is_module_unprotected_symbol(const char *name)
+{
+ return true;
+}
+static inline bool gki_is_module_protected_export(const char *name)
+{
+ return false;
+}
+#endif /* CONFIG_MODULE_SIG_PROTECT */
diff --git a/kernel/module/main.c b/kernel/module/main.c
index 2bac4c7..8fc26cb 100644
--- a/kernel/module/main.c
+++ b/kernel/module/main.c
@@ -606,6 +606,7 @@ static const struct module_attribute modinfo_##field = { \
MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
+MODINFO_ATTR(scmversion);
static struct {
char name[MODULE_NAME_LEN];
@@ -1058,6 +1059,7 @@ const struct module_attribute *const modinfo_attrs[] = {
&module_uevent,
&modinfo_version,
&modinfo_srcversion,
+ &modinfo_scmversion,
&modinfo_initstate,
&modinfo_coresize,
#ifdef CONFIG_ARCH_WANTS_MODULES_DATA_IN_VMALLOC
@@ -1233,6 +1235,8 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
const char *name,
char ownername[])
{
+ bool is_vendor_module;
+ bool is_vendor_exported_symbol;
struct find_symbol_arg fsa = {
.name = name,
.gplok = !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)),
@@ -1269,6 +1273,24 @@ static const struct kernel_symbol *resolve_symbol(struct module *mod,
goto getname;
}
+ /*
+ * ANDROID GKI
+ *
+ * Vendor (i.e., unsigned) modules are only permitted to use:
+ *
+ * 1. symbols exported by other vendor (unsigned) modules
+ * 2. unprotected symbols
+ */
+ is_vendor_module = !mod->sig_ok;
+ is_vendor_exported_symbol = fsa.owner && !fsa.owner->sig_ok;
+
+ if (is_vendor_module &&
+ !is_vendor_exported_symbol &&
+ !gki_is_module_unprotected_symbol(name)) {
+ fsa.sym = ERR_PTR(-EACCES);
+ goto getname;
+ }
+
err = ref_module(mod, fsa.owner);
if (err) {
fsa.sym = ERR_PTR(err);
@@ -1482,6 +1504,14 @@ static int verify_exported_symbols(struct module *mod)
.name = kernel_symbol_name(s),
.gplok = true,
};
+
+ if (!mod->sig_ok && gki_is_module_protected_export(
+ kernel_symbol_name(s))) {
+ pr_err("%s: exports protected symbol %s\n",
+ mod->name, kernel_symbol_name(s));
+ return -EACCES;
+ }
+
if (find_symbol(&fsa)) {
pr_err("%s: exports duplicate symbol %s"
" (owned by %s)\n",
@@ -1562,9 +1592,15 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
ignore_undef_symbol(info->hdr->e_machine, name)))
break;
- ret = PTR_ERR(ksym) ?: -ENOENT;
- pr_warn("%s: Unknown symbol %s (err %d)\n",
- mod->name, name, ret);
+ if (PTR_ERR(ksym) == -EACCES) {
+ ret = -EACCES;
+ pr_warn("%s: Protected symbol: %s (err %d)\n",
+ mod->name, name, ret);
+ } else {
+ ret = PTR_ERR(ksym) ?: -ENOENT;
+ pr_warn("%s: Unknown symbol %s (err %d)\n",
+ mod->name, name, ret);
+ }
break;
default:
@@ -2543,6 +2579,7 @@ static void module_augment_kernel_taints(struct module *mod, struct load_info *i
}
#ifdef CONFIG_MODULE_SIG
mod->sig_ok = info->sig_ok;
+#ifndef CONFIG_MODULE_SIG_PROTECT
if (!mod->sig_ok) {
pr_notice_once("%s: module verification failed: signature "
"and/or required key missing - tainting "
@@ -2550,6 +2587,9 @@ static void module_augment_kernel_taints(struct module *mod, struct load_info *i
add_taint_module(mod, TAINT_UNSIGNED_MODULE, LOCKDEP_STILL_OK);
}
#endif
+#else
+ mod->sig_ok = 0;
+#endif
/*
* ndiswrapper is under GPL by itself, but loads proprietary modules.
diff --git a/kernel/module/signing.c b/kernel/module/signing.c
index a2ff4242..7ffb2ae 100644
--- a/kernel/module/signing.c
+++ b/kernel/module/signing.c
@@ -19,8 +19,20 @@
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "module."
+/*
+ * ANDROID: GKI:
+ * Only enforce signature if SIG_PROTECT is not set
+ */
+#ifndef CONFIG_MODULE_SIG_PROTECT
static bool sig_enforce = IS_ENABLED(CONFIG_MODULE_SIG_FORCE);
module_param(sig_enforce, bool_enable_only, 0644);
+void set_module_sig_enforced(void)
+{
+ sig_enforce = true;
+}
+#else
+#define sig_enforce false
+#endif
/*
* Export sig_enforce kernel cmdline parameter to allow other subsystems rely
@@ -32,11 +44,6 @@ bool is_module_sig_enforced(void)
}
EXPORT_SYMBOL(is_module_sig_enforced);
-void set_module_sig_enforced(void)
-{
- sig_enforce = true;
-}
-
/*
* Verify the signature on a module.
*/
@@ -121,5 +128,13 @@ int module_sig_check(struct load_info *info, int flags)
return -EKEYREJECTED;
}
+/*
+ * ANDROID: GKI: Do not prevent loading of unsigned modules;
+ * as all modules except GKI modules are not signed.
+ */
+#ifndef CONFIG_MODULE_SIG_PROTECT
return security_locked_down(LOCKDOWN_MODULE_SIGNATURE);
+#else
+ return 0;
+#endif
}
diff --git a/kernel/pid.c b/kernel/pid.c
index 3b96571..8318e4e 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -482,6 +482,7 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
{
return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}
+EXPORT_SYMBOL_GPL(find_task_by_vpid);
struct task_struct *find_get_task_by_vpid(pid_t nr)
{
diff --git a/kernel/power/Makefile b/kernel/power/Makefile
index 773e278..cf6008d 100644
--- a/kernel/power/Makefile
+++ b/kernel/power/Makefile
@@ -21,6 +21,7 @@
obj-$(CONFIG_MAGIC_SYSRQ) += poweroff.o
+obj-$(CONFIG_SUSPEND) += wakeup_reason.o
obj-$(CONFIG_ENERGY_MODEL) += em.o
em-y := energy_model.o
em-$(CONFIG_NET) += em_netlink_autogen.o em_netlink.o
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 5f8c9e1..05f967a 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -165,6 +165,19 @@ int unregister_pm_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);
+void pm_report_hw_sleep_time(u64 t)
+{
+ suspend_stats.last_hw_sleep = t;
+ suspend_stats.total_hw_sleep += t;
+}
+EXPORT_SYMBOL_GPL(pm_report_hw_sleep_time);
+
+void pm_report_max_hw_sleep(u64 t)
+{
+ suspend_stats.max_hw_sleep = t;
+}
+EXPORT_SYMBOL_GPL(pm_report_max_hw_sleep);
+
int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down)
{
int ret;
@@ -384,74 +397,6 @@ static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
power_attr(pm_test);
#endif /* CONFIG_PM_SLEEP_DEBUG */
-#define SUSPEND_NR_STEPS SUSPEND_RESUME
-#define REC_FAILED_NUM 2
-
-struct suspend_stats {
- unsigned int step_failures[SUSPEND_NR_STEPS];
- unsigned int success;
- unsigned int fail;
- int last_failed_dev;
- char failed_devs[REC_FAILED_NUM][40];
- int last_failed_errno;
- int errno[REC_FAILED_NUM];
- int last_failed_step;
- u64 last_hw_sleep;
- u64 total_hw_sleep;
- u64 max_hw_sleep;
- enum suspend_stat_step failed_steps[REC_FAILED_NUM];
-};
-
-static struct suspend_stats suspend_stats;
-static DEFINE_MUTEX(suspend_stats_lock);
-
-void dpm_save_failed_dev(const char *name)
-{
- mutex_lock(&suspend_stats_lock);
-
- strscpy(suspend_stats.failed_devs[suspend_stats.last_failed_dev],
- name, sizeof(suspend_stats.failed_devs[0]));
- suspend_stats.last_failed_dev++;
- suspend_stats.last_failed_dev %= REC_FAILED_NUM;
-
- mutex_unlock(&suspend_stats_lock);
-}
-
-void dpm_save_failed_step(enum suspend_stat_step step)
-{
- suspend_stats.step_failures[step-1]++;
- suspend_stats.failed_steps[suspend_stats.last_failed_step] = step;
- suspend_stats.last_failed_step++;
- suspend_stats.last_failed_step %= REC_FAILED_NUM;
-}
-
-void dpm_save_errno(int err)
-{
- if (!err) {
- suspend_stats.success++;
- return;
- }
-
- suspend_stats.fail++;
-
- suspend_stats.errno[suspend_stats.last_failed_errno] = err;
- suspend_stats.last_failed_errno++;
- suspend_stats.last_failed_errno %= REC_FAILED_NUM;
-}
-
-void pm_report_hw_sleep_time(u64 t)
-{
- suspend_stats.last_hw_sleep = t;
- suspend_stats.total_hw_sleep += t;
-}
-EXPORT_SYMBOL_GPL(pm_report_hw_sleep_time);
-
-void pm_report_max_hw_sleep(u64 t)
-{
- suspend_stats.max_hw_sleep = t;
-}
-EXPORT_SYMBOL_GPL(pm_report_max_hw_sleep);
-
static const char * const suspend_step_names[] = {
[SUSPEND_WORKING] = "",
[SUSPEND_FREEZE] = "freeze",
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 75b6384..1723a6e5 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -350,5 +350,3 @@ static inline void pm_sleep_enable_secondary_cpus(void)
suspend_enable_secondary_cpus();
cpuidle_resume();
}
-
-void dpm_save_errno(int err);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 57c4426..21ff55c 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -31,6 +31,7 @@
#include <linux/compiler.h>
#include <linux/moduleparam.h>
#include <linux/fs.h>
+#include <linux/wakeup_reason.h>
#include "power.h"
@@ -151,6 +152,8 @@ static void s2idle_loop(void)
break;
}
+ clear_wakeup_reasons();
+
if (s2idle_ops && s2idle_ops->check)
s2idle_ops->check();
@@ -389,6 +392,7 @@ static int suspend_prepare(suspend_state_t state)
if (!error)
return 0;
+ log_suspend_abort_reason("One or more tasks refusing to freeze");
dpm_save_failed_step(SUSPEND_FREEZE);
filesystems_thaw();
pm_notifier_call_chain(PM_POST_SUSPEND);
@@ -418,7 +422,7 @@ void __weak arch_suspend_enable_irqs(void)
*/
static int suspend_enter(suspend_state_t state, bool *wakeup)
{
- int error;
+ int error, last_dev;
error = platform_suspend_prepare(state);
if (error)
@@ -426,7 +430,11 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
error = dpm_suspend_late(PMSG_SUSPEND);
if (error) {
+ last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+ last_dev %= REC_FAILED_NUM;
pr_err("late suspend of devices failed\n");
+ log_suspend_abort_reason("late suspend of %s device failed",
+ suspend_stats.failed_devs[last_dev]);
goto Platform_finish;
}
error = platform_suspend_prepare_late(state);
@@ -435,7 +443,11 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
error = dpm_suspend_noirq(PMSG_SUSPEND);
if (error) {
+ last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
+ last_dev %= REC_FAILED_NUM;
pr_err("noirq suspend of devices failed\n");
+ log_suspend_abort_reason("noirq suspend of %s device failed",
+ suspend_stats.failed_devs[last_dev]);
goto Platform_early_resume;
}
error = platform_suspend_prepare_noirq(state);
@@ -451,8 +463,10 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
}
error = pm_sleep_disable_secondary_cpus();
- if (error || suspend_test(TEST_CPUS))
+ if (error || suspend_test(TEST_CPUS)) {
+ log_suspend_abort_reason("Disabling non-boot cpus failed");
goto Enable_cpus;
+ }
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
@@ -523,6 +537,8 @@ int suspend_devices_and_enter(suspend_state_t state)
error = dpm_suspend_start(PMSG_SUSPEND);
if (error) {
pr_err("Some devices failed to suspend, or early wake event detected\n");
+ log_suspend_abort_reason(
+ "Some devices failed to suspend, or early wake event detected");
goto Recover_platform;
}
suspend_test_finish("suspend devices");
@@ -642,7 +658,12 @@ int pm_suspend(suspend_state_t state)
pr_info("suspend entry (%s)\n", mem_sleep_labels[state]);
error = enter_state(state);
- dpm_save_errno(error);
+ if (error) {
+ suspend_stats.fail++;
+ dpm_save_failed_errno(error);
+ } else {
+ suspend_stats.success++;
+ }
pr_info("suspend exit\n");
return error;
}
diff --git a/kernel/power/wakeup_reason.c b/kernel/power/wakeup_reason.c
new file mode 100644
index 0000000..8fefaa3
--- /dev/null
+++ b/kernel/power/wakeup_reason.c
@@ -0,0 +1,438 @@
+/*
+ * kernel/power/wakeup_reason.c
+ *
+ * Logs the reasons which caused the kernel to resume from
+ * the suspend mode.
+ *
+ * Copyright (C) 2020 Google, Inc.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/wakeup_reason.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/notifier.h>
+#include <linux/suspend.h>
+#include <linux/slab.h>
+
+/*
+ * struct wakeup_irq_node - stores data and relationships for IRQs logged as
+ * either base or nested wakeup reasons during suspend/resume flow.
+ * @siblings - for membership on leaf or parent IRQ lists
+ * @irq - the IRQ number
+ * @irq_name - the name associated with the IRQ, or a default if none
+ */
+struct wakeup_irq_node {
+ struct list_head siblings;
+ int irq;
+ const char *irq_name;
+};
+
+enum wakeup_reason_flag {
+ RESUME_NONE = 0,
+ RESUME_IRQ,
+ RESUME_ABORT,
+ RESUME_ABNORMAL,
+};
+
+static DEFINE_SPINLOCK(wakeup_reason_lock);
+
+static LIST_HEAD(leaf_irqs); /* kept in ascending IRQ sorted order */
+static LIST_HEAD(parent_irqs); /* unordered */
+
+static struct kmem_cache *wakeup_irq_nodes_cache;
+
+static const char *default_irq_name = "(unnamed)";
+
+static struct kobject *kobj;
+
+static bool capture_reasons;
+static int wakeup_reason;
+static char non_irq_wake_reason[MAX_SUSPEND_ABORT_LEN];
+
+static ktime_t last_monotime; /* monotonic time before last suspend */
+static ktime_t curr_monotime; /* monotonic time after last suspend */
+static ktime_t last_stime; /* monotonic boottime offset before last suspend */
+static ktime_t curr_stime; /* monotonic boottime offset after last suspend */
+
+static void init_node(struct wakeup_irq_node *p, int irq)
+{
+ struct irq_desc *desc;
+
+ INIT_LIST_HEAD(&p->siblings);
+
+ p->irq = irq;
+ desc = irq_to_desc(irq);
+ if (desc && desc->action && desc->action->name)
+ p->irq_name = desc->action->name;
+ else
+ p->irq_name = default_irq_name;
+}
+
+static struct wakeup_irq_node *create_node(int irq)
+{
+ struct wakeup_irq_node *result;
+
+ result = kmem_cache_alloc(wakeup_irq_nodes_cache, GFP_ATOMIC);
+ if (unlikely(!result))
+ pr_warn("Failed to log wakeup IRQ %d\n", irq);
+ else
+ init_node(result, irq);
+
+ return result;
+}
+
+static void delete_list(struct list_head *head)
+{
+ struct wakeup_irq_node *n;
+
+ while (!list_empty(head)) {
+ n = list_first_entry(head, struct wakeup_irq_node, siblings);
+ list_del(&n->siblings);
+ kmem_cache_free(wakeup_irq_nodes_cache, n);
+ }
+}
+
+static bool add_sibling_node_sorted(struct list_head *head, int irq)
+{
+ struct wakeup_irq_node *n = NULL;
+ struct list_head *predecessor = head;
+
+ if (unlikely(WARN_ON(!head)))
+ return NULL;
+
+ if (!list_empty(head))
+ list_for_each_entry(n, head, siblings) {
+ if (n->irq < irq)
+ predecessor = &n->siblings;
+ else if (n->irq == irq)
+ return true;
+ else
+ break;
+ }
+
+ n = create_node(irq);
+ if (n) {
+ list_add(&n->siblings, predecessor);
+ return true;
+ }
+
+ return false;
+}
+
+static struct wakeup_irq_node *find_node_in_list(struct list_head *head,
+ int irq)
+{
+ struct wakeup_irq_node *n;
+
+ if (unlikely(WARN_ON(!head)))
+ return NULL;
+
+ list_for_each_entry(n, head, siblings)
+ if (n->irq == irq)
+ return n;
+
+ return NULL;
+}
+
+void log_irq_wakeup_reason(int irq)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wakeup_reason_lock, flags);
+ if (wakeup_reason == RESUME_ABNORMAL || wakeup_reason == RESUME_ABORT) {
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+ return;
+ }
+
+ if (!capture_reasons) {
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+ return;
+ }
+
+ if (find_node_in_list(&parent_irqs, irq) == NULL)
+ add_sibling_node_sorted(&leaf_irqs, irq);
+
+ wakeup_reason = RESUME_IRQ;
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+void log_threaded_irq_wakeup_reason(int irq, int parent_irq)
+{
+ struct wakeup_irq_node *parent;
+ unsigned long flags;
+
+ /*
+ * Intentionally unsynchronized. Calls that come in after we have
+ * resumed should have a fast exit path since there's no work to be
+ * done, any any coherence issue that could cause a wrong value here is
+ * both highly improbable - given the set/clear timing - and very low
+ * impact (parent IRQ gets logged instead of the specific child).
+ */
+ if (!capture_reasons)
+ return;
+
+ spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+ if (wakeup_reason == RESUME_ABNORMAL || wakeup_reason == RESUME_ABORT) {
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+ return;
+ }
+
+ if (!capture_reasons || (find_node_in_list(&leaf_irqs, irq) != NULL)) {
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+ return;
+ }
+
+ parent = find_node_in_list(&parent_irqs, parent_irq);
+ if (parent != NULL)
+ add_sibling_node_sorted(&leaf_irqs, irq);
+ else {
+ parent = find_node_in_list(&leaf_irqs, parent_irq);
+ if (parent != NULL) {
+ list_del_init(&parent->siblings);
+ list_add_tail(&parent->siblings, &parent_irqs);
+ add_sibling_node_sorted(&leaf_irqs, irq);
+ }
+ }
+
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+EXPORT_SYMBOL_GPL(log_threaded_irq_wakeup_reason);
+
+static void __log_abort_or_abnormal_wake(bool abort, const char *fmt,
+ va_list args)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+ /* Suspend abort or abnormal wake reason has already been logged. */
+ if (wakeup_reason != RESUME_NONE) {
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+ return;
+ }
+
+ if (abort)
+ wakeup_reason = RESUME_ABORT;
+ else
+ wakeup_reason = RESUME_ABNORMAL;
+
+ vsnprintf(non_irq_wake_reason, MAX_SUSPEND_ABORT_LEN, fmt, args);
+
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+void log_suspend_abort_reason(const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ __log_abort_or_abnormal_wake(true, fmt, args);
+ va_end(args);
+}
+EXPORT_SYMBOL_GPL(log_suspend_abort_reason);
+
+void log_abnormal_wakeup_reason(const char *fmt, ...)
+{
+ va_list args;
+
+ va_start(args, fmt);
+ __log_abort_or_abnormal_wake(false, fmt, args);
+ va_end(args);
+}
+EXPORT_SYMBOL_GPL(log_abnormal_wakeup_reason);
+
+void clear_wakeup_reasons(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+ delete_list(&leaf_irqs);
+ delete_list(&parent_irqs);
+ wakeup_reason = RESUME_NONE;
+ capture_reasons = true;
+
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+static void print_wakeup_sources(void)
+{
+ struct wakeup_irq_node *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+ capture_reasons = false;
+
+ if (wakeup_reason == RESUME_ABORT) {
+ pr_info("Abort: %s\n", non_irq_wake_reason);
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+ return;
+ }
+
+ if (wakeup_reason == RESUME_IRQ && !list_empty(&leaf_irqs))
+ list_for_each_entry(n, &leaf_irqs, siblings)
+ pr_info("Resume caused by IRQ %d, %s\n", n->irq,
+ n->irq_name);
+ else if (wakeup_reason == RESUME_ABNORMAL)
+ pr_info("Resume caused by %s\n", non_irq_wake_reason);
+ else
+ pr_info("Resume cause unknown\n");
+
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+}
+
+static ssize_t last_resume_reason_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ ssize_t buf_offset = 0;
+ struct wakeup_irq_node *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&wakeup_reason_lock, flags);
+
+ if (wakeup_reason == RESUME_ABORT) {
+ buf_offset = scnprintf(buf, PAGE_SIZE, "Abort: %s",
+ non_irq_wake_reason);
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+ return buf_offset;
+ }
+
+ if (wakeup_reason == RESUME_IRQ && !list_empty(&leaf_irqs))
+ list_for_each_entry(n, &leaf_irqs, siblings)
+ buf_offset += scnprintf(buf + buf_offset,
+ PAGE_SIZE - buf_offset,
+ "%d %s\n", n->irq, n->irq_name);
+ else if (wakeup_reason == RESUME_ABNORMAL)
+ buf_offset = scnprintf(buf, PAGE_SIZE, "-1 %s",
+ non_irq_wake_reason);
+
+ spin_unlock_irqrestore(&wakeup_reason_lock, flags);
+
+ return buf_offset;
+}
+
+static ssize_t last_suspend_time_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ struct timespec64 sleep_time;
+ struct timespec64 total_time;
+ struct timespec64 suspend_resume_time;
+
+ /*
+ * total_time is calculated from monotonic bootoffsets because
+ * unlike CLOCK_MONOTONIC it include the time spent in suspend state.
+ */
+ total_time = ktime_to_timespec64(ktime_sub(curr_stime, last_stime));
+
+ /*
+ * suspend_resume_time is calculated as monotonic (CLOCK_MONOTONIC)
+ * time interval before entering suspend and post suspend.
+ */
+ suspend_resume_time =
+ ktime_to_timespec64(ktime_sub(curr_monotime, last_monotime));
+
+ /* sleep_time = total_time - suspend_resume_time */
+ sleep_time = timespec64_sub(total_time, suspend_resume_time);
+
+ /* Export suspend_resume_time and sleep_time in pair here. */
+ return sprintf(buf, "%llu.%09lu %llu.%09lu\n",
+ (unsigned long long)suspend_resume_time.tv_sec,
+ suspend_resume_time.tv_nsec,
+ (unsigned long long)sleep_time.tv_sec,
+ sleep_time.tv_nsec);
+}
+
+static struct kobj_attribute resume_reason = __ATTR_RO(last_resume_reason);
+static struct kobj_attribute suspend_time = __ATTR_RO(last_suspend_time);
+
+static struct attribute *attrs[] = {
+ &resume_reason.attr,
+ &suspend_time.attr,
+ NULL,
+};
+static struct attribute_group attr_group = {
+ .attrs = attrs,
+};
+
+/* Detects a suspend and clears all the previous wake up reasons*/
+static int wakeup_reason_pm_event(struct notifier_block *notifier,
+ unsigned long pm_event, void *unused)
+{
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ /* monotonic time since boot */
+ last_monotime = ktime_get();
+ /* monotonic time since boot including the time spent in suspend */
+ last_stime = ktime_get_boottime();
+ clear_wakeup_reasons();
+ break;
+ case PM_POST_SUSPEND:
+ /* monotonic time since boot */
+ curr_monotime = ktime_get();
+ /* monotonic time since boot including the time spent in suspend */
+ curr_stime = ktime_get_boottime();
+ print_wakeup_sources();
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block wakeup_reason_pm_notifier_block = {
+ .notifier_call = wakeup_reason_pm_event,
+};
+
+static int __init wakeup_reason_init(void)
+{
+ if (register_pm_notifier(&wakeup_reason_pm_notifier_block)) {
+ pr_warn("[%s] failed to register PM notifier\n", __func__);
+ goto fail;
+ }
+
+ kobj = kobject_create_and_add("wakeup_reasons", kernel_kobj);
+ if (!kobj) {
+ pr_warn("[%s] failed to create a sysfs kobject\n", __func__);
+ goto fail_unregister_pm_notifier;
+ }
+
+ if (sysfs_create_group(kobj, &attr_group)) {
+ pr_warn("[%s] failed to create a sysfs group\n", __func__);
+ goto fail_kobject_put;
+ }
+
+ wakeup_irq_nodes_cache =
+ kmem_cache_create("wakeup_irq_node_cache",
+ sizeof(struct wakeup_irq_node), 0, 0, NULL);
+ if (!wakeup_irq_nodes_cache)
+ goto fail_remove_group;
+
+ return 0;
+
+fail_remove_group:
+ sysfs_remove_group(kobj, &attr_group);
+fail_kobject_put:
+ kobject_put(kobj);
+fail_unregister_pm_notifier:
+ unregister_pm_notifier(&wakeup_reason_pm_notifier_block);
+fail:
+ return 1;
+}
+
+late_initcall(wakeup_reason_init);
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index 03231495..d3a82830 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -56,6 +56,8 @@
#include <trace/events/initcall.h>
#define CREATE_TRACE_POINTS
#include <trace/events/printk.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/printk.h>
#include "printk_ringbuffer.h"
#include "console_cmdline.h"
@@ -644,10 +646,14 @@ static ssize_t info_print_ext_header(char *buf, size_t size,
u64 ts_usec = info->ts_nsec;
char caller[20];
#ifdef CONFIG_PRINTK_CALLER
+ int vh_ret = 0;
u32 id = info->caller_id;
- snprintf(caller, sizeof(caller), ",caller=%c%u",
- id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
+ trace_android_vh_printk_ext_header(caller, sizeof(caller), id, &vh_ret);
+
+ if (!vh_ret)
+ snprintf(caller, sizeof(caller), ",caller=%c%u",
+ id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
#else
caller[0] = '\0';
#endif
@@ -1364,9 +1370,12 @@ static size_t print_time(u64 ts, char *buf)
static size_t print_caller(u32 id, char *buf)
{
char caller[12];
+ int vh_ret = 0;
- snprintf(caller, sizeof(caller), "%c%u",
- id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
+ trace_android_vh_printk_caller(caller, sizeof(caller), id, &vh_ret);
+ if (!vh_ret)
+ snprintf(caller, sizeof(caller), "%c%u",
+ id & 0x80000000 ? 'C' : 'T', id & ~0x80000000);
return sprintf(buf, "[%6s]", caller);
}
#else
@@ -2137,6 +2146,12 @@ static inline void printk_delay(int level)
static inline u32 printk_caller_id(void)
{
+ u32 caller_id = 0;
+
+ trace_android_vh_printk_caller_id(&caller_id);
+ if (caller_id)
+ return caller_id;
+
return in_task() ? task_pid_nr(current) :
CALLER_ID_MASK + smp_processor_id();
}
@@ -2863,6 +2878,12 @@ void console_resume_all(void)
*/
static int console_cpu_notify(unsigned int cpu)
{
+ int flag = 0;
+
+ trace_android_vh_printk_hotplug(&flag);
+ if (flag)
+ return 0;
+
struct console_flush_type ft;
if (!cpuhp_tasks_frozen) {
@@ -4725,6 +4746,7 @@ int _printk_deferred(const char *fmt, ...)
return r;
}
+EXPORT_SYMBOL_GPL(_printk_deferred);
/*
* printk rate limiting, lifted from the networking subsystem.
diff --git a/kernel/profile.c b/kernel/profile.c
index 1fcf1ad..13dd4fd 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -114,6 +114,59 @@ int __ref profile_init(void)
return -ENOMEM;
}
+/* Profile event notifications */
+
+static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
+static BLOCKING_NOTIFIER_HEAD(munmap_notifier);
+
+void profile_task_exit(struct task_struct *task)
+{
+ blocking_notifier_call_chain(&task_exit_notifier, 0, task);
+}
+
+void profile_munmap(unsigned long addr)
+{
+ blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
+}
+
+int profile_event_register(enum profile_type type, struct notifier_block *n)
+{
+ int err = -EINVAL;
+
+ switch (type) {
+ case PROFILE_TASK_EXIT:
+ err = blocking_notifier_chain_register(
+ &task_exit_notifier, n);
+ break;
+ case PROFILE_MUNMAP:
+ err = blocking_notifier_chain_register(
+ &munmap_notifier, n);
+ break;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(profile_event_register);
+
+int profile_event_unregister(enum profile_type type, struct notifier_block *n)
+{
+ int err = -EINVAL;
+
+ switch (type) {
+ case PROFILE_TASK_EXIT:
+ err = blocking_notifier_chain_unregister(
+ &task_exit_notifier, n);
+ break;
+ case PROFILE_MUNMAP:
+ err = blocking_notifier_chain_unregister(
+ &munmap_notifier, n);
+ break;
+ }
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(profile_event_unregister);
+
static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
{
unsigned long pc;
diff --git a/kernel/rcu/Kconfig b/kernel/rcu/Kconfig
index 7622992..f5c614a 100644
--- a/kernel/rcu/Kconfig
+++ b/kernel/rcu/Kconfig
@@ -381,4 +381,25 @@
Say Y here if you need tighter callback-limit enforcement.
Say N here if you are unsure.
+config RCU_BOOT_END_DELAY
+ int "Minimum time before RCU may consider in-kernel boot as completed"
+ range 0 120000
+ default 20000
+ help
+ Default value of the minimum time in milliseconds from the start of boot
+ that must elapse before the boot sequence can be marked complete from RCU's
+ perspective, after which RCU's behavior becomes more relaxed.
+ Userspace can also mark the boot as completed sooner than this default
+ by writing the time in milliseconds, say once userspace considers
+ the system as booted, to: /sys/module/rcupdate/parameters/rcu_boot_end_delay.
+ Or even just writing a value of 0 to this sysfs node. The sysfs node can
+ also be used to extend the delay to be larger than the default, assuming
+ the marking of boot completion has not yet occurred.
+
+ The actual delay for RCU's view of the system to be marked as booted can be
+ higher than this value if the kernel takes a long time to initialize but it
+ will never be smaller than this value.
+
+ Accept the default if unsure.
+
endmenu # "RCU Subsystem"
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index d98a5c3..d1b4a52 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -44,6 +44,7 @@
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <linux/rcupdate_trace.h>
+#include <linux/jiffies.h>
#define CREATE_TRACE_POINTS
@@ -225,13 +226,50 @@ void rcu_unexpedite_gp(void)
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
+/*
+ * Minimum time in milliseconds from the start boot until RCU can consider
+ * in-kernel boot as completed. This can also be tuned at runtime to end the
+ * boot earlier, by userspace init code writing the time in milliseconds (even
+ * 0) to: /sys/module/rcupdate/parameters/rcu_boot_end_delay. The sysfs node
+ * can also be used to extend the delay to be larger than the default, assuming
+ * the marking of boot complete has not yet occurred.
+ */
+static int rcu_boot_end_delay = CONFIG_RCU_BOOT_END_DELAY;
+
static bool rcu_boot_ended __read_mostly;
+static bool rcu_boot_end_called __read_mostly;
+static DEFINE_MUTEX(rcu_boot_end_lock);
/*
- * Inform RCU of the end of the in-kernel boot sequence.
+ * Inform RCU of the end of the in-kernel boot sequence. The boot sequence will
+ * not be marked ended until at least rcu_boot_end_delay milliseconds have passed.
*/
-void rcu_end_inkernel_boot(void)
+void rcu_end_inkernel_boot(void);
+static void rcu_boot_end_work_fn(struct work_struct *work)
{
+ rcu_end_inkernel_boot();
+}
+static DECLARE_DELAYED_WORK(rcu_boot_end_work, rcu_boot_end_work_fn);
+
+/* Must be called with rcu_boot_end_lock held. */
+static void rcu_end_inkernel_boot_locked(void)
+{
+ rcu_boot_end_called = true;
+
+ if (rcu_boot_ended)
+ return;
+
+ if (rcu_boot_end_delay) {
+ u64 boot_ms = div_u64(ktime_get_boot_fast_ns(), 1000000UL);
+
+ if (boot_ms < rcu_boot_end_delay) {
+ schedule_delayed_work(&rcu_boot_end_work,
+ msecs_to_jiffies(rcu_boot_end_delay - boot_ms));
+ return;
+ }
+ }
+
+ cancel_delayed_work(&rcu_boot_end_work);
rcu_unexpedite_gp();
rcu_async_relax();
if (rcu_normal_after_boot)
@@ -239,6 +277,39 @@ void rcu_end_inkernel_boot(void)
rcu_boot_ended = true;
}
+void rcu_end_inkernel_boot(void)
+{
+ mutex_lock(&rcu_boot_end_lock);
+ rcu_end_inkernel_boot_locked();
+ mutex_unlock(&rcu_boot_end_lock);
+}
+
+static int param_set_rcu_boot_end(const char *val, const struct kernel_param *kp)
+{
+ uint end_ms;
+ int ret = kstrtouint(val, 0, &end_ms);
+
+ if (ret)
+ return ret;
+ /*
+ * rcu_end_inkernel_boot() should be called at least once during init
+ * before we can allow param changes to end the boot.
+ */
+ mutex_lock(&rcu_boot_end_lock);
+ rcu_boot_end_delay = end_ms;
+ if (!rcu_boot_ended && rcu_boot_end_called) {
+ rcu_end_inkernel_boot_locked();
+ }
+ mutex_unlock(&rcu_boot_end_lock);
+ return ret;
+}
+
+static const struct kernel_param_ops rcu_boot_end_ops = {
+ .set = param_set_rcu_boot_end,
+ .get = param_get_uint,
+};
+module_param_cb(rcu_boot_end_delay, &rcu_boot_end_ops, &rcu_boot_end_delay, 0644);
+
/*
* Let rcutorture know when it is OK to turn it up to eleven.
*/
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 695c33e..1b94d2d 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -19,6 +19,8 @@
#include <linux/syscore_ops.h>
#include <linux/uaccess.h>
+#include <trace/hooks/reboot.h>
+
/*
* this indicates whether you can reboot with ctrl-alt-del: the default is yes
*/
@@ -35,6 +37,7 @@ EXPORT_SYMBOL(cad_pid);
enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
EXPORT_SYMBOL_GPL(reboot_mode);
enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED;
+EXPORT_SYMBOL_GPL(panic_reboot_mode);
static enum hw_protection_action hw_protection_action = HWPROT_ACT_SHUTDOWN;
@@ -1040,6 +1043,8 @@ void __hw_protection_trigger(const char *reason, int ms_until_forced,
if (!atomic_dec_and_test(&allow_proceed))
return;
+ trace_android_rvh_hw_protection_shutdown(reason);
+
/*
* Queue a backup emergency shutdown in the event of
* orderly_poweroff failure
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index b1f1a36..68916a0 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -40,3 +40,4 @@
obj-y += fair.o
obj-y += build_policy.o
obj-y += build_utility.o
+obj-$(CONFIG_ANDROID_VENDOR_HOOKS) += vendor_hooks.o
diff --git a/kernel/sched/OWNERS b/kernel/sched/OWNERS
new file mode 100644
index 0000000..2fccdcd
--- /dev/null
+++ b/kernel/sched/OWNERS
@@ -0,0 +1,2 @@
+qperret@google.com
+tkjos@google.com
diff --git a/kernel/sched/TEST_MAPPING b/kernel/sched/TEST_MAPPING
new file mode 100644
index 0000000..269bb3b
--- /dev/null
+++ b/kernel/sched/TEST_MAPPING
@@ -0,0 +1,315 @@
+{
+ "imports": [
+ {
+ "path": "frameworks/base/packages/PackageInstaller"
+ },
+ {
+ "path": "frameworks/base/services/core/java/com/android/server"
+ },
+ {
+ "path": "frameworks/base/core/java/com/android/internal/app"
+ },
+ {
+ "path": "frameworks/base/apex/jobscheduler/service/java/com/android/server/job"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsSilentUpdateHostTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsJobSchedulerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsWifiBroadcastsHostTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ }
+ ],
+ "presubmit-large": [
+ {
+ "name": "CtsSuspendAppsTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7597776..0b3a1be 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -100,6 +100,9 @@
#include "../smpboot.h"
#include "../locking/mutex.h"
+#include <trace/hooks/sched.h>
+#include <trace/hooks/cgroup.h>
+
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpu);
EXPORT_TRACEPOINT_SYMBOL_GPL(ipi_send_cpumask);
@@ -119,11 +122,13 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_compute_energy_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_switch);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_entry_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_exit_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_set_need_resched_tp);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+EXPORT_SYMBOL_GPL(runqueues);
DEFINE_PER_CPU(struct rnd_state, sched_rnd_state);
#ifdef CONFIG_SCHED_PROXY_EXEC
@@ -167,6 +172,7 @@ __setup("sched_proxy_exec", setup_proxy_exec);
__read_mostly unsigned int sysctl_sched_features =
#include "features.h"
0;
+EXPORT_SYMBOL_GPL(sysctl_sched_features);
#undef SCHED_FEAT
/*
@@ -661,6 +667,7 @@ void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
raw_spin_unlock(lock);
}
}
+EXPORT_SYMBOL_GPL(raw_spin_rq_lock_nested);
bool raw_spin_rq_trylock(struct rq *rq)
__context_unsafe()
@@ -691,6 +698,7 @@ void raw_spin_rq_unlock(struct rq *rq)
{
raw_spin_unlock(rq_lockp(rq));
}
+EXPORT_SYMBOL_GPL(raw_spin_rq_unlock);
/*
* double_rq_lock - safely lock two runqueues
@@ -710,6 +718,7 @@ void double_rq_lock(struct rq *rq1, struct rq *rq2)
double_rq_clock_clear_update(rq1, rq2);
}
+EXPORT_SYMBOL_GPL(double_rq_lock);
/*
* ___task_rq_lock - lock the rq @p resides on.
@@ -733,6 +742,7 @@ struct rq *___task_rq_lock(struct task_struct *p, struct rq_flags *rf)
cpu_relax();
}
}
+EXPORT_SYMBOL_GPL(___task_rq_lock);
/*
* task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
@@ -773,6 +783,7 @@ struct rq *_task_rq_lock(struct task_struct *p, struct rq_flags *rf)
cpu_relax();
}
}
+EXPORT_SYMBOL_GPL(_task_rq_lock);
/*
* RQ-clock updating methods:
@@ -866,6 +877,7 @@ void update_rq_clock(struct rq *rq)
update_rq_clock_task(rq, delta);
}
+EXPORT_SYMBOL_GPL(update_rq_clock);
#ifdef CONFIG_SCHED_HRTICK
/*
@@ -1036,6 +1048,7 @@ static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
*/
*head->lastp = node;
head->lastp = &node->next;
+ head->count++;
return true;
}
@@ -1092,12 +1105,14 @@ void wake_up_q(struct wake_q_head *head)
/* pairs with cmpxchg_relaxed() in __wake_q_add() */
WRITE_ONCE(task->wake_q.next, NULL);
/* Task can safely be re-inserted now. */
+ task->wake_q_count = head->count;
/*
* wake_up_process() executes a full barrier, which pairs with
* the queueing in wake_q_add() so as not to miss wakeups.
*/
wake_up_process(task);
+ task->wake_q_count = 0;
put_task_struct(task);
}
}
@@ -1181,6 +1196,7 @@ void resched_curr_lazy(struct rq *rq)
{
__resched_curr(rq, get_lazy_tif_bit());
}
+EXPORT_SYMBOL_GPL(resched_curr);
void resched_cpu(int cpu)
{
@@ -1207,6 +1223,11 @@ int get_nohz_timer_target(void)
int i, cpu = smp_processor_id(), default_cpu = -1;
struct sched_domain *sd;
const struct cpumask *hk_mask;
+ bool done = false;
+
+ trace_android_rvh_get_nohz_timer_target(&cpu, &done);
+ if (done)
+ return cpu;
if (housekeeping_cpu(cpu, HK_TYPE_KERNEL_NOISE)) {
if (!idle_cpu(cpu))
@@ -1525,6 +1546,7 @@ static struct uclamp_se uclamp_default[UCLAMP_CNT];
* * An admin modifying the cgroup cpu.uclamp.{min, max}
*/
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
+EXPORT_SYMBOL_GPL(sched_uclamp_used);
static inline unsigned int
uclamp_idle_value(struct rq *rq, enum uclamp_id clamp_id,
@@ -1641,6 +1663,12 @@ uclamp_eff_get(struct task_struct *p, enum uclamp_id clamp_id)
{
struct uclamp_se uc_req = uclamp_tg_restrict(p, clamp_id);
struct uclamp_se uc_max = uclamp_default[clamp_id];
+ struct uclamp_se uc_eff;
+ int ret = 0;
+
+ trace_android_rvh_uclamp_eff_get(p, clamp_id, &uc_max, &uc_eff, &ret);
+ if (ret)
+ return uc_eff;
/* System default restrictions always apply */
if (unlikely(uc_req.value > uc_max.value))
@@ -1661,6 +1689,7 @@ unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
return (unsigned long)uc_eff.value;
}
+EXPORT_SYMBOL_GPL(uclamp_eff_value);
/*
* When a task is enqueued on a rq, the clamp bucket currently defined by the
@@ -2104,7 +2133,9 @@ void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
*/
uclamp_rq_inc(rq, p, flags);
+ trace_android_rvh_enqueue_task(rq, p, flags);
p->sched_class->enqueue_task(rq, p, flags);
+ trace_android_rvh_after_enqueue_task(rq, p, flags);
psi_enqueue(p, flags);
@@ -2120,6 +2151,7 @@ void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
*/
inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
{
+ bool dequeue_task_result;
if (sched_core_enabled(rq))
sched_core_dequeue(rq, p, flags);
@@ -2136,7 +2168,10 @@ inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags)
* and mark the task ->sched_delayed.
*/
uclamp_rq_dec(rq, p);
- return p->sched_class->dequeue_task(rq, p, flags);
+ trace_android_rvh_dequeue_task(rq, p, flags);
+ dequeue_task_result = p->sched_class->dequeue_task(rq, p, flags);
+ trace_android_rvh_after_dequeue_task(rq, p, flags, &dequeue_task_result);
+ return dequeue_task_result;
}
void activate_task(struct rq *rq, struct task_struct *p, int flags)
@@ -2149,6 +2184,7 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
WRITE_ONCE(p->on_rq, TASK_ON_RQ_QUEUED);
ASSERT_EXCLUSIVE_WRITER(p->on_rq);
}
+EXPORT_SYMBOL_GPL(activate_task);
void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
@@ -2164,6 +2200,7 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
dequeue_task(rq, p, flags);
}
+EXPORT_SYMBOL_GPL(deactivate_task);
static void block_task(struct rq *rq, struct task_struct *p, int flags)
{
@@ -2202,6 +2239,7 @@ void wakeup_preempt(struct rq *rq, struct task_struct *p, int flags)
if (task_on_rq_queued(donor) && test_tsk_need_resched(rq->curr))
rq_clock_skip_update(rq);
}
+EXPORT_SYMBOL_GPL(wakeup_preempt);
static __always_inline
int __task_state_match(struct task_struct *p, unsigned int state)
@@ -2402,6 +2440,8 @@ static inline bool rq_has_pinned_tasks(struct rq *rq)
*/
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
+ bool allowed = true;
+
/* When not in the task's cpumask, no point in looking further. */
if (!task_allowed_on_cpu(p, cpu))
return false;
@@ -2410,14 +2450,20 @@ static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
if (is_migration_disabled(p))
return cpu_online(cpu);
+ /* check for all cases */
+ trace_android_rvh_is_cpu_allowed(p, cpu, &allowed);
+
/* Non kernel threads are not allowed during either online or offline. */
if (!(p->flags & PF_KTHREAD))
- return cpu_active(cpu);
+ return cpu_active(cpu) && allowed;
/* KTHREAD_IS_PER_CPU is always allowed. */
if (kthread_is_per_cpu(p))
return cpu_online(cpu);
+ if (!allowed)
+ return false;
+
/* Regular kernel threads don't get to stay during offline. */
if (cpu_dying(cpu))
return false;
@@ -2449,12 +2495,24 @@ static struct rq *move_queued_task(struct rq *rq, struct rq_flags *rf,
struct task_struct *p, int new_cpu)
__must_hold(__rq_lockp(rq))
{
+ int detached = 0;
+
lockdep_assert_rq_held(rq);
+ /*
+ * The vendor hook may drop the lock temporarily, so
+ * pass the rq flags to unpin lock. We expect the
+ * rq lock to be held after return.
+ */
+ trace_android_rvh_migrate_queued_task(rq, rf, p, new_cpu, &detached);
+ if (detached)
+ goto attach;
+
deactivate_task(rq, p, DEQUEUE_NOCLOCK);
set_task_cpu(p, new_cpu);
- rq_unlock(rq, rf);
+attach:
+ rq_unlock(rq, rf);
rq = cpu_rq(new_cpu);
rq_lock(rq, rf);
@@ -2492,8 +2550,8 @@ struct set_affinity_pending {
* So we race with normal scheduler movements, but that's OK, as long
* as the task is no longer on this CPU.
*/
-static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
- struct task_struct *p, int dest_cpu)
+struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
+ struct task_struct *p, int dest_cpu)
__must_hold(__rq_lockp(rq))
{
/* Affinity changed (again). */
@@ -2504,6 +2562,7 @@ static struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
return rq;
}
+EXPORT_SYMBOL_GPL(__migrate_task);
/*
* migration_cpu_stop - this will be executed by a high-prio stopper thread
@@ -3072,6 +3131,7 @@ static int __set_cpus_allowed_ptr_locked(struct task_struct *p,
* immediately required to distribute the tasks within their new mask.
*/
dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, ctx->new_mask);
+ trace_android_rvh_set_cpus_allowed_by_task(cpu_valid_mask, ctx->new_mask, p, &dest_cpu);
if (dest_cpu >= nr_cpu_ids) {
ret = -EINVAL;
goto out;
@@ -3100,13 +3160,15 @@ int __set_cpus_allowed_ptr(struct task_struct *p, struct affinity_context *ctx)
{
struct rq_flags rf;
struct rq *rq;
+ bool skip_user_ptr = false;
+ trace_android_rvh_set_cpus_allowed_ptr(p, ctx, &skip_user_ptr);
rq = task_rq_lock(p, &rf);
/*
* Masking should be skipped if SCA_USER or any of the SCA_MIGRATE_*
* flags are set.
*/
- if (p->user_cpus_ptr &&
+ if (!skip_user_ptr && p->user_cpus_ptr &&
!(ctx->flags & (SCA_USER | SCA_MIGRATE_ENABLE | SCA_MIGRATE_DISABLE)) &&
cpumask_and(rq->scratch_mask, ctx->new_mask, p->user_cpus_ptr))
ctx->new_mask = rq->scratch_mask;
@@ -3287,13 +3349,13 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
p->sched_class->migrate_task_rq(p, new_cpu);
p->se.nr_migrations++;
perf_event_task_migrate(p);
+ trace_android_rvh_set_task_cpu(p, new_cpu);
}
__set_task_cpu(p, new_cpu);
}
-#endif /* CONFIG_SMP */
+EXPORT_SYMBOL_GPL(set_task_cpu);
-#ifdef CONFIG_NUMA_BALANCING
static void __migrate_swap_task(struct task_struct *p, int cpu)
{
if (task_on_rq_queued(p)) {
@@ -3391,13 +3453,16 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
goto out;
+#ifdef CONFIG_NUMA_BALANCING
trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
+#endif
ret = stop_two_cpus(arg.dst_cpu, arg.src_cpu, migrate_swap_stop, &arg);
out:
return ret;
}
-#endif /* CONFIG_NUMA_BALANCING */
+EXPORT_SYMBOL_GPL(migrate_swap);
+#endif /* CONFIG_SMP */
/***
* kick_process - kick a running thread to enter/exit the kernel
@@ -3444,12 +3509,16 @@ EXPORT_SYMBOL_GPL(kick_process);
* select_task_rq() below may allow selection of !active CPUs in order
* to satisfy the above rules.
*/
-static int select_fallback_rq(int cpu, struct task_struct *p)
+int select_fallback_rq(int cpu, struct task_struct *p)
{
int nid = cpu_to_node(cpu);
const struct cpumask *nodemask = NULL;
enum { cpuset, possible, fail } state = cpuset;
- int dest_cpu;
+ int dest_cpu = -1;
+
+ trace_android_rvh_select_fallback_rq(cpu, p, &dest_cpu);
+ if (dest_cpu >= 0)
+ return dest_cpu;
/*
* If the node that the CPU is on has been offlined, cpu_to_node()
@@ -3508,6 +3577,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
return dest_cpu;
}
+EXPORT_SYMBOL_GPL(select_fallback_rq);
/*
* The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
@@ -3649,6 +3719,9 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
{
int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
+ if (wake_flags & WF_SYNC)
+ en_flags |= ENQUEUE_WAKEUP_SYNC;
+
lockdep_assert_rq_held(rq);
if (p->sched_contributes_to_load)
@@ -3813,6 +3886,7 @@ void wake_up_if_idle(int cpu)
resched_curr(rq);
}
}
+EXPORT_SYMBOL_GPL(wake_up_if_idle);
bool cpus_equal_capacity(int this_cpu, int that_cpu)
{
@@ -3896,7 +3970,12 @@ static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
{
- if (sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) {
+ bool cond = false;
+
+ trace_android_rvh_ttwu_cond(cpu, &cond);
+
+ if ((sched_feat(TTWU_QUEUE) && ttwu_queue_cond(p, cpu)) ||
+ cond) {
sched_clock_cpu(cpu); /* Sync clocks across CPUs */
__ttwu_queue_wakelist(p, cpu, wake_flags);
return true;
@@ -4160,6 +4239,9 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
if (READ_ONCE(p->on_rq) && ttwu_runnable(p, wake_flags))
break;
+ if (READ_ONCE(p->__state) & TASK_UNINTERRUPTIBLE)
+ trace_sched_blocked_reason(p);
+
/*
* Ensure we load p->on_cpu _after_ p->on_rq, otherwise it would be
* possible to, falsely, observe p->on_cpu == 0.
@@ -4227,6 +4309,8 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
*/
smp_cond_load_acquire(&p->on_cpu, !VAL);
+ trace_android_rvh_try_to_wake_up(p);
+
cpu = select_task_rq(p, p->wake_cpu, &wake_flags);
if (task_cpu(p) != cpu) {
if (p->in_iowait) {
@@ -4242,9 +4326,10 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
ttwu_queue(p, cpu, wake_flags);
}
out:
- if (success)
+ if (success) {
+ trace_android_rvh_try_to_wake_up_success(p);
ttwu_stat(p, task_cpu(p), wake_flags);
-
+ }
return success;
}
@@ -4410,6 +4495,8 @@ static void __sched_fork(u64 clone_flags, struct task_struct *p)
#endif
#endif
+ trace_android_rvh_sched_fork_init(p);
+
#ifdef CONFIG_SCHEDSTATS
/* Even if schedstat is disabled, there should not be garbage */
memset(&p->stats, 0, sizeof(p->stats));
@@ -4626,6 +4713,8 @@ late_initcall(sched_core_sysctl_init);
*/
int sched_fork(u64 clone_flags, struct task_struct *p)
{
+ trace_android_rvh_sched_fork(p);
+
__sched_fork(clone_flags, p);
/*
* We mark the process as NEW here. This guarantees that
@@ -4638,6 +4727,7 @@ int sched_fork(u64 clone_flags, struct task_struct *p)
* Make sure we do not leak PI boosting priority to the child.
*/
p->prio = current->normal_prio;
+ trace_android_rvh_prepare_prio_fork(p);
uclamp_fork(p);
@@ -4680,6 +4770,7 @@ int sched_fork(u64 clone_flags, struct task_struct *p)
}
init_entity_runnable_average(&p->se);
+ trace_android_rvh_finish_prio_fork(p);
#ifdef CONFIG_SCHED_INFO
@@ -4764,6 +4855,8 @@ void wake_up_new_task(struct task_struct *p)
struct rq *rq;
int wake_flags = WF_FORK;
+ trace_android_rvh_wake_up_new_task(p);
+
raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
WRITE_ONCE(p->__state, TASK_RUNNING);
/*
@@ -4779,6 +4872,7 @@ void wake_up_new_task(struct task_struct *p)
rq = __task_rq_lock(p, &rf);
update_rq_clock(rq);
post_init_entity_util_avg(p);
+ trace_android_rvh_new_task_stats(p);
activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL);
trace_sched_wakeup_new(p);
@@ -4944,6 +5038,7 @@ struct balance_callback balance_push_callback = {
.next = NULL,
.func = balance_push,
};
+EXPORT_SYMBOL_GPL(balance_push_callback);
static inline struct balance_callback *
__splice_balance_callbacks(struct rq *rq, bool split)
@@ -5181,6 +5276,8 @@ static struct rq *finish_task_switch(struct task_struct *prev)
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
+ trace_android_rvh_flush_task(prev);
+
/*
* sched_ext_dead() must come before cgroup_task_dead() to
* prevent cgroups from being removed while its member tasks are
@@ -5410,6 +5507,11 @@ void sched_exec(void)
struct task_struct *p = current;
struct migration_arg arg;
int dest_cpu;
+ bool cond = false;
+
+ trace_android_rvh_sched_exec(&cond);
+ if (cond)
+ return;
scoped_guard (raw_spinlock_irqsave, &p->pi_lock) {
dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
@@ -5561,6 +5663,7 @@ void sched_tick(void)
psi_account_irqtime(rq, donor, NULL);
update_rq_clock(rq);
+ trace_android_rvh_tick_entry(rq);
hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure);
@@ -5588,6 +5691,8 @@ void sched_tick(void)
rq->idle_balance = idle_cpu(cpu);
sched_balance_trigger(rq);
}
+
+ trace_android_vh_scheduler_tick(rq);
}
#ifdef CONFIG_NO_HZ_FULL
@@ -5841,6 +5946,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
}
check_panic_on_warn("scheduling while atomic");
+ trace_android_rvh_schedule_bug(prev);
+
dump_stack();
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
@@ -6861,6 +6968,7 @@ static void __sched notrace __schedule(int sched_mode)
keep_resched:
rq->last_seen_need_resched_ns = 0;
+ trace_android_rvh_schedule(prev, next, rq);
is_switch = prev != next;
if (likely(is_switch)) {
rq->nr_switches++;
@@ -7310,6 +7418,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
struct rq_flags rf;
struct rq *rq;
+ trace_android_rvh_rtmutex_prepare_setprio(p, pi_task);
/* XXX used to be waiter->prio, not waiter->task->prio */
prio = __rt_effective_prio(pi_task, p->normal_prio);
@@ -8450,6 +8559,7 @@ int sched_cpu_starting(unsigned int cpu)
sched_core_cpu_starting(cpu);
sched_rq_cpu_starting(cpu);
sched_tick_start(cpu);
+ trace_android_rvh_sched_cpu_starting(cpu);
return 0;
}
@@ -8529,6 +8639,8 @@ int sched_cpu_dying(unsigned int cpu)
#endif
rq_unlock_irqrestore(rq, &rf);
+ trace_android_rvh_sched_cpu_dying(cpu);
+
calc_load_migrate(rq);
update_max_interval();
hrtick_clear(rq);
@@ -8586,7 +8698,9 @@ int in_sched_functions(unsigned long addr)
* Every task in system belongs to this group at bootup.
*/
struct task_group root_task_group;
+EXPORT_SYMBOL_GPL(root_task_group);
LIST_HEAD(task_groups);
+EXPORT_SYMBOL_GPL(task_groups);
/* Cacheline aligned slab cache for task_group */
static struct kmem_cache *task_group_cache __ro_after_init;
@@ -8881,6 +8995,8 @@ void __might_resched(const char *file, int line, unsigned int offsets)
print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
preempt_disable_ip);
+ trace_android_rvh_schedule_bug(NULL);
+
dump_stack();
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
@@ -9228,6 +9344,7 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
cpu_util_update_eff(css);
#endif
+ trace_android_rvh_cpu_cgroup_online(css);
return 0;
}
@@ -9280,6 +9397,8 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset)
cgroup_taskset_for_each(task, css, tset)
sched_move_task(task, false);
+
+ trace_android_rvh_cpu_cgroup_attach(tset);
}
static void cpu_cgroup_cancel_attach(struct cgroup_taskset *tset)
@@ -9459,6 +9578,27 @@ static int cpu_uclamp_max_show(struct seq_file *sf, void *v)
cpu_uclamp_print(sf, UCLAMP_MAX);
return 0;
}
+
+static int cpu_uclamp_ls_write_u64(struct cgroup_subsys_state *css,
+ struct cftype *cftype, u64 ls)
+{
+ struct task_group *tg;
+
+ if (ls > 1)
+ return -EINVAL;
+ tg = css_tg(css);
+ tg->latency_sensitive = (unsigned int) ls;
+
+ return 0;
+}
+
+static u64 cpu_uclamp_ls_read_u64(struct cgroup_subsys_state *css,
+ struct cftype *cft)
+{
+ struct task_group *tg = css_tg(css);
+
+ return (u64) tg->latency_sensitive;
+}
#endif /* CONFIG_UCLAMP_TASK_GROUP */
#ifdef CONFIG_GROUP_SCHED_WEIGHT
@@ -9968,6 +10108,12 @@ static struct cftype cpu_legacy_files[] = {
.seq_show = cpu_uclamp_max_show,
.write = cpu_uclamp_max_write,
},
+ {
+ .name = "uclamp.latency_sensitive",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .read_u64 = cpu_uclamp_ls_read_u64,
+ .write_u64 = cpu_uclamp_ls_write_u64,
+ },
#endif
{ } /* Terminate */
};
@@ -10233,6 +10379,12 @@ static struct cftype cpu_files[] = {
.seq_show = cpu_uclamp_max_show,
.write = cpu_uclamp_max_write,
},
+ {
+ .name = "uclamp.latency_sensitive",
+ .flags = CFTYPE_NOT_ON_ROOT,
+ .read_u64 = cpu_uclamp_ls_read_u64,
+ .write_u64 = cpu_uclamp_ls_write_u64,
+ },
#endif /* CONFIG_UCLAMP_TASK_GROUP */
{ } /* terminate */
};
diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
index 742fb9e..cb4214f 100644
--- a/kernel/sched/cpufreq.c
+++ b/kernel/sched/cpufreq.c
@@ -8,6 +8,7 @@
#include "sched.h"
DEFINE_PER_CPU(struct update_util_data __rcu *, cpufreq_update_util_data);
+EXPORT_PER_CPU_SYMBOL_GPL(cpufreq_update_util_data);
/**
* cpufreq_add_update_util_hook - Populate the CPU's update_util_data pointer.
@@ -73,3 +74,4 @@ bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
(policy->dvfs_possible_from_any_cpu &&
rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data)));
}
+EXPORT_SYMBOL_GPL(cpufreq_this_cpu_can_update);
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
index 8f2237e8..a1ece5d 100644
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -197,6 +197,7 @@ int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
return 0;
}
+EXPORT_SYMBOL_GPL(cpupri_find_fitness);
/**
* cpupri_set - update the CPU priority setting
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index fbf31db..fe49ad5 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -5,6 +5,9 @@
#include <linux/sched/cputime.h>
#include <linux/tsacct_kern.h>
#include "sched.h"
+#include <linux/cpufreq_times.h>
+#include <trace/hooks/sched.h>
+#undef TRACE_INCLUDE_PATH
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <asm/cputime.h>
@@ -26,6 +29,7 @@ DEFINE_STATIC_KEY_FALSE(sched_clock_irqtime);
* compromise in place of having locks on each IRQ in account_system_time.
*/
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_irqtime);
void enable_sched_clock_irqtime(void)
{
@@ -60,6 +64,7 @@ void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
unsigned int pc;
s64 delta;
int cpu;
+ bool irq_start = true;
if (!irqtime_enabled())
return;
@@ -75,10 +80,15 @@ void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
* in that case, so as not to confuse scheduler with a special task
* that do not consume any time, but still wants to run.
*/
- if (pc & HARDIRQ_MASK)
+ if (pc & HARDIRQ_MASK) {
irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
- else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd())
+ irq_start = false;
+ } else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd()) {
irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
+ irq_start = false;
+ }
+
+ trace_android_rvh_account_irq(curr, cpu, delta, irq_start);
}
static u64 irqtime_tick_accounted(u64 maxtime)
@@ -135,6 +145,9 @@ void account_user_time(struct task_struct *p, u64 cputime)
/* Account for user time used */
acct_account_cputime(p);
+
+ /* Account power usage for user time */
+ cpufreq_acct_update_power(p, cputime);
}
/*
@@ -179,6 +192,9 @@ void account_system_index_time(struct task_struct *p,
/* Account for system time used */
acct_account_cputime(p);
+
+ /* Account power usage for system time */
+ cpufreq_acct_update_power(p, cputime);
}
/*
@@ -468,6 +484,7 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
*ut = cputime.utime;
*st = cputime.stime;
}
+EXPORT_SYMBOL_GPL(thread_group_cputime_adjusted);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */
@@ -644,6 +661,8 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
thread_group_cputime(p, &cputime);
cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
+EXPORT_SYMBOL_GPL(thread_group_cputime_adjusted);
+
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d08b004..626e6d3 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2619,6 +2619,7 @@ static struct task_struct *__pick_task_dl(struct rq *rq, struct rq_flags *rf)
goto again;
}
rq->dl_server = dl_se;
+ trace_android_vh_dump_dl_server(dl_se, p);
} else {
p = dl_task_of(dl_se);
}
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index b24f40f..8657839 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -50,9 +50,10 @@ static unsigned long nsec_low(unsigned long long nsec)
#define SCHED_FEAT(name, enabled) \
#name ,
-static const char * const sched_feat_names[] = {
+const char * const sched_feat_names[] = {
#include "features.h"
};
+EXPORT_SYMBOL_GPL(sched_feat_names);
#undef SCHED_FEAT
@@ -81,6 +82,7 @@ static int sched_feat_show(struct seq_file *m, void *v)
struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};
+EXPORT_SYMBOL_GPL(sched_feat_keys);
#undef SCHED_FEAT
@@ -1127,6 +1129,7 @@ static void sched_debug_header(struct seq_file *m)
#define PN(x) \
SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
PN(sysctl_sched_base_slice);
+ P(sysctl_sched_child_runs_first);
P(sysctl_sched_features);
#undef PN
#undef P
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 62b1f3a..add09a4 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -192,6 +192,7 @@ MODULE_PARM_DESC(bypass_lb_intv_us, "bypass load balance interval in microsecond
#define CREATE_TRACE_POINTS
#include <trace/events/sched_ext.h>
+#undef CREATE_TRACE_POINTS
static void process_ddsp_deferred_locals(struct rq *rq);
static bool task_dead_and_done(struct task_struct *p);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index eea99ec0..68805bc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -58,6 +58,24 @@
#include "stats.h"
#include "autogroup.h"
+#include <trace/hooks/sched.h>
+
+/*
+ * Targeted preemption latency for CPU-bound tasks:
+ *
+ * NOTE: this latency value is not the same as the concept of
+ * 'timeslice length' - timeslices in CFS are of variable length
+ * and have no persistent notion like in traditional, time-slice
+ * based scheduling concepts.
+ *
+ * (to see the precise effective timeslice length of your workload,
+ * run vmstat and monitor the context-switches (cs) field)
+ *
+ * (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
+ */
+unsigned int sysctl_sched_latency = 6000000ULL;
+EXPORT_SYMBOL_GPL(sysctl_sched_latency);
+
/*
* The initial- and re-scaling of tunables is configurable
*
@@ -79,6 +97,12 @@ unsigned int sysctl_sched_tunable_scaling = SCHED_TUNABLESCALING_LOG;
unsigned int sysctl_sched_base_slice = 700000ULL;
static unsigned int normalized_sysctl_sched_base_slice = 700000ULL;
+/*
+ * After fork, child runs first. If set to 0 (default) then
+ * parent will (try to) run first.
+ */
+unsigned int sysctl_sched_child_runs_first __read_mostly;
+
__read_mostly unsigned int sysctl_sched_migration_cost = 500000UL;
static int __init setup_sched_thermal_decay_shift(char *str)
@@ -132,6 +156,13 @@ static unsigned int sysctl_numa_balancing_promote_rate_limit = 65536;
#ifdef CONFIG_SYSCTL
static const struct ctl_table sched_fair_sysctls[] = {
+ {
+ .procname = "sched_child_runs_first",
+ .data = &sysctl_sched_child_runs_first,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
#ifdef CONFIG_CFS_BANDWIDTH
{
.procname = "sched_cfs_bandwidth_slice_us",
@@ -855,6 +886,7 @@ RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity,
*/
static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
+ trace_android_rvh_enqueue_entity(cfs_rq, se);
sum_w_vruntime_add(cfs_rq, se);
update_zero_vruntime(cfs_rq);
se->min_vruntime = se->vruntime;
@@ -865,6 +897,7 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
+ trace_android_rvh_dequeue_entity(cfs_rq, se);
rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
&min_vruntime_cb);
sum_w_vruntime_sub(cfs_rq, se);
@@ -4835,6 +4868,11 @@ static inline void util_est_update(struct cfs_rq *cfs_rq,
bool task_sleep)
{
unsigned int ewma, dequeued, last_ewma_diff;
+ int ret = 0;
+
+ trace_android_rvh_util_est_update(cfs_rq, p, task_sleep, &ret);
+ if (ret)
+ return;
if (!sched_feat(UTIL_EST))
return;
@@ -5050,11 +5088,13 @@ static inline int task_fits_cpu(struct task_struct *p, int cpu)
return (util_fits_cpu(util, uclamp_min, uclamp_max, cpu) > 0);
}
-static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+inline void update_misfit_status(struct task_struct *p, struct rq *rq)
{
int cpu = cpu_of(rq);
+ bool need_update = true;
- if (!sched_asym_cpucap_active())
+ trace_android_rvh_update_misfit_status(p, rq, &need_update);
+ if (!sched_asym_cpucap_active() || !need_update)
return;
/*
@@ -5075,6 +5115,7 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
*/
rq->misfit_task_load = max_t(unsigned long, task_h_load(p), 1);
}
+EXPORT_SYMBOL_GPL(update_misfit_status);
void __setparam_fair(struct task_struct *p, const struct sched_attr *attr)
{
@@ -5198,6 +5239,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* EEVDF: vd_i = ve_i + r_i/w_i
*/
se->deadline = se->vruntime + vslice;
+ trace_android_rvh_place_entity(cfs_rq, se, flags, &vruntime);
}
static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
@@ -5419,8 +5461,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
return true;
}
-static void
-set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
clear_buddies(cfs_rq, se);
@@ -5534,6 +5575,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
return;
}
#endif
+ trace_android_rvh_entity_tick(cfs_rq, curr);
}
@@ -6780,6 +6822,11 @@ static inline void hrtick_update(struct rq *rq)
static inline bool cpu_overutilized(int cpu)
{
unsigned long rq_util_min, rq_util_max;
+ int overutilized = -1;
+
+ trace_android_rvh_cpu_overutilized(cpu, &overutilized);
+ if (overutilized != -1)
+ return overutilized;
if (!sched_energy_enabled())
return false;
@@ -6936,6 +6983,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
flags = ENQUEUE_WAKEUP;
}
+ trace_android_rvh_enqueue_task_fair(rq, p, flags);
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
@@ -7053,6 +7101,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags)
flags &= ~(DEQUEUE_DELAYED | DEQUEUE_SPECIAL);
}
+ trace_android_rvh_dequeue_task_fair(rq, p, flags);
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
@@ -8311,7 +8360,7 @@ compute_energy(struct energy_env *eenv, struct perf_domain *pd,
* other use-cases too. So, until someone finds a better way to solve this,
* let's keep things simple by re-using the existing slow path.
*/
-static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
{
struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
@@ -8326,11 +8375,21 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
struct perf_domain *pd;
struct energy_env eenv;
+ sync_entity_load_avg(&p->se);
+
rcu_read_lock();
pd = rcu_dereference_all(rd->pd);
if (!pd)
goto unlock;
+ cpu = smp_processor_id();
+ if (sync && cpu_rq(cpu)->nr_running == 1 &&
+ cpumask_test_cpu(cpu, p->cpus_ptr) &&
+ task_fits_cpu(p, cpu)) {
+ rcu_read_unlock();
+ return cpu;
+ }
+
/*
* Energy-aware wake-up happens on the lowest sched_domain starting
* from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
@@ -8514,9 +8573,18 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
int cpu = smp_processor_id();
int new_cpu = prev_cpu;
int want_affine = 0;
+ int target_cpu = -1;
/* SD_flags and WF_flags share the first nibble */
int sd_flag = wake_flags & 0xF;
+ if (trace_android_rvh_select_task_rq_fair_enabled() &&
+ !(sd_flag & SD_BALANCE_FORK))
+ sync_entity_load_avg(&p->se);
+ trace_android_rvh_select_task_rq_fair(p, prev_cpu, sd_flag,
+ wake_flags, &target_cpu);
+ if (target_cpu >= 0)
+ return target_cpu;
+
/*
* required for stable ->cpus_allowed
*/
@@ -8529,7 +8597,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
return cpu;
if (!is_rd_overutilized(this_rq()->rd)) {
- new_cpu = find_energy_efficient_cpu(p, prev_cpu);
+ new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync);
if (new_cpu >= 0)
return new_cpu;
new_cpu = prev_cpu;
@@ -8742,6 +8810,8 @@ static void wakeup_preempt_fair(struct rq *rq, struct task_struct *p, int wake_f
struct sched_entity *se = &donor->se, *pse = &p->se;
struct cfs_rq *cfs_rq = task_cfs_rq(donor);
int cse_is_idle, pse_is_idle;
+ bool ignore = false;
+ bool preempt = false;
/*
* XXX Getting preempted by higher class, try and find idle CPU?
@@ -8807,6 +8877,12 @@ static void wakeup_preempt_fair(struct rq *rq, struct task_struct *p, int wake_f
cfs_rq = cfs_rq_of(se);
update_curr(cfs_rq);
+ trace_android_rvh_check_preempt_wakeup_fair(rq, p, &preempt, &ignore,
+ wake_flags, se, pse);
+ if (preempt)
+ goto preempt;
+ if (ignore)
+ return;
/*
* If @p has a shorter slice than current and @p is eligible, override
* current's slice protection in order to allow preemption.
@@ -8914,6 +8990,8 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
again:
p = pick_task_fair(rq, rf);
+ trace_android_rvh_replace_next_task_fair(rq, &p, prev);
+
if (!p)
goto idle;
se = &p->se;
@@ -9192,7 +9270,8 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
* rewrite all of this once again.]
*/
-static unsigned long __read_mostly max_load_balance_interval = HZ/10;
+unsigned long __read_mostly max_load_balance_interval = HZ/10;
+EXPORT_SYMBOL_GPL(max_load_balance_interval);
enum fbq_type { regular, remote, all };
@@ -9277,6 +9356,7 @@ struct lb_env {
enum fbq_type fbq_type;
enum migration_type migration_type;
struct list_head tasks;
+ struct rq_flags *src_rq_rf;
};
/*
@@ -9414,11 +9494,16 @@ static
int can_migrate_task(struct task_struct *p, struct lb_env *env)
{
long degrades, hot;
+ int can_migrate = 1;
lockdep_assert_rq_held(env->src_rq);
if (p->sched_task_hot)
p->sched_task_hot = 0;
+ trace_android_rvh_can_migrate_task(p, env->dst_cpu, &can_migrate);
+ if (!can_migrate)
+ return 0;
+
/*
* We do not migrate tasks that are:
* 1) delayed dequeued unless we migrate load, or
@@ -9523,8 +9608,20 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
*/
static void detach_task(struct task_struct *p, struct lb_env *env)
{
+ int detached = 0;
+
lockdep_assert_rq_held(env->src_rq);
+ /*
+ * The vendor hook may drop the lock temporarily, so
+ * pass the rq flags to unpin lock. We expect the
+ * rq lock to be held after return.
+ */
+ trace_android_rvh_migrate_queued_task(env->src_rq, env->src_rq_rf, p,
+ env->dst_cpu, &detached);
+ if (detached)
+ return;
+
if (p->sched_task_hot) {
p->sched_task_hot = 0;
schedstat_inc(env->sd->lb_hot_gained[env->idle]);
@@ -10050,6 +10147,7 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
if (!capacity)
capacity = 1;
+ trace_android_rvh_update_cpu_capacity(cpu, &capacity);
cpu_rq(cpu)->cpu_capacity = capacity;
trace_sched_cpu_capacity_tp(cpu_rq(cpu));
@@ -11340,6 +11438,7 @@ static struct sched_group *sched_balance_find_src_group(struct lb_env *env)
{
struct sg_lb_stats *local, *busiest;
struct sd_lb_stats sds;
+ int out_balance = 1;
init_sd_lb_stats(&sds);
@@ -11359,8 +11458,9 @@ static struct sched_group *sched_balance_find_src_group(struct lb_env *env)
if (busiest->group_type == group_misfit_task)
goto force_balance;
+ trace_android_rvh_sched_balance_find_src_group(sds.busiest, env->dst_rq, &out_balance);
if (!is_rd_overutilized(env->dst_rq->rd) &&
- rcu_dereference_all(env->dst_rq->rd->pd))
+ rcu_dereference_all(env->dst_rq->rd->pd) && out_balance)
goto out_balanced;
/* ASYM feature bypasses nice load balance check */
@@ -11480,7 +11580,12 @@ static struct rq *sched_balance_find_src_rq(struct lb_env *env,
struct rq *busiest = NULL, *rq;
unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1;
unsigned int busiest_nr = 0;
- int i;
+ int i, done = 0;
+
+ trace_android_rvh_find_busiest_queue(env->dst_cpu, group, env->cpus,
+ &busiest, &done);
+ if (done)
+ return busiest;
for_each_cpu_and(i, sched_group_span(group), env->cpus) {
unsigned long capacity, load, util;
@@ -11866,6 +11971,7 @@ static int sched_balance_rq(int this_cpu, struct rq *this_rq,
more_balance:
rq_lock_irqsave(busiest, &rf);
+ env.src_rq_rf = &rf;
update_rq_clock(busiest);
/*
@@ -12173,6 +12279,7 @@ static int active_load_balance_cpu_stop(void *data)
.src_rq = busiest_rq,
.idle = CPU_IDLE,
.flags = LBF_ACTIVE_LB,
+ .src_rq_rf = &rf,
};
schedstat_inc(sd->alb_count);
@@ -12271,6 +12378,10 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
int need_decay = 0;
u64 max_cost = 0;
+ trace_android_rvh_sched_rebalance_domains(rq, &continue_balancing);
+ if (!continue_balancing)
+ return;
+
rcu_read_lock();
for_each_domain(cpu, sd) {
/*
@@ -12347,6 +12458,11 @@ static inline int find_new_ilb(void)
{
const struct cpumask *hk_mask;
int ilb_cpu;
+ int new_ilb = nr_cpu_ids;
+
+ trace_android_rvh_find_new_ilb(nohz.idle_cpus_mask, &new_ilb);
+ if (new_ilb != nr_cpu_ids)
+ return new_ilb;
hk_mask = housekeeping_cpumask(HK_TYPE_KERNEL_NOISE);
@@ -12418,6 +12534,7 @@ static void nohz_balancer_kick(struct rq *rq)
struct sched_domain *sd;
int nr_busy, i, cpu = rq->cpu;
unsigned int flags = 0;
+ int done = 0;
if (unlikely(rq->idle_balance))
return;
@@ -12451,6 +12568,10 @@ static void nohz_balancer_kick(struct rq *rq)
if (unlikely(cpumask_empty(nohz.idle_cpus_mask)))
return;
+ trace_android_rvh_sched_nohz_balancer_kick(rq, &flags, &done);
+ if (done)
+ goto out;
+
if (rq->nr_running >= 2) {
flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
goto out;
@@ -12858,6 +12979,11 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
u64 t0, t1, curr_cost = 0;
struct sched_domain *sd;
int pulled_task = 0;
+ int done = 0;
+
+ trace_android_rvh_sched_newidle_balance(this_rq, rf, &pulled_task, &done);
+ if (done)
+ return pulled_task;
update_misfit_status(NULL, this_rq);
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 8977908..0a13b4f0 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -295,6 +295,12 @@ ___update_load_avg(struct sched_avg *sa, unsigned long load)
int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
{
+ int ret = -1;
+
+ trace_android_rvh_update_load_avg_blocked_se(now, se, &ret);
+ if (ret != -1)
+ return ret;
+
if (___update_load_sum(now, &se->avg, 0, 0, 0)) {
___update_load_avg(&se->avg, se_weight(se));
trace_pelt_se_tp(se);
@@ -303,9 +309,16 @@ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
return 0;
}
+EXPORT_SYMBOL_GPL(__update_load_avg_blocked_se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
+ int ret = -1;
+
+ trace_android_rvh_update_load_avg_se(now, cfs_rq, se, &ret);
+ if (ret != -1)
+ return ret;
+
if (___update_load_sum(now, &se->avg, !!se->on_rq, se_runnable(se),
cfs_rq->curr == se)) {
@@ -320,6 +333,12 @@ int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se
int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
{
+ int ret = -1;
+
+ trace_android_rvh_update_load_avg_cfs_rq(now, cfs_rq, &ret);
+ if (ret != -1)
+ return ret;
+
if (___update_load_sum(now, &cfs_rq->avg,
scale_load_down(cfs_rq->load.weight),
cfs_rq->h_nr_runnable,
@@ -346,6 +365,12 @@ int __update_load_avg_cfs_rq(u64 now, struct cfs_rq *cfs_rq)
int update_rt_rq_load_avg(u64 now, struct rq *rq, int running)
{
+ int ret = -1;
+
+ trace_android_rvh_update_rt_rq_load_avg_internal(now, rq, running, &ret);
+ if (ret != -1)
+ return ret;
+
if (___update_load_sum(now, &rq->avg_rt,
running,
running,
diff --git a/kernel/sched/pelt.h b/kernel/sched/pelt.h
index f921302..e976a41 100644
--- a/kernel/sched/pelt.h
+++ b/kernel/sched/pelt.h
@@ -1,6 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#ifndef _KERNEL_SCHED_PELT_H
#define _KERNEL_SCHED_PELT_H
+#include <trace/hooks/sched.h>
#include "sched.h"
#include "sched-pelt.h"
@@ -99,6 +100,12 @@ static inline void _update_idle_rq_clock_pelt(struct rq *rq)
*/
static inline void update_rq_clock_pelt(struct rq *rq, s64 delta)
{
+ int ret = 0;
+
+ trace_android_rvh_update_rq_clock_pelt(rq, delta, &ret);
+ if (ret)
+ return;
+
if (unlikely(is_idle_task(rq->curr))) {
_update_idle_rq_clock_pelt(rq);
return;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f69e1f1..9534b4b 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -4,6 +4,8 @@
* policies)
*/
+#include <trace/hooks/sched.h>
+#undef TRACE_INCLUDE_PATH
#include "sched.h"
#include "pelt.h"
@@ -885,6 +887,13 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
if (likely(rt_b->rt_runtime)) {
rt_rq->rt_throttled = 1;
printk_deferred_once("sched: RT throttling activated\n");
+
+ trace_android_vh_dump_throttled_rt_tasks(
+ raw_smp_processor_id(),
+ rq_clock(rq_of_rt_rq(rt_rq)),
+ sched_rt_period(rt_rq),
+ runtime,
+ hrtimer_get_expires_ns(&rt_b->rt_period_timer));
} else {
/*
* In case we did anyway, make it go away,
@@ -1424,6 +1433,27 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
enqueue_top_rt_rq(&rq->rt);
}
+#ifdef CONFIG_SMP
+static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
+ bool sync)
+{
+ /*
+ * If the waker is CFS, then an RT sync wakeup would preempt the waker
+ * and force it to run for a likely small time after the RT wakee is
+ * done. So, only honor RT sync wakeups from RT wakers.
+ */
+ return sync && task_has_rt_policy(rq->curr) &&
+ p->prio <= rq->rt.highest_prio.next &&
+ rq->rt.rt_nr_running <= 2;
+}
+#else
+static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
+ bool sync)
+{
+ return 0;
+}
+#endif
+
/*
* Adding/removing a task to/from a priority array:
*/
@@ -1431,6 +1461,7 @@ static void
enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
{
struct sched_rt_entity *rt_se = &p->rt;
+ bool sync = !!(flags & ENQUEUE_WAKEUP_SYNC);
if (flags & ENQUEUE_WAKEUP)
rt_se->timeout = 0;
@@ -1443,7 +1474,8 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
if (task_is_blocked(p))
return;
- if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
+ if (!task_current(rq, p) && p->nr_cpus_allowed > 1 &&
+ !should_honor_rt_sync(rq, p, sync))
enqueue_pushable_task(rq, p);
}
@@ -1495,12 +1527,47 @@ static void yield_task_rt(struct rq *rq)
static int find_lowest_rq(struct task_struct *task);
+#ifdef CONFIG_RT_SOFTIRQ_AWARE_SCHED
+/*
+ * Return whether the given cpu is currently non-preemptible
+ * while handling a potentially long softirq, or if the current
+ * task is likely to block preemptions soon because it is a
+ * ksoftirq thread that is handling softirqs.
+ */
+static bool cpu_busy_with_softirqs(int cpu)
+{
+ u32 softirqs = per_cpu(active_softirqs, cpu) |
+ __cpu_softirq_pending(cpu);
+
+ return softirqs & LONG_SOFTIRQ_MASK;
+}
+#else
+static bool cpu_busy_with_softirqs(int cpu)
+{
+ return false;
+}
+#endif /* CONFIG_RT_SOFTIRQ_AWARE_SCHED */
+
+static bool rt_task_fits_cpu(struct task_struct *p, int cpu)
+{
+ return rt_task_fits_capacity(p, cpu) && !cpu_busy_with_softirqs(cpu);
+}
+
static int
select_task_rq_rt(struct task_struct *p, int cpu, int flags)
{
struct task_struct *curr, *donor;
struct rq *rq;
+ struct rq *this_cpu_rq;
bool test;
+ int target_cpu = -1;
+ bool sync = !!(flags & WF_SYNC);
+ int this_cpu;
+
+ trace_android_rvh_select_task_rq_rt(p, cpu, flags & 0xF,
+ flags, &target_cpu);
+ if (target_cpu >= 0)
+ return target_cpu;
/* For anything but wake ups, just return the task_cpu */
if (!(flags & (WF_TTWU | WF_FORK)))
@@ -1511,6 +1578,8 @@ select_task_rq_rt(struct task_struct *p, int cpu, int flags)
rcu_read_lock();
curr = READ_ONCE(rq->curr); /* unlocked access */
donor = READ_ONCE(rq->donor);
+ this_cpu = smp_processor_id();
+ this_cpu_rq = cpu_rq(this_cpu);
/*
* If the current task on @p's runqueue is an RT task, then
@@ -1534,22 +1603,33 @@ select_task_rq_rt(struct task_struct *p, int cpu, int flags)
* This test is optimistic, if we get it wrong the load-balancer
* will have to sort it out.
*
- * We take into account the capacity of the CPU to ensure it fits the
- * requirement of the task - which is only important on heterogeneous
- * systems like big.LITTLE.
+ * We use rt_task_fits_cpu() to evaluate if the CPU is busy with
+ * potentially long-running softirq work, as well as take into
+ * account the capacity of the CPU to ensure it fits the
+ * requirement of the task - which is only important on
+ * heterogeneous systems like big.LITTLE.
*/
test = curr &&
unlikely(rt_task(donor)) &&
(curr->nr_cpus_allowed < 2 || donor->prio <= p->prio);
- if (test || !rt_task_fits_capacity(p, cpu)) {
+ /*
+ * Respect the sync flag as long as the task can run on this CPU.
+ */
+ if (should_honor_rt_sync(this_cpu_rq, p, sync) &&
+ cpumask_test_cpu(this_cpu, p->cpus_ptr)) {
+ cpu = this_cpu;
+ goto out_unlock;
+ }
+
+ if (test || !rt_task_fits_cpu(p, cpu)) {
int target = find_lowest_rq(p);
/*
* Bail out if we were forcing a migration to find a better
* fitting CPU but our search failed.
*/
- if (!test && target != -1 && !rt_task_fits_capacity(p, target))
+ if (!test && target != -1 && !rt_task_fits_cpu(p, target))
goto out_unlock;
/*
@@ -1594,6 +1674,8 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
{
if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
+ int done = 0;
+
/*
* This is OK, because current is on_cpu, which avoids it being
* picked for load-balance and preemption/IRQs are still
@@ -1601,7 +1683,9 @@ static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
* not yet started the picking loop.
*/
rq_unpin_lock(rq, rf);
- pull_rt_task(rq);
+ trace_android_rvh_sched_balance_rt(rq, p, &done);
+ if (!done)
+ pull_rt_task(rq);
rq_repin_lock(rq, rf);
}
@@ -1742,7 +1826,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_s
* Return the highest pushable rq's task, which is suitable to be executed
* on the CPU, NULL otherwise
*/
-static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
+struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
{
struct plist_head *head = &rq->rt.pushable_tasks;
struct task_struct *p;
@@ -1757,6 +1841,7 @@ static struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu)
return NULL;
}
+EXPORT_SYMBOL_GPL(pick_highest_pushable_task);
static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask);
@@ -1765,7 +1850,7 @@ static int find_lowest_rq(struct task_struct *task)
struct sched_domain *sd;
struct cpumask *lowest_mask = this_cpu_cpumask_var_ptr(local_cpu_mask);
int this_cpu = smp_processor_id();
- int cpu = task_cpu(task);
+ int cpu = -1;
int ret;
/* Make sure the mask is initialized first */
@@ -1776,23 +1861,32 @@ static int find_lowest_rq(struct task_struct *task)
return -1; /* No other targets possible */
/*
- * If we're on asym system ensure we consider the different capacities
- * of the CPUs when searching for the lowest_mask.
+ * If we're using the softirq optimization or if we are
+ * on asym system, ensure we consider the softirq processing
+ * or different capacities of the CPUs when searching for the
+ * lowest_mask.
*/
- if (sched_asym_cpucap_active()) {
+ if (IS_ENABLED(CONFIG_RT_SOFTIRQ_AWARE_SCHED) ||
+ sched_asym_cpucap_active()) {
ret = cpupri_find_fitness(&task_rq(task)->rd->cpupri,
task, lowest_mask,
- rt_task_fits_capacity);
+ rt_task_fits_cpu);
} else {
ret = cpupri_find(&task_rq(task)->rd->cpupri,
task, lowest_mask);
}
+ trace_android_rvh_find_lowest_rq(task, lowest_mask, ret, &cpu);
+ if (cpu >= 0)
+ return cpu;
+
if (!ret)
return -1; /* No targets found */
+ cpu = task_cpu(task);
+
/*
* At this point we have built a mask of CPUs representing the
* lowest priority tasks in the system. Now we want to elect
@@ -2128,6 +2222,9 @@ static int rto_next_cpu(struct root_domain *rd)
/* When rto_cpu is -1 this acts like cpumask_first() */
cpu = cpumask_next(rd->rto_cpu, rd->rto_mask);
+ /* this will be any CPU in the rd->rto_mask, and can be a halted cpu update it */
+ trace_android_rvh_rto_next_cpu(rd->rto_cpu, rd->rto_mask, &cpu);
+
rd->rto_cpu = cpu;
/* Do not send IPI to self */
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b82fb70..48d0c2d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -47,6 +47,7 @@
#include <linux/memcontrol.h>
#include <linux/minmax.h>
#include <linux/mm.h>
+#include <linux/mmu_context.h>
#include <linux/module.h>
#include <linux/mutex_api.h>
#include <linux/plist.h>
@@ -74,6 +75,7 @@
#include <linux/workqueue_api.h>
#include <linux/delayacct.h>
#include <linux/mmu_context.h>
+#include <linux/android_vendor.h>
#include <trace/events/power.h>
#include <trace/events/sched.h>
@@ -104,6 +106,8 @@ extern __read_mostly int scheduler_running;
extern unsigned long calc_load_update;
extern atomic_long_t calc_load_tasks;
+extern unsigned int sysctl_sched_child_runs_first;
+
extern void calc_global_load_tick(struct rq *this_rq);
extern long calc_load_fold_active(struct rq *this_rq, long adjust);
@@ -522,6 +526,10 @@ struct task_group {
struct uclamp_se uclamp_req[UCLAMP_CNT];
/* Effective clamp values used for a task group */
struct uclamp_se uclamp[UCLAMP_CNT];
+ /* Latency-sensitive flag used for a task group */
+ unsigned int latency_sensitive;
+
+ ANDROID_VENDOR_DATA_ARRAY(1, 4);
#endif
};
@@ -1066,6 +1074,7 @@ static inline void set_rd_overloaded(struct root_domain *rd, int status)
#ifdef HAVE_RT_PUSH_IPI
extern void rto_push_irq_work_func(struct irq_work *work);
#endif
+extern struct task_struct *pick_highest_pushable_task(struct rq *rq, int cpu);
#ifdef CONFIG_UCLAMP_TASK
/*
@@ -1819,6 +1828,9 @@ struct rq_flags {
unsigned int clock_update_flags;
};
+extern struct rq *__migrate_task(struct rq *rq, struct rq_flags *rf,
+ struct task_struct *p, int dest_cpu);
+
extern struct balance_callback balance_push_callback;
#ifdef CONFIG_SCHED_CLASS_EXT
@@ -2047,8 +2059,6 @@ enum numa_faults_stats {
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
-extern int migrate_swap(struct task_struct *p, struct task_struct *t,
- int cpu, int scpu);
extern void init_numa_balancing(u64 clone_flags, struct task_struct *p);
#else /* !CONFIG_NUMA_BALANCING: */
@@ -2060,6 +2070,11 @@ init_numa_balancing(u64 clone_flags, struct task_struct *p)
#endif /* !CONFIG_NUMA_BALANCING */
+#ifdef CONFIG_SMP
+extern int migrate_swap(struct task_struct *p, struct task_struct *t,
+ int cpu, int scpu);
+#endif
+
static inline void
queue_balance_callback(struct rq *rq,
struct balance_callback *head,
@@ -2331,6 +2346,8 @@ static __always_inline bool static_branch_##name(struct static_key *key) \
#undef SCHED_FEAT
extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
+extern const char * const sched_feat_names[__SCHED_FEAT_NR];
+
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !CONFIG_JUMP_LABEL: */
@@ -2484,6 +2501,8 @@ extern const u32 sched_prio_to_wmult[40];
#define ENQUEUE_INITIAL 0x00080000
#define ENQUEUE_RQ_SELECTED 0x00100000
+#define ENQUEUE_WAKEUP_SYNC 0x80
+
#define RETRY_TASK ((void *)-1UL)
struct affinity_context {
@@ -2828,6 +2847,7 @@ static inline struct task_struct *get_push_task(struct rq *rq)
extern int push_cpu_stop(void *arg);
+extern unsigned long __read_mostly max_load_balance_interval;
#ifdef CONFIG_CPU_IDLE
static inline void idle_set_state(struct rq *rq,
diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
index 6f10db36..a5a7b67 100644
--- a/kernel/sched/syscalls.c
+++ b/kernel/sched/syscalls.c
@@ -16,6 +16,9 @@
#include "sched.h"
#include "autogroup.h"
+#include <trace/hooks/sched.h>
+#undef TRACE_INCLUDE_PATH
+
static inline int __normal_prio(int policy, int rt_prio, int nice)
{
int prio;
@@ -378,12 +381,14 @@ static void __setscheduler_uclamp(struct task_struct *p,
attr->sched_util_min != -1) {
uclamp_se_set(&p->uclamp_req[UCLAMP_MIN],
attr->sched_util_min, true);
+ trace_android_vh_setscheduler_uclamp(p, UCLAMP_MIN, attr->sched_util_min);
}
if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP_MAX &&
attr->sched_util_max != -1) {
uclamp_se_set(&p->uclamp_req[UCLAMP_MAX],
attr->sched_util_max, true);
+ trace_android_vh_setscheduler_uclamp(p, UCLAMP_MAX, attr->sched_util_max);
}
}
@@ -452,6 +457,12 @@ static int user_check_sched_setscheduler(struct task_struct *p,
if (p->sched_reset_on_fork && !reset_on_fork)
goto req_priv;
+ if (!capable(CAP_SYS_NICE)) {
+ /* Can't change util-clamps */
+ if (attr->sched_flags & SCHED_FLAG_UTIL_CLAMP)
+ return -EPERM;
+ }
+
return 0;
req_priv:
@@ -655,6 +666,7 @@ int __sched_setscheduler(struct task_struct *p,
__setscheduler_params(p, attr);
p->sched_class = next_class;
p->prio = newprio;
+ trace_android_rvh_setscheduler(p);
}
__setscheduler_uclamp(p, attr);
@@ -730,6 +742,7 @@ int sched_setscheduler(struct task_struct *p, int policy,
{
return _sched_setscheduler(p, policy, param, true);
}
+EXPORT_SYMBOL_GPL(sched_setscheduler);
int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
@@ -760,6 +773,7 @@ int sched_setscheduler_nocheck(struct task_struct *p, int policy,
{
return _sched_setscheduler(p, policy, param, false);
}
+EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
/*
* SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
@@ -1306,6 +1320,8 @@ static void do_sched_yield(void)
schedstat_inc(rq->yld_count);
rq->donor->sched_class->yield_task(rq);
+ trace_android_rvh_do_sched_yield(rq);
+
preempt_disable();
rq_unlock_irq(rq, &rf);
sched_preempt_enable_no_resched();
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 32dcdda..c1912d0 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -5,9 +5,13 @@
#include <linux/sched/isolation.h>
#include <linux/bsearch.h>
+#include <trace/hooks/sched.h>
#include "sched.h"
DEFINE_MUTEX(sched_domains_mutex);
+#ifdef CONFIG_LOCKDEP
+EXPORT_SYMBOL_GPL(sched_domains_mutex);
+#endif
void sched_domains_mutex_lock(void)
{
mutex_lock(&sched_domains_mutex);
@@ -412,11 +416,12 @@ static bool build_perf_domains(const struct cpumask *cpu_map)
struct perf_domain *pd = NULL, *tmp;
int cpu = cpumask_first(cpu_map);
struct root_domain *rd = cpu_rq(cpu)->rd;
+ bool eas_check = false;
if (!sysctl_sched_energy_aware)
goto free;
-
- if (!sched_is_eas_possible(cpu_map))
+ trace_android_rvh_build_perf_domains(&eas_check);
+ if (!sched_is_eas_possible(cpu_map) && !eas_check)
goto free;
for_each_cpu(i, cpu_map) {
@@ -2695,6 +2700,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
if (rq && sched_debug_verbose)
pr_info("root domain span: %*pbl\n", cpumask_pr_args(cpu_map));
+ trace_android_vh_build_sched_domains(has_asym);
ret = 0;
error:
diff --git a/kernel/sched/vendor_hooks.c b/kernel/sched/vendor_hooks.c
new file mode 100644
index 0000000..ee2233e
--- /dev/null
+++ b/kernel/sched/vendor_hooks.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* vendor_hook.c
+ *
+ * Copyright 2022 Google LLC
+ */
+#include <linux/sched/cputime.h>
+#include "sched.h"
+#include "pelt.h"
+#include "smp.h"
+
+#define CREATE_TRACE_POINTS
+#include <trace/hooks/vendor_hooks.h>
+#include <linux/tracepoint.h>
+#include <trace/hooks/sched.h>
+
+/* keep-sorted start */
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_account_irq);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_after_dequeue_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_after_enqueue_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_build_perf_domains);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_can_migrate_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_check_preempt_tick);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_check_preempt_wakeup_fair);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_cpu_overutilized);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dequeue_entity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dequeue_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dequeue_task_fair);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_do_sched_yield);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_enqueue_entity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_enqueue_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_enqueue_task_fair);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_entity_tick);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_busiest_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_lowest_rq);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_find_new_ilb);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_finish_prio_fork);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_flush_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_get_nohz_timer_target);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_is_cpu_allowed);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_migrate_queued_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_new_task_stats);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_place_entity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_prepare_prio_fork);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_replace_next_task_fair);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_rtmutex_prepare_setprio);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_rto_next_cpu);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_balance_find_src_group);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_balance_rt);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_cpu_dying);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_cpu_starting);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_exec);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_fork);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_fork_init);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_getaffinity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_newidle_balance);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_nohz_balancer_kick);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_rebalance_domains);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_setaffinity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_schedule);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_schedule_bug);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_select_fallback_rq);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_select_task_rq_fair);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_select_task_rq_rt);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_cpus_allowed_by_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_cpus_allowed_ptr);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_task_cpu);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_user_nice_locked);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_setscheduler);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_tick_entry);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_try_to_wake_up);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_try_to_wake_up_success);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_ttwu_cond);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_uclamp_eff_get);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_cpu_capacity);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_load_avg_blocked_se);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_load_avg_cfs_rq);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_load_avg_se);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_misfit_status);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_rq_clock_pelt);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_rt_rq_load_avg_internal);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_thermal_stats);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_util_est_update);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_wake_up_new_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_build_sched_domains);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_wake_up_sync);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dump_dl_server);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dump_throttled_rt_tasks);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_dup_task_struct);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_task);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_jiffies_update);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_scheduler_tick);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_set_wake_flags);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_setscheduler_uclamp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_update_topology_flags_workfn);
+/* keep-sorted end */
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 20f27e2..44b85ed 100644
--- a/kernel/sched/wait.c
+++ b/kernel/sched/wait.c
@@ -4,6 +4,7 @@
*
* (C) 2004 Nadia Yvette Chambers, Oracle
*/
+#include <trace/hooks/sched.h>
#include "sched.h"
void __init_waitqueue_head(struct wait_queue_head *wq_head, const char *name, struct lock_class_key *key)
@@ -186,10 +187,13 @@ EXPORT_SYMBOL_GPL(__wake_up_locked_key);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode,
void *key)
{
+ int wake_flags = WF_SYNC;
+
if (unlikely(!wq_head))
return;
- __wake_up_common_lock(wq_head, mode, 1, WF_SYNC, key);
+ trace_android_vh_set_wake_flags(&wake_flags, &mode);
+ __wake_up_common_lock(wq_head, mode, 1, wake_flags, key);
}
EXPORT_SYMBOL_GPL(__wake_up_sync_key);
diff --git a/kernel/signal.c b/kernel/signal.c
index d65d0fe..fb27f4f 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -61,6 +61,8 @@
#include "time/posix-timers.h"
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/signal.h>
/*
* SLAB caches for signal bits.
*/
@@ -1264,7 +1266,7 @@ int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p
{
unsigned long flags;
int ret = -ESRCH;
-
+ trace_android_vh_do_send_sig_info(sig, current, p);
if (lock_task_sighand(p, &flags)) {
ret = send_signal_locked(sig, info, p, type);
unlock_task_sighand(p, &flags);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 7719891..b8f0b0c 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -34,6 +34,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
+EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_entry);
+
/*
- No shared variables, all the data are CPU local.
- If a softirq needs serialization, let it serialize itself
@@ -60,6 +62,22 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+EXPORT_PER_CPU_SYMBOL_GPL(ksoftirqd);
+
+#ifdef CONFIG_RT_SOFTIRQ_AWARE_SCHED
+/*
+ * active_softirqs -- per cpu, a mask of softirqs that are being handled,
+ * with the expectation that approximate answers are acceptable and therefore
+ * no synchronization.
+ */
+DEFINE_PER_CPU(u32, active_softirqs);
+static inline void set_active_softirqs(u32 pending)
+{
+ __this_cpu_write(active_softirqs, pending);
+}
+#else /* CONFIG_RT_SOFTIRQ_AWARE_SCHED */
+static inline void set_active_softirqs(u32 pending) {};
+#endif /* CONFIG_RT_SOFTIRQ_AWARE_SCHED */
const char * const softirq_to_name[NR_SOFTIRQS] = {
"HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL",
@@ -576,6 +594,21 @@ static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif
+#ifdef CONFIG_RT_SOFTIRQ_AWARE_SCHED
+static __u32 softirq_deferred_for_rt(__u32 *pending)
+{
+ __u32 deferred = 0;
+
+ if (rt_task(current)) {
+ deferred = *pending & LONG_SOFTIRQ_MASK;
+ *pending &= ~LONG_SOFTIRQ_MASK;
+ }
+ return deferred;
+}
+#else
+#define softirq_deferred_for_rt(x) (0)
+#endif
+
static void handle_softirqs(bool ksirqd)
{
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
@@ -583,6 +616,7 @@ static void handle_softirqs(bool ksirqd)
int max_restart = MAX_SOFTIRQ_RESTART;
struct softirq_action *h;
bool in_hardirq;
+ __u32 deferred;
__u32 pending;
int softirq_bit;
@@ -594,14 +628,17 @@ static void handle_softirqs(bool ksirqd)
current->flags &= ~PF_MEMALLOC;
pending = local_softirq_pending();
+ deferred = softirq_deferred_for_rt(&pending);
softirq_handle_begin();
+
in_hardirq = lockdep_softirq_start();
account_softirq_enter(current);
restart:
/* Reset the pending bitmask before enabling irqs */
- set_softirq_pending(0);
+ set_softirq_pending(deferred);
+ set_active_softirqs(pending);
local_irq_enable();
@@ -631,20 +668,24 @@ static void handle_softirqs(bool ksirqd)
pending >>= softirq_bit;
}
+ set_active_softirqs(0);
if (!IS_ENABLED(CONFIG_PREEMPT_RT) && ksirqd)
rcu_softirq_qs();
local_irq_disable();
pending = local_softirq_pending();
+ deferred = softirq_deferred_for_rt(&pending);
+
if (pending) {
if (time_before(jiffies, end) && !need_resched() &&
--max_restart)
goto restart;
-
- wakeup_softirqd();
}
+ if (pending | deferred)
+ wakeup_softirqd();
+
account_softirq_exit(current);
lockdep_softirq_end(in_hardirq);
softirq_handle_end();
@@ -782,6 +823,7 @@ void raise_softirq(unsigned int nr)
raise_softirq_irqoff(nr);
local_irq_restore(flags);
}
+EXPORT_SYMBOL_GPL(raise_softirq);
void __raise_softirq_irqoff(unsigned int nr)
{
diff --git a/kernel/stacktrace.c b/kernel/stacktrace.c
index afb3c11..de66159 100644
--- a/kernel/stacktrace.c
+++ b/kernel/stacktrace.c
@@ -175,6 +175,7 @@ unsigned int stack_trace_save_regs(struct pt_regs *regs, unsigned long *store,
arch_stack_walk(consume_entry, &c, current, regs);
return c.len;
}
+EXPORT_SYMBOL_GPL(stack_trace_save_regs);
#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE
/**
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 3fe6b0c..3627496 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -150,6 +150,7 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg)
wait_for_completion(&done.completion);
return done.ret;
}
+EXPORT_SYMBOL_GPL(stop_one_cpu);
/* This controls the threads on each CPU. */
enum multi_stop_state {
@@ -388,6 +389,7 @@ bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
return cpu_stop_queue_work(cpu, work_buf);
}
+EXPORT_SYMBOL_GPL(stop_one_cpu_nowait);
static bool queue_stop_cpus_work(const struct cpumask *cpumask,
cpu_stop_fn_t fn, void *arg,
diff --git a/kernel/sys.c b/kernel/sys.c
index c86eba9..81c24fb 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -80,6 +80,8 @@
#include "uid16.h"
+#include <trace/hooks/sys.h>
+
#ifndef SET_UNALIGN_CTL
# define SET_UNALIGN_CTL(a, b) (-EINVAL)
#endif
@@ -2908,6 +2910,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
error = -EINVAL;
break;
}
+ trace_android_vh_syscall_prctl_finished(option, me);
return error;
}
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index f3aaef6..13628b1 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -17,6 +17,7 @@
#include <linux/sched_clock.h>
#include <linux/seqlock.h>
#include <linux/bitops.h>
+#include <trace/hooks/epoch.h>
#include "timekeeping.h"
@@ -292,6 +293,7 @@ int sched_clock_suspend(void)
update_sched_clock();
hrtimer_cancel(&sched_clock_timer);
rd->read_sched_clock = suspended_sched_clock_read;
+ trace_android_vh_show_suspend_epoch_val(rd->epoch_ns, rd->epoch_cyc);
return 0;
}
@@ -308,6 +310,7 @@ void sched_clock_resume(void)
rd->epoch_cyc = cd.actual_read_sched_clock();
hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL_HARD);
rd->read_sched_clock = cd.actual_read_sched_clock;
+ trace_android_vh_show_resume_epoch_val(rd->epoch_cyc);
}
static void sched_clock_syscore_resume(void *data)
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index d305d85..d46e870 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <linux/module.h>
#include <trace/events/power.h>
+#include <trace/hooks/sched.h>
#include <asm/irq_regs.h>
@@ -96,6 +97,7 @@ static void tick_periodic(int cpu)
write_seqcount_end(&jiffies_seq);
raw_spin_unlock(&jiffies_lock);
update_wall_time();
+ trace_android_vh_jiffies_update(NULL);
}
update_process_times(user_mode(get_irq_regs()));
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index f7907fa..54eedd7 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -27,6 +27,7 @@
#include <linux/posix-timers.h>
#include <linux/context_tracking.h>
#include <linux/mm.h>
+#include <trace/hooks/sched.h>
#include <asm/irq_regs.h>
@@ -249,8 +250,10 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now)
}
/* Check if jiffies need an update */
- if (tick_cpu == cpu)
+ if (tick_cpu == cpu) {
tick_do_update_jiffies64(now);
+ trace_android_vh_jiffies_update(NULL);
+ }
/*
* If the jiffies update stalled for too long (timekeeper in stop_machine()
@@ -1399,6 +1402,7 @@ ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
return ktime_sub(next_event, now);
}
+EXPORT_SYMBOL_GPL(tick_nohz_get_sleep_length);
/**
* tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
@@ -1415,6 +1419,7 @@ unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
return ts->idle_calls;
}
+EXPORT_SYMBOL_GPL(tick_nohz_get_idle_calls_cpu);
static void tick_nohz_account_idle_time(struct tick_sched *ts,
ktime_t now)
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 0ba8e3c..c12544f 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -745,6 +745,7 @@ u64 nsec_to_clock_t(u64 x)
return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}
+EXPORT_SYMBOL_GPL(nsec_to_clock_t);
/**
* jiffies64_to_nsecs - Convert jiffies64 to nanoseconds
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 7e1e3bd..74cb106 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -56,6 +56,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/timer.h>
__visible u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;
@@ -533,6 +535,7 @@ static inline unsigned calc_index(unsigned long expires, unsigned lvl,
*
* Round up with level granularity to prevent this.
*/
+ trace_android_vh_timer_calc_index(lvl, &expires);
expires = (expires >> LVL_SHIFT(lvl)) + 1;
*bucket_expiry = expires << LVL_SHIFT(lvl);
return LVL_OFFS(lvl) + (expires & LVL_MASK);
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
index f2fe335..0abacee 100644
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -17,4 +17,6 @@
EXPORT_TRACEPOINT_SYMBOL_GPL(suspend_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_frequency);
+EXPORT_TRACEPOINT_SYMBOL_GPL(device_pm_callback_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(device_pm_callback_end);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 23de371..231d1df 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -804,7 +804,6 @@ void tracing_on(void)
{
tracer_tracing_on(&global_trace);
}
-EXPORT_SYMBOL_GPL(tracing_on);
#ifdef CONFIG_TRACER_SNAPSHOT
static void tracing_snapshot_instance_cond(struct trace_array *tr,
@@ -1277,7 +1276,6 @@ int tracing_is_on(void)
{
return tracer_tracing_is_on(&global_trace);
}
-EXPORT_SYMBOL_GPL(tracing_is_on);
static int __init set_buf_size(char *str)
{
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 91905aa..5b2c471 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -14,6 +14,7 @@
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/static_key.h>
+#include <trace/hooks/vendor_hooks.h>
enum tp_func_state {
TP_FUNC_0,
@@ -789,3 +790,82 @@ void syscall_unregfunc(void)
}
}
#endif
+
+#ifdef CONFIG_ANDROID_VENDOR_HOOKS
+
+static void *rvh_zalloc_funcs(int count)
+{
+ return kzalloc(sizeof(struct tracepoint_func) * count, GFP_KERNEL);
+}
+
+#define ANDROID_RVH_NR_PROBES_MAX 2
+static int rvh_func_add(struct tracepoint *tp, struct tracepoint_func *func)
+{
+ int i;
+
+ if (!static_key_enabled(&tp->key)) {
+ /* '+ 1' for the last NULL element */
+ tp->funcs = rvh_zalloc_funcs(ANDROID_RVH_NR_PROBES_MAX + 1);
+ if (!tp->funcs)
+ return ENOMEM;
+ }
+
+ for (i = 0; i < ANDROID_RVH_NR_PROBES_MAX; i++) {
+ if (!tp->funcs[i].func) {
+ if (!static_key_enabled(&tp->key))
+ tp->funcs[i].data = func->data;
+ WRITE_ONCE(tp->funcs[i].func, func->func);
+
+ return 0;
+ }
+ }
+
+ return -EBUSY;
+}
+
+static int android_rvh_add_func(struct tracepoint *tp, struct tracepoint_func *func)
+{
+ int ret;
+
+ if (tp->ext && tp->ext->regfunc && !static_key_enabled(&tp->key)) {
+ ret = tp->ext->regfunc();
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = rvh_func_add(tp, func);
+ if (ret)
+ return ret;
+ tracepoint_update_call(tp, tp->funcs);
+ static_branch_enable(&tp->key);
+
+ return 0;
+}
+
+int android_rvh_probe_register(struct tracepoint *tp, void *probe, void *data)
+{
+ struct tracepoint_func tp_func;
+ int ret;
+
+ /*
+ * Once the static key has been flipped, the array may be read
+ * concurrently. Although __traceiter_*() always checks .func first,
+ * it doesn't enforce read->read dependencies, and we can't strongly
+ * guarantee it will see the correct .data for the second element
+ * without adding smp_load_acquire() in the fast path. But this is a
+ * corner case which is unlikely to be needed by anybody in practice,
+ * so let's just forbid it and keep the fast path clean.
+ */
+ if (WARN_ON(static_key_enabled(&tp->key) && data))
+ return -EINVAL;
+
+ mutex_lock(&tracepoints_mutex);
+ tp_func.func = probe;
+ tp_func.data = data;
+ ret = android_rvh_add_func(tp, &tp_func);
+ mutex_unlock(&tracepoints_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(android_rvh_probe_register);
+#endif
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 57ba7f1..8b490f32 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -454,10 +454,16 @@
endif # DEBUG_INFO
+config FORCE_FRAME_WARN_TO_2K
+ bool "Force FRAME_WARN to 2048 for 32-bit allmod/allyes builds"
+ default n
+ depends on !64BIT
+
config FRAME_WARN
int "Warn for stack frames larger than"
range 0 8192
default 0 if KMSAN
+ default 2048 if FORCE_FRAME_WARN_TO_2K
default 2048 if GCC_PLUGIN_LATENT_ENTROPY
default 2048 if PARISC
default 1536 if (!64BIT && XTENSA)
@@ -1493,7 +1499,7 @@
For more details, see Documentation/locking/lockdep-design.rst.
config PROVE_RAW_LOCK_NESTING
- bool "Enable raw_spinlock - spinlock nesting checks" if !ARCH_SUPPORTS_RT
+ bool "Enable raw_spinlock - spinlock nesting checks" if !PREEMPT_RT
depends on PROVE_LOCKING
default y if ARCH_SUPPORTS_RT
help
diff --git a/lib/OWNERS b/lib/OWNERS
new file mode 100644
index 0000000..80a0cbe
--- /dev/null
+++ b/lib/OWNERS
@@ -0,0 +1 @@
+per-file crypto/**=file:/crypto/OWNERS
diff --git a/mm/Kconfig b/mm/Kconfig
index ebd8ea3..07e026c 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -1315,6 +1315,18 @@
config MEMFD_CREATE
bool "Enable memfd_create() system call" if EXPERT
+config MEMFD_ASHMEM_SHIM
+ bool "Memfd ashmem ioctl compatibility support"
+ default y
+ depends on MEMFD_CREATE && ASHMEM_C
+ help
+ This provides compatibility support for ashmem ioctl commands against
+ memfd file descriptors. This is useful for compatibility on Android
+ for older applications that may use ashmem's ioctl commands on the
+ now memfds passed to them.
+
+ Unless you are running Android, say N.
+
config SECRETMEM
default y
bool "Enable memfd_secret() system call" if EXPERT
diff --git a/mm/Makefile b/mm/Makefile
index 8ad2ab08..719f789 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -141,6 +141,7 @@
obj-$(CONFIG_ZONE_DEVICE) += memremap.o
obj-$(CONFIG_HMM_MIRROR) += hmm.o
obj-$(CONFIG_MEMFD_CREATE) += memfd.o
+obj-$(CONFIG_MEMFD_ASHMEM_SHIM) += memfd-ashmem-shim.o
obj-$(CONFIG_MAPPING_DIRTY_HELPERS) += mapping_dirty_helpers.o
obj-$(CONFIG_PTDUMP) += ptdump.o
obj-$(CONFIG_PAGE_REPORTING) += page_reporting.o
diff --git a/mm/OWNERS b/mm/OWNERS
new file mode 100644
index 0000000..5f97cfd
--- /dev/null
+++ b/mm/OWNERS
@@ -0,0 +1,3 @@
+kaleshsingh@google.com
+surenb@google.com
+minchan@google.com
diff --git a/mm/TEST_MAPPING b/mm/TEST_MAPPING
new file mode 100644
index 0000000..9b69323
--- /dev/null
+++ b/mm/TEST_MAPPING
@@ -0,0 +1,320 @@
+{
+ "imports": [
+ {
+ "path": "frameworks/base/apex/jobscheduler/service/java/com/android/server/job/"
+ },
+ {
+ "path": "system/memory/libmeminfo"
+ },
+ {
+ "path": "system/memory/libmeminfo/libdmabufinfo"
+ },
+ {
+ "path": "system/memory/libmemunreachable"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsJobSchedulerTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsWifiBroadcastsHostTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "CtsJobSchedulerTestCases",
+ "options": [
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testCellularConstraintExecutedAndStopped"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testConnectivityConstraintExecutes_transitionNetworks"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testConnectivityConstraintExecutes_withMobile"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testEJMeteredConstraintFails_withMobile_DataSaverOn"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testMeteredConstraintFails_withMobile_DataSaverOn"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/mm/cma.c b/mm/cma.c
index 94b5da4..ee10b71 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -20,6 +20,7 @@
#include <linux/err.h>
#include <linux/list.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -29,6 +30,8 @@
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
#include <trace/events/cma.h>
#include "internal.h"
@@ -52,6 +55,7 @@ const char *cma_get_name(const struct cma *cma)
{
return cma->name;
}
+EXPORT_SYMBOL_GPL(cma_get_name);
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
unsigned int align_order)
@@ -785,6 +789,8 @@ static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
unsigned long start, pfn, mask, offset;
int ret = -EBUSY;
struct page *page = NULL;
+ int num_attempts = 0;
+ int max_retries = 5;
mask = cma_bitmap_aligned_mask(cma, align);
offset = cma_bitmap_aligned_offset(cma, cmr, align);
@@ -808,8 +814,29 @@ static int cma_range_alloc(struct cma *cma, struct cma_memrange *cmr,
bitmap_maxno, start, bitmap_count, mask,
offset);
if (bitmap_no >= bitmap_maxno) {
- spin_unlock_irq(&cma->lock);
- break;
+ if ((num_attempts < max_retries) && (ret == -EBUSY)) {
+ spin_unlock_irq(&cma->lock);
+
+ if (fatal_signal_pending(current) ||
+ (gfp & __GFP_NORETRY))
+ break;
+
+ /*
+ * Page may be momentarily pinned by some other
+ * process which has been scheduled out, e.g.
+ * in exit path, during unmap call, or process
+ * fork and so cannot be freed there. Sleep
+ * for 100ms and retry the allocation.
+ */
+ start = 0;
+ ret = -ENOMEM;
+ schedule_timeout_killable(msecs_to_jiffies(100));
+ num_attempts++;
+ continue;
+ } else {
+ spin_unlock_irq(&cma->lock);
+ break;
+ }
}
pfn = cmr->base_pfn + (bitmap_no << cma->order_per_bit);
@@ -865,6 +892,10 @@ static struct page *__cma_alloc_frozen(struct cma *cma,
unsigned long i;
const char *name = cma ? cma->name : NULL;
+ if (WARN_ON_ONCE((gfp & GFP_KERNEL) == 0 ||
+ (gfp & ~(GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY)) != 0))
+ return page;
+
if (!cma || !cma->count)
return page;
@@ -951,6 +982,7 @@ struct page *cma_alloc(struct cma *cma, unsigned long count,
return page;
}
+EXPORT_SYMBOL_GPL(cma_alloc);
static struct cma_memrange *find_cma_memrange(struct cma *cma,
const struct page *pages, unsigned long count)
@@ -1027,6 +1059,7 @@ bool cma_release(struct cma *cma, const struct page *pages,
return true;
}
+EXPORT_SYMBOL_GPL(cma_release);
bool cma_release_frozen(struct cma *cma, const struct page *pages,
unsigned long count)
@@ -1055,6 +1088,7 @@ int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
return 0;
}
+EXPORT_SYMBOL_GPL(cma_for_each_area);
bool cma_intersects(struct cma *cma, unsigned long start, unsigned long end)
{
diff --git a/mm/memfd-ashmem-shim.c b/mm/memfd-ashmem-shim.c
new file mode 100644
index 0000000..dc741d9
--- /dev/null
+++ b/mm/memfd-ashmem-shim.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Ashmem compatability for memfd
+ *
+ * Copyright (c) 2025, Google LLC.
+ * Author: Isaac J. Manjarres <isaacmanjarres@google.com>
+ */
+
+#include <asm-generic/mman-common.h>
+#include <linux/capability.h>
+#include <linux/fs.h>
+#include <linux/memfd.h>
+#include <linux/uaccess.h>
+
+#include "../drivers/staging/android/ashmem.h"
+
+/* memfd file names all start with memfd: */
+#define MEMFD_PREFIX "memfd:"
+#define MEMFD_PREFIX_LEN (sizeof(MEMFD_PREFIX) - 1)
+
+static long get_name(struct file *file, void __user *name)
+{
+ struct name_snapshot snapshot;
+ /* ASHMEM_NAME_LEN is larger than the max length for memfd names so this is enough space. */
+ char file_name[ASHMEM_NAME_LEN];
+ ssize_t count;
+ unsigned int offset = 0;
+
+ take_dentry_name_snapshot(&snapshot, file->f_path.dentry);
+ /* Strip MEMFD_PREFIX to retain compatibility with ashmem driver if this is a memfd. */
+ if (!strncmp(snapshot.name.name, MEMFD_PREFIX, MEMFD_PREFIX_LEN))
+ offset = MEMFD_PREFIX_LEN;
+ count = strscpy(file_name, snapshot.name.name + offset);
+ release_dentry_name_snapshot(&snapshot);
+ /* Return the truncated name and NUL terminating byte if the original name was too big. */
+ count = count == -E2BIG ? ASHMEM_NAME_LEN : count + 1;
+ return copy_to_user(name, file_name, count) ? -EFAULT : 0;
+}
+
+static long get_prot_mask(struct file *file)
+{
+ long prot_mask = PROT_READ | PROT_EXEC;
+ long seals = memfd_fcntl(file, F_GET_SEALS, 0);
+
+ if (seals < 0)
+ return seals;
+
+ /* memfds are readable and executable by default. Only writability can be changed. */
+ if (!(seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)))
+ prot_mask |= PROT_WRITE;
+
+ return prot_mask;
+}
+
+static long set_prot_mask(struct file *file, unsigned long prot)
+{
+ long curr_prot = get_prot_mask(file);
+ long ret = 0;
+
+ if (curr_prot < 0)
+ return curr_prot;
+
+ /*
+ * memfds are always readable and executable; there is no way to remove either mapping
+ * permission, nor is there a known usecase that requires it.
+ *
+ * Attempting to remove either of these mapping permissions will return successfully, but
+ * will be a nop, as the buffer will still be mappable with these permissions.
+ */
+ prot |= PROT_READ | PROT_EXEC;
+
+ /* Only allow permissions to be removed. */
+ if ((curr_prot & prot) != prot)
+ return -EINVAL;
+
+ /*
+ * Removing PROT_WRITE:
+ *
+ * We could prevent any other mappings from having write permissions by adding the
+ * F_SEAL_WRITE mapping. However, that would conflict with known usecases where it is
+ * desirable to maintain an existing writable mapping, but forbid future writable mappings.
+ *
+ * To support those usecases, we use F_SEAL_FUTURE_WRITE.
+ */
+ if (!(prot & PROT_WRITE))
+ ret = memfd_fcntl(file, F_ADD_SEALS, F_SEAL_FUTURE_WRITE);
+
+ return ret;
+}
+
+/*
+ * ashmem_memfd_ioctl - ioctl handler for ashmem commands
+ * @file: The shmem file.
+ * @cmd: The ioctl command.
+ * @arg: The argument for the ioctl command.
+ *
+ * The purpose of this handler is to allow old applications to continue working
+ * on newer kernels by allowing them to invoke ashmem ioctl commands on memfds.
+ *
+ * The ioctl handler attempts to retain as much compatibility with the ashmem
+ * driver as possible.
+ */
+long ashmem_memfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ long ret = -ENOTTY;
+ unsigned long inode_nr;
+
+#ifdef CONFIG_COMPAT
+ if (cmd == COMPAT_ASHMEM_SET_SIZE)
+ cmd = ASHMEM_SET_SIZE;
+ else if (cmd == COMPAT_ASHMEM_SET_PROT_MASK)
+ cmd = ASHMEM_SET_PROT_MASK;
+#endif
+
+ switch (cmd) {
+ /*
+ * Older applications won't create memfds and try to use ASHMEM_SET_NAME/ASHMEM_SET_SIZE on
+ * them intentionally.
+ *
+ * Instead, we can end up in this scenario if an old application receives a memfd that was
+ * created by another process.
+ *
+ * However, the current process shouldn't expect to be able to reliably [re]name/size a
+ * buffer that was shared with it, since the process that shared that buffer with it, or
+ * any other process that references the buffer could have already mapped it.
+ *
+ * Additionally in the case of ASHMEM_SET_SIZE, when processes create memfds that are going
+ * to be shared with other processes in Android, they also specify the size of the memory
+ * region and seal the file against any size changes. Therefore, ASHMEM_SET_SIZE should not
+ * be supported anyway.
+ *
+ * Therefore, it is reasonable to return -EINVAL here, as if the buffer was already mapped.
+ */
+ case ASHMEM_SET_NAME:
+ case ASHMEM_SET_SIZE:
+ ret = -EINVAL;
+ break;
+ case ASHMEM_GET_NAME:
+ ret = get_name(file, (void __user *)arg);
+ break;
+ case ASHMEM_GET_SIZE:
+ ret = i_size_read(file_inode(file));
+ break;
+ case ASHMEM_SET_PROT_MASK:
+ ret = set_prot_mask(file, arg);
+ break;
+ case ASHMEM_GET_PROT_MASK:
+ ret = get_prot_mask(file);
+ break;
+ /*
+ * Unpinning ashmem buffers was deprecated with the release of Android 10,
+ * as it did not yield any remarkable benefits. Therefore, ignore pinning
+ * related requests.
+ *
+ * This makes it so that memory is always "pinned" or never entirely freed
+ * until all references to the ashmem buffer are dropped. The memory occupied
+ * by the buffer is still subject to being reclaimed (swapped out) under memory
+ * pressure, but that is not the same as being freed.
+ *
+ * This makes it so that:
+ *
+ * 1. Memory is always pinned and therefore never purged.
+ * 2. Requests to unpin memory (make it a candidate for being freed) are ignored.
+ */
+ case ASHMEM_PIN:
+ ret = ASHMEM_NOT_PURGED;
+ break;
+ case ASHMEM_UNPIN:
+ ret = 0;
+ break;
+ case ASHMEM_GET_PIN_STATUS:
+ ret = ASHMEM_IS_PINNED;
+ break;
+ case ASHMEM_PURGE_ALL_CACHES:
+ ret = capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
+ break;
+ case ASHMEM_GET_FILE_ID:
+ inode_nr = file_inode(file)->i_ino;
+ if (copy_to_user((void __user *)arg, &inode_nr, sizeof(inode_nr)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+ break;
+ }
+
+ return ret;
+}
diff --git a/mm/memory.c b/mm/memory.c
index 07778814..3f2fbee 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -181,6 +181,7 @@ void mm_trace_rss_stat(struct mm_struct *mm, int member)
{
trace_rss_stat(mm, member);
}
+EXPORT_SYMBOL_GPL(mm_trace_rss_stat);
/*
* Note: this doesn't free the actual pages themselves. That
diff --git a/mm/mmap.c b/mm/mmap.c
index 8431609..4f53cf0 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -673,6 +673,7 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
trace_vm_unmapped_area(addr, info);
return addr;
}
+EXPORT_SYMBOL_GPL(vm_unmapped_area);
/* Get an address range which is currently unmapped.
* For shmat() with addr=0.
@@ -862,6 +863,7 @@ __get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
error = security_mmap_addr(addr);
return error ? error : addr;
}
+EXPORT_SYMBOL(__get_unmapped_area);
unsigned long
mm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
@@ -1075,6 +1077,7 @@ EXPORT_SYMBOL(vm_munmap);
SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
addr = untagged_addr(addr);
+ profile_munmap(addr);
return __vm_munmap(addr, len, true);
}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 5c6c95c..de52716 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -422,7 +422,7 @@ static int dump_task(struct task_struct *p, void *arg)
* State information includes task's pid, uid, tgid, vm size, rss,
* pgtables_bytes, swapents, oom_score_adj value, and name.
*/
-static void dump_tasks(struct oom_control *oc)
+void dump_tasks(struct oom_control *oc)
{
pr_info("Tasks state (memory values in pages):\n");
pr_info("[ pid ] uid tgid total_vm rss rss_anon rss_file rss_shmem pgtables_bytes swapents oom_score_adj name\n");
@@ -443,6 +443,7 @@ static void dump_tasks(struct oom_control *oc)
rcu_read_unlock();
}
}
+EXPORT_SYMBOL_GPL(dump_tasks);
static void dump_oom_victim(struct oom_control *oc, struct task_struct *victim)
{
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2d4b6f1..755ac90 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6847,6 +6847,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
unsigned int nr_reclaimed;
unsigned long pfn = start;
unsigned int tries = 0;
+ unsigned int max_tries = 5;
int ret = 0;
struct migration_target_control mtc = {
.nid = zone_to_nid(cc->zone),
@@ -6854,6 +6855,9 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
.reason = MR_CONTIG_RANGE,
};
+ if (cc->gfp_mask & __GFP_NORETRY)
+ max_tries = 1;
+
lru_cache_disable();
while (pfn < end || !list_empty(&cc->migratepages)) {
@@ -6869,7 +6873,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
break;
pfn = cc->migrate_pfn;
tries = 0;
- } else if (++tries == 5) {
+ } else if (++tries == max_tries) {
ret = -EBUSY;
break;
}
@@ -6999,7 +7003,11 @@ int alloc_contig_frozen_range_noprof(unsigned long start, unsigned long end,
.nr_migratepages = 0,
.order = -1,
.zone = page_zone(pfn_to_page(start)),
- .mode = MIGRATE_SYNC,
+ /*
+ * Use MIGRATE_ASYNC for __GFP_NORETRY requests as it never
+ * blocks.
+ */
+ .mode = gfp_mask & __GFP_NORETRY ? MIGRATE_ASYNC : MIGRATE_SYNC,
.ignore_skip_hint = true,
.no_set_skip_hint = true,
.alloc_contig = true,
@@ -7059,7 +7067,7 @@ int alloc_contig_frozen_range_noprof(unsigned long start, unsigned long end,
* -EBUSY is not accidentally used or returned to caller.
*/
ret = __alloc_contig_migrate_range(&cc, start, end);
- if (ret && ret != -EBUSY)
+ if (ret && (ret != -EBUSY || (gfp_mask & __GFP_NORETRY)))
goto done;
/*
diff --git a/mm/page_ext.c b/mm/page_ext.c
index e2e92bd..c7542ec 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -537,6 +537,7 @@ struct page_ext *page_ext_get(const struct page *page)
return page_ext;
}
+EXPORT_SYMBOL_NS_GPL(page_ext_get, "MINIDUMP");
/**
* page_ext_from_phys() - Get the page_ext structure for a physical address.
@@ -578,3 +579,4 @@ void page_ext_put(struct page_ext *page_ext)
rcu_read_unlock();
}
+EXPORT_SYMBOL_NS_GPL(page_ext_put, "MINIDUMP");
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 8178e0b..07a1f20 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -8,7 +8,6 @@
#include <linux/page_owner.h>
#include <linux/jump_label.h>
#include <linux/migrate.h>
-#include <linux/stackdepot.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
#include <linux/sched/clock.h>
@@ -152,6 +151,25 @@ static inline struct page_owner *get_page_owner(struct page_ext *page_ext)
return page_ext_data(page_ext, &page_owner_ops);
}
+depot_stack_handle_t get_page_owner_handle(struct page_ext *page_ext, unsigned long pfn)
+{
+ struct page_owner *page_owner;
+ depot_stack_handle_t handle;
+
+ if (!static_branch_unlikely(&page_owner_inited))
+ return 0;
+
+ page_owner = get_page_owner(page_ext);
+
+ /* skip handle for tail pages of higher order allocations */
+ if (!IS_ALIGNED(pfn, 1 << page_owner->order))
+ return 0;
+
+ handle = READ_ONCE(page_owner->handle);
+ return handle;
+}
+EXPORT_SYMBOL_NS_GPL(get_page_owner_handle, "MINIDUMP");
+
static noinline depot_stack_handle_t save_stack(gfp_t flags)
{
unsigned long entries[PAGE_OWNER_STACK_DEPTH];
diff --git a/mm/percpu.c b/mm/percpu.c
index a2107bd..598195f 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -2394,6 +2394,7 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
return page_to_phys(pcpu_addr_to_page(addr)) +
offset_in_page(addr);
}
+EXPORT_SYMBOL_GPL(per_cpu_ptr_to_phys);
/**
* pcpu_alloc_alloc_info - allocate percpu allocation info
@@ -3374,6 +3375,7 @@ unsigned long pcpu_nr_pages(void)
{
return data_race(READ_ONCE(pcpu_nr_populated)) * pcpu_nr_units;
}
+EXPORT_SYMBOL_NS_GPL(pcpu_nr_pages, "MINIDUMP");
/*
* Percpu allocator is initialized early during boot when neither slab or
diff --git a/mm/shmem.c b/mm/shmem.c
index b40f3cd..d0481bc 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -86,6 +86,10 @@ static struct vfsmount *shm_mnt __ro_after_init;
#include "internal.h"
+#ifdef CONFIG_ASHMEM
+#include "../drivers/staging/android/ashmem.h"
+#endif
+
#define VM_ACCT(size) (PAGE_ALIGN(size) >> PAGE_SHIFT)
/* Pretend that each entry is of this size in directory's i_size */
@@ -5236,6 +5240,12 @@ static const struct file_operations shmem_file_operations = {
.fallocate = shmem_fallocate,
.setlease = generic_setlease,
#endif
+#ifdef CONFIG_ASHMEM
+ .unlocked_ioctl = ashmem_memfd_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = ashmem_memfd_ioctl,
+#endif
+#endif
};
static const struct inode_operations shmem_inode_operations = {
diff --git a/mm/slab.h b/mm/slab.h
index f6ef862..88508a3 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -11,6 +11,7 @@
#include <linux/memcontrol.h>
#include <linux/kfence.h>
#include <linux/kasan.h>
+#include <linux/stackdepot.h>
/*
* Internal slab definitions
@@ -444,6 +445,22 @@ struct slabinfo {
void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
+/*
+ * Tracking user of a slab.
+ */
+#define TRACK_ADDRS_COUNT 16
+struct track {
+ unsigned long addr; /* Called from address */
+#ifdef CONFIG_STACKDEPOT
+ depot_stack_handle_t handle;
+#endif
+ int cpu; /* Was running on cpu */
+ int pid; /* Pid context */
+ unsigned long when; /* When did the operation occur */
+};
+
+enum track_item { TRACK_ALLOC, TRACK_FREE };
+
#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
@@ -452,6 +469,12 @@ DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
+extern struct track *get_track(struct kmem_cache *s, void *object,
+ enum track_item alloc);
+extern unsigned long get_each_kmemcache_object(struct kmem_cache *s,
+ int (*fn)(struct kmem_cache *, void *, void *),
+ void *private);
+
static inline bool __slub_debug_enabled(void)
{
return static_branch_unlikely(&slub_debug_enabled);
diff --git a/mm/slub.c b/mm/slub.c
index 0c906fe..aa7c035 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -31,7 +31,6 @@
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
-#include <linux/stackdepot.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/kfence.h>
@@ -302,22 +301,6 @@ void *fixup_red_left(struct kmem_cache *s, void *p)
#define __CMPXCHG_DOUBLE __SLAB_FLAG_UNUSED
#endif
-/*
- * Tracking user of a slab.
- */
-#define TRACK_ADDRS_COUNT 16
-struct track {
- unsigned long addr; /* Called from address */
-#ifdef CONFIG_STACKDEPOT
- depot_stack_handle_t handle;
-#endif
- int cpu; /* Was running on cpu */
- int pid; /* Pid context */
- unsigned long when; /* When did the operation occur */
-};
-
-enum track_item { TRACK_ALLOC, TRACK_FREE };
-
#ifdef SLAB_SUPPORTS_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
#else
@@ -1013,8 +996,8 @@ static void print_section(char *level, char *text, u8 *addr,
metadata_access_disable();
}
-static struct track *get_track(struct kmem_cache *s, void *object,
- enum track_item alloc)
+struct track *get_track(struct kmem_cache *s, void *object,
+ enum track_item alloc)
{
struct track *p;
@@ -1022,6 +1005,52 @@ static struct track *get_track(struct kmem_cache *s, void *object,
return kasan_reset_tag(p + alloc);
}
+EXPORT_SYMBOL(get_track);
+
+static inline unsigned long node_nr_slabs(struct kmem_cache_node *n);
+
+unsigned long get_each_kmemcache_object(struct kmem_cache *s,
+ int (*fn)(struct kmem_cache *, void *, void *),
+ void *private)
+{
+ int node;
+ unsigned long ret = 0;
+ struct kmem_cache_node *n;
+
+ for_each_kmem_cache_node(s, node, n) {
+ unsigned long flags;
+ struct slab *slab;
+ void *p;
+
+ if (!node_nr_slabs(n))
+ continue;
+
+ spin_lock_irqsave(&n->list_lock, flags);
+ list_for_each_entry(slab, &n->partial, slab_list) {
+ for_each_object(p, s, slab_address(slab), slab->objects) {
+ ret = fn(s, p, private);
+ if (ret) {
+ spin_unlock_irqrestore(&n->list_lock, flags);
+ return ret;
+ }
+ }
+ }
+#ifdef CONFIG_SLUB_DEBUG
+ list_for_each_entry(slab, &n->full, slab_list) {
+ for_each_object(p, s, slab_address(slab), slab->objects) {
+ ret = fn(s, p, private);
+ if (ret) {
+ spin_unlock_irqrestore(&n->list_lock, flags);
+ return ret;
+ }
+ }
+ }
+#endif
+ spin_unlock_irqrestore(&n->list_lock, flags);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(get_each_kmemcache_object, "MINIDUMP");
#ifdef CONFIG_STACKDEPOT
static noinline depot_stack_handle_t set_track_prepare(gfp_t gfp_flags)
@@ -9814,4 +9843,6 @@ void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
sinfo->objects_per_slab = oo_objects(s->oo);
sinfo->cache_order = oo_order(s->oo);
}
+EXPORT_SYMBOL_NS_GPL(get_slabinfo, "MINIDUMP");
+
#endif /* CONFIG_SLUB_DEBUG */
diff --git a/mm/swap.c b/mm/swap.c
index bb19ccb..db93712 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -900,6 +900,7 @@ void lru_add_drain_all(void)
#endif /* CONFIG_SMP */
atomic_t lru_disable_count = ATOMIC_INIT(0);
+EXPORT_SYMBOL_GPL(lru_disable_count);
/*
* lru_cache_disable() needs to be called before we start compiling
@@ -911,7 +912,12 @@ atomic_t lru_disable_count = ATOMIC_INIT(0);
*/
void lru_cache_disable(void)
{
- atomic_inc(&lru_disable_count);
+ /*
+ * If someone is already disabled lru_cache, just return with
+ * increasing the lru_disable_count.
+ */
+ if (atomic_inc_not_zero(&lru_disable_count))
+ return;
/*
* Readers of lru_disable_count are protected by either disabling
* preemption or rcu_read_lock:
@@ -931,7 +937,9 @@ void lru_cache_disable(void)
#else
lru_add_and_bh_lrus_drain();
#endif
+ atomic_inc(&lru_disable_count);
}
+EXPORT_SYMBOL_GPL(lru_cache_disable);
/**
* folios_put_refs - Reduce the reference count on a batch of folios.
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 94af29d..617671d 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3586,6 +3586,7 @@ void si_swapinfo(struct sysinfo *val)
val->totalswap = total_swap_pages + nr_to_be_unused;
spin_unlock(&swap_lock);
}
+EXPORT_SYMBOL_NS_GPL(si_swapinfo, "MINIDUMP");
/*
* Verify that nr swap entries are valid and increment their swap map counts.
diff --git a/mm/util.c b/mm/util.c
index b05ab6f..942694b 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -34,6 +34,10 @@
#include "internal.h"
#include "swap.h"
+#ifndef __GENSYMS__
+#include <trace/hooks/syscall_check.h>
+#endif
+
/**
* kfree_const - conditionally free memory
* @x: pointer to the memory
@@ -585,6 +589,7 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
if (populate)
mm_populate(ret, populate);
}
+ trace_android_vh_check_mmap_file(file, prot, flag, ret);
return ret;
}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 61caa55..00a31b1 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1075,6 +1075,7 @@ unsigned long vmalloc_nr_pages(void)
{
return atomic_long_read(&nr_vmalloc_pages);
}
+EXPORT_SYMBOL_NS_GPL(vmalloc_nr_pages, "MINIDUMP");
static struct vmap_area *__find_vmap_area(unsigned long addr, struct rb_root *root)
{
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0fc9373..843f1633 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -71,6 +71,9 @@
#define CREATE_TRACE_POINTS
#include <trace/events/vmscan.h>
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/vmscan.h>
+
struct scan_control {
/* How many pages shrink_list() should reclaim */
unsigned long nr_to_reclaim;
@@ -2534,6 +2537,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
u64 denominator = 0; /* gcc */
enum scan_balance scan_balance;
enum lru_list lru;
+ bool balance_anon_file_reclaim = false;
/* If we have no swap space, do not bother scanning anon folios. */
if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) {
@@ -2578,12 +2582,14 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
goto out;
}
+ trace_android_rvh_set_balance_anon_file_reclaim(&balance_anon_file_reclaim);
+
/*
* If there is enough inactive page cache, we do not reclaim
* anything from the anonymous working right now to make sure
* a streaming file access pattern doesn't cause swapping.
*/
- if (sc->cache_trim_mode) {
+ if (!balance_anon_file_reclaim && sc->cache_trim_mode) {
scan_balance = SCAN_FILE;
goto out;
}
diff --git a/net/OWNERS b/net/OWNERS
new file mode 100644
index 0000000..cbbfa70
--- /dev/null
+++ b/net/OWNERS
@@ -0,0 +1,2 @@
+lorenzo@google.com
+maze@google.com
diff --git a/net/TEST_MAPPING b/net/TEST_MAPPING
new file mode 100644
index 0000000..dd2177a
--- /dev/null
+++ b/net/TEST_MAPPING
@@ -0,0 +1,336 @@
+{
+ "presubmit": [
+ {
+ "name": "CtsAppEnumerationTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.appenumeration.cts.AppEnumerationTests"
+ }
+ ]
+ },
+ {
+ "name": "CtsHostsideNetworkTests",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsNetTestCasesLatestSdk",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "CtsTelecomTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ },
+ {
+ "include-filter": "android.telecom.cts.ExtendedInCallServiceTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsWifiBroadcastsHostTestCases",
+ "options": [
+ {
+ "exclude-annotation": "com.android.testutils.SkipPresubmit"
+ }
+ ]
+ },
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ },
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ },
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_b_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bl_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_bo_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_2G"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_l_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_500k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_o_5k"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ },
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ },
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ },
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ },
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ },
+ {
+ "include-filter": "kselftest_seccomp_seccomp_bpf"
+ },
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ },
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ },
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ },
+ {
+ "include-filter": "kselftest_x86_test_mremap_vdso"
+ }
+ ]
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "CtsJobSchedulerTestCases",
+ "options": [
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testCellularConstraintExecutedAndStopped"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testConnectivityConstraintExecutes_transitionNetworks"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testConnectivityConstraintExecutes_withMobile"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testEJMeteredConstraintFails_withMobile_DataSaverOn"
+ },
+ {
+ "include-filter": "android.jobscheduler.cts.ConnectivityConstraintTest#testMeteredConstraintFails_withMobile_DataSaverOn"
+ }
+ ]
+ }
+ ],
+ "kernel-presubmit": [
+ {
+ "name": "CtsCameraTestCases",
+ "options": [
+ {
+ "include-filter": "android.hardware.camera2.cts.FastBasicsTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsIncrementalInstallHostTestCases",
+ "options": [
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalFeatureTest"
+ },
+ {
+ "include-filter": "android.incrementalinstall.cts.IncrementalInstallTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsLibcoreLegacy22TestCases",
+ "options": [
+ {
+ "include-filter": "android.util.cts.FloatMathTest"
+ }
+ ]
+ },
+ {
+ "name": "CtsRootBluetoothTestCases"
+ },
+ {
+ "name": "vts_kernel_net_tests"
+ },
+ {
+ "name": "KernelAbilistTest"
+ },
+ {
+ "name": "VtsAidlHalSensorsTargetTest"
+ },
+ {
+ "name": "VtsBootconfigTest"
+ },
+ {
+ "name": "binderDriverInterfaceTest"
+ },
+ {
+ "name": "binderLibTest"
+ },
+ {
+ "name": "binderSafeInterfaceTest"
+ },
+ {
+ "name": "memunreachable_binder_test"
+ },
+ {
+ "name": "VtsHalBluetoothAudioTargetTest"
+ },
+ {
+ "name": "CtsBionicTestCases"
+ },
+ {
+ "name": "CtsUsbTests"
+ },
+ {
+ "name": "CtsDrmTestCases",
+ "options": [
+ {
+ "exclude-filter": "android.drm.cts.DRMTest#testForwardLockAccess"
+ }
+ ]
+ }
+ ]
+}
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 7b7640f..c68c4c0 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -310,11 +310,9 @@ static bool batadv_is_cfg80211_netdev(struct net_device *net_device)
if (!net_device)
return false;
-#if IS_ENABLED(CONFIG_CFG80211)
/* cfg80211 drivers have to set ieee80211_ptr */
if (net_device->ieee80211_ptr)
return true;
-#endif
return false;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index c1a9f7f..268ae11 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -163,6 +163,7 @@
#include <net/page_pool/memory_provider.h>
#include <net/rps.h>
#include <linux/phy_link_topology.h>
+#include <trace/hooks/net.h>
#include "dev.h"
#include "devmem.h"
@@ -593,6 +594,12 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
static inline struct list_head *ptype_head(const struct packet_type *pt)
{
+ struct list_head vendor_pt = { .next = NULL, };
+
+ trace_android_vh_ptype_head(pt, &vendor_pt);
+ if (vendor_pt.next)
+ return vendor_pt.next;
+
if (pt->type == htons(ETH_P_ALL)) {
if (!pt->af_packet_net && !pt->dev)
return NULL;
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 8ca6349..70f2a8f 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -1288,10 +1288,10 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
const struct nlmsghdr *nlh = cb->nlh;
struct net *net = sock_net(skb->sk);
struct fib_rules_ops *ops;
- int err, idx = 0, family;
+ int idx = 0, family;
if (cb->strict_check) {
- err = fib_valid_dumprule_req(nlh, cb->extack);
+ int err = fib_valid_dumprule_req(nlh, cb->extack);
if (err < 0)
return err;
@@ -1304,17 +1304,17 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
if (ops == NULL)
return -EAFNOSUPPORT;
- return dump_rules(skb, cb, ops);
+ dump_rules(skb, cb, ops);
+
+ return skb->len;
}
- err = 0;
rcu_read_lock();
list_for_each_entry_rcu(ops, &net->rules_ops, list) {
if (idx < cb->args[0] || !try_module_get(ops->owner))
goto skip;
- err = dump_rules(skb, cb, ops);
- if (err < 0)
+ if (dump_rules(skb, cb, ops) < 0)
break;
cb->args[1] = 0;
@@ -1324,7 +1324,7 @@ static int fib_nl_dumprule(struct sk_buff *skb, struct netlink_callback *cb)
rcu_read_unlock();
cb->args[0] = idx;
- return err;
+ return skb->len;
}
static void notify_rule_change(int event, struct fib_rule *rule,
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 07624b6..c2e5048 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -905,14 +905,10 @@ static const struct attribute_group wireless_group = {
static bool wireless_group_needed(struct net_device *ndev)
{
-#if IS_ENABLED(CONFIG_CFG80211)
if (ndev->ieee80211_ptr)
return true;
-#endif
-#if IS_ENABLED(CONFIG_WIRELESS_EXT)
if (ndev->wireless_handlers)
return true;
-#endif
return false;
}
diff --git a/net/core/net-traces.c b/net/core/net-traces.c
index f2fa34b..d98fdab 100644
--- a/net/core/net-traces.c
+++ b/net/core/net-traces.c
@@ -35,14 +35,12 @@
#include <trace/events/tcp.h>
#include <trace/events/fib.h>
#include <trace/events/qdisc.h>
-#if IS_ENABLED(CONFIG_BRIDGE)
#include <trace/events/bridge.h>
EXPORT_TRACEPOINT_SYMBOL_GPL(br_fdb_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(br_fdb_external_learn_add);
EXPORT_TRACEPOINT_SYMBOL_GPL(fdb_delete);
EXPORT_TRACEPOINT_SYMBOL_GPL(br_fdb_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(br_mdb_full);
-#endif
#if IS_ENABLED(CONFIG_PAGE_POOL)
#include <trace/events/page_pool.h>
diff --git a/net/core/sock.c b/net/core/sock.c
index 5976100..8e85662 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -141,6 +141,7 @@
#include <net/bpf_sk_storage.h>
#include <trace/events/sock.h>
+#include <trace/hooks/sched.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
@@ -3607,9 +3608,19 @@ void sock_def_readable(struct sock *sk)
rcu_read_lock();
wq = rcu_dereference(sk->sk_wq);
- if (skwq_has_sleeper(wq))
+
+ if (skwq_has_sleeper(wq)) {
+ int done = 0;
+
+ trace_android_vh_do_wake_up_sync(&wq->wait, &done, sk);
+ if (done)
+ goto out;
+
wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN | EPOLLPRI |
EPOLLRDNORM | EPOLLRDBAND);
+ }
+
+out:
sk_wake_async_rcu(sk, SOCK_WAKE_WAITD, POLL_IN);
rcu_read_unlock();
}
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 0e55f13..690e815 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -213,6 +213,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.accept_ra_rt_info_max_plen = 0,
#endif
#endif
+ .accept_ra_rt_table = 0,
.proxy_ndp = 0,
.accept_source_route = 0, /* we do not accept RH0 by default. */
.disable_ipv6 = 0,
@@ -278,6 +279,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.accept_ra_rt_info_max_plen = 0,
#endif
#endif
+ .accept_ra_rt_table = 0,
.proxy_ndp = 0,
.accept_source_route = 0, /* we do not accept RH0 by default. */
.disable_ipv6 = 0,
@@ -2452,6 +2454,26 @@ static void ipv6_gen_rnd_iid(struct in6_addr *addr)
goto regen;
}
+u32 addrconf_rt_table(const struct net_device *dev, u32 default_table)
+{
+ struct inet6_dev *idev = in6_dev_get(dev);
+ int sysctl;
+ u32 table;
+
+ if (!idev)
+ return default_table;
+ sysctl = idev->cnf.accept_ra_rt_table;
+ if (sysctl == 0) {
+ table = default_table;
+ } else if (sysctl > 0) {
+ table = (u32) sysctl;
+ } else {
+ table = (unsigned) dev->ifindex + (-sysctl);
+ }
+ in6_dev_put(idev);
+ return table;
+}
+
/*
* Add prefix route.
*/
@@ -2462,7 +2484,7 @@ addrconf_prefix_route(struct in6_addr *pfx, int plen, u32 metric,
u32 flags, gfp_t gfp_flags)
{
struct fib6_config cfg = {
- .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
+ .fc_table = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_PREFIX),
.fc_metric = metric ? : IP6_RT_PRIO_ADDRCONF,
.fc_ifindex = dev->ifindex,
.fc_expires = expires,
@@ -2497,7 +2519,7 @@ static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
struct fib6_node *fn;
struct fib6_info *rt = NULL;
struct fib6_table *table;
- u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
+ u32 tb_id = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_PREFIX);
table = fib6_get_table(dev_net(dev), tb_id);
if (!table)
@@ -7061,6 +7083,13 @@ static const struct ctl_table addrconf_sysctl[] = {
#endif
#endif
{
+ .procname = "accept_ra_rt_table",
+ .data = &ipv6_devconf.accept_ra_rt_table,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
.procname = "proxy_ndp",
.data = &ipv6_devconf.proxy_ndp,
.maxlen = sizeof(int),
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 85df25c..046493e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -4361,7 +4361,7 @@ static struct fib6_info *rt6_get_route_info(struct net *net,
const struct in6_addr *gwaddr,
struct net_device *dev)
{
- u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
+ u32 tb_id = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_INFO);
int ifindex = dev->ifindex;
struct fib6_node *fn;
struct fib6_info *rt = NULL;
@@ -4415,7 +4415,7 @@ static struct fib6_info *rt6_add_route_info(struct net *net,
.fc_nlinfo.nl_net = net,
};
- cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
+ cfg.fc_table = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_INFO);
cfg.fc_dst = *prefix;
cfg.fc_gateway = *gwaddr;
@@ -4433,7 +4433,7 @@ struct fib6_info *rt6_get_dflt_router(struct net *net,
const struct in6_addr *addr,
struct net_device *dev)
{
- u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
+ u32 tb_id = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_DFLT);
struct fib6_info *rt;
struct fib6_table *table;
@@ -4469,7 +4469,7 @@ struct fib6_info *rt6_add_dflt_router(struct net *net,
int lifetime)
{
struct fib6_config cfg = {
- .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
+ .fc_table = l3mdev_fib_table(dev) ? : addrconf_rt_table(dev, RT6_TABLE_DFLT),
.fc_metric = defrtr_usr_metric,
.fc_ifindex = dev->ifindex,
.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
@@ -4495,47 +4495,24 @@ struct fib6_info *rt6_add_dflt_router(struct net *net,
return rt6_get_dflt_router(net, gwaddr, dev);
}
-static void __rt6_purge_dflt_routers(struct net *net,
- struct fib6_table *table)
+static int rt6_addrconf_purge(struct fib6_info *rt, void *arg)
{
- struct fib6_info *rt;
+ struct net_device *dev = fib6_info_nh_dev(rt);
+ struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
-restart:
- rcu_read_lock();
- for_each_fib6_node_rt_rcu(&table->tb6_root) {
- struct net_device *dev = fib6_info_nh_dev(rt);
- struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
-
- if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
- (!idev || idev->cnf.accept_ra != 2) &&
- fib6_info_hold_safe(rt)) {
- rcu_read_unlock();
- ip6_del_rt(net, rt, false);
- goto restart;
- }
+ if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
+ (!idev || idev->cnf.accept_ra != 2)) {
+ /* Delete this route. See fib6_clean_tree() */
+ return -1;
}
- rcu_read_unlock();
- table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
+ /* Continue walking */
+ return 0;
}
void rt6_purge_dflt_routers(struct net *net)
{
- struct fib6_table *table;
- struct hlist_head *head;
- unsigned int h;
-
- rcu_read_lock();
-
- for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
- head = &net->ipv6.fib_table_hash[h];
- hlist_for_each_entry_rcu(table, head, tb6_hlist) {
- if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
- __rt6_purge_dflt_routers(net, table);
- }
- }
-
- rcu_read_unlock();
+ fib6_clean_all(net, rt6_addrconf_purge, NULL);
}
static void rtmsg_to_fib6_config(struct net *net,
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 6cdc994..2182a09 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -1531,6 +1531,29 @@
If you want to compile it as a module, say M here and read
<file:Documentation/kbuild/modules.rst>. If unsure, say `N'.
+config NETFILTER_XT_MATCH_QUOTA2
+ tristate '"quota2" match support'
+ depends on NETFILTER_ADVANCED
+ help
+ This option adds a `quota2' match, which allows to match on a
+ byte counter correctly and not per CPU.
+ It allows naming the quotas.
+ This is based on http://xtables-addons.git.sourceforge.net
+
+ If you want to compile it as a module, say M here and read
+ <file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
+
+config NETFILTER_XT_MATCH_QUOTA2_LOG
+ bool '"quota2" Netfilter LOG support'
+ depends on NETFILTER_XT_MATCH_QUOTA2
+ default n
+ help
+ This option allows `quota2' to log ONCE when a quota limit
+ is passed. It logs via NETLINK using the NETLINK_NFLOG family.
+ It logs similarly to how ipt_ULOG would without data.
+
+ If unsure, say `N'.
+
config NETFILTER_XT_MATCH_RATEEST
tristate '"rateest" match support'
depends on NETFILTER_ADVANCED
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 6bfc250..1b61280 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -220,6 +220,7 @@
obj-$(CONFIG_NETFILTER_XT_MATCH_PKTTYPE) += xt_pkttype.o
obj-$(CONFIG_NETFILTER_XT_MATCH_POLICY) += xt_policy.o
obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA) += xt_quota.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_QUOTA2) += xt_quota2.o
obj-$(CONFIG_NETFILTER_XT_MATCH_RATEEST) += xt_rateest.o
obj-$(CONFIG_NETFILTER_XT_MATCH_REALM) += xt_realm.o
obj-$(CONFIG_NETFILTER_XT_MATCH_RECENT) += xt_recent.o
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index 5d93e22..0091bd8 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -28,6 +28,11 @@
#include <linux/kobject.h>
#include <linux/workqueue.h>
#include <linux/sysfs.h>
+#include <linux/suspend.h>
+#include <net/sock.h>
+#include <net/inet_sock.h>
+
+#define NLMSG_MAX_SIZE 64
struct idletimer_tg {
struct list_head entry;
@@ -38,15 +43,112 @@ struct idletimer_tg {
struct kobject *kobj;
struct device_attribute attr;
+ struct timespec64 delayed_timer_trigger;
+ struct timespec64 last_modified_timer;
+ struct timespec64 last_suspend_time;
+ struct notifier_block pm_nb;
+
+ int timeout;
unsigned int refcnt;
u8 timer_type;
+
+ bool work_pending;
+ bool send_nl_msg;
+ bool active;
+ uid_t uid;
+ bool suspend_time_valid;
};
static LIST_HEAD(idletimer_tg_list);
static DEFINE_MUTEX(list_mutex);
+static DEFINE_SPINLOCK(timestamp_lock);
static struct kobject *idletimer_tg_kobj;
+static bool check_for_delayed_trigger(struct idletimer_tg *timer,
+ struct timespec64 *ts)
+{
+ bool state;
+ struct timespec64 temp;
+ spin_lock_bh(×tamp_lock);
+ timer->work_pending = false;
+ if ((ts->tv_sec - timer->last_modified_timer.tv_sec) > timer->timeout ||
+ timer->delayed_timer_trigger.tv_sec != 0) {
+ state = false;
+ temp.tv_sec = timer->timeout;
+ temp.tv_nsec = 0;
+ if (timer->delayed_timer_trigger.tv_sec != 0) {
+ temp = timespec64_add(timer->delayed_timer_trigger,
+ temp);
+ ts->tv_sec = temp.tv_sec;
+ ts->tv_nsec = temp.tv_nsec;
+ timer->delayed_timer_trigger.tv_sec = 0;
+ timer->work_pending = true;
+ schedule_work(&timer->work);
+ } else {
+ temp = timespec64_add(timer->last_modified_timer, temp);
+ ts->tv_sec = temp.tv_sec;
+ ts->tv_nsec = temp.tv_nsec;
+ }
+ } else {
+ state = timer->active;
+ }
+ spin_unlock_bh(×tamp_lock);
+ return state;
+}
+
+static void notify_netlink_uevent(const char *iface, struct idletimer_tg *timer)
+{
+ char iface_msg[NLMSG_MAX_SIZE];
+ char state_msg[NLMSG_MAX_SIZE];
+ char timestamp_msg[NLMSG_MAX_SIZE];
+ char uid_msg[NLMSG_MAX_SIZE];
+ char *envp[] = { iface_msg, state_msg, timestamp_msg, uid_msg, NULL };
+ int res;
+ struct timespec64 ts;
+ u64 time_ns;
+ bool state;
+
+ res = snprintf(iface_msg, NLMSG_MAX_SIZE, "INTERFACE=%s",
+ iface);
+ if (NLMSG_MAX_SIZE <= res) {
+ pr_err("message too long (%d)\n", res);
+ return;
+ }
+
+ ts = ktime_to_timespec64(ktime_get_boottime());
+ state = check_for_delayed_trigger(timer, &ts);
+ res = snprintf(state_msg, NLMSG_MAX_SIZE, "STATE=%s",
+ state ? "active" : "inactive");
+
+ if (NLMSG_MAX_SIZE <= res) {
+ pr_err("message too long (%d)\n", res);
+ return;
+ }
+
+ if (state) {
+ res = snprintf(uid_msg, NLMSG_MAX_SIZE, "UID=%u", timer->uid);
+ if (NLMSG_MAX_SIZE <= res)
+ pr_err("message too long (%d)\n", res);
+ } else {
+ res = snprintf(uid_msg, NLMSG_MAX_SIZE, "UID=");
+ if (NLMSG_MAX_SIZE <= res)
+ pr_err("message too long (%d)\n", res);
+ }
+
+ time_ns = timespec64_to_ns(&ts);
+ res = snprintf(timestamp_msg, NLMSG_MAX_SIZE, "TIME_NS=%llu", time_ns);
+ if (NLMSG_MAX_SIZE <= res) {
+ timestamp_msg[0] = '\0';
+ pr_err("message too long (%d)\n", res);
+ }
+
+ pr_debug("putting nlmsg: <%s> <%s> <%s> <%s>\n", iface_msg, state_msg,
+ timestamp_msg, uid_msg);
+ kobject_uevent_env(idletimer_tg_kobj, KOBJ_CHANGE, envp);
+ return;
+}
+
static
struct idletimer_tg *__idletimer_tg_find_by_label(const char *label)
{
@@ -67,6 +169,7 @@ static ssize_t idletimer_tg_show(struct device *dev,
unsigned long expires = 0;
struct timespec64 ktimespec = {};
long time_diff = 0;
+ unsigned long now = jiffies;
mutex_lock(&list_mutex);
@@ -78,15 +181,19 @@ static ssize_t idletimer_tg_show(struct device *dev,
time_diff = ktimespec.tv_sec;
} else {
expires = timer->timer.expires;
- time_diff = jiffies_to_msecs(expires - jiffies) / 1000;
+ time_diff = jiffies_to_msecs(expires - now) / 1000;
}
}
mutex_unlock(&list_mutex);
- if (time_after(expires, jiffies) || ktimespec.tv_sec > 0)
+ if (time_after(expires, now) || ktimespec.tv_sec > 0)
return sysfs_emit(buf, "%ld\n", time_diff);
+ if (timer->send_nl_msg)
+ return sysfs_emit(buf, "0 %d\n",
+ jiffies_to_msecs(now - expires) / 1000);
+
return sysfs_emit(buf, "0\n");
}
@@ -96,6 +203,9 @@ static void idletimer_tg_work(struct work_struct *work)
work);
sysfs_notify(idletimer_tg_kobj, NULL, timer->attr.attr.name);
+
+ if (timer->send_nl_msg)
+ notify_netlink_uevent(timer->attr.attr.name, timer);
}
static void idletimer_tg_expired(struct timer_list *t)
@@ -104,7 +214,62 @@ static void idletimer_tg_expired(struct timer_list *t)
pr_debug("timer %s expired\n", timer->attr.attr.name);
+ spin_lock_bh(×tamp_lock);
+ timer->active = false;
+ timer->work_pending = true;
schedule_work(&timer->work);
+ spin_unlock_bh(×tamp_lock);
+}
+
+static int idletimer_resume(struct notifier_block *notifier,
+ unsigned long pm_event, void *unused)
+{
+ struct timespec64 ts;
+ unsigned long time_diff, now = jiffies;
+ struct idletimer_tg *timer = container_of(notifier,
+ struct idletimer_tg, pm_nb);
+ if (!timer)
+ return NOTIFY_DONE;
+
+ switch (pm_event) {
+ case PM_SUSPEND_PREPARE:
+ timer->last_suspend_time =
+ ktime_to_timespec64(ktime_get_boottime());
+ timer->suspend_time_valid = true;
+ break;
+ case PM_POST_SUSPEND:
+ if (!timer->suspend_time_valid)
+ break;
+ timer->suspend_time_valid = false;
+
+ spin_lock_bh(×tamp_lock);
+ if (!timer->active) {
+ spin_unlock_bh(×tamp_lock);
+ break;
+ }
+ /* since jiffies are not updated when suspended now represents
+ * the time it would have suspended */
+ if (time_after(timer->timer.expires, now)) {
+ ts = ktime_to_timespec64(ktime_get_boottime());
+ ts = timespec64_sub(ts, timer->last_suspend_time);
+ time_diff = timespec64_to_jiffies(&ts);
+ if (timer->timer.expires > (time_diff + now)) {
+ mod_timer_pending(&timer->timer,
+ (timer->timer.expires - time_diff));
+ } else {
+ timer_delete(&timer->timer);
+ timer->timer.expires = 0;
+ timer->active = false;
+ timer->work_pending = true;
+ schedule_work(&timer->work);
+ }
+ }
+ spin_unlock_bh(×tamp_lock);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_DONE;
}
static void idletimer_tg_alarmproc(struct alarm *alarm, ktime_t now)
@@ -156,17 +321,34 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
ret = sysfs_create_file(idletimer_tg_kobj, &info->timer->attr.attr);
if (ret < 0) {
- pr_debug("couldn't add file to sysfs");
+ pr_debug("couldn't add file to sysfs\n");
goto out_free_attr;
}
list_add(&info->timer->entry, &idletimer_tg_list);
-
- timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
+ pr_debug("timer type value is 0.\n");
+ info->timer->timer_type = 0;
info->timer->refcnt = 1;
+ info->timer->send_nl_msg = false;
+ info->timer->active = true;
+ info->timer->timeout = info->timeout;
+
+ info->timer->delayed_timer_trigger.tv_sec = 0;
+ info->timer->delayed_timer_trigger.tv_nsec = 0;
+ info->timer->work_pending = false;
+ info->timer->uid = 0;
+ info->timer->last_modified_timer =
+ ktime_to_timespec64(ktime_get_boottime());
+
+ info->timer->pm_nb.notifier_call = idletimer_resume;
+ ret = register_pm_notifier(&info->timer->pm_nb);
+ if (ret)
+ printk(KERN_WARNING "[%s] Failed to register pm notifier %d\n",
+ __func__, ret);
INIT_WORK(&info->timer->work, idletimer_tg_work);
+ timer_setup(&info->timer->timer, idletimer_tg_expired, 0);
mod_timer(&info->timer->timer,
secs_to_jiffies(info->timeout) + jiffies);
@@ -205,7 +387,7 @@ static int idletimer_tg_create_v1(struct idletimer_tg_info_v1 *info)
ret = sysfs_create_file(idletimer_tg_kobj, &info->timer->attr.attr);
if (ret < 0) {
- pr_debug("couldn't add file to sysfs");
+ pr_debug("couldn't add file to sysfs\n");
goto out_free_attr;
}
@@ -213,9 +395,26 @@ static int idletimer_tg_create_v1(struct idletimer_tg_info_v1 *info)
kobject_uevent(idletimer_tg_kobj,KOBJ_ADD);
list_add(&info->timer->entry, &idletimer_tg_list);
- pr_debug("timer type value is %u", info->timer_type);
+ pr_debug("timer type value is %u\n", info->timer_type);
info->timer->timer_type = info->timer_type;
info->timer->refcnt = 1;
+ info->timer->send_nl_msg = (info->send_nl_msg != 0);
+ info->timer->active = true;
+ info->timer->timeout = info->timeout;
+
+ info->timer->delayed_timer_trigger.tv_sec = 0;
+ info->timer->delayed_timer_trigger.tv_nsec = 0;
+ info->timer->work_pending = false;
+ info->timer->uid = 0;
+ info->timer->last_modified_timer =
+ ktime_to_timespec64(ktime_get_boottime());
+ info->timer->suspend_time_valid = false;
+
+ info->timer->pm_nb.notifier_call = idletimer_resume;
+ ret = register_pm_notifier(&info->timer->pm_nb);
+ if (ret)
+ printk(KERN_WARNING "[%s] Failed to register pm notifier %d\n",
+ __func__, ret);
INIT_WORK(&info->timer->work, idletimer_tg_work);
@@ -242,6 +441,41 @@ static int idletimer_tg_create_v1(struct idletimer_tg_info_v1 *info)
return ret;
}
+static void reset_timer(struct idletimer_tg * const info_timer,
+ const __u32 info_timeout,
+ struct sk_buff *skb)
+{
+ unsigned long now = jiffies;
+ bool timer_prev;
+
+ spin_lock_bh(×tamp_lock);
+ timer_prev = info_timer->active;
+ info_timer->active = true;
+ /* timer_prev is used to guard overflow problem in time_before*/
+ if (!timer_prev || time_before(info_timer->timer.expires, now)) {
+ pr_debug("Starting Checkentry timer (Expired, Jiffies): %lu, %lu\n",
+ info_timer->timer.expires, now);
+
+ /* Stores the uid resposible for waking up the radio */
+ if (skb && (skb->sk)) {
+ info_timer->uid = from_kuid_munged(current_user_ns(),
+ sk_uid(skb_to_full_sk(skb)));
+ }
+
+ /* checks if there is a pending inactive notification*/
+ if (info_timer->work_pending)
+ info_timer->delayed_timer_trigger = info_timer->last_modified_timer;
+ else {
+ info_timer->work_pending = true;
+ schedule_work(&info_timer->work);
+ }
+ }
+
+ info_timer->last_modified_timer = ktime_to_timespec64(ktime_get_boottime());
+ mod_timer(&info_timer->timer, secs_to_jiffies(info_timeout) + now);
+ spin_unlock_bh(×tamp_lock);
+}
+
/*
* The actual xt_tables plugin.
*/
@@ -249,12 +483,21 @@ static unsigned int idletimer_tg_target(struct sk_buff *skb,
const struct xt_action_param *par)
{
const struct idletimer_tg_info *info = par->targinfo;
+ unsigned long now = jiffies;
pr_debug("resetting timer %s, timeout period %u\n",
info->label, info->timeout);
- mod_timer(&info->timer->timer,
- secs_to_jiffies(info->timeout) + jiffies);
+ info->timer->active = true;
+
+ if (time_before(info->timer->timer.expires, now)) {
+ schedule_work(&info->timer->work);
+ pr_debug("Starting timer %s (Expired, Jiffies): %lu, %lu\n",
+ info->label, info->timer->timer.expires, now);
+ }
+
+ /* TODO: Avoid modifying timers on each packet */
+ reset_timer(info->timer, info->timeout, skb);
return XT_CONTINUE;
}
@@ -266,6 +509,7 @@ static unsigned int idletimer_tg_target_v1(struct sk_buff *skb,
const struct xt_action_param *par)
{
const struct idletimer_tg_info_v1 *info = par->targinfo;
+ unsigned long now = jiffies;
pr_debug("resetting timer %s, timeout period %u\n",
info->label, info->timeout);
@@ -274,8 +518,16 @@ static unsigned int idletimer_tg_target_v1(struct sk_buff *skb,
ktime_t tout = ktime_set(info->timeout, 0);
alarm_start_relative(&info->timer->alarm, tout);
} else {
- mod_timer(&info->timer->timer,
- secs_to_jiffies(info->timeout) + jiffies);
+ info->timer->active = true;
+
+ if (time_before(info->timer->timer.expires, now)) {
+ schedule_work(&info->timer->work);
+ pr_debug("Starting timer %s (Expired, Jiffies): %lu, %lu\n",
+ info->label, info->timer->timer.expires, now);
+ }
+
+ /* TODO: Avoid modifying timers on each packet */
+ reset_timer(info->timer, info->timeout, skb);
}
return XT_CONTINUE;
@@ -319,9 +571,7 @@ static int idletimer_tg_checkentry(const struct xt_tgchk_param *par)
info->timer = __idletimer_tg_find_by_label(info->label);
if (info->timer) {
info->timer->refcnt++;
- mod_timer(&info->timer->timer,
- secs_to_jiffies(info->timeout) + jiffies);
-
+ reset_timer(info->timer, info->timeout, NULL);
pr_debug("increased refcnt of timer %s to %u\n",
info->label, info->timer->refcnt);
} else {
@@ -344,9 +594,6 @@ static int idletimer_tg_checkentry_v1(const struct xt_tgchk_param *par)
pr_debug("checkentry targinfo%s\n", info->label);
- if (info->send_nl_msg)
- return -EOPNOTSUPP;
-
ret = idletimer_tg_helper((struct idletimer_tg_info *)info);
if(ret < 0)
{
@@ -359,6 +606,11 @@ static int idletimer_tg_checkentry_v1(const struct xt_tgchk_param *par)
return -EINVAL;
}
+ if (info->send_nl_msg > 1) {
+ pr_debug("invalid value for send_nl_msg\n");
+ return -EINVAL;
+ }
+
mutex_lock(&list_mutex);
info->timer = __idletimer_tg_find_by_label(info->label);
@@ -381,8 +633,7 @@ static int idletimer_tg_checkentry_v1(const struct xt_tgchk_param *par)
alarm_start_relative(&info->timer->alarm, tout);
}
} else {
- mod_timer(&info->timer->timer,
- secs_to_jiffies(info->timeout) + jiffies);
+ reset_timer(info->timer, info->timeout, NULL);
}
pr_debug("increased refcnt of timer %s to %u\n",
info->label, info->timer->refcnt);
@@ -420,8 +671,9 @@ static void idletimer_tg_destroy(const struct xt_tgdtor_param *par)
mutex_unlock(&list_mutex);
timer_shutdown_sync(&info->timer->timer);
- cancel_work_sync(&info->timer->work);
sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+ unregister_pm_notifier(&info->timer->pm_nb);
+ cancel_work_sync(&info->timer->work);
kfree(info->timer->attr.attr.name);
kfree(info->timer);
}
@@ -451,8 +703,9 @@ static void idletimer_tg_destroy_v1(const struct xt_tgdtor_param *par)
} else {
timer_shutdown_sync(&info->timer->timer);
}
- cancel_work_sync(&info->timer->work);
sysfs_remove_file(idletimer_tg_kobj, &info->timer->attr.attr);
+ unregister_pm_notifier(&info->timer->pm_nb);
+ cancel_work_sync(&info->timer->work);
kfree(info->timer->attr.attr.name);
kfree(info->timer);
}
@@ -563,3 +816,4 @@ MODULE_DESCRIPTION("Xtables: idle time monitor");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("ipt_IDLETIMER");
MODULE_ALIAS("ip6t_IDLETIMER");
+MODULE_ALIAS("arpt_IDLETIMER");
diff --git a/net/netfilter/xt_quota2.c b/net/netfilter/xt_quota2.c
new file mode 100644
index 0000000..e05616e
--- /dev/null
+++ b/net/netfilter/xt_quota2.c
@@ -0,0 +1,397 @@
+/*
+ * xt_quota2 - enhanced xt_quota that can count upwards and in packets
+ * as a minimal accounting match.
+ * by Jan Engelhardt <jengelh@medozas.de>, 2008
+ *
+ * Originally based on xt_quota.c:
+ * netfilter module to enforce network quotas
+ * Sam Johnston <samj@samj.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License; either
+ * version 2 of the License, as published by the Free Software Foundation.
+ */
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+#include <net/netlink.h>
+
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_quota2.h>
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+/* For compatibility, these definitions are copied from the
+ * deprecated header file <linux/netfilter_ipv4/ipt_ULOG.h> */
+#define ULOG_MAC_LEN 80
+#define ULOG_PREFIX_LEN 32
+
+/* Format of the ULOG packets passed through netlink */
+typedef struct ulog_packet_msg {
+ unsigned long mark;
+ long timestamp_sec;
+ long timestamp_usec;
+ unsigned int hook;
+ char indev_name[IFNAMSIZ];
+ char outdev_name[IFNAMSIZ];
+ size_t data_len;
+ char prefix[ULOG_PREFIX_LEN];
+ unsigned char mac_len;
+ unsigned char mac[ULOG_MAC_LEN];
+ unsigned char payload[0];
+} ulog_packet_msg_t;
+#endif
+
+/**
+ * @lock: lock to protect quota writers from each other
+ */
+struct xt_quota_counter {
+ u_int64_t quota;
+ spinlock_t lock;
+ struct list_head list;
+ atomic_t ref;
+ char name[sizeof(((struct xt_quota_mtinfo2 *)NULL)->name)];
+ struct proc_dir_entry *procfs_entry;
+};
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+/* Harald's favorite number +1 :D From ipt_ULOG.C */
+static int qlog_nl_event = 112;
+module_param_named(event_num, qlog_nl_event, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(event_num,
+ "Event number for NETLINK_NFLOG message. 0 disables log."
+ "111 is what ipt_ULOG uses.");
+static struct sock *nflognl;
+#endif
+
+static LIST_HEAD(counter_list);
+static DEFINE_SPINLOCK(counter_list_lock);
+
+static struct proc_dir_entry *proc_xt_quota;
+static unsigned int quota_list_perms = S_IRUGO | S_IWUSR;
+static kuid_t quota_list_uid = KUIDT_INIT(0);
+static kgid_t quota_list_gid = KGIDT_INIT(0);
+module_param_named(perms, quota_list_perms, uint, S_IRUGO | S_IWUSR);
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+static void quota2_log(unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const char *prefix)
+{
+ ulog_packet_msg_t *pm;
+ struct sk_buff *log_skb;
+ size_t size;
+ struct nlmsghdr *nlh;
+
+ if (!qlog_nl_event)
+ return;
+
+ size = NLMSG_SPACE(sizeof(*pm));
+ size = max(size, (size_t)NLMSG_GOODSIZE);
+ log_skb = alloc_skb(size, GFP_ATOMIC);
+ if (!log_skb) {
+ pr_err("xt_quota2: cannot alloc skb for logging\n");
+ return;
+ }
+
+ nlh = nlmsg_put(log_skb, /*pid*/0, /*seq*/0, qlog_nl_event,
+ sizeof(*pm), 0);
+ if (!nlh) {
+ pr_err("xt_quota2: nlmsg_put failed\n");
+ kfree_skb(log_skb);
+ return;
+ }
+ pm = nlmsg_data(nlh);
+ memset(pm, 0, sizeof(*pm));
+ if (skb->tstamp == 0)
+ __net_timestamp((struct sk_buff *)skb);
+ pm->hook = hooknum;
+ if (prefix != NULL)
+ strscpy(pm->prefix, prefix, sizeof(pm->prefix));
+ if (in)
+ strscpy(pm->indev_name, in->name, sizeof(pm->indev_name));
+ if (out)
+ strscpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
+
+ NETLINK_CB(log_skb).dst_group = 1;
+ pr_debug("throwing 1 packets to netlink group 1\n");
+ netlink_broadcast(nflognl, log_skb, 0, 1, GFP_ATOMIC);
+}
+#else
+static void quota2_log(unsigned int hooknum,
+ const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const char *prefix)
+{
+}
+#endif /* if+else CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG */
+
+static ssize_t quota_proc_read(struct file *file, char __user *buf,
+ size_t size, loff_t *ppos)
+{
+ struct xt_quota_counter *e = pde_data(file_inode(file));
+ char tmp[24];
+ size_t tmp_size;
+
+ spin_lock_bh(&e->lock);
+ tmp_size = scnprintf(tmp, sizeof(tmp), "%llu\n", e->quota);
+ spin_unlock_bh(&e->lock);
+ return simple_read_from_buffer(buf, size, ppos, tmp, tmp_size);
+}
+
+static ssize_t quota_proc_write(struct file *file, const char __user *input,
+ size_t size, loff_t *ppos)
+{
+ struct xt_quota_counter *e = pde_data(file_inode(file));
+ char buf[sizeof("18446744073709551616")];
+
+ if (size > sizeof(buf))
+ size = sizeof(buf);
+ if (copy_from_user(buf, input, size) != 0)
+ return -EFAULT;
+ buf[sizeof(buf)-1] = '\0';
+ if (size < sizeof(buf))
+ buf[size] = '\0';
+
+ spin_lock_bh(&e->lock);
+ e->quota = simple_strtoull(buf, NULL, 0);
+ spin_unlock_bh(&e->lock);
+ return size;
+}
+
+static const struct proc_ops q2_counter_fops = {
+ .proc_read = quota_proc_read,
+ .proc_write = quota_proc_write,
+ .proc_lseek = default_llseek,
+};
+
+static struct xt_quota_counter *
+q2_new_counter(const struct xt_quota_mtinfo2 *q, bool anon)
+{
+ struct xt_quota_counter *e;
+ unsigned int size;
+
+ /* Do not need all the procfs things for anonymous counters. */
+ size = anon ? offsetof(typeof(*e), list) : sizeof(*e);
+ e = kmalloc(size, GFP_KERNEL);
+ if (e == NULL)
+ return NULL;
+
+ e->quota = q->quota;
+ spin_lock_init(&e->lock);
+ if (!anon) {
+ INIT_LIST_HEAD(&e->list);
+ atomic_set(&e->ref, 1);
+ strscpy(e->name, q->name, sizeof(e->name));
+ }
+ return e;
+}
+
+/**
+ * q2_get_counter - get ref to counter or create new
+ * @name: name of counter
+ */
+static struct xt_quota_counter *
+q2_get_counter(const struct xt_quota_mtinfo2 *q)
+{
+ struct proc_dir_entry *p;
+ struct xt_quota_counter *e = NULL;
+ struct xt_quota_counter *new_e;
+
+ if (*q->name == '\0')
+ return q2_new_counter(q, true);
+
+ /* No need to hold a lock while getting a new counter */
+ new_e = q2_new_counter(q, false);
+ if (new_e == NULL)
+ goto out;
+
+ spin_lock_bh(&counter_list_lock);
+ list_for_each_entry(e, &counter_list, list)
+ if (strcmp(e->name, q->name) == 0) {
+ atomic_inc(&e->ref);
+ spin_unlock_bh(&counter_list_lock);
+ kfree(new_e);
+ pr_debug("xt_quota2: old counter name=%s", e->name);
+ return e;
+ }
+ e = new_e;
+ pr_debug("xt_quota2: new_counter name=%s", e->name);
+ list_add_tail(&e->list, &counter_list);
+ /* The entry having a refcount of 1 is not directly destructible.
+ * This func has not yet returned the new entry, thus iptables
+ * has not references for destroying this entry.
+ * For another rule to try to destroy it, it would 1st need for this
+ * func* to be re-invoked, acquire a new ref for the same named quota.
+ * Nobody will access the e->procfs_entry either.
+ * So release the lock. */
+ spin_unlock_bh(&counter_list_lock);
+
+ /* create_proc_entry() is not spin_lock happy */
+ p = e->procfs_entry = proc_create_data(e->name, quota_list_perms,
+ proc_xt_quota, &q2_counter_fops, e);
+
+ if (IS_ERR_OR_NULL(p)) {
+ spin_lock_bh(&counter_list_lock);
+ list_del(&e->list);
+ spin_unlock_bh(&counter_list_lock);
+ goto out;
+ }
+ proc_set_user(p, quota_list_uid, quota_list_gid);
+ return e;
+
+ out:
+ kfree(e);
+ return NULL;
+}
+
+static int quota_mt2_check(const struct xt_mtchk_param *par)
+{
+ struct xt_quota_mtinfo2 *q = par->matchinfo;
+
+ pr_debug("xt_quota2: check() flags=0x%04x", q->flags);
+
+ if (q->flags & ~XT_QUOTA_MASK)
+ return -EINVAL;
+
+ q->name[sizeof(q->name)-1] = '\0';
+ if (*q->name == '.' || strchr(q->name, '/') != NULL) {
+ printk(KERN_ERR "xt_quota.3: illegal name\n");
+ return -EINVAL;
+ }
+
+ q->master = q2_get_counter(q);
+ if (q->master == NULL) {
+ printk(KERN_ERR "xt_quota.3: memory alloc failure\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void quota_mt2_destroy(const struct xt_mtdtor_param *par)
+{
+ struct xt_quota_mtinfo2 *q = par->matchinfo;
+ struct xt_quota_counter *e = q->master;
+
+ if (*q->name == '\0') {
+ kfree(e);
+ return;
+ }
+
+ spin_lock_bh(&counter_list_lock);
+ if (!atomic_dec_and_test(&e->ref)) {
+ spin_unlock_bh(&counter_list_lock);
+ return;
+ }
+
+ list_del(&e->list);
+ spin_unlock_bh(&counter_list_lock);
+ remove_proc_entry(e->name, proc_xt_quota);
+ kfree(e);
+}
+
+static bool
+quota_mt2(const struct sk_buff *skb, struct xt_action_param *par)
+{
+ struct xt_quota_mtinfo2 *q = (void *)par->matchinfo;
+ struct xt_quota_counter *e = q->master;
+ int charge = (q->flags & XT_QUOTA_PACKET) ? 1 : skb->len;
+ bool no_change = q->flags & XT_QUOTA_NO_CHANGE;
+ bool ret = q->flags & XT_QUOTA_INVERT;
+
+ spin_lock_bh(&e->lock);
+ if (q->flags & XT_QUOTA_GROW) {
+ /*
+ * While no_change is pointless in "grow" mode, we will
+ * implement it here simply to have a consistent behavior.
+ */
+ if (!no_change)
+ e->quota += charge;
+ ret = true; /* note: does not respect inversion (bug??) */
+ } else {
+ if (e->quota > charge) {
+ if (!no_change)
+ e->quota -= charge;
+ ret = !ret;
+ } else if (e->quota) {
+ /* We are transitioning, log that fact. */
+ quota2_log(xt_hooknum(par),
+ skb,
+ xt_in(par),
+ xt_out(par),
+ q->name);
+ /* we do not allow even small packets from now on */
+ e->quota = 0;
+ }
+ }
+ spin_unlock_bh(&e->lock);
+ return ret;
+}
+
+static struct xt_match quota_mt2_reg[] __read_mostly = {
+ {
+ .name = "quota2",
+ .revision = 3,
+ .family = NFPROTO_IPV4,
+ .checkentry = quota_mt2_check,
+ .match = quota_mt2,
+ .destroy = quota_mt2_destroy,
+ .matchsize = sizeof(struct xt_quota_mtinfo2),
+ .usersize = offsetof(struct xt_quota_mtinfo2, master),
+ .me = THIS_MODULE,
+ },
+ {
+ .name = "quota2",
+ .revision = 3,
+ .family = NFPROTO_IPV6,
+ .checkentry = quota_mt2_check,
+ .match = quota_mt2,
+ .destroy = quota_mt2_destroy,
+ .matchsize = sizeof(struct xt_quota_mtinfo2),
+ .usersize = offsetof(struct xt_quota_mtinfo2, master),
+ .me = THIS_MODULE,
+ },
+};
+
+static int __init quota_mt2_init(void)
+{
+ int ret;
+ pr_debug("xt_quota2: init()");
+
+#ifdef CONFIG_NETFILTER_XT_MATCH_QUOTA2_LOG
+ nflognl = netlink_kernel_create(&init_net, NETLINK_NFLOG, NULL);
+ if (!nflognl)
+ return -ENOMEM;
+#endif
+
+ proc_xt_quota = proc_mkdir("xt_quota", init_net.proc_net);
+ if (proc_xt_quota == NULL)
+ return -EACCES;
+
+ ret = xt_register_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+ if (ret < 0)
+ remove_proc_entry("xt_quota", init_net.proc_net);
+ pr_debug("xt_quota2: init() %d", ret);
+ return ret;
+}
+
+static void __exit quota_mt2_exit(void)
+{
+ xt_unregister_matches(quota_mt2_reg, ARRAY_SIZE(quota_mt2_reg));
+ remove_proc_entry("xt_quota", init_net.proc_net);
+}
+
+module_init(quota_mt2_init);
+module_exit(quota_mt2_exit);
+MODULE_DESCRIPTION("Xtables: countdown quota match; up counter");
+MODULE_AUTHOR("Sam Johnston <samj@samj.net>");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_quota2");
+MODULE_ALIAS("ip6t_quota2");
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index 8a9fb23..f53146e 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -331,7 +331,24 @@ static struct sk_buff *virtio_transport_alloc_skb(struct virtio_vsock_pkt_info *
static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
struct virtio_vsock_pkt_info *info)
{
- u32 max_skb_len = VIRTIO_VSOCK_MAX_PKT_BUF_SIZE;
+ /* ANDROID:
+ *
+ * Older host kernels (including the 5.10-based images used by
+ * Cuttlefish) only support linear SKBs on the RX path.
+ * Consequently, if we transmit a VIRTIO_VSOCK_MAX_PKT_BUF_SIZE
+ * packet, the host allocation can fail and the packet will be
+ * silently dropped.
+ *
+ * As a nasty workaround, limit the entire SKB to ~28KiB, which
+ * allows for 4KiB of SKB wiggle room whilst keeping the
+ * allocation below PAGE_ALLOC_COSTLY_ORDER.
+ *
+ * This can be removed when all supported host kernels have
+ * support for non-linear RX buffers introduced by Change-Id
+ * I4212a8daf9f19b5bbffc06ce93338c823de7bb19.
+ */
+ u32 max_skb_len = min_t(u32, VIRTIO_VSOCK_MAX_PKT_BUF_SIZE,
+ SKB_WITH_OVERHEAD(SZ_32K - VIRTIO_VSOCK_SKB_HEADROOM) - SZ_4K);
u32 src_cid, src_port, dst_cid, dst_port;
const struct virtio_transport *t_ops;
struct virtio_vsock_sock *vvs;
@@ -376,7 +393,7 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk,
can_zcopy = virtio_transport_can_zcopy(t_ops, info, pkt_len);
if (can_zcopy)
- max_skb_len = min_t(u32, VIRTIO_VSOCK_MAX_PKT_BUF_SIZE,
+ max_skb_len = min_t(u32, max_skb_len,
(MAX_SKB_FRAGS * PAGE_SIZE));
}
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 54222fc..dfb5358 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -481,13 +481,11 @@ static int xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
return -EOPNOTSUPP;
}
-#if IS_ENABLED(CONFIG_NET_PKTGEN)
int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
return xfrm_outer_mode_output(x, skb);
}
EXPORT_SYMBOL_GPL(pktgen_xfrm_outer_mode_output);
-#endif
static int xfrm_output_one(struct sk_buff *skb, int err)
{
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 083cc44..6ecd4ed 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -64,7 +64,9 @@
#include <linux/jiffies.h>
#include <linux/jump_label.h>
#include <linux/mdio.h>
+#include <linux/memfd.h>
#include <linux/mm.h>
+#include <linux/mman.h>
#include <linux/miscdevice.h>
#include <linux/of_device.h>
#include <linux/pci.h>
@@ -80,6 +82,7 @@
#include <linux/regulator/consumer.h>
#include <linux/sched.h>
#include <linux/security.h>
+#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/sys_soc.h>
#include <linux/task_work.h>
@@ -89,6 +92,8 @@
#include <linux/workqueue.h>
#include <linux/xarray.h>
#include <trace/events/rust_sample.h>
+#include <uapi/linux/falloc.h>
+#include <uapi/linux/sched/types.h>
/*
* The driver-core Rust code needs to know about some C driver-core private
@@ -151,3 +156,9 @@ const vm_flags_t RUST_CONST_HELPER_VM_NOHUGEPAGE = VM_NOHUGEPAGE;
#include "../../drivers/android/binder/rust_binder_events.h"
#include "../../drivers/android/binder/page_range_helper.h"
#endif
+
+#ifdef CONFIG_ASHMEM_RUST
+#include "../../drivers/staging/android/ashmem.h"
+const size_t RUST_CONST_HELPER_ASHMEM_NAME_PREFIX_LEN = ASHMEM_NAME_PREFIX_LEN;
+const size_t RUST_CONST_HELPER_ASHMEM_FULL_NAME_LEN = ASHMEM_FULL_NAME_LEN;
+#endif
diff --git a/rust/helpers/fs.c b/rust/helpers/fs.c
index 789d60f..e170945 100644
--- a/rust/helpers/fs.c
+++ b/rust/helpers/fs.c
@@ -10,3 +10,8 @@ __rust_helper struct file *rust_helper_get_file(struct file *f)
{
return get_file(f);
}
+
+loff_t rust_helper_i_size_read(const struct inode *inode)
+{
+ return i_size_read(inode);
+}
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
index a3c42e5..f55a694 100644
--- a/rust/helpers/helpers.c
+++ b/rust/helpers/helpers.c
@@ -37,6 +37,7 @@
#include "kunit.c"
#include "maple_tree.c"
#include "mm.c"
+#include "mman.c"
#include "mutex.c"
#include "of.c"
#include "page.c"
diff --git a/rust/helpers/mman.c b/rust/helpers/mman.c
new file mode 100644
index 0000000..1ebcf42
--- /dev/null
+++ b/rust/helpers/mman.c
@@ -0,0 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/mman.h>
+
+void rust_helper_lockdep_set_class_rwsem(struct rw_semaphore *lock, struct lock_class_key *key,
+ const char *name)
+{
+ lockdep_set_class_and_name(lock, key, name);
+}
diff --git a/rust/kernel/miscdevice.rs b/rust/kernel/miscdevice.rs
index c3c2052..38644c8 100644
--- a/rust/kernel/miscdevice.rs
+++ b/rust/kernel/miscdevice.rs
@@ -13,7 +13,7 @@
device::Device,
error::{to_result, Error, Result, VTABLE_DEFAULT_ERROR},
ffi::{c_int, c_long, c_uint, c_ulong},
- fs::{File, Kiocb},
+ fs::{File, Kiocb, LocalFile},
iov::{IovIterDest, IovIterSource},
mm::virt::VmaNew,
prelude::*,
@@ -22,6 +22,10 @@
};
use core::{marker::PhantomData, pin::Pin};
+/// The kernel `loff_t` type.
+#[allow(non_camel_case_types)]
+pub type loff_t = bindings::loff_t;
+
/// Options for creating a misc device.
#[derive(Copy, Clone)]
pub struct MiscDeviceOptions {
@@ -141,6 +145,16 @@ fn mmap(
build_error!(VTABLE_DEFAULT_ERROR)
}
+ /// Seeks this miscdevice.
+ fn llseek(
+ _device: <Self::Ptr as ForeignOwnable>::Borrowed<'_>,
+ _file: &LocalFile,
+ _offset: loff_t,
+ _whence: c_int,
+ ) -> Result<loff_t> {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
/// Read from this miscdevice.
fn read_iter(_kiocb: Kiocb<'_, Self::Ptr>, _iov: &mut IovIterDest<'_>) -> Result<usize> {
build_error!(VTABLE_DEFAULT_ERROR)
@@ -325,6 +339,30 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
/// # Safety
///
/// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
+ unsafe extern "C" fn llseek(
+ file: *mut bindings::file,
+ offset: loff_t,
+ whence: c_int,
+ ) -> loff_t {
+ // SAFETY: The release call of a file owns the private data.
+ let private = unsafe { (*file).private_data };
+ // SAFETY: Ioctl calls can borrow the private data of the file.
+ let device = unsafe { <T::Ptr as ForeignOwnable>::borrow(private) };
+ // SAFETY:
+ // * The file is valid for the duration of this call.
+ // * We are inside an fdget_pos region, so there cannot be any active fdget_pos regions on
+ // other threads.
+ let file = unsafe { LocalFile::from_raw_file(file) };
+
+ match T::llseek(device, file, offset, whence) {
+ Ok(res) => res as loff_t,
+ Err(err) => err.to_errno() as loff_t,
+ }
+ }
+
+ /// # Safety
+ ///
+ /// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
unsafe extern "C" fn ioctl(file: *mut bindings::file, cmd: c_uint, arg: c_ulong) -> c_long {
// SAFETY: The ioctl call of a file can access the private data.
let private = unsafe { (*file).private_data };
@@ -391,6 +429,11 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
open: Some(Self::open),
release: Some(Self::release),
mmap: if T::HAS_MMAP { Some(Self::mmap) } else { None },
+ llseek: if T::HAS_LLSEEK {
+ Some(Self::llseek)
+ } else {
+ None
+ },
read_iter: if T::HAS_READ_ITER {
Some(Self::read_iter)
} else {
diff --git a/scripts/Makefile.dtbs b/scripts/Makefile.dtbs
index c4e4663..2583819 100644
--- a/scripts/Makefile.dtbs
+++ b/scripts/Makefile.dtbs
@@ -122,9 +122,11 @@
# Set -@ if the target is a base DTB that overlay is applied onto
DTC_FLAGS += $(if $(filter $(patsubst $(obj)/%,%,$@), $(base-dtb-y)), -@)
-DTC_INCLUDE := $(srctree)/scripts/dtc/include-prefixes
+# ANDROID: Allow DTC_INCLUDE to be set by the BUILD_CONFIG. This allows one to
+# compile an out-of-tree device tree.
+DTC_INCLUDE += $(srctree)/scripts/dtc/include-prefixes
-dtc_cpp_flags = -Wp,-MMD,$(depfile).pre.tmp -nostdinc -I $(DTC_INCLUDE) -undef -D__DTS__
+dtc_cpp_flags = -Wp,-MMD,$(depfile).pre.tmp -nostdinc $(addprefix -I,$(DTC_INCLUDE)) -undef -D__DTS__
dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 0718e39c..310fb69 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -370,10 +370,10 @@
cmd_lzo_with_size = { cat $(real-prereqs) | $(KLZOP) -9; $(size_append); } > $@
quiet_cmd_lz4 = LZ4 $@
- cmd_lz4 = cat $(real-prereqs) | $(LZ4) -l -9 - - > $@
+ cmd_lz4 = cat $(real-prereqs) | $(LZ4) -l -12 --favor-decSpeed - - > $@
quiet_cmd_lz4_with_size = LZ4 $@
- cmd_lz4_with_size = { cat $(real-prereqs) | $(LZ4) -l -9 - -; \
+ cmd_lz4_with_size = { cat $(real-prereqs) | $(LZ4) -l -12 --favor-decSpeed - -; \
$(size_append); } > $@
# U-Boot mkimage
diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal
index adcbcde..9ced739 100644
--- a/scripts/Makefile.modfinal
+++ b/scripts/Makefile.modfinal
@@ -55,7 +55,7 @@
printf '%s\n' 'savedcmd_$@ := $(make-cmd)' > $(dot-target).cmd, @:)
# Re-generate module BTFs if either module's .ko or vmlinux changed
-%.ko: %.o %.mod.o .module-common.o $(objtree)/scripts/module.lds $(and $(CONFIG_DEBUG_INFO_BTF_MODULES),$(KBUILD_BUILTIN),$(objtree)/vmlinux) FORCE
+%.ko: %.o %.mod.o $(extmod_prefix).module-common.o $(objtree)/scripts/module.lds $(and $(CONFIG_DEBUG_INFO_BTF_MODULES),$(KBUILD_BUILTIN),$(objtree)/vmlinux) FORCE
+$(call if_changed_except,ld_ko_o,$(objtree)/vmlinux)
ifdef CONFIG_DEBUG_INFO_BTF_MODULES
+$(if $(newer-prereqs),$(call cmd,btf_ko))
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index 9ba45e5..11b3b7f 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -60,6 +60,10 @@
endif
modules := $(patsubst %.o, $(dst)/%.ko$(suffix-y), $(modules))
+ifneq ($(KBUILD_EXTMOD),)
+extmod_suffix := $(shell echo "${KBUILD_EXTMOD}" | md5sum | cut -d " " -f 1)
+modules += $(dst)/modules.order.$(extmod_suffix)
+endif
install-$(CONFIG_MODULES) += $(modules)
__modinst: $(install-y)
@@ -124,6 +128,13 @@
$(call cmd,strip)
$(call cmd,sign)
+ifneq ($(KBUILD_EXTMOD),)
+$(dst)/modules.order.$(extmod_suffix): modules.order FORCE
+ $(call cmd,install)
+ @sed -e 's:^\(.*\)\.o$$:$(INSTALL_MOD_DIR)/\1.ko:' \
+ -i $@
+endif
+
ifdef CONFIG_MODULES
__modinst: depmod
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index d7d45067..16d380d 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -38,6 +38,8 @@
include $(objtree)/include/config/auto.conf
include $(srctree)/scripts/Kbuild.include
+mixed-build-prefix = $(if $(KBUILD_MIXED_TREE),$(KBUILD_MIXED_TREE)/)
+
MODPOST = $(objtree)/scripts/mod/modpost
modpost-args = \
@@ -67,6 +69,33 @@
modpost-deps += modules.order
endif
+ifeq ($(CONFIG_MODULE_SCMVERSION),y)
+ifeq ($(KBUILD_EXTMOD),)
+module_srcpath := $(srctree)
+else
+# Get the external module's source path. KBUILD_EXTMOD could either be an
+# absolute path or relative path from $(srctree). This makes sure that we
+# aren't using a relative path from a separate working directory (O= or
+# KBUILD_OUTPUT) since that may not be the actual module's SCM project path. So
+# check the path relative to $(srctree) first.
+ifneq ($(realpath $(srctree)/$(KBUILD_EXTMOD) 2>/dev/null),)
+ module_srcpath := $(srctree)/$(KBUILD_EXTMOD)
+else
+ module_srcpath := $(KBUILD_EXTMOD)
+endif
+endif
+
+# Get the SCM version of the module. Sed verifies setlocalversion returns
+# a proper revision based on the SCM type, e.g. git, mercurial, or svn.
+# Note: relative M= paths are not supported when building the kernel out of the
+# srctree since setlocalversion won't be able to find the module srctree.
+module_scmversion := $(shell $(srctree)/scripts/setlocalversion $(module_srcpath) | \
+ sed -n 's/.*-\(\(g\|hg\)[a-fA-F0-9]\+\(-dirty\)\?\|svn[0-9]\+\).*/\1/p')
+ifneq ($(module_scmversion),)
+modpost-args += -v $(module_scmversion)
+endif
+endif
+
ifeq ($(KBUILD_EXTMOD),)
# Generate the list of in-tree objects in vmlinux
@@ -102,8 +131,20 @@
endif
ifeq ($(wildcard vmlinux.o),)
+
+# ANDROID: Use vmlinux.symvers from base build when doing mixed build
+ifneq ($(wildcard vmlinux.symvers),)
+modpost-args += -i vmlinux.symvers
+# Not adding vmlinux.symvers to modpost-deps to avoid dependency on
+# .vmlinux.objs. Just use the one from base build.
+# Note: we don't want to include the vmlinux symbols here because we want to be
+# sure we only use the vmlinux symbols from the GKI kernel.
+output-symdump := modules-only.symvers
+else # vmlinux.symvers does not exist
missing-input := vmlinux.o
output-symdump := modules-only.symvers
+endif # whether vmlinux.symvers exists
+
else
modpost-args += vmlinux.o
modpost-deps += vmlinux.o
diff --git a/scripts/gcc-plugins/Kconfig b/scripts/gcc-plugins/Kconfig
index 6b34ba1..67fa6cf 100644
--- a/scripts/gcc-plugins/Kconfig
+++ b/scripts/gcc-plugins/Kconfig
@@ -9,6 +9,8 @@
bool "GCC plugins"
depends on HAVE_GCC_PLUGINS
depends on CC_IS_GCC
+ # ANDROID: GCC_PLUGINS are broken for 32-bit ARM builds (gcc 12.2.0)
+ depends on !ARM
depends on $(success,test -e $(shell,$(CC) -print-file-name=plugin)/include/plugin-version.h)
default y
help
diff --git a/scripts/gen_gki_modules_headers.sh b/scripts/gen_gki_modules_headers.sh
new file mode 100755
index 0000000..ca435f4
--- /dev/null
+++ b/scripts/gen_gki_modules_headers.sh
@@ -0,0 +1,123 @@
+#!/bin/bash
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Copyright 2022 Google LLC
+# Author: ramjiyani@google.com (Ramji Jiyani)
+#
+
+#
+# Generates header file with list of unprotected symbols
+#
+# Called By: KERNEL_SRC/kernel/Makefile if CONFIG_MODULE_SIG_PROTECT=y
+#
+# gki_module_unprotected.h: Symbols allowed to _access_ by unsigned modules
+#
+# If valid symbol file doesn't exists then still generates valid C header files for
+# compilation to proceed with no symbols to protect
+#
+
+# Collect arguments from Makefile
+TARGET=$1
+SRCTREE=$2
+SYMBOL_LIST=$3
+
+set -e
+
+#
+# Common Definitions
+#
+# Use "make V=1" to debug this script.
+case "$KBUILD_VERBOSE" in
+*1*)
+ set -x
+ ;;
+esac
+
+#
+# generate_header():
+# Args: $1 = Name of the header file
+# $2 = Input symbol list
+# $3 = Symbol type ("unprotected")
+#
+generate_header() {
+ local header_file=$1
+ local symbol_file=$2
+ local symbol_type=$3
+
+ if [ -f "${header_file}" ]; then
+ rm -f -- "${header_file}"
+ fi
+
+ # If symbol_file exist preprocess it and find maximum name length
+ if [ -s "${symbol_file}" ]; then
+ # Remove any trailing CR, leading / trailing whitespace,
+ # line comments, empty lines and symbol list markers.
+ sed -i '
+ s/\r$//
+ s/^[[:space:]]*//
+ s/[[:space:]]*$//
+ /^#/d
+ /^$/d
+ /^\[abi_symbol_list\]$/d
+ ' "${symbol_file}"
+
+ # Sort in byte order for kernel binary search at runtime
+ LC_ALL=C sort -u -o "${symbol_file}" "${symbol_file}"
+
+ # Trim white spaces & +1 for null termination
+ local max_name_len=$(awk '
+ {
+ $1=$1;
+ if ( length > L ) {
+ L=length
+ }
+ } END { print ++L }' "${symbol_file}")
+ else
+ # Set to 1 to generate valid C header file
+ local max_name_len=1
+ fi
+
+ # Header generation
+ cat > "${header_file}" <<- EOT
+ /*
+ * DO NOT EDIT
+ *
+ * Build generated header file with ${symbol_type}
+ */
+
+ #define NR_$(printf ${symbol_type} | tr [:lower:] [:upper:])_SYMBOLS \\
+ $(printf '\t')(ARRAY_SIZE(gki_${symbol_type}_symbols))
+ #define MAX_$(printf ${symbol_type} | tr [:lower:] [:upper:])_NAME_LEN (${max_name_len})
+
+ static const char gki_${symbol_type}_symbols[][MAX_$(printf ${symbol_type} |
+ tr [:lower:] [:upper:])_NAME_LEN] = {
+ EOT
+
+ # If a valid symbol_file present add symbols in an array except the 1st line
+ if [ -s "${symbol_file}" ]; then
+ sed -e 's/^[ \t]*/\t"/;s/[ \t]*$/",/' "${symbol_file}" >> "${header_file}"
+ fi
+
+ # Terminate the file
+ echo "};" >> "${header_file}"
+}
+
+if [ "$(basename "${TARGET}")" = "gki_module_unprotected.h" ]; then
+ # Union of vendor symbol lists
+ GKI_VENDOR_SYMBOLS="${SYMBOL_LIST}"
+ generate_header "${TARGET}" "${GKI_VENDOR_SYMBOLS}" "unprotected"
+else
+ # Sorted list of exported symbols
+ GKI_EXPORTED_SYMBOLS="include/config/abi_gki_protected_exports"
+
+ if [ -z "${SYMBOL_LIST}" ]; then
+ # Create empty list if ARCH doesn't have protected exports
+ touch "${GKI_EXPORTED_SYMBOLS}"
+ else
+ # Make a temp copy to avoid changing source during pre-processing
+ cp -f "${SYMBOL_LIST}" "${GKI_EXPORTED_SYMBOLS}"
+ fi
+
+ generate_header "${TARGET}" "${GKI_EXPORTED_SYMBOLS}" "protected_exports"
+fi
+
diff --git a/scripts/get_emails.sh b/scripts/get_emails.sh
new file mode 100755
index 0000000..3acba97
--- /dev/null
+++ b/scripts/get_emails.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+# Copyright (c) 2020, Google LLC. All rights reserved.
+# Author: Saravana Kannan <saravanak@google.com>
+
+# Find top of git repo
+while [ `pwd` != '/' -a ! -d .git ]
+do
+ cd ..
+done
+
+# Exit if you can't find it
+if [ ! -d .git ]
+then
+ exit 1
+fi
+
+if [ ! -f ./scripts/get_maintainer.pl ]
+then
+ exit 2
+fi
+
+opt='--no-rolestats --no-git-fallback --multiline --n'
+if [ "$1" = '-cc' ]
+then
+ shift
+ opt="$opt --no-m --no-r --l"
+ echo kernel-team@android.com
+else
+ opt="$opt --m --r --no-l"
+fi
+
+# Special case for git send-email that only passes one patch at a time
+if [ $# -eq 1 ]
+then
+ prefix="$1"
+ # Intentionally stripping out only the last 2 digits in the patch
+ # number
+ prefix=$(echo $1 | sed -e "s/[0-9][0-9]-.*\.patch/*.patch/")
+ set "$prefix"
+fi
+
+./scripts/get_maintainer.pl $opt $* 2>/dev/null
diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh
index 9c15e74..ff1890e 100755
--- a/scripts/headers_install.sh
+++ b/scripts/headers_install.sh
@@ -38,7 +38,7 @@
s@#(ifndef|define|endif[[:space:]]*/[*])[[:space:]]*_UAPI@#\1 @
' $INFILE > $TMPFILE || exit 1
-scripts/unifdef -U__KERNEL__ -D__EXPORTED_HEADERS__ $TMPFILE > $OUTFILE
+${objtree}/scripts/unifdef -U__KERNEL__ -D__EXPORTED_HEADERS__ $TMPFILE > $OUTFILE
[ $? -gt 1 ] && exit 1
# Remove /* ... */ style comments, and find CONFIG_ references in code
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 0c25b5a..f5a2ea1 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -41,6 +41,8 @@ static bool basic_modversions;
static bool extended_modversions;
/* If we are modposting external module set to 1 */
static bool external_module;
+#define MODULE_SCMVERSION_SIZE 64
+static char module_scmversion[MODULE_SCMVERSION_SIZE];
/* Only warn about unresolved symbols */
static bool warn_unresolved;
@@ -1854,6 +1856,9 @@ static void add_header(struct buffer *b, struct module *mod)
if (!external_module)
buf_printf(b, "\nMODULE_INFO(intree, \"Y\");\n");
+ if (module_scmversion[0] != '\0')
+ buf_printf(b, "\nMODULE_INFO(scmversion, \"%s\");\n", module_scmversion);
+
if (strstarts(mod->name, "drivers/staging"))
buf_printf(b, "\nMODULE_INFO(staging, \"Y\");\n");
@@ -2277,7 +2282,7 @@ int main(int argc, char **argv)
LIST_HEAD(dump_lists);
struct dump_list *dl, *dl2;
- while ((opt = getopt(argc, argv, "ei:MmnT:to:au:WwENd:xb")) != -1) {
+ while ((opt = getopt(argc, argv, "ei:MmnT:to:au:WwENd:xbv:")) != -1) {
switch (opt) {
case 'e':
external_module = true;
@@ -2332,6 +2337,9 @@ int main(int argc, char **argv)
case 'x':
extended_modversions = true;
break;
+ case 'v':
+ strncpy(module_scmversion, optarg, sizeof(module_scmversion) - 1);
+ break;
default:
exit(1);
}
diff --git a/scripts/setlocalversion b/scripts/setlocalversion
index 28169d7..c13fe6e 100755
--- a/scripts/setlocalversion
+++ b/scripts/setlocalversion
@@ -186,16 +186,16 @@
exit 0
fi
-if ! test -e include/config/auto.conf; then
+if ! test -e ${objtree}/include/config/auto.conf; then
echo "Error: kernelrelease not valid - run 'make prepare' to update it" >&2
exit 1
fi
# version string from CONFIG_LOCALVERSION
-config_localversion=$(sed -n 's/^CONFIG_LOCALVERSION=\(.*\)$/\1/p' include/config/auto.conf)
+config_localversion=$(sed -n 's/^CONFIG_LOCALVERSION=\(.*\)$/\1/p' ${objtree}/include/config/auto.conf)
# scm version string if not at the kernel version tag or at the file_localversion
-if grep -q "^CONFIG_LOCALVERSION_AUTO=y$" include/config/auto.conf; then
+if grep -q "^CONFIG_LOCALVERSION_AUTO=y$" ${objtree}/include/config/auto.conf; then
# full scm version string
scm_version="$(scm_version)"
elif [ "${LOCALVERSION+set}" != "set" ]; then
diff --git a/scripts/sign-file.c b/scripts/sign-file.c
index 73fbefd2..607240f 100644
--- a/scripts/sign-file.c
+++ b/scripts/sign-file.c
@@ -299,6 +299,9 @@ int main(int argc, char **argv)
cms = CMS_sign(NULL, NULL, NULL, NULL, flags);
ERR(!cms, "CMS_sign");
+ /* TODO(b/316589225): Remove once BoringSSL supports this */
+ flags &= ~CMS_PARTIAL;
+
ERR(!CMS_add1_signer(cms, x509, private_key, digest_algo, flags),
"CMS_add1_signer");
ERR(CMS_final(cms, bm, NULL, flags) != 1,
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index 813e82b..740b6b3 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -45,6 +45,9 @@
#define avc_cache_stats_incr(field) do {} while (0)
#endif
+#undef CREATE_TRACE_POINTS
+#include <trace/hooks/avc.h>
+
struct avc_entry {
u32 ssid;
u32 tsid;
@@ -436,6 +439,7 @@ static void avc_node_free(struct rcu_head *rhead)
static void avc_node_delete(struct avc_node *node)
{
+ trace_android_rvh_selinux_avc_node_delete(node);
hlist_del_rcu(&node->list);
call_rcu(&node->rhead, avc_node_free);
atomic_dec(&selinux_avc.avc_cache.active_nodes);
@@ -451,6 +455,7 @@ static void avc_node_kill(struct avc_node *node)
static void avc_node_replace(struct avc_node *new, struct avc_node *old)
{
+ trace_android_rvh_selinux_avc_node_replace(old, new);
hlist_replace_rcu(&old->list, &new->list);
call_rcu(&old->rhead, avc_node_free);
atomic_dec(&selinux_avc.avc_cache.active_nodes);
@@ -557,8 +562,10 @@ static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass)
avc_cache_stats_incr(lookups);
node = avc_search_node(ssid, tsid, tclass);
- if (node)
+ if (node) {
+ trace_android_rvh_selinux_avc_lookup(node, ssid, tsid, tclass);
return node;
+ }
avc_cache_stats_incr(misses);
return NULL;
@@ -638,6 +645,7 @@ static void avc_insert(u32 ssid, u32 tsid, u16 tclass,
}
}
hlist_add_head_rcu(&node->list, head);
+ trace_android_rvh_selinux_avc_insert(node);
found:
spin_unlock_irqrestore(lock, flag);
}
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index e8e7ccb..0c22eab 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -69,6 +69,8 @@
#include "policycap_names.h"
#include "ima.h"
+#include <trace/hooks/selinux.h>
+
struct selinux_policy_convert_data {
struct convert_context_args args;
struct sidtab_convert_params sidtab_params;
@@ -2281,6 +2283,7 @@ void selinux_policy_commit(struct selinux_load_state *load_state)
*/
selinux_mark_initialized();
selinux_complete_init();
+ trace_android_rvh_selinux_is_initialized(state);
}
/* Free the old policy */
diff --git a/sound/hda/codecs/side-codecs/cs35l56_hda.c b/sound/hda/codecs/side-codecs/cs35l56_hda.c
index 1ace4be..e1d9c9d 100644
--- a/sound/hda/codecs/side-codecs/cs35l56_hda.c
+++ b/sound/hda/codecs/side-codecs/cs35l56_hda.c
@@ -1259,6 +1259,16 @@ const struct dev_pm_ops cs35l56_hda_pm_ops = {
};
EXPORT_SYMBOL_NS_GPL(cs35l56_hda_pm_ops, "SND_HDA_SCODEC_CS35L56");
+#if IS_ENABLED(CONFIG_SND_HDA_SCODEC_CS35L56_KUNIT_TEST)
+/* Hooks to export static function to KUnit test */
+
+int cs35l56_hda_test_hook_get_speaker_id(struct device *dev, int amp_index, int num_amps)
+{
+ return cs35l56_hda_get_speaker_id(dev, amp_index, num_amps);
+}
+EXPORT_SYMBOL_NS_GPL(cs35l56_hda_test_hook_get_speaker_id, SND_HDA_SCODEC_CS35L56);
+#endif
+
MODULE_DESCRIPTION("CS35L56 HDA Driver");
MODULE_IMPORT_NS("FW_CS_DSP");
MODULE_IMPORT_NS("SND_HDA_CIRRUS_SCODEC");
diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
index ef08360..7672208 100644
--- a/tools/bpf/resolve_btfids/Makefile
+++ b/tools/bpf/resolve_btfids/Makefile
@@ -23,6 +23,7 @@
HOSTCC ?= gcc
HOSTLD ?= ld
HOSTAR ?= ar
+HOSTPKG_CONFIG ?= pkg-config
CROSS_COMPILE =
OUTPUT ?= $(srctree)/tools/bpf/resolve_btfids/
@@ -63,10 +64,14 @@
$(abspath $@) install_headers
LIBELF_FLAGS := $(shell $(HOSTPKG_CONFIG) libelf --cflags 2>/dev/null)
+
+ifneq ($(filter -static,$(EXTRA_LDFLAGS)),)
+LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs --static 2>/dev/null || echo -lelf -lzstd)
+else
LIBELF_LIBS := $(shell $(HOSTPKG_CONFIG) libelf --libs 2>/dev/null || echo -lelf)
+endif
ZLIB_LIBS := $(shell $(HOSTPKG_CONFIG) zlib --libs 2>/dev/null || echo -lz)
-ZSTD_LIBS := $(shell $(HOSTPKG_CONFIG) libzstd --libs 2>/dev/null || echo -lzstd)
HOSTCFLAGS_resolve_btfids += -g \
-I$(srctree)/tools/include \
@@ -76,7 +81,7 @@
$(LIBELF_FLAGS) \
-Wall -Werror
-LIBS = $(LIBELF_LIBS) $(ZLIB_LIBS) $(ZSTD_LIBS)
+LIBS = $(LIBELF_LIBS) $(ZLIB_LIBS)
export srctree OUTPUT HOSTCFLAGS_resolve_btfids Q HOSTCC HOSTLD HOSTAR
include $(srctree)/tools/build/Makefile.include
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 6964175..61192f7 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -13,7 +13,7 @@
ifeq ($(ARCH_HAS_KLP),y)
HAVE_XXHASH = $(shell printf "$(pound)include <xxhash.h>\nXXH3_state_t *state;int main() {}" | \
- $(HOSTCC) -xc - -o /dev/null -lxxhash 2> /dev/null && echo y || echo n)
+ $(HOSTCC) $(HOSTCFLAGS) -xc - -o /dev/null -lxxhash 2> /dev/null && echo y || echo n)
ifeq ($(HAVE_XXHASH),y)
BUILD_KLP := y
LIBXXHASH_CFLAGS := $(shell $(HOSTPKG_CONFIG) libxxhash --cflags 2>/dev/null) \
@@ -63,7 +63,7 @@
OBJTOOL_CFLAGS := -std=gnu11 -fomit-frame-pointer -O2 -g $(WARNINGS) \
$(INCLUDES) $(LIBELF_FLAGS) $(LIBXXHASH_CFLAGS) $(HOSTCFLAGS)
-OBJTOOL_LDFLAGS := $(LIBSUBCMD) $(LIBELF_LIBS) $(LIBXXHASH_LIBS) $(HOSTLDFLAGS)
+OBJTOOL_LDFLAGS := $(LIBSUBCMD) $(LIBELF_LIBS) $(LIBXXHASH_LIBS) $(KBUILD_HOSTLDFLAGS)
# Allow old libelf to be used:
elfshdr := $(shell echo '$(pound)include <libelf.h>' | $(HOSTCC) $(OBJTOOL_CFLAGS) -x c -E - 2>/dev/null | grep elf_getshdr)
diff --git a/tools/testing/OWNERS b/tools/testing/OWNERS
new file mode 100644
index 0000000..e932d41
--- /dev/null
+++ b/tools/testing/OWNERS
@@ -0,0 +1,3 @@
+bettyzhou@google.com
+hwj@google.com
+wakel@google.com
diff --git a/tools/testing/kunit/android/README b/tools/testing/kunit/android/README
new file mode 100644
index 0000000..d038396
--- /dev/null
+++ b/tools/testing/kunit/android/README
@@ -0,0 +1,371 @@
+HOW TO RUN KUNIT TESTS IN ANDROID
+=================================
+
+Prerequisites
+-------------
+ If you want to run a vendor module KUnit tests, please run the tests with a
+ "no trim" kernel (e.g. add `--notrim` to bazel build command).
+
+Run tests on a physical or virtual device:
+------------------------------------------
+ ```
+ $ kernel/tests/tools/run_test_only.sh -t kunit -s <serial_number> -td <test_dir>
+ ```
+
+ test_dir is the same directory as specified when running:
+ ```
+ $ tools/bazel run //common:kunit_tests_arm64 -- -v --destdir <test_dir>
+ ```
+
+Before the tests, you can use the following command to launch a virtual device:
+ ```
+ $ kernel/tests/tools/launch_cvd.sh
+ ```
+
+After the tests, you can use the following command to remove the virtual device:
+ ```
+ $ prebuilts/asuite/acloud/linux-x86/acloud delete
+ ```
+
+The following are command examples:
+ * Build kernel and launch a virtual device from a specific platform build:
+ ```
+ $ kernel/tests/tools/launch_cvd.sh -pb \
+ ab://aosp-main/aosp_cf_x86_64_phone-trunk_staging-userdebug/12505199
+ ```
+
+ * Run a specific test:
+ ```
+ $ kernel/tests/tools/run_test_only.sh \
+ -t 'kunit soc-utils-test' -s <serial_number>
+ ```
+
+ * Check other available options:
+ ```
+ $ kernel/tests/tools/launch_cvd.sh -h
+ $ kernel/tests/tools/run_test_only.sh -h
+ ```
+
+Load and run a test module on Android device manually
+-----------------------------------------------------
+ * Push the KUnit test framework module kunit.ko over to the device. For
+ example:
+ ```
+ $ adb push kunit.ko /data
+ ```
+
+ * Load test module on device:
+ ```
+ $ cd /data
+ $ insmod kunit.ko enable=1
+ ```
+
+ If the kunit.ko has been installed already but without enable=1 passed,
+ it needs to remove it first via the rmmod command, and install again
+ via the insmod command
+
+ * Push the KUnit test module over to the device. For example using adb:
+ ```
+ $ adb push kunit-example-test.ko /data
+ ```
+
+ * (Optional) - Mount debugfs on device:
+ ```
+ $ mount -t debugfs debugfs /sys/kernel/debug
+ ```
+
+ * Load test module on device:
+ ```
+ $ cd /data
+ $ insmod kunit-example-test.ko
+ ```
+
+ * View test results
+ * If debugfs is mounted:
+ ```
+ $ cat /sys/kernel/debug/kunit/<test name>/results
+ KTAP version 1
+ 1..1
+ KTAP version 1
+ # Subtest: example
+ 1..4
+ # example_simple_test: initializing
+
+ ok 1 example_simple_test
+ ....
+ ```
+
+ * Via dmesg (check before log cycles out):
+ ```
+ $ dmesg
+ ....
+ [172434.032618] 1..1
+ [172434.032618] KTAP version 1
+ [172434.032618] # Subtest: example
+ [172434.032618] 1..4
+ [172434.032618] # example_simple_test: initializing
+ [172434.032618]
+ [172434.032618] ok 1 example_simple_test
+ ....
+ ```
+
+Run KUnit tests on Android Device via test automation infrastructure tradefed
+-----------------------------------------------------------------------------
+ * Build ACK KUnit tests and install (e.g. /tmp/kunit_tests):
+ ```
+ $ tools/bazel run -- //common:kunit_tests_x86_64 -v --destdir /tmp/kunit_tests
+ ```
+ Or
+ ```
+ $ tools/bazel run -- //common:kunit_tests_arm64 -v --destdir /tmp/kunit_tests
+ ```
+
+ * With device connected and accessible via adb run the tests:
+ ```
+ $ prebuilts/tradefed/filegroups/tradefed/tradefed.sh run commandAndExit \
+ template/local_min --template:map test=suite/test_mapping_suite \
+ --include-filter kunit --tests-dir=/tmp/kunit_tests \
+ -s <your_device_serial_number>
+ ....
+ =======================================================
+ =============== Summary ===============
+ Total Run time: 23s
+ 1/1 modules completed
+ Total Tests : 9
+ PASSED : 9
+ FAILED : 0
+ ============== End of Results ==============
+ ============================================
+ ....
+ ```
+
+Troubleshooting
+---------------
+
+1. Test module fails to load.
+
+ Check dmesg for load errors. If undefined symbol errors are shown, you're
+ likely running with a trimmed kernel where the symbols are not available.
+ Run with a "no trim" kernel.
+
+ Check the test module dependency with `modinfo <module_name>.ko` on your
+ local host machine or on the Android device with
+ `adb shell modinfo <module_name>.ko`.
+ All dependent modules need to be installed before the test module can be
+ installed successfully.
+
+ Check if the module is already installed with `adb shell lsmod`. The
+ `adb shell rmmod` can be used to remove the already installed test module,
+ and installing the test module again will trigger the test rerun.
+
+ `adb shell lsmod` will also show the module dependency for your test module
+ in the `Used by` column. You can not remove a module with `adb shell rmmod`
+ if it is being used by another module. Other modules that are using it need
+ to be removed first.
+
+2. Test module loaded but no test results.
+
+ Check dmesg for KUnit errors.
+ ```
+ $ dmesg | grep kunit
+ ```
+
+ If "kunit: disabled" is shown then kunit.ko is not installed with
+ `enable=1`.
+
+ If kunit.ko or \<module_name\>.ko fails to install, check for whether they
+ are already installed with `adb shell lsmod`.
+
+HOW TO WRITE KUNIT TESTS
+========================
+
+If you want to
+just run the example test and skip through the walkthrough, it is sufficient to
+apply [patch1.txt](patch1.txt) and [patch2.txt](patch2.txt) and
+then [run the test](#run-tests-on-a-physical-or-virtual-device).
+
+Walkthrough
+-----------
+
+This section follows the
+[Writing Your First Test](https://docs.kernel.org/dev-tools/kunit/start.html#writing-your-first-test)
+example from the KUnit docs.
+
+### Create an example driver to test
+
+1. Create the header file for the driver `common/drivers/misc/example.h`:
+
+ ```c
+ int misc_example_add(int left, int right);
+ ```
+
+2. Define and export `misc_example_add` that will be tested later
+ `common/drivers/misc/example.c`:
+
+ ```c
+ #include <linux/module.h>
+
+ #include "example.h"
+
+ int misc_example_add(int left, int right)
+ {
+ return left + right;
+ }
+ EXPORT_SYMBOL_GPL(misc_example_add);
+ ```
+
+3. Add a kconfig option for the driver to `common/drivers/misc/Kconfig`:
+
+ ```
+ config MISC_EXAMPLE
+ bool "My example"
+ ```
+
+4. Add the build rule to `common/drivers/misc/Makefile`:
+
+ ```makefile
+ obj-$(CONFIG_MISC_EXAMPLE) += example.o
+ ```
+
+### Write the test case
+
+1. Define the test functions and suite `common/drivers/misc/example_test.c`:
+
+ ```c
+ #include <kunit/test.h>
+ #include "example.h"
+
+ /* Define the test cases. */
+
+ static void misc_example_add_test_basic(struct kunit *test)
+ {
+ KUNIT_EXPECT_EQ(test, 1, misc_example_add(1, 0));
+ KUNIT_EXPECT_EQ(test, 2, misc_example_add(1, 1));
+ KUNIT_EXPECT_EQ(test, 0, misc_example_add(-1, 1));
+ KUNIT_EXPECT_EQ(test, INT_MAX, misc_example_add(0, INT_MAX));
+ KUNIT_EXPECT_EQ(test, -1, misc_example_add(INT_MAX, INT_MIN));
+ }
+
+ static void misc_example_test_failure(struct kunit *test)
+ {
+ KUNIT_FAIL(test, "This test never passes.");
+ }
+
+ static struct kunit_case misc_example_test_cases[] = {
+ KUNIT_CASE(misc_example_add_test_basic),
+ KUNIT_CASE(misc_example_test_failure),
+ {}
+ };
+
+ static struct kunit_suite misc_example_test_suite = {
+ .name = "misc-example",
+ .test_cases = misc_example_test_cases,
+ };
+ kunit_test_suite(misc_example_test_suite);
+
+ MODULE_DESCRIPTION("KUnit test for misc_example_add");
+ MODULE_LICENSE("GPL");
+ ```
+
+ Consult the
+ [KUnit usage guide](https://docs.kernel.org/dev-tools/kunit/usage.html#) and
+ the
+ [KUnit API reference](https://docs.kernel.org/dev-tools/kunit/api/index.html)
+ for more in-depth guidance on the test API.
+
+2. Create a kconfig option for the test `common/drivers/misc/Kconfig`:
+
+ ```
+ config MISC_EXAMPLE_TEST
+ tristate "Test for my example" if !KUNIT_ALL_TESTS
+ depends on MISC_EXAMPLE && KUNIT
+ default KUNIT_ALL_TESTS
+ ```
+
+3. Add the build rule to the makefile `common/drivers/misc/Makefile`:
+
+ ```makefile
+ obj-$(CONFIG_MISC_EXAMPLE_TEST) += example_test.o
+ ```
+
+4. Add the kconfig options for kunit.py `common/drivers/misc/.kunitconfig`:
+
+ ```bash
+ CONFIG_KUNIT=y
+ CONFIG_MISC_EXAMPLE=y
+ CONFIG_MISC_EXAMPLE_TEST=y
+ ```
+
+### Run the test against User-mode Linux
+
+1. The test can then be run against User-mode Linux using the kunit.py script:
+
+ ```
+ $ cd $REPO/common
+ $ tools/testing/kunit/kunit.py run --kunitconfig drivers/misc
+ [01:21:36] Configuring KUnit Kernel ...
+ Regenerating .config ...
+ Populating config with:
+ $ make ARCH=um O=.kunit olddefconfig
+ [01:21:38] Building KUnit Kernel ...
+ Populating config with:
+ $ make ARCH=um O=.kunit olddefconfig
+ Building with:
+ $ make ARCH=um O=.kunit --jobs=96
+ [01:21:49] Starting KUnit Kernel (1/1)...
+ [01:21:49] ============================================================
+ Running tests with:
+ $ .kunit/linux kunit.enable=1 mem=1G console=tty kunit_shutdown=halt
+ [01:22:01] ================ misc-example (2 subtests) =================
+ [01:22:01] [PASSED] misc_example_add_test_basic
+ [01:22:01] # misc_example_test_failure: EXPECTATION FAILED at drivers/misc/example_test.c:17
+ [01:22:01] This test never passes.
+ [01:22:01] [FAILED] misc_example_test_failure
+ [01:22:01] # module: example_test
+ [01:22:01] # misc-example: pass:1 fail:1 skip:0 total:2
+ [01:22:01] # Totals: pass:1 fail:1 skip:0 total:2
+ [01:22:01] ================== [FAILED] misc-example ===================
+ [01:22:01] ============================================================
+ [01:22:01] Testing complete. Ran 2 tests: passed: 1, failed: 1
+ [01:22:01] Elapsed time: 24.906s total, 2.271s configuring, 10.889s building, 11.694s running
+ ```
+
+2. `kunit.py` will create symlinks under the `.kunit` directory that interfere
+ with Kleaf/Bazel. This directory is ignored by `.bazelignore`.
+
+### Configure the test for Android
+
+Now that the example test is created, the test needs to be configured for
+Android. We will need to add build rules for Kleaf/Bazel and create a Tradefed
+test config.
+
+1. Set `CONFIG_MISC_EXAMPLE=y` and `CONFIG_MISC_EXAMPLE_TEST=m` in
+ [gki_defconfig](https://source.corp.google.com/h/android/kernel/superproject/+/common-android-mainline:common/arch/x86/configs/gki_defconfig).
+ (This can be done using menuconfig; refer to the
+ [Kleaf documentation](https://android.googlesource.com/kernel/build/+/refs/heads/main-kernel/kleaf/docs/kernel_config.md)):
+
+ ```
+ $ cd $REPO
+ $ tools/bazel run //common:kernel_x86_64_config -- menuconfig
+ ```
+
+2. Add the test module to `_KUNIT_COMMON_MODULES_LIST` in `common/modules.bzl`:
+
+ ```bazel
+ _KUNIT_COMMON_MODULES_LIST = [
+ ....
+ "drivers/misc/example_test.ko",
+ ....
+ ]
+ ```
+
+3. Add the test under `KUnitModuleTest` in the tradefed configuration
+ `common/tools/testing/kunit/android/tradefed_configs/config_x86_64.xml`:
+
+ ```xml
+ <test class="com.android.tradefed.testtype.binary.KUnitModuleTest" >
+ ....
+ <option name='binary' key='drivers/misc/example_test' value='/data/kunit/arm64/example_test.ko' />
+ ....
+ </test>
+ ```
diff --git a/tools/testing/kunit/android/patch1.txt b/tools/testing/kunit/android/patch1.txt
new file mode 100644
index 0000000..610d0f6
--- /dev/null
+++ b/tools/testing/kunit/android/patch1.txt
@@ -0,0 +1,154 @@
+From 0c730ea49928beabf00183f74a83532a829aea5d Mon Sep 17 00:00:00 2001
+From: Developer <developer@example.com>
+Date: Fri, 27 Sep 2024 21:09:07 +0000
+Subject: [PATCH 1/2] Create an example KUnit test
+
+Following the "Writing Your First Test" guide from the KUnit
+documentation, adds an example driver and KUnit test to drivers/misc.
+
+Link: https://docs.kernel.org/dev-tools/kunit/start.html#writing-your-first-test
+Test: tools/testing/kunit/kunit.py run --kunitconfig drivers/misc
+Change-Id: I2f9570b7815bb600e83b1871852454c29ac42e0a
+Signed-off-by: Developer <developer@example.com>
+---
+ drivers/misc/.kunitconfig | 4 +++
+ drivers/misc/Kconfig | 8 +++++
+ drivers/misc/Makefile | 2 ++
+ drivers/misc/example.c | 13 +++++++
+ drivers/misc/example.h | 5 +++
+ drivers/misc/example_test.c | 37 ++++++++++++++++++++
+ tools/testing/kunit/configs/all_tests.config | 2 ++
+ 7 files changed, 71 insertions(+)
+ create mode 100644 drivers/misc/.kunitconfig
+ create mode 100644 drivers/misc/example.c
+ create mode 100644 drivers/misc/example.h
+ create mode 100644 drivers/misc/example_test.c
+
+diff --git a/drivers/misc/.kunitconfig b/drivers/misc/.kunitconfig
+new file mode 100644
+index 000000000000..beaba9037670
+--- /dev/null
++++ b/drivers/misc/.kunitconfig
+@@ -0,0 +1,4 @@
++CONFIG_KUNIT=y
++CONFIG_MISC_EXAMPLE=y
++CONFIG_MISC_EXAMPLE_TEST=y
++
+diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
+index d118aeeb1049..4af45d56cb04 100644
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -5,6 +5,14 @@
+
+ menu "Misc devices"
+
++config MISC_EXAMPLE
++ bool "My example"
++
++config MISC_EXAMPLE_TEST
++ tristate "Test for my example" if !KUNIT_ALL_TESTS
++ depends on MISC_EXAMPLE && KUNIT
++ default KUNIT_ALL_TESTS
++
+ config SENSORS_LIS3LV02D
+ tristate
+ depends on INPUT
+diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
+index 35c790e23072..c0bdb54f3af8 100644
+--- a/drivers/misc/Makefile
++++ b/drivers/misc/Makefile
+@@ -3,6 +3,8 @@
+ # Makefile for misc devices that really don't fit anywhere else.
+ #
+
++obj-$(CONFIG_MISC_EXAMPLE) += example.o
++obj-$(CONFIG_MISC_EXAMPLE_TEST) += example_test.o
+ obj-$(CONFIG_IBM_ASM) += ibmasm/
+ obj-$(CONFIG_IBMVMC) += ibmvmc.o
+ obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o
+diff --git a/drivers/misc/example.c b/drivers/misc/example.c
+new file mode 100644
+index 000000000000..75a1d8984f7c
+--- /dev/null
++++ b/drivers/misc/example.c
+@@ -0,0 +1,13 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright 2025 Google LLC.
++ */
++#include <linux/module.h>
++
++#include "example.h"
++
++int misc_example_add(int left, int right)
++{
++ return left + right;
++}
++EXPORT_SYMBOL_GPL(misc_example_add);
+diff --git a/drivers/misc/example.h b/drivers/misc/example.h
+new file mode 100644
+index 000000000000..698a31ac54cd
+--- /dev/null
++++ b/drivers/misc/example.h
+@@ -0,0 +1,5 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright 2025 Google LLC.
++ */
++int misc_example_add(int left, int right);
+diff --git a/drivers/misc/example_test.c b/drivers/misc/example_test.c
+new file mode 100644
+index 000000000000..77ffcb863ccc
+--- /dev/null
++++ b/drivers/misc/example_test.c
+@@ -0,0 +1,37 @@
++// SPDX-License-Identifier: GPL-2.0
++/*
++ * Copyright 2025 Google LLC.
++ */
++#include <kunit/test.h>
++#include "example.h"
++
++/* Define the test cases. */
++
++static void misc_example_add_test_basic(struct kunit *test)
++{
++ KUNIT_EXPECT_EQ(test, 1, misc_example_add(1, 0));
++ KUNIT_EXPECT_EQ(test, 2, misc_example_add(1, 1));
++ KUNIT_EXPECT_EQ(test, 0, misc_example_add(-1, 1));
++ KUNIT_EXPECT_EQ(test, INT_MAX, misc_example_add(0, INT_MAX));
++ KUNIT_EXPECT_EQ(test, -1, misc_example_add(INT_MAX, INT_MIN));
++}
++
++static void misc_example_test_failure(struct kunit *test)
++{
++ KUNIT_FAIL(test, "This test never passes.");
++}
++
++static struct kunit_case misc_example_test_cases[] = {
++ KUNIT_CASE(misc_example_add_test_basic),
++ KUNIT_CASE(misc_example_test_failure),
++ {}
++};
++
++static struct kunit_suite misc_example_test_suite = {
++ .name = "misc-example",
++ .test_cases = misc_example_test_cases,
++};
++kunit_test_suite(misc_example_test_suite);
++
++MODULE_DESCRIPTION("KUnit test for misc_example_add");
++MODULE_LICENSE("GPL");
+diff --git a/tools/testing/kunit/configs/all_tests.config b/tools/testing/kunit/configs/all_tests.config
+index 422e186cf3cf..08b31354ce97 100644
+--- a/tools/testing/kunit/configs/all_tests.config
++++ b/tools/testing/kunit/configs/all_tests.config
+@@ -29,6 +29,8 @@ CONFIG_MCTP_FLOWS=y
+ CONFIG_INET=y
+ CONFIG_MPTCP=y
+
++CONFIG_MISC_EXAMPLE=y
++
+ CONFIG_NETDEVICES=y
+ CONFIG_WLAN=y
+ CONFIG_CFG80211=y
diff --git a/tools/testing/kunit/android/patch2.txt b/tools/testing/kunit/android/patch2.txt
new file mode 100644
index 0000000..b1f180e
--- /dev/null
+++ b/tools/testing/kunit/android/patch2.txt
@@ -0,0 +1,80 @@
+From c74fd0410a9ecbd9cb5735b5215e5fbf1789b7f5 Mon Sep 17 00:00:00 2001
+From: Developer <developer@example.com>
+Date: Mon, 26 Aug 2024 19:11:30 +0000
+Subject: [PATCH 2/2] ANDROID: Configure example KUnit test
+
+Configures an example KUnit test to run on Android devices.
+
+Test: common/tools/testing/android/bin/kunit.sh -t example_test.ko
+Change-Id: I335407e917012d2126cce4dfcb8bb82d3320b66f
+Signed-off-by: Developer <developer@example.com>
+---
+ arch/arm64/configs/gki_defconfig | 2 ++
+ arch/x86/configs/gki_defconfig | 2 ++
+ modules.bzl | 1 +
+ tools/testing/kunit/android/tradefed_configs/config_arm64.xml | 1 +
+ tools/testing/kunit/android/tradefed_configs/config_x86_64.xml | 1 +
+ 5 files changed, 7 insertions(+)
+
+diff --git a/arch/arm64/configs/gki_defconfig b/arch/arm64/configs/gki_defconfig
+index 01b5114a322d..b25e053e6797 100644
+--- a/arch/arm64/configs/gki_defconfig
++++ b/arch/arm64/configs/gki_defconfig
+@@ -323,6 +323,8 @@ CONFIG_VIRTIO_BLK=m
+ CONFIG_BLK_DEV_UBLK=y
+ CONFIG_BLK_DEV_NVME=y
+ CONFIG_NVME_MULTIPATH=y
++CONFIG_MISC_EXAMPLE=y
++CONFIG_MISC_EXAMPLE_TEST=m
+ CONFIG_SRAM=y
+ CONFIG_UID_SYS_STATS=y
+ CONFIG_OPEN_DICE=m
+diff --git a/arch/x86/configs/gki_defconfig b/arch/x86/configs/gki_defconfig
+index fc8e18d6861b..447429e8f3c1 100644
+--- a/arch/x86/configs/gki_defconfig
++++ b/arch/x86/configs/gki_defconfig
+@@ -312,6 +312,8 @@ CONFIG_VIRTIO_BLK=m
+ CONFIG_BLK_DEV_UBLK=y
+ CONFIG_BLK_DEV_NVME=y
+ CONFIG_NVME_MULTIPATH=y
++CONFIG_MISC_EXAMPLE=y
++CONFIG_MISC_EXAMPLE_TEST=m
+ CONFIG_SRAM=y
+ CONFIG_UID_SYS_STATS=y
+ CONFIG_VCPU_STALL_DETECTOR=m
+diff --git a/modules.bzl b/modules.bzl
+index 3085b9833bce..fc2b76932063 100644
+--- a/modules.bzl
++++ b/modules.bzl
+@@ -155,6 +155,7 @@ _KUNIT_COMMON_MODULES_LIST = [
+ "drivers/iio/test/iio-test-format.ko",
+ "drivers/input/tests/input_test.ko",
+ "drivers/of/of_kunit_helpers.ko",
++ "drivers/misc/example_test.ko",
+ "drivers/rtc/lib_test.ko",
+ "fs/ext4/ext4-inode-test.ko",
+ "fs/fat/fat_test.ko",
+diff --git a/tools/testing/kunit/android/tradefed_configs/config_arm64.xml b/tools/testing/kunit/android/tradefed_configs/config_arm64.xml
+index 7906f8caef1d..43cb7bca296d 100644
+--- a/tools/testing/kunit/android/tradefed_configs/config_arm64.xml
++++ b/tools/testing/kunit/android/tradefed_configs/config_arm64.xml
+@@ -29,6 +29,7 @@
+ <option name='binary' key='drivers/hid/hid-uclogic-test' value='/data/kunit/arm64/hid-uclogic-test.ko' />
+ <!-- <option name='binary' key='drivers/iio/test/iio-test-format' value='/data/kunit/arm64/iio-test-format.ko' /> -->
+ <option name='binary' key='drivers/input/tests/input_test' value='/data/kunit/arm64/input_test.ko' />
++ <option name='binary' key='drivers/misc/example_test' value='/data/kunit/arm64/example_test.ko' />
+ <option name='binary' key='drivers/rtc/lib_test' value='/data/kunit/arm64/lib_test.ko' />
+ <option name='binary' key='fs/ext4/ext4-inode-test' value='/data/kunit/arm64/ext4-inode-test.ko' />
+ <option name='binary' key='fs/fat/fat_test' value='/data/kunit/arm64/fat_test.ko' />
+diff --git a/tools/testing/kunit/android/tradefed_configs/config_x86_64.xml b/tools/testing/kunit/android/tradefed_configs/config_x86_64.xml
+index b7fc9a93170a..1a4d54d20192 100644
+--- a/tools/testing/kunit/android/tradefed_configs/config_x86_64.xml
++++ b/tools/testing/kunit/android/tradefed_configs/config_x86_64.xml
+@@ -29,6 +29,7 @@
+ <option name='binary' key='drivers/hid/hid-uclogic-test' value='/data/kunit/x86_64/hid-uclogic-test.ko' />
+ <!-- <option name='binary' key='drivers/iio/test/iio-test-format' value='/data/kunit/x86_64/iio-test-format.ko' /> -->
+ <option name='binary' key='drivers/input/tests/input_test' value='/data/kunit/x86_64/input_test.ko' />
++ <option name='binary' key='drivers/misc/example_test' value='/data/kunit/x86_64/example_test.ko' />
+ <option name='binary' key='drivers/rtc/lib_test' value='/data/kunit/x86_64/lib_test.ko' />
+ <option name='binary' key='fs/ext4/ext4-inode-test' value='/data/kunit/x86_64/ext4-inode-test.ko' />
+ <option name='binary' key='fs/fat/fat_test' value='/data/kunit/x86_64/fat_test.ko' />
diff --git a/tools/testing/kunit/android/tradefed_configs/config_arm64.xml b/tools/testing/kunit/android/tradefed_configs/config_arm64.xml
new file mode 100644
index 0000000..ee48328
--- /dev/null
+++ b/tools/testing/kunit/android/tradefed_configs/config_arm64.xml
@@ -0,0 +1,59 @@
+<configuration description="kunit">
+ <object type="module_controller" class="com.android.tradefed.testtype.suite.module.KernelTestModuleController">
+ <option name="arch" value="arm64" />
+ </object>
+
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer"/>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push-file" key="kunit" value="/data/kunit" />
+ </target_preparer>
+
+ <target_preparer class="com.android.tradefed.targetprep.InstallKernelModulePreparer">
+ <option name="module-path" value="/data/kunit/arm64/kunit.ko" />
+ <option name="install-arg" value="enable=1" />
+ </target_preparer>
+
+ <!-- KUnit tests dependencies -->
+ <target_preparer class="com.android.tradefed.targetprep.InstallKernelModulePreparer">
+ <option name='module-path' key='drivers/base/regmap/regmap-ram' value='/data/kunit/arm64/regmap-ram.ko' />
+ <option name='module-path' key='drivers/base/regmap/regmap-raw-ram' value='/data/kunit/arm64/regmap-raw-ram.ko' />
+ </target_preparer>
+
+ <!-- Modules that leave the OS in an unstable state have been temporarily commented out. -->
+ <test class="com.android.tradefed.testtype.binary.KUnitModuleTest" >
+ <option name="skip-binary-check" value="true" />
+ <option name="ktap-result-parser-resolution" value="INDIVIDUAL_LEAVES" />
+ <option name='binary' key='drivers/android/tests/binder_alloc_kunit' value='/data/kunit/arm64/binder_alloc_kunit.ko' />
+ <option name='binary' key='drivers/base/regmap/regmap-kunit' value='/data/kunit/arm64/regmap-kunit.ko' />
+ <option name='binary' key='drivers/hid/hid-uclogic-test' value='/data/kunit/arm64/hid-uclogic-test.ko' />
+ <!-- <option name='binary' key='drivers/iio/test/iio-test-format' value='/data/kunit/arm64/iio-test-format.ko' /> -->
+ <option name='binary' key='drivers/input/tests/input_test' value='/data/kunit/arm64/input_test.ko' />
+ <option name='binary' key='drivers/rtc/test_rtc_lib' value='/data/kunit/arm64/test_rtc_lib.ko' />
+ <option name='binary' key='fs/ext4/ext4-inode-test' value='/data/kunit/arm64/ext4-inode-test.ko' />
+ <option name='binary' key='fs/fat/fat_test' value='/data/kunit/arm64/fat_test.ko' />
+ <option name='binary' key='kernel/time/time_test' value='/data/kunit/arm64/time_test.ko' />
+ <option name='binary' key='lib/crypto/tests/blake2b_kunit' value='/data/kunit/arm64/blake2b_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/blake2s_kunit' value='/data/kunit/arm64/blake2s_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/curve25519_kunit' value='/data/kunit/arm64/curve25519_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/md5_kunit' value='/data/kunit/arm64/md5_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/poly1305_kunit' value='/data/kunit/arm64/poly1305_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/polyval_kunit' value='/data/kunit/arm64/polyval_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/sha1_kunit' value='/data/kunit/arm64/sha1_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/sha224_kunit' value='/data/kunit/arm64/sha224_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/sha256_kunit' value='/data/kunit/arm64/sha256_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/sha3_kunit' value='/data/kunit/arm64/sha3_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/sha384_kunit' value='/data/kunit/arm64/sha384_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/sha512_kunit' value='/data/kunit/arm64/sha512_kunit.ko' />
+ <!-- <option name='binary' key='lib/kunit/kunit-example-test' value='/data/kunit/arm64/kunit-example-test.ko' /> -->
+ <!-- <option name='binary' key='lib/kunit/kunit-test' value='/data/kunit/arm64/kunit-test.ko' /> -->
+ <!-- <option name='binary' key='mm/kfence/kfence_test' value='/data/kunit/arm64/kfence_test.ko' /> -->
+ <option name='binary' key='net/core/dev_addr_lists_test' value='/data/kunit/arm64/dev_addr_lists_test.ko' />
+ <!-- <option name='binary' key='sound/soc/soc-topology-test' value='/data/kunit/arm64/soc-topology-test.ko' /> -->
+ <option name='binary' key='sound/soc/soc-utils-test' value='/data/kunit/arm64/soc-utils-test.ko' />
+
+ <!-- <option name='binary' key='drivers/clk/clk-gate_test' value='/data/kunit/arm64/clk-gate_test.ko' /> -->
+ <!-- <option name='binary' key='drivers/clk/clk_test' value='/data/kunit/arm64/clk_test.ko' /> -->
+ </test>
+</configuration>
diff --git a/tools/testing/kunit/android/tradefed_configs/config_x86_64.xml b/tools/testing/kunit/android/tradefed_configs/config_x86_64.xml
new file mode 100644
index 0000000..25de102
--- /dev/null
+++ b/tools/testing/kunit/android/tradefed_configs/config_x86_64.xml
@@ -0,0 +1,56 @@
+<configuration description="kunit">
+ <object type="module_controller" class="com.android.tradefed.testtype.suite.module.KernelTestModuleController">
+ <option name="arch" value="x86_64" />
+ </object>
+
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer"/>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push-file" key="kunit" value="/data/kunit" />
+ </target_preparer>
+
+ <target_preparer class="com.android.tradefed.targetprep.InstallKernelModulePreparer">
+ <option name="module-path" value="/data/kunit/x86_64/kunit.ko" />
+ <option name="install-arg" value="enable=1" />
+ </target_preparer>
+
+ <!-- KUnit tests dependencies -->
+ <target_preparer class="com.android.tradefed.targetprep.InstallKernelModulePreparer">
+ <option name='module-path' key='drivers/base/regmap/regmap-ram' value='/data/kunit/x86_64/regmap-ram.ko' />
+ <option name='module-path' key='drivers/base/regmap/regmap-raw-ram' value='/data/kunit/x86_64/regmap-raw-ram.ko' />
+ </target_preparer>
+
+ <!-- Modules that leave the OS in an unstable state have been temporarily commented out. -->
+ <test class="com.android.tradefed.testtype.binary.KUnitModuleTest" >
+ <option name="skip-binary-check" value="true" />
+ <option name="ktap-result-parser-resolution" value="INDIVIDUAL_LEAVES" />
+ <option name='binary' key='drivers/android/tests/binder_alloc_kunit' value='/data/kunit/x86_64/binder_alloc_kunit.ko' />
+ <option name='binary' key='drivers/base/regmap/regmap-kunit' value='/data/kunit/x86_64/regmap-kunit.ko' />
+ <option name='binary' key='drivers/hid/hid-uclogic-test' value='/data/kunit/x86_64/hid-uclogic-test.ko' />
+ <!-- <option name='binary' key='drivers/iio/test/iio-test-format' value='/data/kunit/x86_64/iio-test-format.ko' /> -->
+ <option name='binary' key='drivers/input/tests/input_test' value='/data/kunit/x86_64/input_test.ko' />
+ <option name='binary' key='drivers/rtc/test_rtc_lib' value='/data/kunit/x86_64/test_rtc_lib.ko' />
+ <option name='binary' key='fs/ext4/ext4-inode-test' value='/data/kunit/x86_64/ext4-inode-test.ko' />
+ <option name='binary' key='fs/fat/fat_test' value='/data/kunit/x86_64/fat_test.ko' />
+ <option name='binary' key='kernel/time/time_test' value='/data/kunit/x86_64/time_test.ko' />
+ <option name='binary' key='lib/crypto/tests/blake2b_kunit' value='/data/kunit/x86_64/blake2b_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/blake2s_kunit' value='/data/kunit/x86_64/blake2s_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/curve25519_kunit' value='/data/kunit/x86_64/curve25519_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/md5_kunit' value='/data/kunit/x86_64/md5_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/poly1305_kunit' value='/data/kunit/x86_64/poly1305_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/polyval_kunit' value='/data/kunit/x86_64/polyval_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/sha1_kunit' value='/data/kunit/x86_64/sha1_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/sha224_kunit' value='/data/kunit/x86_64/sha224_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/sha256_kunit' value='/data/kunit/x86_64/sha256_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/sha3_kunit' value='/data/kunit/x86_64/sha3_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/sha384_kunit' value='/data/kunit/x86_64/sha384_kunit.ko' />
+ <option name='binary' key='lib/crypto/tests/sha512_kunit' value='/data/kunit/x86_64/sha512_kunit.ko' />
+ <!-- <option name='binary' key='lib/kunit/kunit-example-test' value='/data/kunit/x86_64/kunit-example-test.ko' /> -->
+ <!-- <option name='binary' key='lib/kunit/kunit-test' value='/data/kunit/x86_64/kunit-test.ko' /> -->
+ <!-- <option name='binary' key='mm/kfence/kfence_test' value='/data/kunit/x86_64/kfence_test.ko' /> -->
+ <option name='binary' key='net/core/dev_addr_lists_test' value='/data/kunit/x86_64/dev_addr_lists_test.ko' />
+ <!-- <option name='binary' key='sound/soc/soc-topology-test' value='/data/kunit/x86_64/soc-topology-test.ko' /> -->
+ <option name='binary' key='sound/soc/soc-utils-test' value='/data/kunit/x86_64/soc-utils-test.ko' />
+ </test>
+</configuration>
diff --git a/tools/testing/kunit/configs/android/kunit_clk_defconfig b/tools/testing/kunit/configs/android/kunit_clk_defconfig
new file mode 100644
index 0000000..83c4aa7
--- /dev/null
+++ b/tools/testing/kunit/configs/android/kunit_clk_defconfig
@@ -0,0 +1,3 @@
+# Only for architectures that set CONFIG_COMMON_CLK
+CONFIG_CLK_KUNIT_TEST=m
+CONFIG_CLK_GATE_KUNIT_TEST=m
diff --git a/tools/testing/kunit/configs/android/kunit_defconfig b/tools/testing/kunit/configs/android/kunit_defconfig
new file mode 100644
index 0000000..f3a96fc
--- /dev/null
+++ b/tools/testing/kunit/configs/android/kunit_defconfig
@@ -0,0 +1,37 @@
+# Defconfig fragment for Android Kunit targets
+#
+# Instead of setting CONFIG_KUNIT_ALL_TESTS=m, we enable individual tests
+# because:
+# - The defconfig fragment is applied after make defconfig
+# - If additional tests are added to CONFIG_KUNIT_ALL_TESTS in the future,
+# //common:kunit_* module_outs needs to be updated.
+
+# CONFIG_MODULE_SIG_ALL is not set
+
+# Corresponds to BUILD.bazel, _KUNIT_COMMON_MODULES
+CONFIG_TIME_KUNIT_TEST=m
+CONFIG_NETDEV_ADDR_LIST_TEST=m
+CONFIG_REGMAP_KUNIT=m
+CONFIG_INPUT_KUNIT_TEST=m
+CONFIG_SND_SOC_TOPOLOGY_KUNIT_TEST=m
+CONFIG_SND_SOC_UTILS_KUNIT_TEST=m
+CONFIG_HID_KUNIT_TEST=m
+CONFIG_RTC_LIB_KUNIT_TEST=m
+CONFIG_IIO_FORMAT_KUNIT_TEST=m
+CONFIG_EXT4_KUNIT_TESTS=m
+CONFIG_FAT_KUNIT_TEST=m
+# CONFIG_KFENCE_KUNIT_TEST=m
+CONFIG_KUNIT_TEST=m
+CONFIG_KUNIT_EXAMPLE_TEST=m
+
+# CONFIG_NET_HANDSHAKE is not enabled in gki_defconfig.
+# CONFIG_NET_HANDSHAKE_KUNIT_TEST=m
+
+# TODO(b/296116800): Enable these tests
+# CONFIG_DRM_KUNIT_TEST=m
+# CONFIG_KASAN_KUNIT_TEST=m
+
+# TODO(b/296116800): These are booleans, not tristates.
+# CONFIG_BINFMT_ELF_KUNIT_TEST=y
+# CONFIG_PM_QOS_KUNIT_TEST=y
+# CONFIG_DRIVER_PE_KUNIT_TEST=y
diff --git a/tools/testing/selftests/OWNERS b/tools/testing/selftests/OWNERS
new file mode 100644
index 0000000..e932d41
--- /dev/null
+++ b/tools/testing/selftests/OWNERS
@@ -0,0 +1,3 @@
+bettyzhou@google.com
+hwj@google.com
+wakel@google.com
diff --git a/tools/testing/selftests/android/README b/tools/testing/selftests/android/README
new file mode 100644
index 0000000..69dbc82
--- /dev/null
+++ b/tools/testing/selftests/android/README
@@ -0,0 +1,24 @@
+HOW TO RUN SELFTESTS IN ANDROID
+=================================
+
+Run tests on a physical or virtual device:
+ $ kernel/tests/tools/run_test_only.sh -t selftests -s <serial_number>
+
+Before the tests, you can use the following command to launch a virtual device:
+ $ kernel/tests/tools/launch_cvd.sh
+
+After the tests, you can use the following command to remove the virtual device:
+ $ prebuilts/asuite/acloud/linux-x86/acloud delete
+
+The following are command examples:
+ * Build kernel and launch a virtual device from a specific platform build:
+ $ kernel/tests/tools/launch_cvd.sh -pb \
+ ab://aosp-main/aosp_cf_x86_64_phone-trunk_staging-userdebug/12505199
+
+ * Run a specific test:
+ $ kernel/tests/tools/run_test_only.sh \
+ -t 'selftests kselftest_net_socket' -s <serial_number>
+
+ * Check other available options:
+ $ kernel/tests/tools/launch_cvd.sh -h
+ $ kernel/tests/tools/run_test_only.sh -h
diff --git a/tools/testing/selftests/android/config_arm.xml b/tools/testing/selftests/android/config_arm.xml
new file mode 100644
index 0000000..d66f967
--- /dev/null
+++ b/tools/testing/selftests/android/config_arm.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2023 The Android Open Source Project
+SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+-->
+<!DOCTYPE configuration [
+<!ENTITY ktest_dir "/data/selftests/arm">
+]>
+<configuration description="kselftest">
+ <option name="test-suite-tag" value="kernel-test" />
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer" />
+ <target_preparer class="com.android.tradefed.targetprep.StopServicesSetup" />
+
+ <object type="module_controller" class="com.android.tradefed.testtype.suite.module.KernelTestModuleController" >
+ <option name="arch" value="arm"/>
+ </object>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push-file" key="selftests" value="/data/selftests" />
+ <option name="skip-abi-filtering" value="true" />
+ <option name="post-push" value='chmod -R 755 /data/selftests; find /data/selftests -type f | xargs grep -l -e "bin/sh" -e "bin/bash" | xargs sed -i -e "s?/bin/echo?echo?" -i -e "s?#!/bin/sh?#!/system/bin/sh?" -i -e "s?#!/bin/bash?#!/system/bin/sh?" || echo "There were no files to process"' />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.binary.KernelTargetTest" >
+ <option name="exit-code-skip" value="4" />
+ <option name="skip-binary-check" value="true" />
+ <option name="ktap-result-parser-resolution" value="INDIVIDUAL_LEAVES" />
+ <option name="test-command-line" key="kselftest_binderfs_binderfs_test" value="cd &ktest_dir;; ./kselftest_binderfs_binderfs_test" />
+ <option name="test-command-line" key="kselftest_breakpoints_breakpoint_test" value="cd &ktest_dir;; ./kselftest_breakpoints_breakpoint_test" />
+ <option name="test-command-line" key="kselftest_capabilities_test_execve" value="cd &ktest_dir;; ./kselftest_capabilities_test_execve" />
+ <option name="test-command-line" key="kselftest_futex_requeue" value="cd &ktest_dir;; ./futex_requeue" />
+ <option name="test-command-line" key="kselftest_futex_requeue_pi" value="cd &ktest_dir;; ./futex_requeue_pi" />
+ <option name="test-command-line" key="kselftest_futex_requeue_pi_mismatched_ops" value="cd &ktest_dir;; ./futex_requeue_pi_mismatched_ops" />
+ <option name="test-command-line" key="kselftest_futex_requeue_pi_signal_restart" value="cd &ktest_dir;; ./futex_requeue_pi_signal_restart" />
+ <option name="test-command-line" key="kselftest_futex_wait" value="cd &ktest_dir;; ./futex_wait" />
+ <option name="test-command-line" key="kselftest_futex_wait_private_mapped_file" value="cd &ktest_dir;; ./futex_wait_private_mapped_file" />
+ <option name="test-command-line" key="kselftest_futex_wait_timeout" value="cd &ktest_dir;; ./futex_wait_timeout" />
+ <option name="test-command-line" key="kselftest_futex_wait_uninitialized_heap" value="cd &ktest_dir;; ./futex_wait_uninitialized_heap" />
+ <option name="test-command-line" key="kselftest_futex_wait_wouldblock" value="cd &ktest_dir;; ./futex_wait_wouldblock" />
+ <option name="test-command-line" key="kselftest_kcmp_kcmp_test" value="cd &ktest_dir;; ./kselftest_kcmp_kcmp_test" />
+ <option name="test-command-line" key="kselftest_mm_mremap_dontunmap" value="cd &ktest_dir;; ./kselftest_mm_mremap_dontunmap" />
+ <option name="test-command-line" key="kselftest_mm_mremap_test" value="cd &ktest_dir;; ./kselftest_mm_mremap_test" />
+ <option name="test-command-line" key="kselftest_mm_uffd_unit_tests" value="cd &ktest_dir;; ./kselftest_mm_uffd_unit_tests" />
+ <option name="test-command-line" key="kselftest_net_socket" value="cd &ktest_dir;; ./kselftest_net_socket" />
+ <!--option name="test-command-line" key="kselftest_net_psock_tpacket" value="cd &ktest_dir;; ./kselftest_net_psock_tpacket" /-->
+ <option name="test-command-line" key="kselftest_net_reuseaddr_conflict" value="cd &ktest_dir;; ./kselftest_net_reuseaddr_conflict" />
+ <option name="test-command-line" key="kselftest_ptrace_peeksiginfo" value="cd &ktest_dir;; ./kselftest_ptrace_peeksiginfo" />
+ <option name="test-command-line" key="kselftest_rtc_rtctest" value="cd &ktest_dir;; ./kselftest_rtc_rtctest" />
+ <!--option name="test-command-line" key="kselftest_seccomp_seccomp_bpf" value="cd &ktest_dir;; ./kselftest_seccomp_seccomp_bpf" /-->
+ <option name="test-command-line" key="kselftest_size_test_get_size" value="cd &ktest_dir;; ./kselftest_size_test_get_size" />
+ <option name="test-command-line" key="kselftest_timers_inconsistency_check" value="cd &ktest_dir;; ./inconsistency-check" />
+ <option name="test-command-line" key="kselftest_timers_nanosleep" value="cd &ktest_dir;; ./nanosleep" />
+ <option name="test-command-line" key="kselftest_timers_nsleep_lat" value="cd &ktest_dir;; ./nsleep-lat" />
+ <option name="test-command-line" key="kselftest_timers_posix_timers" value="cd &ktest_dir;; ./kselftest_timers_posix_timers" />
+ <option name="test-command-line" key="kselftest_timers_set_timer_lat" value="cd &ktest_dir;; ./kselftest_timers_set_timer_lat" />
+ <option name="test-command-line" key="kselftest_timers_tests_raw_skew" value="cd &ktest_dir;; ./raw_skew" />
+ <option name="test-command-line" key="kselftest_timers_threadtest" value="cd &ktest_dir;; ./kselftest_timers_threadtest" />
+ <option name="test-command-line" key="kselftest_timers_valid_adjtimex" value="cd &ktest_dir;; ./kselftest_timers_valid_adjtimex" />
+ <option name="test-command-line" key="kselftest_vdso_vdso_test_abi" value="cd &ktest_dir;; ./kselftest_vdso_vdso_test_abi" />
+ <option name="test-command-line" key="kselftest_vdso_vdso_test_getcpu" value="cd &ktest_dir;; ./kselftest_vdso_vdso_test_getcpu" />
+ <option name="test-command-line" key="kselftest_vdso_vdso_test_gettimeofday" value="cd &ktest_dir;; ./kselftest_vdso_vdso_test_gettimeofday" />
+ </test>
+</configuration>
diff --git a/tools/testing/selftests/android/config_arm64.xml b/tools/testing/selftests/android/config_arm64.xml
new file mode 100644
index 0000000..c16a27b
--- /dev/null
+++ b/tools/testing/selftests/android/config_arm64.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2023 The Android Open Source Project
+SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+-->
+<!DOCTYPE configuration [
+<!ENTITY ktest_dir "/data/selftests/arm64">
+]>
+<configuration description="kselftest">
+ <option name="test-suite-tag" value="kernel-test" />
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer" />
+ <target_preparer class="com.android.tradefed.targetprep.StopServicesSetup" />
+
+ <object type="module_controller" class="com.android.tradefed.testtype.suite.module.KernelTestModuleController" >
+ <option name="arch" value="arm64"/>
+ </object>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push-file" key="selftests" value="/data/selftests" />
+ <option name="skip-abi-filtering" value="true" />
+ <option name="post-push" value='chmod -R 755 /data/selftests; find /data/selftests -type f | xargs grep -l -e "bin/sh" -e "bin/bash" | xargs sed -i -e "s?/bin/echo?echo?" -i -e "s?#!/bin/sh?#!/system/bin/sh?" -i -e "s?#!/bin/bash?#!/system/bin/sh?" || echo "There were no files to process"' />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.binary.KernelTargetTest" >
+ <option name="exit-code-skip" value="4" />
+ <option name="skip-binary-check" value="true" />
+ <option name="ktap-result-parser-resolution" value="INDIVIDUAL_LEAVES" />
+ <option name="test-command-line" key="kselftest_binderfs_binderfs_test" value="cd &ktest_dir;; ./kselftest_binderfs_binderfs_test" />
+ <option name="test-command-line" key="kselftest_breakpoints_breakpoint_test" value="cd &ktest_dir;; ./kselftest_breakpoints_breakpoint_test" />
+ <option name="test-command-line" key="kselftest_capabilities_test_execve" value="cd &ktest_dir;; ./kselftest_capabilities_test_execve" />
+ <option name="test-command-line" key="kselftest_futex_requeue" value="cd &ktest_dir;; ./futex_requeue" />
+ <option name="test-command-line" key="kselftest_futex_requeue_pi" value="cd &ktest_dir;; ./futex_requeue_pi" />
+ <option name="test-command-line" key="kselftest_futex_requeue_pi_mismatched_ops" value="cd &ktest_dir;; ./futex_requeue_pi_mismatched_ops" />
+ <option name="test-command-line" key="kselftest_futex_requeue_pi_signal_restart" value="cd &ktest_dir;; ./futex_requeue_pi_signal_restart" />
+ <option name="test-command-line" key="kselftest_futex_wait" value="cd &ktest_dir;; ./futex_wait" />
+ <option name="test-command-line" key="kselftest_futex_wait_private_mapped_file" value="cd &ktest_dir;; ./futex_wait_private_mapped_file" />
+ <option name="test-command-line" key="kselftest_futex_wait_timeout" value="cd &ktest_dir;; ./futex_wait_timeout" />
+ <option name="test-command-line" key="kselftest_futex_wait_uninitialized_heap" value="cd &ktest_dir;; ./futex_wait_uninitialized_heap" />
+ <option name="test-command-line" key="kselftest_futex_wait_wouldblock" value="cd &ktest_dir;; ./futex_wait_wouldblock" />
+ <option name="test-command-line" key="kselftest_kcmp_kcmp_test" value="cd &ktest_dir;; ./kselftest_kcmp_kcmp_test" />
+ <option name="test-command-line" key="kselftest_memfd_test" value="cd &ktest_dir;; ./kselftest_memfd_test" />
+ <option name="test-command-line" key="kselftest_mm_mremap_dontunmap" value="cd &ktest_dir;; ./kselftest_mm_mremap_dontunmap" />
+ <option name="test-command-line" key="kselftest_mm_mremap_test" value="cd &ktest_dir;; ./kselftest_mm_mremap_test" />
+ <option name="test-command-line" key="kselftest_mm_uffd_unit_tests" value="cd &ktest_dir;; ./kselftest_mm_uffd_unit_tests" />
+ <option name="test-command-line" key="kselftest_net_socket" value="cd &ktest_dir;; ./kselftest_net_socket" />
+ <option name="test-command-line" key="kselftest_net_psock_tpacket" value="cd &ktest_dir;; ./kselftest_net_psock_tpacket" />
+ <option name="test-command-line" key="kselftest_net_reuseaddr_conflict" value="cd &ktest_dir;; ./kselftest_net_reuseaddr_conflict" />
+ <option name="test-command-line" key="kselftest_ptrace_peeksiginfo" value="cd &ktest_dir;; ./kselftest_ptrace_peeksiginfo" />
+ <option name="test-command-line" key="kselftest_rtc_rtctest" value="cd &ktest_dir;; ./kselftest_rtc_rtctest" />
+ <option name="test-command-line" key="kselftest_seccomp_seccomp_bpf" value="cd &ktest_dir;; ./kselftest_seccomp_seccomp_bpf" />
+ <option name="test-command-line" key="kselftest_size_test_get_size" value="cd &ktest_dir;; ./kselftest_size_test_get_size" />
+ <option name="test-command-line" key="kselftest_timers_inconsistency_check" value="cd &ktest_dir;; ./inconsistency-check" />
+ <option name="test-command-line" key="kselftest_timers_nanosleep" value="cd &ktest_dir;; ./nanosleep" />
+ <option name="test-command-line" key="kselftest_timers_nsleep_lat" value="cd &ktest_dir;; ./nsleep-lat" />
+ <option name="test-command-line" key="kselftest_timers_posix_timers" value="cd &ktest_dir;; ./kselftest_timers_posix_timers" />
+ <option name="test-command-line" key="kselftest_timers_set_timer_lat" value="cd &ktest_dir;; ./kselftest_timers_set_timer_lat" />
+ <option name="test-command-line" key="kselftest_timers_tests_raw_skew" value="cd &ktest_dir;; ./raw_skew" />
+ <option name="test-command-line" key="kselftest_timers_threadtest" value="cd &ktest_dir;; ./kselftest_timers_threadtest" />
+ <option name="test-command-line" key="kselftest_timers_valid_adjtimex" value="cd &ktest_dir;; ./kselftest_timers_valid_adjtimex" />
+ <option name="test-command-line" key="kselftest_vdso_vdso_test_abi" value="cd &ktest_dir;; ./kselftest_vdso_vdso_test_abi" />
+ <option name="test-command-line" key="kselftest_vdso_vdso_test_getcpu" value="cd &ktest_dir;; ./kselftest_vdso_vdso_test_getcpu" />
+ <option name="test-command-line" key="kselftest_vdso_vdso_test_gettimeofday" value="cd &ktest_dir;; ./kselftest_vdso_vdso_test_gettimeofday" />
+ </test>
+</configuration>
diff --git a/tools/testing/selftests/android/config_x86.xml b/tools/testing/selftests/android/config_x86.xml
new file mode 100644
index 0000000..f57189f
--- /dev/null
+++ b/tools/testing/selftests/android/config_x86.xml
@@ -0,0 +1,68 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2023 The Android Open Source Project
+SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+-->
+<!DOCTYPE configuration [
+<!ENTITY ktest_dir "/data/selftests/x86">
+]>
+<configuration description="kselftest">
+ <option name="test-suite-tag" value="kernel-test" />
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer" />
+ <target_preparer class="com.android.tradefed.targetprep.StopServicesSetup" />
+
+ <object type="module_controller" class="com.android.tradefed.testtype.suite.module.KernelTestModuleController" >
+ <option name="arch" value="x86"/>
+ </object>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push-file" key="selftests" value="/data/selftests" />
+ <option name="skip-abi-filtering" value="true" />
+ <option name="post-push" value='chmod -R 755 /data/selftests; find /data/selftests -type f | xargs grep -l -e "bin/sh" -e "bin/bash" | xargs sed -i -e "s?/bin/echo?echo?" -i -e "s?#!/bin/sh?#!/system/bin/sh?" -i -e "s?#!/bin/bash?#!/system/bin/sh?" || echo "There were no files to process"' />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.binary.KernelTargetTest" >
+ <option name="exit-code-skip" value="4" />
+ <option name="skip-binary-check" value="true" />
+ <option name="ktap-result-parser-resolution" value="INDIVIDUAL_LEAVES" />
+ <option name="test-command-line" key="kselftest_binderfs_binderfs_test" value="cd &ktest_dir;; ./kselftest_binderfs_binderfs_test" />
+ <option name="test-command-line" key="kselftest_breakpoints_breakpoint_test" value="cd &ktest_dir;; ./kselftest_breakpoints_breakpoint_test" />
+ <option name="test-command-line" key="kselftest_capabilities_test_execve" value="cd &ktest_dir;; ./kselftest_capabilities_test_execve" />
+ <option name="test-command-line" key="kselftest_futex_requeue" value="cd &ktest_dir;; ./futex_requeue" />
+ <option name="test-command-line" key="kselftest_futex_requeue_pi" value="cd &ktest_dir;; ./futex_requeue_pi" />
+ <option name="test-command-line" key="kselftest_futex_requeue_pi_mismatched_ops" value="cd &ktest_dir;; ./futex_requeue_pi_mismatched_ops" />
+ <option name="test-command-line" key="kselftest_futex_requeue_pi_signal_restart" value="cd &ktest_dir;; ./futex_requeue_pi_signal_restart" />
+ <option name="test-command-line" key="kselftest_futex_wait" value="cd &ktest_dir;; ./futex_wait" />
+ <option name="test-command-line" key="kselftest_futex_wait_private_mapped_file" value="cd &ktest_dir;; ./futex_wait_private_mapped_file" />
+ <option name="test-command-line" key="kselftest_futex_wait_timeout" value="cd &ktest_dir;; ./futex_wait_timeout" />
+ <option name="test-command-line" key="kselftest_futex_wait_uninitialized_heap" value="cd &ktest_dir;; ./futex_wait_uninitialized_heap" />
+ <option name="test-command-line" key="kselftest_futex_wait_wouldblock" value="cd &ktest_dir;; ./futex_wait_wouldblock" />
+ <option name="test-command-line" key="kselftest_kcmp_kcmp_test" value="cd &ktest_dir;; ./kselftest_kcmp_kcmp_test" />
+ <option name="test-command-line" key="kselftest_mm_mremap_dontunmap" value="cd &ktest_dir;; ./kselftest_mm_mremap_dontunmap" />
+ <option name="test-command-line" key="kselftest_mm_mremap_test" value="cd &ktest_dir;; ./kselftest_mm_mremap_test" />
+ <option name="test-command-line" key="kselftest_mm_uffd_unit_tests" value="cd &ktest_dir;; ./kselftest_mm_uffd_unit_tests" />
+ <option name="test-command-line" key="kselftest_net_socket" value="cd &ktest_dir;; ./kselftest_net_socket" />
+ <option name="test-command-line" key="kselftest_net_psock_tpacket" value="cd &ktest_dir;; ./kselftest_net_psock_tpacket" />
+ <option name="test-command-line" key="kselftest_net_reuseaddr_conflict" value="cd &ktest_dir;; ./kselftest_net_reuseaddr_conflict" />
+ <option name="test-command-line" key="kselftest_ptrace_peeksiginfo" value="cd &ktest_dir;; ./kselftest_ptrace_peeksiginfo" />
+ <option name="test-command-line" key="kselftest_rtc_rtctest" value="cd &ktest_dir;; ./kselftest_rtc_rtctest" />
+ <option name="test-command-line" key="kselftest_seccomp_seccomp_bpf" value="cd &ktest_dir;; ./kselftest_seccomp_seccomp_bpf" />
+ <option name="test-command-line" key="kselftest_size_test_get_size" value="cd &ktest_dir;; ./kselftest_size_test_get_size" />
+ <option name="test-command-line" key="kselftest_timers_inconsistency_check" value="cd &ktest_dir;; ./inconsistency-check" />
+ <option name="test-command-line" key="kselftest_timers_nanosleep" value="cd &ktest_dir;; ./nanosleep" />
+ <option name="test-command-line" key="kselftest_timers_nsleep_lat" value="cd &ktest_dir;; ./nsleep-lat" />
+ <option name="test-command-line" key="kselftest_timers_posix_timers" value="cd &ktest_dir;; ./kselftest_timers_posix_timers" />
+ <option name="test-command-line" key="kselftest_timers_set_timer_lat" value="cd &ktest_dir;; ./kselftest_timers_set_timer_lat" />
+ <option name="test-command-line" key="kselftest_timers_tests_raw_skew" value="cd &ktest_dir;; ./raw_skew" />
+ <option name="test-command-line" key="kselftest_timers_threadtest" value="cd &ktest_dir;; ./kselftest_timers_threadtest" />
+ <option name="test-command-line" key="kselftest_timers_valid_adjtimex" value="cd &ktest_dir;; ./kselftest_timers_valid_adjtimex" />
+ <option name="test-command-line" key="kselftest_vdso_vdso_test_abi" value="cd &ktest_dir;; ./kselftest_vdso_vdso_test_abi" />
+ <option name="test-command-line" key="kselftest_vdso_vdso_test_getcpu" value="cd &ktest_dir;; ./kselftest_vdso_vdso_test_getcpu" />
+ <option name="test-command-line" key="kselftest_vdso_vdso_test_gettimeofday" value="cd &ktest_dir;; ./kselftest_vdso_vdso_test_gettimeofday" />
+ <option name="test-command-line" key="kselftest_x86_check_initial_reg_state" value="cd &ktest_dir;; ./kselftest_x86_check_initial_reg_state" />
+ <option name="test-command-line" key="kselftest_x86_ldt_gdt" value="cd &ktest_dir;; ./kselftest_x86_ldt_gdt" />
+ <option name="test-command-line" key="kselftest_x86_ptrace_syscall" value="cd &ktest_dir;; ./kselftest_x86_ptrace_syscall" />
+ <option name="test-command-line" key="kselftest_x86_single_step_syscall" value="cd &ktest_dir;; ./kselftest_x86_single_step_syscall" />
+ <option name="test-command-line" key="kselftest_x86_syscall_nt" value="cd &ktest_dir;; ./kselftest_x86_syscall_nt" />
+ </test>
+</configuration>
diff --git a/tools/testing/selftests/android/config_x86_64.xml b/tools/testing/selftests/android/config_x86_64.xml
new file mode 100644
index 0000000..3282b8f
--- /dev/null
+++ b/tools/testing/selftests/android/config_x86_64.xml
@@ -0,0 +1,69 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2023 The Android Open Source Project
+SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+-->
+<!DOCTYPE configuration [
+<!ENTITY ktest_dir "/data/selftests/x86_64">
+]>
+<configuration description="kselftest">
+ <option name="test-suite-tag" value="kernel-test" />
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer" />
+ <target_preparer class="com.android.tradefed.targetprep.StopServicesSetup" />
+
+ <object type="module_controller" class="com.android.tradefed.testtype.suite.module.KernelTestModuleController" >
+ <option name="arch" value="x86_64"/>
+ </object>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push-file" key="selftests" value="/data/selftests" />
+ <option name="skip-abi-filtering" value="true" />
+ <option name="post-push" value='chmod -R 755 /data/selftests; find /data/selftests -type f | xargs grep -l -e "bin/sh" -e "bin/bash" | xargs sed -i -e "s?/bin/echo?echo?" -i -e "s?#!/bin/sh?#!/system/bin/sh?" -i -e "s?#!/bin/bash?#!/system/bin/sh?" || echo "There were no files to process"' />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.binary.KernelTargetTest" >
+ <option name="exit-code-skip" value="4" />
+ <option name="skip-binary-check" value="true" />
+ <option name="ktap-result-parser-resolution" value="INDIVIDUAL_LEAVES" />
+ <option name="test-command-line" key="kselftest_binderfs_binderfs_test" value="cd &ktest_dir;; ./kselftest_binderfs_binderfs_test" />
+ <option name="test-command-line" key="kselftest_breakpoints_breakpoint_test" value="cd &ktest_dir;; ./kselftest_breakpoints_breakpoint_test" />
+ <option name="test-command-line" key="kselftest_capabilities_test_execve" value="cd &ktest_dir;; ./kselftest_capabilities_test_execve" />
+ <option name="test-command-line" key="kselftest_futex_requeue" value="cd &ktest_dir;; ./futex_requeue" />
+ <option name="test-command-line" key="kselftest_futex_requeue_pi" value="cd &ktest_dir;; ./futex_requeue_pi" />
+ <option name="test-command-line" key="kselftest_futex_requeue_pi_mismatched_ops" value="cd &ktest_dir;; ./futex_requeue_pi_mismatched_ops" />
+ <option name="test-command-line" key="kselftest_futex_requeue_pi_signal_restart" value="cd &ktest_dir;; ./futex_requeue_pi_signal_restart" />
+ <option name="test-command-line" key="kselftest_futex_wait" value="cd &ktest_dir;; ./futex_wait" />
+ <option name="test-command-line" key="kselftest_futex_wait_private_mapped_file" value="cd &ktest_dir;; ./futex_wait_private_mapped_file" />
+ <option name="test-command-line" key="kselftest_futex_wait_timeout" value="cd &ktest_dir;; ./futex_wait_timeout" />
+ <option name="test-command-line" key="kselftest_futex_wait_uninitialized_heap" value="cd &ktest_dir;; ./futex_wait_uninitialized_heap" />
+ <option name="test-command-line" key="kselftest_futex_wait_wouldblock" value="cd &ktest_dir;; ./futex_wait_wouldblock" />
+ <option name="test-command-line" key="kselftest_kcmp_kcmp_test" value="cd &ktest_dir;; ./kselftest_kcmp_kcmp_test" />
+ <option name="test-command-line" key="kselftest_memfd_test" value="cd &ktest_dir;; ./kselftest_memfd_test" />
+ <option name="test-command-line" key="kselftest_mm_mremap_dontunmap" value="cd &ktest_dir;; ./kselftest_mm_mremap_dontunmap" />
+ <option name="test-command-line" key="kselftest_mm_mremap_test" value="cd &ktest_dir;; ./kselftest_mm_mremap_test" />
+ <option name="test-command-line" key="kselftest_mm_uffd_unit_tests" value="cd &ktest_dir;; ./kselftest_mm_uffd_unit_tests" />
+ <option name="test-command-line" key="kselftest_net_socket" value="cd &ktest_dir;; ./kselftest_net_socket" />
+ <option name="test-command-line" key="kselftest_net_psock_tpacket" value="cd &ktest_dir;; ./kselftest_net_psock_tpacket" />
+ <option name="test-command-line" key="kselftest_net_reuseaddr_conflict" value="cd &ktest_dir;; ./kselftest_net_reuseaddr_conflict" />
+ <option name="test-command-line" key="kselftest_ptrace_peeksiginfo" value="cd &ktest_dir;; ./kselftest_ptrace_peeksiginfo" />
+ <option name="test-command-line" key="kselftest_rtc_rtctest" value="cd &ktest_dir;; ./kselftest_rtc_rtctest" />
+ <option name="test-command-line" key="kselftest_seccomp_seccomp_bpf" value="cd &ktest_dir;; ./kselftest_seccomp_seccomp_bpf" />
+ <option name="test-command-line" key="kselftest_size_test_get_size" value="cd &ktest_dir;; ./kselftest_size_test_get_size" />
+ <option name="test-command-line" key="kselftest_timers_inconsistency_check" value="cd &ktest_dir;; ./inconsistency-check" />
+ <option name="test-command-line" key="kselftest_timers_nanosleep" value="cd &ktest_dir;; ./nanosleep" />
+ <option name="test-command-line" key="kselftest_timers_nsleep_lat" value="cd &ktest_dir;; ./nsleep-lat" />
+ <option name="test-command-line" key="kselftest_timers_posix_timers" value="cd &ktest_dir;; ./kselftest_timers_posix_timers" />
+ <option name="test-command-line" key="kselftest_timers_set_timer_lat" value="cd &ktest_dir;; ./kselftest_timers_set_timer_lat" />
+ <option name="test-command-line" key="kselftest_timers_tests_raw_skew" value="cd &ktest_dir;; ./raw_skew" />
+ <option name="test-command-line" key="kselftest_timers_threadtest" value="cd &ktest_dir;; ./kselftest_timers_threadtest" />
+ <option name="test-command-line" key="kselftest_timers_valid_adjtimex" value="cd &ktest_dir;; ./kselftest_timers_valid_adjtimex" />
+ <option name="test-command-line" key="kselftest_vdso_vdso_test_abi" value="cd &ktest_dir;; ./kselftest_vdso_vdso_test_abi" />
+ <option name="test-command-line" key="kselftest_vdso_vdso_test_getcpu" value="cd &ktest_dir;; ./kselftest_vdso_vdso_test_getcpu" />
+ <option name="test-command-line" key="kselftest_vdso_vdso_test_gettimeofday" value="cd &ktest_dir;; ./kselftest_vdso_vdso_test_gettimeofday" />
+ <option name="test-command-line" key="kselftest_x86_check_initial_reg_state" value="cd &ktest_dir;; ./kselftest_x86_check_initial_reg_state" />
+ <option name="test-command-line" key="kselftest_x86_ldt_gdt" value="cd &ktest_dir;; ./kselftest_x86_ldt_gdt" />
+ <option name="test-command-line" key="kselftest_x86_ptrace_syscall" value="cd &ktest_dir;; ./kselftest_x86_ptrace_syscall" />
+ <option name="test-command-line" key="kselftest_x86_single_step_syscall" value="cd &ktest_dir;; ./kselftest_x86_single_step_syscall" />
+ <option name="test-command-line" key="kselftest_x86_syscall_nt" value="cd &ktest_dir;; ./kselftest_x86_syscall_nt" />
+ </test>
+</configuration>
diff --git a/tools/testing/selftests/android/include/bionic-compat.h b/tools/testing/selftests/android/include/bionic-compat.h
new file mode 100644
index 0000000..109fb44
--- /dev/null
+++ b/tools/testing/selftests/android/include/bionic-compat.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __BIONIC_COMPAT_H
+#define __BIONIC_COMPAT_H
+
+#define _GNU_SOURCE
+#include <sys/types.h>
+
+static inline int pthread_cancel(pthread_t thread)
+{
+ return 0;
+}
+
+#endif /* __BIONIC_COMPAT_H */
diff --git a/tools/testing/selftests/android/vts_config_arm.xml b/tools/testing/selftests/android/vts_config_arm.xml
new file mode 100644
index 0000000..e4da7ad
--- /dev/null
+++ b/tools/testing/selftests/android/vts_config_arm.xml
@@ -0,0 +1,47 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2025 The Android Open Source Project
+SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+-->
+<!DOCTYPE configuration [
+<!ENTITY ktest_dir "/data/selftests/arm">
+]>
+<configuration description="kselftest">
+ <option name="test-suite-tag" value="kernel-test" />
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer" />
+ <target_preparer class="com.android.tradefed.targetprep.StopServicesSetup" />
+
+ <object type="module_controller" class="com.android.tradefed.testtype.suite.module.KernelTestModuleController" >
+ <option name="arch" value="arm"/>
+ </object>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push-file" key="selftests" value="/data/selftests" />
+ <option name="skip-abi-filtering" value="true" />
+ <option name="post-push" value='chmod -R 755 /data/selftests; find /data/selftests -type f | xargs grep -l -e "bin/sh" -e "bin/bash" | xargs sed -i -e "s?/bin/echo?echo?" -i -e "s?#!/bin/sh?#!/system/bin/sh?" -i -e "s?#!/bin/bash?#!/system/bin/sh?" || echo "There were no files to process"' />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.binary.KernelTargetTest" >
+ <option name="exit-code-skip" value="4" />
+ <option name="skip-binary-check" value="true" />
+ <option name="ktap-result-parser-resolution" value="INDIVIDUAL_LEAVES" />
+ <option name="test-command-line" key="kselftest_binderfs_binderfs_test" value="cd &ktest_dir;; ./kselftest_binderfs_binderfs_test" />
+ <option name="test-command-line" key="kselftest_capabilities_test_execve" value="cd &ktest_dir;; ./kselftest_capabilities_test_execve" />
+ <option name="test-command-line" key="kselftest_capabilities_validate_cap" value="cd &ktest_dir;; ./validate_cap 1 1 0 0" />
+ <option name="test-command-line" key="kselftest_futex_futex_requeue_pi_mismatched_ops" value="cd &ktest_dir;; ./futex_requeue_pi_mismatched_ops" />
+ <option name="test-command-line" key="kselftest_futex_futex_requeue_pi_signal_restart" value="cd &ktest_dir;; ./futex_requeue_pi_signal_restart" />
+ <option name="test-command-line" key="kselftest_futex_futex_requeue_pi" value="cd &ktest_dir;; ./futex_requeue_pi" />
+ <option name="test-command-line" key="kselftest_futex_futex_requeue" value="cd &ktest_dir;; ./futex_requeue" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait_private_mapped_file" value="cd &ktest_dir;; ./futex_wait_private_mapped_file" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait_timeout" value="cd &ktest_dir;; ./futex_wait_timeout" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait_uninitialized_heap" value="cd &ktest_dir;; ./futex_wait_uninitialized_heap" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait_wouldblock" value="cd &ktest_dir;; ./futex_wait_wouldblock" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait" value="cd &ktest_dir;; ./futex_wait" />
+ <option name="test-command-line" key="kselftest_kcmp_kcmp_test" value="cd &ktest_dir;; ./kselftest_kcmp_kcmp_test" />
+ <option name="test-command-line" key="kselftest_rtc_rtctest" value="cd &ktest_dir;; ./kselftest_rtc_rtctest" />
+ <option name="test-command-line" key="kselftest_net_tests_socket" value="cd &ktest_dir;; ./kselftest_net_tests_socket" />
+ <option name="test-command-line" key="kselftest_net_tests_psock_tpacket" value="cd &ktest_dir;; ./kselftest_net_tests_psock_tpacket" />
+ <option name="test-command-line" key="kselftest_net_tests_reuseport_dualstack" value="cd &ktest_dir;; ./kselftest_net_tests_reuseport_dualstack" />
+ <option name="test-command-line" key="kselftest_net_tests_reuseaddr_conflict" value="cd &ktest_dir;; ./kselftest_net_tests_reuseaddr_conflict" />
+ </test>
+</configuration>
diff --git a/tools/testing/selftests/android/vts_config_arm64.xml b/tools/testing/selftests/android/vts_config_arm64.xml
new file mode 100644
index 0000000..7316edb
--- /dev/null
+++ b/tools/testing/selftests/android/vts_config_arm64.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2025 The Android Open Source Project
+SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+-->
+<!DOCTYPE configuration [
+<!ENTITY ktest_dir "/data/selftests/arm64">
+]>
+<configuration description="kselftest">
+ <option name="test-suite-tag" value="kernel-test" />
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer" />
+ <target_preparer class="com.android.tradefed.targetprep.StopServicesSetup" />
+
+ <object type="module_controller" class="com.android.tradefed.testtype.suite.module.KernelTestModuleController" >
+ <option name="arch" value="arm64"/>
+ </object>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push-file" key="selftests" value="/data/selftests" />
+ <option name="skip-abi-filtering" value="true" />
+ <option name="post-push" value='chmod -R 755 /data/selftests; find /data/selftests -type f | xargs grep -l -e "bin/sh" -e "bin/bash" | xargs sed -i -e "s?/bin/echo?echo?" -i -e "s?#!/bin/sh?#!/system/bin/sh?" -i -e "s?#!/bin/bash?#!/system/bin/sh?" || echo "There were no files to process"' />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.binary.KernelTargetTest" >
+ <option name="exit-code-skip" value="4" />
+ <option name="skip-binary-check" value="true" />
+ <option name="ktap-result-parser-resolution" value="INDIVIDUAL_LEAVES" />
+ <option name="test-command-line" key="kselftest_binderfs_binderfs_test" value="cd &ktest_dir;; ./kselftest_binderfs_binderfs_test" />
+ <option name="test-command-line" key="kselftest_breakpoints_breakpoint_test" value="cd &ktest_dir;; ./kselftest_breakpoints_breakpoint_test" />
+ <option name="test-command-line" key="kselftest_capabilities_test_execve" value="cd &ktest_dir;; ./kselftest_capabilities_test_execve" />
+ <option name="test-command-line" key="kselftest_capabilities_validate_cap" value="cd &ktest_dir;; ./validate_cap 1 1 0 0" />
+ <option name="test-command-line" key="kselftest_futex_futex_requeue_pi_mismatched_ops" value="cd &ktest_dir;; ./futex_requeue_pi_mismatched_ops" />
+ <option name="test-command-line" key="kselftest_futex_futex_requeue_pi_signal_restart" value="cd &ktest_dir;; ./futex_requeue_pi_signal_restart" />
+ <option name="test-command-line" key="kselftest_futex_futex_requeue_pi" value="cd &ktest_dir;; ./futex_requeue_pi" />
+ <option name="test-command-line" key="kselftest_futex_futex_requeue" value="cd &ktest_dir;; ./futex_requeue" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait_private_mapped_file" value="cd &ktest_dir;; ./futex_wait_private_mapped_file" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait_timeout" value="cd &ktest_dir;; ./futex_wait_timeout" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait_uninitialized_heap" value="cd &ktest_dir;; ./futex_wait_uninitialized_heap" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait_wouldblock" value="cd &ktest_dir;; ./futex_wait_wouldblock" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait" value="cd &ktest_dir;; ./futex_wait" />
+ <option name="test-command-line" key="kselftest_kcmp_kcmp_test" value="cd &ktest_dir;; ./kselftest_kcmp_kcmp_test" />
+ <option name="test-command-line" key="kselftest_rtc_rtctest" value="cd &ktest_dir;; ./kselftest_rtc_rtctest" />
+ <option name="test-command-line" key="kselftest_net_tests_socket" value="cd &ktest_dir;; ./kselftest_net_tests_socket" />
+ <option name="test-command-line" key="kselftest_net_tests_psock_tpacket" value="cd &ktest_dir;; ./kselftest_net_tests_psock_tpacket" />
+ <option name="test-command-line" key="kselftest_net_tests_reuseport_dualstack" value="cd &ktest_dir;; ./kselftest_net_tests_reuseport_dualstack" />
+ <option name="test-command-line" key="kselftest_net_tests_reuseaddr_conflict" value="cd &ktest_dir;; ./kselftest_net_tests_reuseaddr_conflict" />
+ </test>
+</configuration>
diff --git a/tools/testing/selftests/android/vts_config_x86.xml b/tools/testing/selftests/android/vts_config_x86.xml
new file mode 100644
index 0000000..026510a
--- /dev/null
+++ b/tools/testing/selftests/android/vts_config_x86.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2023 The Android Open Source Project
+SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+-->
+<!DOCTYPE configuration [
+<!ENTITY ktest_dir "/data/selftests/x86">
+]>
+<configuration description="kselftest">
+ <option name="test-suite-tag" value="kernel-test" />
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer" />
+ <target_preparer class="com.android.tradefed.targetprep.StopServicesSetup" />
+
+ <object type="module_controller" class="com.android.tradefed.testtype.suite.module.KernelTestModuleController" >
+ <option name="arch" value="x86"/>
+ </object>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push-file" key="selftests" value="/data/selftests" />
+ <option name="skip-abi-filtering" value="true" />
+ <option name="post-push" value='chmod -R 755 /data/selftests; find /data/selftests -type f | xargs grep -l -e "bin/sh" -e "bin/bash" | xargs sed -i -e "s?/bin/echo?echo?" -i -e "s?#!/bin/sh?#!/system/bin/sh?" -i -e "s?#!/bin/bash?#!/system/bin/sh?" || echo "There were no files to process"' />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.binary.KernelTargetTest" >
+ <option name="exit-code-skip" value="4" />
+ <option name="skip-binary-check" value="true" />
+ <option name="ktap-result-parser-resolution" value="INDIVIDUAL_LEAVES" />
+ <option name="test-command-line" key="kselftest_binderfs_binderfs_test" value="cd &ktest_dir;; ./kselftest_binderfs_binderfs_test" />
+ <option name="test-command-line" key="kselftest_breakpoints_breakpoint_test" value="cd &ktest_dir;; ./kselftest_breakpoints_breakpoint_test" />
+ <option name="test-command-line" key="kselftest_capabilities_test_execve" value="cd &ktest_dir;; ./kselftest_capabilities_test_execve" />
+ <option name="test-command-line" key="kselftest_capabilities_validate_cap" value="cd &ktest_dir;; ./validate_cap 1 1 0 0" />
+ <option name="test-command-line" key="kselftest_futex_futex_requeue_pi_mismatched_ops" value="cd &ktest_dir;; ./futex_requeue_pi_mismatched_ops" />
+ <option name="test-command-line" key="kselftest_futex_futex_requeue_pi_signal_restart" value="cd &ktest_dir;; ./futex_requeue_pi_signal_restart" />
+ <option name="test-command-line" key="kselftest_futex_futex_requeue_pi" value="cd &ktest_dir;; ./futex_requeue_pi" />
+ <option name="test-command-line" key="kselftest_futex_futex_requeue" value="cd &ktest_dir;; ./futex_requeue" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait_private_mapped_file" value="cd &ktest_dir;; ./futex_wait_private_mapped_file" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait_timeout" value="cd &ktest_dir;; ./futex_wait_timeout" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait_uninitialized_heap" value="cd &ktest_dir;; ./futex_wait_uninitialized_heap" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait_wouldblock" value="cd &ktest_dir;; ./futex_wait_wouldblock" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait" value="cd &ktest_dir;; ./futex_wait" />
+ <option name="test-command-line" key="kselftest_kcmp_kcmp_test" value="cd &ktest_dir;; ./kselftest_kcmp_kcmp_test" />
+ <option name="test-command-line" key="kselftest_rtc_rtctest" value="cd &ktest_dir;; ./kselftest_rtc_rtctest" />
+ <option name="test-command-line" key="kselftest_net_tests_socket" value="cd &ktest_dir;; ./kselftest_net_tests_socket" />
+ <option name="test-command-line" key="kselftest_net_tests_psock_tpacket" value="cd &ktest_dir;; ./kselftest_net_tests_psock_tpacket" />
+ <option name="test-command-line" key="kselftest_net_tests_reuseport_dualstack" value="cd &ktest_dir;; ./kselftest_net_tests_reuseport_dualstack" />
+ <option name="test-command-line" key="kselftest_net_tests_reuseaddr_conflict" value="cd &ktest_dir;; ./kselftest_net_tests_reuseaddr_conflict" />
+ </test>
+</configuration>
diff --git a/tools/testing/selftests/android/vts_config_x86_64.xml b/tools/testing/selftests/android/vts_config_x86_64.xml
new file mode 100644
index 0000000..d065b4e
--- /dev/null
+++ b/tools/testing/selftests/android/vts_config_x86_64.xml
@@ -0,0 +1,48 @@
+<?xml version="1.0" encoding="utf-8"?>
+<!-- Copyright (C) 2025 The Android Open Source Project
+SPDX-License-Identifier: GPL-2.0 OR Apache-2.0
+-->
+<!DOCTYPE configuration [
+<!ENTITY ktest_dir "/data/selftests/x86_64">
+]>
+<configuration description="kselftest">
+ <option name="test-suite-tag" value="kernel-test" />
+ <target_preparer class="com.android.tradefed.targetprep.RootTargetPreparer" />
+ <target_preparer class="com.android.tradefed.targetprep.StopServicesSetup" />
+
+ <object type="module_controller" class="com.android.tradefed.testtype.suite.module.KernelTestModuleController" >
+ <option name="arch" value="x86_64"/>
+ </object>
+
+ <target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
+ <option name="cleanup" value="true" />
+ <option name="push-file" key="selftests" value="/data/selftests" />
+ <option name="skip-abi-filtering" value="true" />
+ <option name="post-push" value='chmod -R 755 /data/selftests; find /data/selftests -type f | xargs grep -l -e "bin/sh" -e "bin/bash" | xargs sed -i -e "s?/bin/echo?echo?" -i -e "s?#!/bin/sh?#!/system/bin/sh?" -i -e "s?#!/bin/bash?#!/system/bin/sh?" || echo "There were no files to process"' />
+ </target_preparer>
+
+ <test class="com.android.tradefed.testtype.binary.KernelTargetTest" >
+ <option name="exit-code-skip" value="4" />
+ <option name="skip-binary-check" value="true" />
+ <option name="ktap-result-parser-resolution" value="INDIVIDUAL_LEAVES" />
+ <option name="test-command-line" key="kselftest_binderfs_binderfs_test" value="cd &ktest_dir;; ./kselftest_binderfs_binderfs_test" />
+ <option name="test-command-line" key="kselftest_breakpoints_breakpoint_test" value="cd &ktest_dir;; ./kselftest_breakpoints_breakpoint_test" />
+ <option name="test-command-line" key="kselftest_capabilities_test_execve" value="cd &ktest_dir;; ./kselftest_capabilities_test_execve" />
+ <option name="test-command-line" key="kselftest_capabilities_validate_cap" value="cd &ktest_dir;; ./validate_cap 1 1 0 0" />
+ <option name="test-command-line" key="kselftest_futex_futex_requeue_pi_mismatched_ops" value="cd &ktest_dir;; ./futex_requeue_pi_mismatched_ops" />
+ <option name="test-command-line" key="kselftest_futex_futex_requeue_pi_signal_restart" value="cd &ktest_dir;; ./futex_requeue_pi_signal_restart" />
+ <option name="test-command-line" key="kselftest_futex_futex_requeue_pi" value="cd &ktest_dir;; ./futex_requeue_pi" />
+ <option name="test-command-line" key="kselftest_futex_futex_requeue" value="cd &ktest_dir;; ./futex_requeue" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait_private_mapped_file" value="cd &ktest_dir;; ./futex_wait_private_mapped_file" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait_timeout" value="cd &ktest_dir;; ./futex_wait_timeout" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait_uninitialized_heap" value="cd &ktest_dir;; ./futex_wait_uninitialized_heap" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait_wouldblock" value="cd &ktest_dir;; ./futex_wait_wouldblock" />
+ <option name="test-command-line" key="kselftest_futex_futex_wait" value="cd &ktest_dir;; ./futex_wait" />
+ <option name="test-command-line" key="kselftest_kcmp_kcmp_test" value="cd &ktest_dir;; ./kselftest_kcmp_kcmp_test" />
+ <option name="test-command-line" key="kselftest_rtc_rtctest" value="cd &ktest_dir;; ./kselftest_rtc_rtctest" />
+ <option name="test-command-line" key="kselftest_net_tests_socket" value="cd &ktest_dir;; ./kselftest_net_tests_socket" />
+ <option name="test-command-line" key="kselftest_net_tests_psock_tpacket" value="cd &ktest_dir;; ./kselftest_net_tests_psock_tpacket" />
+ <option name="test-command-line" key="kselftest_net_tests_reuseport_dualstack" value="cd &ktest_dir;; ./kselftest_net_tests_reuseport_dualstack" />
+ <option name="test-command-line" key="kselftest_net_tests_reuseaddr_conflict" value="cd &ktest_dir;; ./kselftest_net_tests_reuseaddr_conflict" />
+ </test>
+</configuration>
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 72a9ba4..d5acbeb 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -409,6 +409,7 @@
CC="$(HOSTCC)" LD="$(HOSTLD)" AR="$(HOSTAR)" \
LIBBPF_INCLUDE=$(HOST_INCLUDE_DIR) \
EXTRA_LDFLAGS='$(SAN_LDFLAGS) $(EXTRA_LDFLAGS)' \
+ HOSTPKG_CONFIG=$(PKG_CONFIG) \
OUTPUT=$(HOST_BUILD_DIR)/resolve_btfids/ BPFOBJ=$(HOST_BPFOBJ)
# Get Clang's default includes on this system, as opposed to those seen by
diff --git a/tools/testing/selftests/breakpoints/TEST_MAPPING b/tools/testing/selftests/breakpoints/TEST_MAPPING
new file mode 100644
index 0000000..d2110a3b
--- /dev/null
+++ b/tools/testing/selftests/breakpoints/TEST_MAPPING
@@ -0,0 +1,12 @@
+{
+ "presubmit": [
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_breakpoints_breakpoint_test"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tools/testing/selftests/capabilities/TEST_MAPPING b/tools/testing/selftests/capabilities/TEST_MAPPING
new file mode 100644
index 0000000..1443760
--- /dev/null
+++ b/tools/testing/selftests/capabilities/TEST_MAPPING
@@ -0,0 +1,12 @@
+{
+ "presubmit": [
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_capabilities_test_execve"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tools/testing/selftests/filesystems/binderfs/TEST_MAPPING b/tools/testing/selftests/filesystems/binderfs/TEST_MAPPING
new file mode 100644
index 0000000..ed8b47d
--- /dev/null
+++ b/tools/testing/selftests/filesystems/binderfs/TEST_MAPPING
@@ -0,0 +1,12 @@
+{
+ "presubmit": [
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_binderfs_binderfs_test"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
index a1a79a6..553c8fa 100644
--- a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
+++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
@@ -292,6 +292,11 @@ static int write_id_mapping(enum idmap_type type, pid_t pid, const char *buf,
return 0;
}
+static bool has_userns(void)
+{
+ return (access("/proc/self/ns/user", F_OK) == 0);
+}
+
static void change_userns(struct __test_metadata *_metadata, int syncfds[2])
{
int ret;
@@ -379,6 +384,9 @@ static void *binder_version_thread(void *data)
*/
TEST(binderfs_stress)
{
+ if (!has_userns())
+ SKIP(return, "%s: user namespace not supported\n", __func__);
+
int fds[1000];
int syncfds[2];
pid_t pid;
@@ -503,6 +511,8 @@ TEST(binderfs_test_privileged)
TEST(binderfs_test_unprivileged)
{
+ if (!has_userns())
+ SKIP(return, "%s: user namespace not supported\n", __func__);
int ret;
int syncfds[2];
pid_t pid;
diff --git a/tools/testing/selftests/filesystems/incfs/.gitignore b/tools/testing/selftests/filesystems/incfs/.gitignore
new file mode 100644
index 0000000..f0e3cd9
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/.gitignore
@@ -0,0 +1,3 @@
+incfs_test
+incfs_stress
+incfs_perf
diff --git a/tools/testing/selftests/filesystems/incfs/Makefile b/tools/testing/selftests/filesystems/incfs/Makefile
new file mode 100644
index 0000000..a203348
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+CFLAGS += -D_FILE_OFFSET_BITS=64 -Wall -Wno-deprecated-declarations -Werror -I../.. -I../../../../.. -fno-omit-frame-pointer -g
+LDLIBS := -llz4 -lzstd -lcrypto -lpthread
+TEST_GEN_PROGS := incfs_test incfs_stress incfs_perf
+
+include ../../lib.mk
+
+# Put after include ../../lib.mk since that changes $(TEST_GEN_PROGS)
+# Otherwise you get multiple targets, this becomes the default, and it's a mess
+EXTRA_SOURCES := utils.c
+$(TEST_GEN_PROGS) : $(EXTRA_SOURCES)
diff --git a/tools/testing/selftests/filesystems/incfs/OWNERS b/tools/testing/selftests/filesystems/incfs/OWNERS
new file mode 100644
index 0000000..f1f993f
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/OWNERS
@@ -0,0 +1,2 @@
+akailash@google.com
+paullawrence@google.com
\ No newline at end of file
diff --git a/tools/testing/selftests/filesystems/incfs/incfs_perf.c b/tools/testing/selftests/filesystems/incfs/incfs_perf.c
new file mode 100644
index 0000000..8fe83c3
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/incfs_perf.c
@@ -0,0 +1,719 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Google LLC
+ */
+#include <errno.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <lz4.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <time.h>
+#include <ctype.h>
+#include <unistd.h>
+
+#include <kselftest.h>
+
+#include "utils.h"
+
+#define err_msg(...) \
+ do { \
+ fprintf(stderr, "%s: (%d) ", TAG, __LINE__); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, " (%s)\n", strerror(errno)); \
+ } while (false)
+
+#define TAG "incfs_perf"
+
+struct options {
+ int blocks; /* -b number of diff block sizes */
+ bool no_cleanup; /* -c don't clean up after */
+ const char *test_dir; /* -d working directory */
+ const char *file_types; /* -f sScCvV */
+ bool no_native; /* -n don't test native files */
+ bool no_random; /* -r don't do random reads*/
+ bool no_linear; /* -R random reads only */
+ size_t size; /* -s file size as power of 2 */
+ int tries; /* -t times to run test*/
+};
+
+enum flags {
+ SHUFFLE = 1,
+ COMPRESS = 2,
+ VERIFY = 4,
+ LAST_FLAG = 8,
+};
+
+void print_help(void)
+{
+ puts(
+ "incfs_perf. Performance test tool for incfs\n"
+ "\tTests read performance of incfs by creating files of various types\n"
+ "\tflushing caches and then reading them back.\n"
+ "\tEach file is read with different block sizes and average\n"
+ "\tthroughput in megabytes/second and memory usage are reported for\n"
+ "\teach block size\n"
+ "\tNative files are tested for comparison\n"
+ "\tNative files are created in native folder, incfs files are created\n"
+ "\tin src folder which is mounted on dst folder\n"
+ "\n"
+ "\t-bn (default 8) number of different block sizes, starting at 4096\n"
+ "\t and doubling\n"
+ "\t-c don't Clean up - leave files and mount point\n"
+ "\t-d dir create directories in dir\n"
+ "\t-fs|Sc|Cv|V restrict which files are created.\n"
+ "\t s blocks not shuffled, S blocks shuffled\n"
+ "\t c blocks not compress, C blocks compressed\n"
+ "\t v files not verified, V files verified\n"
+ "\t If a letter is omitted, both options are tested\n"
+ "\t If no letter are given, incfs is not tested\n"
+ "\t-n Don't test native files\n"
+ "\t-r No random reads (sequential only)\n"
+ "\t-R Random reads only (no sequential)\n"
+ "\t-sn (default 30)File size as power of 2\n"
+ "\t-tn (default 5) Number of tries per file. Results are averaged\n"
+ );
+}
+
+int parse_options(int argc, char *const *argv, struct options *options)
+{
+ signed char c;
+
+ /* Set defaults here */
+ *options = (struct options){
+ .blocks = 8,
+ .test_dir = ".",
+ .tries = 5,
+ .size = 30,
+ };
+
+ /* Load options from command line here */
+ while ((c = getopt(argc, argv, "b:cd:f::hnrRs:t:")) != -1) {
+ switch (c) {
+ case 'b':
+ options->blocks = strtol(optarg, NULL, 10);
+ break;
+
+ case 'c':
+ options->no_cleanup = true;
+ break;
+
+ case 'd':
+ options->test_dir = optarg;
+ break;
+
+ case 'f':
+ if (optarg)
+ options->file_types = optarg;
+ else
+ options->file_types = "sS";
+ break;
+
+ case 'h':
+ print_help();
+ exit(0);
+
+ case 'n':
+ options->no_native = true;
+ break;
+
+ case 'r':
+ options->no_random = true;
+ break;
+
+ case 'R':
+ options->no_linear = true;
+ break;
+
+ case 's':
+ options->size = strtol(optarg, NULL, 10);
+ break;
+
+ case 't':
+ options->tries = strtol(optarg, NULL, 10);
+ break;
+
+ default:
+ print_help();
+ return -EINVAL;
+ }
+ }
+
+ options->size = 1L << options->size;
+
+ return 0;
+}
+
+void shuffle(size_t *buffer, size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; ++i) {
+ size_t j = random() * (size - i - 1) / RAND_MAX;
+ size_t temp = buffer[i];
+
+ buffer[i] = buffer[j];
+ buffer[j] = temp;
+ }
+}
+
+int get_free_memory(void)
+{
+ FILE *meminfo = fopen("/proc/meminfo", "re");
+ char field[256];
+ char value[256] = {};
+
+ if (!meminfo)
+ return -ENOENT;
+
+ while (fscanf(meminfo, "%[^:]: %s kB\n", field, value) == 2) {
+ if (!strcmp(field, "MemFree"))
+ break;
+ *value = 0;
+ }
+
+ fclose(meminfo);
+
+ if (!*value)
+ return -ENOENT;
+
+ return strtol(value, NULL, 10);
+}
+
+int write_data(int cmd_fd, int dir_fd, const char *name, size_t size, int flags)
+{
+ int fd = openat(dir_fd, name, O_RDWR | O_CLOEXEC);
+ struct incfs_permit_fill permit_fill = {
+ .file_descriptor = fd,
+ };
+ int block_count = 1 + (size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+ size_t *blocks = malloc(sizeof(size_t) * block_count);
+ int error = 0;
+ size_t i;
+ uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+ uint8_t compressed_data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+ struct incfs_fill_block fill_block = {
+ .compression = COMPRESSION_NONE,
+ .data_len = sizeof(data),
+ .data = ptr_to_u64(data),
+ };
+
+ if (!blocks) {
+ err_msg("Out of memory");
+ error = -errno;
+ goto out;
+ }
+
+ if (fd == -1) {
+ err_msg("Could not open file for writing %s", name);
+ error = -errno;
+ goto out;
+ }
+
+ if (ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+ err_msg("Failed to call PERMIT_FILL");
+ error = -errno;
+ goto out;
+ }
+
+ for (i = 0; i < block_count; ++i)
+ blocks[i] = i;
+
+ if (flags & SHUFFLE)
+ shuffle(blocks, block_count);
+
+ if (flags & COMPRESS) {
+ size_t comp_size = LZ4_compress_default(
+ (char *)data, (char *)compressed_data, sizeof(data),
+ ARRAY_SIZE(compressed_data));
+
+ if (comp_size <= 0) {
+ error = -EBADMSG;
+ goto out;
+ }
+ fill_block.compression = COMPRESSION_LZ4;
+ fill_block.data = ptr_to_u64(compressed_data);
+ fill_block.data_len = comp_size;
+ }
+
+ for (i = 0; i < block_count; ++i) {
+ struct incfs_fill_blocks fill_blocks = {
+ .count = 1,
+ .fill_blocks = ptr_to_u64(&fill_block),
+ };
+
+ fill_block.block_index = blocks[i];
+ int written = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+
+ if (written != 1) {
+ error = -errno;
+ err_msg("Failed to write block %lu in file %s", i,
+ name);
+ break;
+ }
+ }
+
+out:
+ free(blocks);
+ close(fd);
+ sync();
+ return error;
+}
+
+int measure_read_throughput_internal(const char *tag, int dir, const char *name,
+ const struct options *options, bool random)
+{
+ int block;
+
+ if (random)
+ printf("%32s(random)", tag);
+ else
+ printf("%40s", tag);
+
+ for (block = 0; block < options->blocks; ++block) {
+ size_t buffer_size;
+ char *buffer;
+ int try;
+ double time = 0;
+ double throughput;
+ int memory = 0;
+
+ buffer_size = 1 << (block + 12);
+ buffer = malloc(buffer_size);
+
+ for (try = 0; try < options->tries; ++try) {
+ int err;
+ struct timespec start_time, end_time;
+ off_t i;
+ int fd;
+ size_t offsets_size = options->size / buffer_size;
+ size_t *offsets =
+ malloc(offsets_size * sizeof(*offsets));
+ int start_memory, end_memory;
+
+ if (!offsets) {
+ err_msg("Not enough memory");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < offsets_size; ++i)
+ offsets[i] = i * buffer_size;
+
+ if (random)
+ shuffle(offsets, offsets_size);
+
+ err = drop_caches();
+ if (err) {
+ err_msg("Failed to drop caches");
+ return err;
+ }
+
+ start_memory = get_free_memory();
+ if (start_memory < 0) {
+ err_msg("Failed to get start memory");
+ return start_memory;
+ }
+
+ fd = openat(dir, name, O_RDONLY | O_CLOEXEC);
+ if (fd == -1) {
+ err_msg("Failed to open file");
+ return err;
+ }
+
+ err = clock_gettime(CLOCK_MONOTONIC, &start_time);
+ if (err) {
+ err_msg("Failed to get start time");
+ return err;
+ }
+
+ for (i = 0; i < offsets_size; ++i)
+ if (pread(fd, buffer, buffer_size,
+ offsets[i]) != buffer_size) {
+ err_msg("Failed to read file");
+ err = -errno;
+ goto fail;
+ }
+
+ err = clock_gettime(CLOCK_MONOTONIC, &end_time);
+ if (err) {
+ err_msg("Failed to get start time");
+ goto fail;
+ }
+
+ end_memory = get_free_memory();
+ if (end_memory < 0) {
+ err_msg("Failed to get end memory");
+ return end_memory;
+ }
+
+ time += end_time.tv_sec - start_time.tv_sec;
+ time += (end_time.tv_nsec - start_time.tv_nsec) / 1e9;
+
+ close(fd);
+ fd = -1;
+ memory += start_memory - end_memory;
+
+fail:
+ free(offsets);
+ close(fd);
+ if (err)
+ return err;
+ }
+
+ throughput = options->size * options->tries / time;
+ printf("%10.3e %10d", throughput, memory / options->tries);
+ free(buffer);
+ }
+
+ printf("\n");
+ return 0;
+}
+
+int measure_read_throughput(const char *tag, int dir, const char *name,
+ const struct options *options)
+{
+ int err = 0;
+
+ if (!options->no_linear)
+ err = measure_read_throughput_internal(tag, dir, name, options,
+ false);
+
+ if (!err && !options->no_random)
+ err = measure_read_throughput_internal(tag, dir, name, options,
+ true);
+ return err;
+}
+
+int test_native_file(int dir, const struct options *options)
+{
+ const char *name = "file";
+ int fd;
+ char buffer[4096] = {};
+ off_t i;
+ int err;
+
+ fd = openat(dir, name, O_CREAT | O_WRONLY | O_CLOEXEC, 0600);
+ if (fd == -1) {
+ err_msg("Could not open native file");
+ return -errno;
+ }
+
+ for (i = 0; i < options->size; i += sizeof(buffer))
+ if (pwrite(fd, buffer, sizeof(buffer), i) != sizeof(buffer)) {
+ err_msg("Failed to write file");
+ err = -errno;
+ goto fail;
+ }
+
+ close(fd);
+ sync();
+ fd = -1;
+
+ err = measure_read_throughput("native", dir, name, options);
+
+fail:
+ close(fd);
+ return err;
+}
+
+struct hash_block {
+ char data[INCFS_DATA_FILE_BLOCK_SIZE];
+};
+
+static struct hash_block *build_mtree(size_t size, char *root_hash,
+ int *mtree_block_count)
+{
+ char data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+ const int digest_size = SHA256_DIGEST_SIZE;
+ const int hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / digest_size;
+ int block_count = 0;
+ int hash_block_count = 0;
+ int total_tree_block_count = 0;
+ int tree_lvl_index[INCFS_MAX_MTREE_LEVELS] = {};
+ int tree_lvl_count[INCFS_MAX_MTREE_LEVELS] = {};
+ int levels_count = 0;
+ int i, level;
+ struct hash_block *mtree;
+
+ if (size == 0)
+ return 0;
+
+ block_count = 1 + (size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+ hash_block_count = block_count;
+ for (i = 0; hash_block_count > 1; i++) {
+ hash_block_count = (hash_block_count + hash_per_block - 1) /
+ hash_per_block;
+ tree_lvl_count[i] = hash_block_count;
+ total_tree_block_count += hash_block_count;
+ }
+ levels_count = i;
+
+ for (i = 0; i < levels_count; i++) {
+ int prev_lvl_base = (i == 0) ? total_tree_block_count :
+ tree_lvl_index[i - 1];
+
+ tree_lvl_index[i] = prev_lvl_base - tree_lvl_count[i];
+ }
+
+ *mtree_block_count = total_tree_block_count;
+ mtree = calloc(total_tree_block_count, sizeof(*mtree));
+ /* Build level 0 hashes. */
+ for (i = 0; i < block_count; i++) {
+ int block_index = tree_lvl_index[0] + i / hash_per_block;
+ int block_off = (i % hash_per_block) * digest_size;
+ char *hash_ptr = mtree[block_index].data + block_off;
+
+ sha256(data, INCFS_DATA_FILE_BLOCK_SIZE, hash_ptr);
+ }
+
+ /* Build higher levels of hash tree. */
+ for (level = 1; level < levels_count; level++) {
+ int prev_lvl_base = tree_lvl_index[level - 1];
+ int prev_lvl_count = tree_lvl_count[level - 1];
+
+ for (i = 0; i < prev_lvl_count; i++) {
+ int block_index =
+ i / hash_per_block + tree_lvl_index[level];
+ int block_off = (i % hash_per_block) * digest_size;
+ char *hash_ptr = mtree[block_index].data + block_off;
+
+ sha256(mtree[i + prev_lvl_base].data,
+ INCFS_DATA_FILE_BLOCK_SIZE, hash_ptr);
+ }
+ }
+
+ /* Calculate root hash from the top block */
+ sha256(mtree[0].data, INCFS_DATA_FILE_BLOCK_SIZE, root_hash);
+
+ return mtree;
+}
+
+static int load_hash_tree(int cmd_fd, int dir, const char *name,
+ struct hash_block *mtree, int mtree_block_count)
+{
+ int err;
+ int i;
+ int fd;
+ struct incfs_fill_block *fill_block_array =
+ calloc(mtree_block_count, sizeof(struct incfs_fill_block));
+ struct incfs_fill_blocks fill_blocks = {
+ .count = mtree_block_count,
+ .fill_blocks = ptr_to_u64(fill_block_array),
+ };
+ struct incfs_permit_fill permit_fill;
+
+ if (!fill_block_array)
+ return -ENOMEM;
+
+ for (i = 0; i < fill_blocks.count; i++) {
+ fill_block_array[i] = (struct incfs_fill_block){
+ .block_index = i,
+ .data_len = INCFS_DATA_FILE_BLOCK_SIZE,
+ .data = ptr_to_u64(mtree[i].data),
+ .flags = INCFS_BLOCK_FLAGS_HASH
+ };
+ }
+
+ fd = openat(dir, name, O_RDONLY | O_CLOEXEC);
+ if (fd < 0) {
+ err = errno;
+ goto failure;
+ }
+
+ permit_fill.file_descriptor = fd;
+ if (ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+ err_msg("Failed to call PERMIT_FILL");
+ err = -errno;
+ goto failure;
+ }
+
+ err = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+ close(fd);
+ if (err < fill_blocks.count)
+ err = errno;
+ else
+ err = 0;
+
+failure:
+ free(fill_block_array);
+ return err;
+}
+
+int test_incfs_file(int dst_dir, const struct options *options, int flags)
+{
+ int cmd_file = openat(dst_dir, INCFS_PENDING_READS_FILENAME,
+ O_RDONLY | O_CLOEXEC);
+ int err;
+ char name[4];
+ incfs_uuid_t id;
+ char tag[256];
+
+ snprintf(name, sizeof(name), "%c%c%c",
+ flags & SHUFFLE ? 'S' : 's',
+ flags & COMPRESS ? 'C' : 'c',
+ flags & VERIFY ? 'V' : 'v');
+
+ if (cmd_file == -1) {
+ err_msg("Could not open command file");
+ return -errno;
+ }
+
+ if (flags & VERIFY) {
+ char root_hash[INCFS_MAX_HASH_SIZE];
+ int mtree_block_count;
+ struct hash_block *mtree = build_mtree(options->size, root_hash,
+ &mtree_block_count);
+
+ if (!mtree) {
+ err_msg("Failed to build hash tree");
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ err = crypto_emit_file(cmd_file, NULL, name, &id, options->size,
+ root_hash, "add_data");
+
+ if (!err)
+ err = load_hash_tree(cmd_file, dst_dir, name, mtree,
+ mtree_block_count);
+
+ free(mtree);
+ } else
+ err = emit_file(cmd_file, NULL, name, &id, options->size, NULL);
+
+ if (err) {
+ err_msg("Failed to create file %s", name);
+ goto fail;
+ }
+
+ if (write_data(cmd_file, dst_dir, name, options->size, flags))
+ goto fail;
+
+ snprintf(tag, sizeof(tag), "incfs%s%s%s",
+ flags & SHUFFLE ? "(shuffle)" : "",
+ flags & COMPRESS ? "(compress)" : "",
+ flags & VERIFY ? "(verify)" : "");
+
+ err = measure_read_throughput(tag, dst_dir, name, options);
+
+fail:
+ close(cmd_file);
+ return err;
+}
+
+bool skip(struct options const *options, int flag, char c)
+{
+ if (!options->file_types)
+ return false;
+
+ if (flag && strchr(options->file_types, tolower(c)))
+ return true;
+
+ if (!flag && strchr(options->file_types, toupper(c)))
+ return true;
+
+ return false;
+}
+
+int main(int argc, char *const *argv)
+{
+ struct options options;
+ int err;
+ const char *native_dir = "native";
+ const char *src_dir = "src";
+ const char *dst_dir = "dst";
+ int native_dir_fd = -1;
+ int src_dir_fd = -1;
+ int dst_dir_fd = -1;
+ int block;
+ int flags;
+
+ err = parse_options(argc, argv, &options);
+ if (err)
+ return err;
+
+ err = chdir(options.test_dir);
+ if (err) {
+ err_msg("Failed to change to %s", options.test_dir);
+ return -errno;
+ }
+
+ /* Clean up any interrupted previous runs */
+ while (!umount(dst_dir))
+ ;
+
+ err = remove_dir(native_dir) || remove_dir(src_dir) ||
+ remove_dir(dst_dir);
+ if (err)
+ return err;
+
+ err = mkdir(native_dir, 0700);
+ if (err) {
+ err_msg("Failed to make directory %s", src_dir);
+ err = -errno;
+ goto cleanup;
+ }
+
+ err = mkdir(src_dir, 0700);
+ if (err) {
+ err_msg("Failed to make directory %s", src_dir);
+ err = -errno;
+ goto cleanup;
+ }
+
+ err = mkdir(dst_dir, 0700);
+ if (err) {
+ err_msg("Failed to make directory %s", src_dir);
+ err = -errno;
+ goto cleanup;
+ }
+
+ err = mount_fs_opt(dst_dir, src_dir, "readahead=0,rlog_pages=0", 0);
+ if (err) {
+ err_msg("Failed to mount incfs");
+ goto cleanup;
+ }
+
+ native_dir_fd = open(native_dir, O_RDONLY | O_CLOEXEC);
+ src_dir_fd = open(src_dir, O_RDONLY | O_CLOEXEC);
+ dst_dir_fd = open(dst_dir, O_RDONLY | O_CLOEXEC);
+ if (native_dir_fd == -1 || src_dir_fd == -1 || dst_dir_fd == -1) {
+ err_msg("Failed to open native, src or dst dir");
+ err = -errno;
+ goto cleanup;
+ }
+
+ printf("%40s", "");
+ for (block = 0; block < options.blocks; ++block)
+ printf("%21d", 1 << (block + 12));
+ printf("\n");
+
+ if (!err && !options.no_native)
+ err = test_native_file(native_dir_fd, &options);
+
+ for (flags = 0; flags < LAST_FLAG && !err; ++flags) {
+ if (skip(&options, flags & SHUFFLE, 's') ||
+ skip(&options, flags & COMPRESS, 'c') ||
+ skip(&options, flags & VERIFY, 'v'))
+ continue;
+ err = test_incfs_file(dst_dir_fd, &options, flags);
+ }
+
+cleanup:
+ close(native_dir_fd);
+ close(src_dir_fd);
+ close(dst_dir_fd);
+ if (!options.no_cleanup) {
+ umount(dst_dir);
+ remove_dir(native_dir);
+ remove_dir(dst_dir);
+ remove_dir(src_dir);
+ }
+
+ return err;
+}
diff --git a/tools/testing/selftests/filesystems/incfs/incfs_stress.c b/tools/testing/selftests/filesystems/incfs/incfs_stress.c
new file mode 100644
index 0000000..a1d4917
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/incfs_stress.c
@@ -0,0 +1,322 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2020 Google LLC
+ */
+#include <errno.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <pthread.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "utils.h"
+
+#define err_msg(...) \
+ do { \
+ fprintf(stderr, "%s: (%d) ", TAG, __LINE__); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, " (%s)\n", strerror(errno)); \
+ } while (false)
+
+#define TAG "incfs_stress"
+
+struct options {
+ bool no_cleanup; /* -c */
+ const char *test_dir; /* -d */
+ unsigned int rng_seed; /* -g */
+ int num_reads; /* -n */
+ int readers; /* -r */
+ int size; /* -s */
+ int timeout; /* -t */
+};
+
+struct read_data {
+ const char *filename;
+ int dir_fd;
+ size_t filesize;
+ int num_reads;
+ unsigned int rng_seed;
+};
+
+int cancel_threads;
+
+int parse_options(int argc, char *const *argv, struct options *options)
+{
+ signed char c;
+
+ /* Set defaults here */
+ *options = (struct options){
+ .test_dir = ".",
+ .num_reads = 1000,
+ .readers = 10,
+ .size = 10,
+ };
+
+ /* Load options from command line here */
+ while ((c = getopt(argc, argv, "cd:g:n:r:s:t:")) != -1) {
+ switch (c) {
+ case 'c':
+ options->no_cleanup = true;
+ break;
+
+ case 'd':
+ options->test_dir = optarg;
+ break;
+
+ case 'g':
+ options->rng_seed = strtol(optarg, NULL, 10);
+ break;
+
+ case 'n':
+ options->num_reads = strtol(optarg, NULL, 10);
+ break;
+
+ case 'r':
+ options->readers = strtol(optarg, NULL, 10);
+ break;
+
+ case 's':
+ options->size = strtol(optarg, NULL, 10);
+ break;
+
+ case 't':
+ options->timeout = strtol(optarg, NULL, 10);
+ break;
+ }
+ }
+
+ return 0;
+}
+
+void *reader(void *data)
+{
+ struct read_data *read_data = (struct read_data *)data;
+ int i;
+ int fd = -1;
+ void *buffer = malloc(read_data->filesize);
+
+ if (!buffer) {
+ err_msg("Failed to alloc read buffer");
+ goto out;
+ }
+
+ fd = openat(read_data->dir_fd, read_data->filename,
+ O_RDONLY | O_CLOEXEC);
+ if (fd == -1) {
+ err_msg("Failed to open file");
+ goto out;
+ }
+
+ for (i = 0; i < read_data->num_reads && !cancel_threads; ++i) {
+ off_t offset = rnd(read_data->filesize, &read_data->rng_seed);
+ size_t count =
+ rnd(read_data->filesize - offset, &read_data->rng_seed);
+ ssize_t err = pread(fd, buffer, count, offset);
+
+ if (err != count)
+ err_msg("failed to read with value %lu", err);
+ }
+
+out:
+ close(fd);
+ free(read_data);
+ free(buffer);
+ return NULL;
+}
+
+int write_data(int cmd_fd, int dir_fd, const char *name, size_t size)
+{
+ int fd = openat(dir_fd, name, O_RDWR | O_CLOEXEC);
+ struct incfs_permit_fill permit_fill = {
+ .file_descriptor = fd,
+ };
+ int error = 0;
+ int i;
+ int block_count = 1 + (size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+
+ if (fd == -1) {
+ err_msg("Could not open file for writing %s", name);
+ return -errno;
+ }
+
+ if (ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+ err_msg("Failed to call PERMIT_FILL");
+ error = -errno;
+ goto out;
+ }
+
+ for (i = 0; i < block_count; ++i) {
+ uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+ size_t block_size =
+ size > i * INCFS_DATA_FILE_BLOCK_SIZE ?
+ INCFS_DATA_FILE_BLOCK_SIZE :
+ size - (i * INCFS_DATA_FILE_BLOCK_SIZE);
+ struct incfs_fill_block fill_block = {
+ .compression = COMPRESSION_NONE,
+ .block_index = i,
+ .data_len = block_size,
+ .data = ptr_to_u64(data),
+ };
+ struct incfs_fill_blocks fill_blocks = {
+ .count = 1,
+ .fill_blocks = ptr_to_u64(&fill_block),
+ };
+ int written = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+
+ if (written != 1) {
+ error = -errno;
+ err_msg("Failed to write block %d in file %s", i, name);
+ break;
+ }
+ }
+out:
+ close(fd);
+ return error;
+}
+
+int test_files(int src_dir, int dst_dir, struct options const *options)
+{
+ unsigned int seed = options->rng_seed;
+ int cmd_file = openat(dst_dir, INCFS_PENDING_READS_FILENAME,
+ O_RDONLY | O_CLOEXEC);
+ int err;
+ const char *name = "001";
+ incfs_uuid_t id;
+ size_t size;
+ int i;
+ pthread_t *threads = NULL;
+
+ size = 1 << (rnd(options->size, &seed) + 12);
+ size += rnd(size, &seed);
+
+ if (cmd_file == -1) {
+ err_msg("Could not open command file");
+ return -errno;
+ }
+
+ err = emit_file(cmd_file, NULL, name, &id, size, NULL);
+ if (err) {
+ err_msg("Failed to create file %s", name);
+ return err;
+ }
+
+ threads = malloc(sizeof(pthread_t) * options->readers);
+ if (!threads) {
+ err_msg("Could not allocate memory for threads");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < options->readers; ++i) {
+ struct read_data *read_data = malloc(sizeof(*read_data));
+
+ if (!read_data) {
+ err_msg("Failed to allocate read_data");
+ err = -ENOMEM;
+ break;
+ }
+
+ *read_data = (struct read_data){
+ .filename = name,
+ .dir_fd = dst_dir,
+ .filesize = size,
+ .num_reads = options->num_reads,
+ .rng_seed = seed,
+ };
+
+ rnd(0, &seed);
+
+ err = pthread_create(threads + i, 0, reader, read_data);
+ if (err) {
+ err_msg("Failed to create thread");
+ free(read_data);
+ break;
+ }
+ }
+
+ if (err)
+ cancel_threads = 1;
+ else
+ err = write_data(cmd_file, dst_dir, name, size);
+
+ for (; i > 0; --i) {
+ if (pthread_join(threads[i - 1], NULL)) {
+ err_msg("FATAL: failed to join thread");
+ exit(-errno);
+ }
+ }
+
+ free(threads);
+ close(cmd_file);
+ return err;
+}
+
+int main(int argc, char *const *argv)
+{
+ struct options options;
+ int err;
+ const char *src_dir = "src";
+ const char *dst_dir = "dst";
+ int src_dir_fd = -1;
+ int dst_dir_fd = -1;
+
+ err = parse_options(argc, argv, &options);
+ if (err)
+ return err;
+
+ err = chdir(options.test_dir);
+ if (err) {
+ err_msg("Failed to change to %s", options.test_dir);
+ return -errno;
+ }
+
+ err = remove_dir(src_dir) || remove_dir(dst_dir);
+ if (err)
+ return err;
+
+ err = mkdir(src_dir, 0700);
+ if (err) {
+ err_msg("Failed to make directory %s", src_dir);
+ err = -errno;
+ goto cleanup;
+ }
+
+ err = mkdir(dst_dir, 0700);
+ if (err) {
+ err_msg("Failed to make directory %s", src_dir);
+ err = -errno;
+ goto cleanup;
+ }
+
+ err = mount_fs(dst_dir, src_dir, options.timeout);
+ if (err) {
+ err_msg("Failed to mount incfs");
+ goto cleanup;
+ }
+
+ src_dir_fd = open(src_dir, O_RDONLY | O_CLOEXEC);
+ dst_dir_fd = open(dst_dir, O_RDONLY | O_CLOEXEC);
+ if (src_dir_fd == -1 || dst_dir_fd == -1) {
+ err_msg("Failed to open src or dst dir");
+ err = -errno;
+ goto cleanup;
+ }
+
+ err = test_files(src_dir_fd, dst_dir_fd, &options);
+
+cleanup:
+ close(src_dir_fd);
+ close(dst_dir_fd);
+ if (!options.no_cleanup) {
+ umount(dst_dir);
+ remove_dir(dst_dir);
+ remove_dir(src_dir);
+ }
+
+ return err;
+}
diff --git a/tools/testing/selftests/filesystems/incfs/incfs_test.c b/tools/testing/selftests/filesystems/incfs/incfs_test.c
new file mode 100644
index 0000000..bc80c6c
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/incfs_test.c
@@ -0,0 +1,4809 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC
+ */
+#define _GNU_SOURCE
+
+#include <alloca.h>
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <lz4.h>
+#include <poll.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <zstd.h>
+
+#include <sys/inotify.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/xattr.h>
+#include <sys/statvfs.h>
+
+#include <linux/random.h>
+#include <linux/stat.h>
+#include <linux/unistd.h>
+
+#include <openssl/pem.h>
+#include <openssl/x509.h>
+
+#include <kselftest.h>
+#include <include/uapi/linux/fsverity.h>
+
+#include "utils.h"
+
+/* Can't include uapi/linux/fs.h because it clashes with mount.h */
+#define FS_IOC_GETFLAGS _IOR('f', 1, long)
+#define FS_VERITY_FL 0x00100000 /* Verity protected inode */
+
+#define TEST_SKIP 2
+#define TEST_FAILURE 1
+#define TEST_SUCCESS 0
+
+#define INCFS_ROOT_INODE 0
+
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define le16_to_cpu(x) (x)
+#define le32_to_cpu(x) (x)
+#define le64_to_cpu(x) (x)
+#else
+#error Big endian not supported!
+#endif
+
+struct {
+ int file;
+ int test;
+ bool verbose;
+} options;
+
+#define TESTCOND(condition) \
+ do { \
+ if (!(condition)) { \
+ ksft_print_msg("%s failed %d\n", \
+ __func__, __LINE__); \
+ goto out; \
+ } else if (options.verbose) \
+ ksft_print_msg("%s succeeded %d\n", \
+ __func__, __LINE__); \
+ } while (false)
+
+#define TEST(statement, condition) \
+ do { \
+ statement; \
+ TESTCOND(condition); \
+ } while (false)
+
+#define TESTEQUAL(statement, res) \
+ TESTCOND((statement) == (res))
+
+#define TESTNE(statement, res) \
+ TESTCOND((statement) != (res))
+
+#define TESTSYSCALL(statement) \
+ do { \
+ int res = statement; \
+ \
+ if (res) \
+ ksft_print_msg("Failed: %s (%d)\n", \
+ strerror(errno), errno); \
+ TESTEQUAL(res, 0); \
+ } while (false)
+
+void print_bytes(const void *data, size_t size)
+{
+ const uint8_t *bytes = data;
+ int i;
+
+ for (i = 0; i < size; ++i) {
+ if (i % 0x10 == 0)
+ printf("%08x:", i);
+ printf("%02x ", (unsigned int) bytes[i]);
+ if (i % 0x10 == 0x0f)
+ printf("\n");
+ }
+
+ if (i % 0x10 != 0)
+ printf("\n");
+}
+
+struct hash_block {
+ char data[INCFS_DATA_FILE_BLOCK_SIZE];
+};
+
+struct test_signature {
+ void *data;
+ size_t size;
+
+ char add_data[100];
+ size_t add_data_size;
+};
+
+struct test_file {
+ int index;
+ incfs_uuid_t id;
+ char *name;
+ off_t size;
+ char root_hash[INCFS_MAX_HASH_SIZE];
+ struct hash_block *mtree;
+ int mtree_block_count;
+ struct test_signature sig;
+ unsigned char *verity_sig;
+ size_t verity_sig_size;
+};
+
+struct test_files_set {
+ struct test_file *files;
+ int files_count;
+};
+
+struct linux_dirent64 {
+ uint64_t d_ino;
+ int64_t d_off;
+ unsigned short d_reclen;
+ unsigned char d_type;
+ char d_name[0];
+} __packed;
+
+struct test_files_set get_test_files_set(void)
+{
+ static struct test_file files[] = {
+ { .index = 0, .name = "file_one_byte", .size = 1 },
+ { .index = 1,
+ .name = "file_one_block",
+ .size = INCFS_DATA_FILE_BLOCK_SIZE },
+ { .index = 2,
+ .name = "file_one_and_a_half_blocks",
+ .size = INCFS_DATA_FILE_BLOCK_SIZE +
+ INCFS_DATA_FILE_BLOCK_SIZE / 2 },
+ { .index = 3,
+ .name = "file_three",
+ .size = 300 * INCFS_DATA_FILE_BLOCK_SIZE + 3 },
+ { .index = 4,
+ .name = "file_four",
+ .size = 400 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+ { .index = 5,
+ .name = "file_five",
+ .size = 500 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+ { .index = 6,
+ .name = "file_six",
+ .size = 600 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+ { .index = 7,
+ .name = "file_seven",
+ .size = 700 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+ { .index = 8,
+ .name = "file_eight",
+ .size = 800 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+ { .index = 9,
+ .name = "file_nine",
+ .size = 900 * INCFS_DATA_FILE_BLOCK_SIZE + 7 },
+ { .index = 10, .name = "file_big", .size = 500 * 1024 * 1024 }
+ };
+
+ if (options.file)
+ return (struct test_files_set) {
+ .files = files + options.file - 1,
+ .files_count = 1,
+ };
+
+ return (struct test_files_set){ .files = files,
+ .files_count = ARRAY_SIZE(files) };
+}
+
+struct test_files_set get_small_test_files_set(void)
+{
+ static struct test_file files[] = {
+ { .index = 0, .name = "file_one_byte", .size = 1 },
+ { .index = 1,
+ .name = "file_one_block",
+ .size = INCFS_DATA_FILE_BLOCK_SIZE },
+ { .index = 2,
+ .name = "file_one_and_a_half_blocks",
+ .size = INCFS_DATA_FILE_BLOCK_SIZE +
+ INCFS_DATA_FILE_BLOCK_SIZE / 2 },
+ { .index = 3,
+ .name = "file_three",
+ .size = 300 * INCFS_DATA_FILE_BLOCK_SIZE + 3 },
+ { .index = 4,
+ .name = "file_four",
+ .size = 400 * INCFS_DATA_FILE_BLOCK_SIZE + 7 }
+ };
+ return (struct test_files_set){ .files = files,
+ .files_count = ARRAY_SIZE(files) };
+}
+
+static int get_file_block_seed(int file, int block)
+{
+ return 7919 * file + block;
+}
+
+static loff_t min(loff_t a, loff_t b)
+{
+ return a < b ? a : b;
+}
+
+static int ilog2(size_t n)
+{
+ int l = 0;
+
+ while (n > 1) {
+ ++l;
+ n >>= 1;
+ }
+ return l;
+}
+
+static pid_t flush_and_fork(void)
+{
+ fflush(stdout);
+ return fork();
+}
+
+static void print_error(char *msg)
+{
+ ksft_print_msg("%s: %s\n", msg, strerror(errno));
+}
+
+static int wait_for_process(pid_t pid)
+{
+ int status;
+ int wait_res;
+
+ wait_res = waitpid(pid, &status, 0);
+ if (wait_res <= 0) {
+ print_error("Can't wait for the child");
+ return -EINVAL;
+ }
+ if (!WIFEXITED(status)) {
+ ksft_print_msg("Unexpected child status pid=%d\n", pid);
+ return -EINVAL;
+ }
+ status = WEXITSTATUS(status);
+ if (status != 0)
+ return status;
+ return 0;
+}
+
+static void rnd_buf(uint8_t *data, size_t len, unsigned int seed)
+{
+ int i;
+
+ for (i = 0; i < len; i++) {
+ seed = 1103515245 * seed + 12345;
+ data[i] = (uint8_t)(seed >> (i % 13));
+ }
+}
+
+char *bin2hex(char *dst, const void *src, size_t count)
+{
+ const unsigned char *_src = src;
+ static const char hex_asc[] = "0123456789abcdef";
+
+ while (count--) {
+ unsigned char x = *_src++;
+
+ *dst++ = hex_asc[(x & 0xf0) >> 4];
+ *dst++ = hex_asc[(x & 0x0f)];
+ }
+ *dst = 0;
+ return dst;
+}
+
+static char *get_index_filename(const char *mnt_dir, incfs_uuid_t id)
+{
+ char path[FILENAME_MAX];
+ char str_id[1 + 2 * sizeof(id)];
+
+ bin2hex(str_id, id.bytes, sizeof(id.bytes));
+ snprintf(path, ARRAY_SIZE(path), "%s/.index/%s", mnt_dir, str_id);
+
+ return strdup(path);
+}
+
+static char *get_incomplete_filename(const char *mnt_dir, incfs_uuid_t id)
+{
+ char path[FILENAME_MAX];
+ char str_id[1 + 2 * sizeof(id)];
+
+ bin2hex(str_id, id.bytes, sizeof(id.bytes));
+ snprintf(path, ARRAY_SIZE(path), "%s/.incomplete/%s", mnt_dir, str_id);
+
+ return strdup(path);
+}
+
+int open_file_by_id(const char *mnt_dir, incfs_uuid_t id, bool use_ioctl)
+{
+ char *path = get_index_filename(mnt_dir, id);
+ int cmd_fd = open_commands_file(mnt_dir);
+ int fd = open(path, O_RDWR | O_CLOEXEC);
+ struct incfs_permit_fill permit_fill = {
+ .file_descriptor = fd,
+ };
+ int error = 0;
+
+ if (fd < 0) {
+ print_error("Can't open file by id.");
+ error = -errno;
+ goto out;
+ }
+
+ if (use_ioctl && ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+ print_error("Failed to call PERMIT_FILL");
+ error = -errno;
+ goto out;
+ }
+
+ if (ioctl(fd, INCFS_IOC_PERMIT_FILL, &permit_fill) != -1) {
+ print_error(
+ "Successfully called PERMIT_FILL on non pending_read file");
+ return -errno;
+ goto out;
+ }
+
+out:
+ free(path);
+ close(cmd_fd);
+
+ if (error) {
+ close(fd);
+ return error;
+ }
+
+ return fd;
+}
+
+int get_file_attr(const char *mnt_dir, incfs_uuid_t id, char *value, int size)
+{
+ char *path = get_index_filename(mnt_dir, id);
+ int res;
+
+ res = getxattr(path, INCFS_XATTR_METADATA_NAME, value, size);
+ if (res < 0)
+ res = -errno;
+
+ free(path);
+ return res;
+}
+
+static bool same_id(incfs_uuid_t *id1, incfs_uuid_t *id2)
+{
+ return !memcmp(id1->bytes, id2->bytes, sizeof(id1->bytes));
+}
+
+ssize_t ZSTD_compress_default(char *data, char *comp_data, size_t data_size,
+ size_t comp_size)
+{
+ return ZSTD_compress(comp_data, comp_size, data, data_size, 1);
+}
+
+static int emit_test_blocks(const char *mnt_dir, struct test_file *file,
+ int blocks[], int count)
+{
+ uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+ uint8_t comp_data[2 * INCFS_DATA_FILE_BLOCK_SIZE];
+ int block_count = (count > 32) ? 32 : count;
+ int data_buf_size = 2 * INCFS_DATA_FILE_BLOCK_SIZE * block_count;
+ uint8_t *data_buf = malloc(data_buf_size);
+ uint8_t *current_data = data_buf;
+ uint8_t *data_end = data_buf + data_buf_size;
+ struct incfs_fill_block *block_buf =
+ calloc(block_count, sizeof(struct incfs_fill_block));
+ struct incfs_fill_blocks fill_blocks = {
+ .count = block_count,
+ .fill_blocks = ptr_to_u64(block_buf),
+ };
+ ssize_t write_res = 0;
+ int fd = -1;
+ int error = 0;
+ int i = 0;
+ int blocks_written = 0;
+
+ for (i = 0; i < block_count; i++) {
+ int block_index = blocks[i];
+ bool compress_zstd = (file->index + block_index) % 4 == 2;
+ bool compress_lz4 = (file->index + block_index) % 4 == 0;
+ int seed = get_file_block_seed(file->index, block_index);
+ off_t block_offset =
+ ((off_t)block_index) * INCFS_DATA_FILE_BLOCK_SIZE;
+ size_t block_size = 0;
+
+ if (block_offset > file->size) {
+ error = -EINVAL;
+ break;
+ }
+ if (file->size - block_offset >
+ INCFS_DATA_FILE_BLOCK_SIZE)
+ block_size = INCFS_DATA_FILE_BLOCK_SIZE;
+ else
+ block_size = file->size - block_offset;
+
+ rnd_buf(data, block_size, seed);
+ if (compress_lz4) {
+ size_t comp_size = LZ4_compress_default((char *)data,
+ (char *)comp_data, block_size,
+ ARRAY_SIZE(comp_data));
+
+ if (comp_size <= 0) {
+ error = -EBADMSG;
+ break;
+ }
+ if (current_data + comp_size > data_end) {
+ error = -ENOMEM;
+ break;
+ }
+ memcpy(current_data, comp_data, comp_size);
+ block_size = comp_size;
+ block_buf[i].compression = COMPRESSION_LZ4;
+ } else if (compress_zstd) {
+ size_t comp_size = ZSTD_compress(comp_data,
+ ARRAY_SIZE(comp_data), data, block_size,
+ 1);
+
+ if (comp_size <= 0) {
+ error = -EBADMSG;
+ break;
+ }
+ if (current_data + comp_size > data_end) {
+ error = -ENOMEM;
+ break;
+ }
+ memcpy(current_data, comp_data, comp_size);
+ block_size = comp_size;
+ block_buf[i].compression = COMPRESSION_ZSTD;
+ } else {
+ if (current_data + block_size > data_end) {
+ error = -ENOMEM;
+ break;
+ }
+ memcpy(current_data, data, block_size);
+ block_buf[i].compression = COMPRESSION_NONE;
+ }
+
+ block_buf[i].block_index = block_index;
+ block_buf[i].data_len = block_size;
+ block_buf[i].data = ptr_to_u64(current_data);
+ current_data += block_size;
+ }
+
+ if (!error) {
+ fd = open_file_by_id(mnt_dir, file->id, false);
+ if (fd < 0) {
+ error = -errno;
+ goto out;
+ }
+ write_res = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+ if (write_res >= 0) {
+ ksft_print_msg("Wrote to file via normal fd error\n");
+ error = -EPERM;
+ goto out;
+ }
+
+ close(fd);
+ fd = open_file_by_id(mnt_dir, file->id, true);
+ if (fd < 0) {
+ error = -errno;
+ goto out;
+ }
+ write_res = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+ if (write_res < 0)
+ error = -errno;
+ else
+ blocks_written = write_res;
+ }
+ if (error) {
+ ksft_print_msg(
+ "Writing data block error. Write returned: %ld. Error:%s\n",
+ write_res, strerror(-error));
+ }
+
+out:
+ free(block_buf);
+ free(data_buf);
+ close(fd);
+ return (error < 0) ? error : blocks_written;
+}
+
+static int emit_test_block(const char *mnt_dir, struct test_file *file,
+ int block_index)
+{
+ int res = emit_test_blocks(mnt_dir, file, &block_index, 1);
+
+ if (res == 0)
+ return -EINVAL;
+ if (res == 1)
+ return 0;
+ return res;
+}
+
+static void shuffle(int array[], int count, unsigned int seed)
+{
+ int i;
+
+ for (i = 0; i < count - 1; i++) {
+ int items_left = count - i;
+ int shuffle_index;
+ int v;
+
+ seed = 1103515245 * seed + 12345;
+ shuffle_index = i + seed % items_left;
+
+ v = array[shuffle_index];
+ array[shuffle_index] = array[i];
+ array[i] = v;
+ }
+}
+
+static int emit_test_file_data(const char *mount_dir, struct test_file *file)
+{
+ int i;
+ int block_cnt = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+ int *block_indexes = NULL;
+ int result = 0;
+ int blocks_written = 0;
+
+ if (file->size == 0)
+ return 0;
+
+ block_indexes = calloc(block_cnt, sizeof(*block_indexes));
+ for (i = 0; i < block_cnt; i++)
+ block_indexes[i] = i;
+ shuffle(block_indexes, block_cnt, file->index);
+
+ for (i = 0; i < block_cnt; i += blocks_written) {
+ blocks_written = emit_test_blocks(mount_dir, file,
+ block_indexes + i, block_cnt - i);
+ if (blocks_written < 0) {
+ result = blocks_written;
+ goto out;
+ }
+ if (blocks_written == 0) {
+ result = -EIO;
+ goto out;
+ }
+ }
+out:
+ free(block_indexes);
+ return result;
+}
+
+static loff_t read_whole_file(const char *filename)
+{
+ int fd = -1;
+ loff_t result;
+ loff_t bytes_read = 0;
+ uint8_t buff[16 * 1024];
+
+ fd = open(filename, O_RDONLY | O_CLOEXEC);
+ if (fd <= 0)
+ return fd;
+
+ while (1) {
+ int read_result = read(fd, buff, ARRAY_SIZE(buff));
+
+ if (read_result < 0) {
+ print_error("Error during reading from a file.");
+ result = -errno;
+ goto cleanup;
+ } else if (read_result == 0)
+ break;
+
+ bytes_read += read_result;
+ }
+ result = bytes_read;
+
+cleanup:
+ close(fd);
+ return result;
+}
+
+static int read_test_file(uint8_t *buf, size_t len, char *filename,
+ int block_idx)
+{
+ int fd = -1;
+ int result;
+ int bytes_read = 0;
+ size_t bytes_to_read = len;
+ off_t offset = ((off_t)block_idx) * INCFS_DATA_FILE_BLOCK_SIZE;
+
+ fd = open(filename, O_RDONLY | O_CLOEXEC);
+ if (fd <= 0)
+ return fd;
+
+ if (lseek(fd, offset, SEEK_SET) != offset) {
+ print_error("Seek error");
+ return -errno;
+ }
+
+ while (bytes_read < bytes_to_read) {
+ int read_result =
+ read(fd, buf + bytes_read, bytes_to_read - bytes_read);
+ if (read_result < 0) {
+ result = -errno;
+ goto cleanup;
+ } else if (read_result == 0)
+ break;
+
+ bytes_read += read_result;
+ }
+ result = bytes_read;
+
+cleanup:
+ close(fd);
+ return result;
+}
+
+static char *create_backing_dir(const char *mount_dir)
+{
+ struct stat st;
+ char backing_dir_name[255];
+
+ snprintf(backing_dir_name, ARRAY_SIZE(backing_dir_name), "%s-src",
+ mount_dir);
+
+ if (stat(backing_dir_name, &st) == 0) {
+ if (S_ISDIR(st.st_mode)) {
+ int error = delete_dir_tree(backing_dir_name);
+
+ if (error) {
+ ksft_print_msg(
+ "Can't delete existing backing dir. %d\n",
+ error);
+ return NULL;
+ }
+ } else {
+ if (unlink(backing_dir_name)) {
+ print_error("Can't clear backing dir");
+ return NULL;
+ }
+ }
+ }
+
+ if (mkdir(backing_dir_name, 0777)) {
+ if (errno != EEXIST) {
+ print_error("Can't open/create backing dir");
+ return NULL;
+ }
+ }
+
+ return strdup(backing_dir_name);
+}
+
+static int validate_test_file_content_with_seed(const char *mount_dir,
+ struct test_file *file,
+ unsigned int shuffle_seed)
+{
+ int error = -1;
+ char *filename = concat_file_name(mount_dir, file->name);
+ off_t size = file->size;
+ loff_t actual_size = get_file_size(filename);
+ int block_cnt = 1 + (size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+ int *block_indexes = NULL;
+ int i;
+
+ block_indexes = alloca(sizeof(int) * block_cnt);
+ for (i = 0; i < block_cnt; i++)
+ block_indexes[i] = i;
+
+ if (shuffle_seed != 0)
+ shuffle(block_indexes, block_cnt, shuffle_seed);
+
+ if (actual_size != size) {
+ ksft_print_msg(
+ "File size doesn't match. name: %s expected size:%ld actual size:%ld\n",
+ filename, size, actual_size);
+ error = -1;
+ goto failure;
+ }
+
+ for (i = 0; i < block_cnt; i++) {
+ int block_idx = block_indexes[i];
+ uint8_t expected_block[INCFS_DATA_FILE_BLOCK_SIZE];
+ uint8_t actual_block[INCFS_DATA_FILE_BLOCK_SIZE];
+ int seed = get_file_block_seed(file->index, block_idx);
+ size_t bytes_to_compare = min(
+ (off_t)INCFS_DATA_FILE_BLOCK_SIZE,
+ size - ((off_t)block_idx) * INCFS_DATA_FILE_BLOCK_SIZE);
+ int read_result =
+ read_test_file(actual_block, INCFS_DATA_FILE_BLOCK_SIZE,
+ filename, block_idx);
+ if (read_result < 0) {
+ ksft_print_msg(
+ "Error reading block %d from file %s. Error: %s\n",
+ block_idx, filename, strerror(-read_result));
+ error = read_result;
+ goto failure;
+ }
+ rnd_buf(expected_block, INCFS_DATA_FILE_BLOCK_SIZE, seed);
+ if (memcmp(expected_block, actual_block, bytes_to_compare)) {
+ ksft_print_msg(
+ "File contents don't match. name: %s block:%d\n",
+ file->name, block_idx);
+ error = -2;
+ goto failure;
+ }
+ }
+ free(filename);
+ return 0;
+
+failure:
+ free(filename);
+ return error;
+}
+
+static int validate_test_file_content(const char *mount_dir,
+ struct test_file *file)
+{
+ return validate_test_file_content_with_seed(mount_dir, file, 0);
+}
+
+static int data_producer(const char *mount_dir, struct test_files_set *test_set)
+{
+ int ret = 0;
+ int timeout_ms = 1000;
+ struct incfs_pending_read_info prs[100] = {};
+ int prs_size = ARRAY_SIZE(prs);
+ int fd = open_commands_file(mount_dir);
+
+ if (fd < 0)
+ return -errno;
+
+ while ((ret = wait_for_pending_reads(fd, timeout_ms, prs, prs_size)) >
+ 0) {
+ int read_count = ret;
+ int i;
+
+ for (i = 0; i < read_count; i++) {
+ int j = 0;
+ struct test_file *file = NULL;
+
+ for (j = 0; j < test_set->files_count; j++) {
+ bool same = same_id(&(test_set->files[j].id),
+ &(prs[i].file_id));
+
+ if (same) {
+ file = &test_set->files[j];
+ break;
+ }
+ }
+ if (!file) {
+ ksft_print_msg(
+ "Unknown file in pending reads.\n");
+ break;
+ }
+
+ ret = emit_test_block(mount_dir, file,
+ prs[i].block_index);
+ if (ret < 0) {
+ ksft_print_msg("Emitting test data error: %s\n",
+ strerror(-ret));
+ break;
+ }
+ }
+ }
+ close(fd);
+ return ret;
+}
+
+static int data_producer2(const char *mount_dir,
+ struct test_files_set *test_set)
+{
+ int ret = 0;
+ int timeout_ms = 1000;
+ struct incfs_pending_read_info2 prs[100] = {};
+ int prs_size = ARRAY_SIZE(prs);
+ int fd = open_commands_file(mount_dir);
+
+ if (fd < 0)
+ return -errno;
+
+ while ((ret = wait_for_pending_reads2(fd, timeout_ms, prs, prs_size)) >
+ 0) {
+ int read_count = ret;
+ int i;
+
+ for (i = 0; i < read_count; i++) {
+ int j = 0;
+ struct test_file *file = NULL;
+
+ for (j = 0; j < test_set->files_count; j++) {
+ bool same = same_id(&(test_set->files[j].id),
+ &(prs[i].file_id));
+
+ if (same) {
+ file = &test_set->files[j];
+ break;
+ }
+ }
+ if (!file) {
+ ksft_print_msg(
+ "Unknown file in pending reads.\n");
+ break;
+ }
+
+ ret = emit_test_block(mount_dir, file,
+ prs[i].block_index);
+ if (ret < 0) {
+ ksft_print_msg("Emitting test data error: %s\n",
+ strerror(-ret));
+ break;
+ }
+ }
+ }
+ close(fd);
+ return ret;
+}
+
+static int build_mtree(struct test_file *file)
+{
+ char data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+ const int digest_size = SHA256_DIGEST_SIZE;
+ const int hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / digest_size;
+ int block_count = 0;
+ int hash_block_count = 0;
+ int total_tree_block_count = 0;
+ int tree_lvl_index[INCFS_MAX_MTREE_LEVELS] = {};
+ int tree_lvl_count[INCFS_MAX_MTREE_LEVELS] = {};
+ int levels_count = 0;
+ int i, level;
+
+ if (file->size == 0)
+ return 0;
+
+ block_count = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+ hash_block_count = block_count;
+ for (i = 0; hash_block_count > 1; i++) {
+ hash_block_count = (hash_block_count + hash_per_block - 1)
+ / hash_per_block;
+ tree_lvl_count[i] = hash_block_count;
+ total_tree_block_count += hash_block_count;
+ }
+ levels_count = i;
+
+ for (i = 0; i < levels_count; i++) {
+ int prev_lvl_base = (i == 0) ? total_tree_block_count :
+ tree_lvl_index[i - 1];
+
+ tree_lvl_index[i] = prev_lvl_base - tree_lvl_count[i];
+ }
+
+ file->mtree_block_count = total_tree_block_count;
+ if (block_count == 1) {
+ int seed = get_file_block_seed(file->index, 0);
+
+ memset(data, 0, INCFS_DATA_FILE_BLOCK_SIZE);
+ rnd_buf((uint8_t *)data, file->size, seed);
+ sha256(data, INCFS_DATA_FILE_BLOCK_SIZE, file->root_hash);
+ return 0;
+ }
+
+ file->mtree = calloc(total_tree_block_count, sizeof(*file->mtree));
+ /* Build level 0 hashes. */
+ for (i = 0; i < block_count; i++) {
+ off_t offset = i * INCFS_DATA_FILE_BLOCK_SIZE;
+ size_t block_size = INCFS_DATA_FILE_BLOCK_SIZE;
+ int block_index = tree_lvl_index[0] +
+ i / hash_per_block;
+ int block_off = (i % hash_per_block) * digest_size;
+ int seed = get_file_block_seed(file->index, i);
+ char *hash_ptr = file->mtree[block_index].data + block_off;
+
+ if (file->size - offset < block_size) {
+ block_size = file->size - offset;
+ memset(data, 0, INCFS_DATA_FILE_BLOCK_SIZE);
+ }
+
+ rnd_buf((uint8_t *)data, block_size, seed);
+ sha256(data, INCFS_DATA_FILE_BLOCK_SIZE, hash_ptr);
+ }
+
+ /* Build higher levels of hash tree. */
+ for (level = 1; level < levels_count; level++) {
+ int prev_lvl_base = tree_lvl_index[level - 1];
+ int prev_lvl_count = tree_lvl_count[level - 1];
+
+ for (i = 0; i < prev_lvl_count; i++) {
+ int block_index =
+ i / hash_per_block + tree_lvl_index[level];
+ int block_off = (i % hash_per_block) * digest_size;
+ char *hash_ptr =
+ file->mtree[block_index].data + block_off;
+
+ sha256(file->mtree[i + prev_lvl_base].data,
+ INCFS_DATA_FILE_BLOCK_SIZE, hash_ptr);
+ }
+ }
+
+ /* Calculate root hash from the top block */
+ sha256(file->mtree[0].data,
+ INCFS_DATA_FILE_BLOCK_SIZE, file->root_hash);
+
+ return 0;
+}
+
+static int load_hash_tree(const char *mount_dir, struct test_file *file)
+{
+ int err;
+ int i;
+ int fd;
+ struct incfs_fill_blocks fill_blocks = {
+ .count = file->mtree_block_count,
+ };
+ struct incfs_fill_block *fill_block_array =
+ calloc(fill_blocks.count, sizeof(struct incfs_fill_block));
+
+ if (fill_blocks.count == 0)
+ return 0;
+
+ if (!fill_block_array)
+ return -ENOMEM;
+ fill_blocks.fill_blocks = ptr_to_u64(fill_block_array);
+
+ for (i = 0; i < fill_blocks.count; i++) {
+ fill_block_array[i] = (struct incfs_fill_block){
+ .block_index = i,
+ .data_len = INCFS_DATA_FILE_BLOCK_SIZE,
+ .data = ptr_to_u64(file->mtree[i].data),
+ .flags = INCFS_BLOCK_FLAGS_HASH
+ };
+ }
+
+ fd = open_file_by_id(mount_dir, file->id, false);
+ if (fd < 0) {
+ err = errno;
+ goto failure;
+ }
+
+ err = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+ close(fd);
+ if (err >= 0) {
+ err = -EPERM;
+ goto failure;
+ }
+
+ fd = open_file_by_id(mount_dir, file->id, true);
+ if (fd < 0) {
+ err = errno;
+ goto failure;
+ }
+
+ err = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+ close(fd);
+ if (err < fill_blocks.count)
+ err = errno;
+ else
+ err = 0;
+
+failure:
+ free(fill_block_array);
+ return err;
+}
+
+static int cant_touch_index_test(const char *mount_dir)
+{
+ char *file_name = "test_file";
+ int file_size = 123;
+ incfs_uuid_t file_id;
+ char *index_path = concat_file_name(mount_dir, ".index");
+ char *subdir = concat_file_name(index_path, "subdir");
+ char *dst_name = concat_file_name(mount_dir, "something");
+ char *filename_in_index = NULL;
+ char *file_path = concat_file_name(mount_dir, file_name);
+ char *backing_dir;
+ int cmd_fd = -1;
+ int err;
+
+ backing_dir = create_backing_dir(mount_dir);
+ if (!backing_dir)
+ goto failure;
+
+ /* Mount FS and release the backing file. */
+ if (mount_fs(mount_dir, backing_dir, 50) != 0)
+ goto failure;
+ free(backing_dir);
+
+ cmd_fd = open_commands_file(mount_dir);
+ if (cmd_fd < 0)
+ goto failure;
+
+
+ err = mkdir(subdir, 0777);
+ if (err == 0 || errno != EBUSY) {
+ print_error("Shouldn't be able to crate subdir in index\n");
+ goto failure;
+ }
+
+ err = rmdir(index_path);
+ if (err == 0 || errno != EBUSY) {
+ print_error(".index directory should not be removed\n");
+ goto failure;
+ }
+
+ err = emit_file(cmd_fd, ".index", file_name, &file_id,
+ file_size, NULL);
+ if (err != -EBUSY) {
+ print_error("Shouldn't be able to crate a file in index\n");
+ goto failure;
+ }
+
+ err = emit_file(cmd_fd, NULL, file_name, &file_id,
+ file_size, NULL);
+ if (err < 0)
+ goto failure;
+ filename_in_index = get_index_filename(mount_dir, file_id);
+
+ err = unlink(filename_in_index);
+ if (err == 0 || errno != EBUSY) {
+ print_error("Shouldn't be delete from index\n");
+ goto failure;
+ }
+
+
+ err = rename(filename_in_index, dst_name);
+ if (err == 0 || errno != EBUSY) {
+ print_error("Shouldn't be able to move from index\n");
+ goto failure;
+ }
+
+ free(filename_in_index);
+ filename_in_index = concat_file_name(index_path, "abc");
+ err = link(file_path, filename_in_index);
+ if (err == 0 || errno != EBUSY) {
+ print_error("Shouldn't be able to link inside index\n");
+ goto failure;
+ }
+
+ err = rename(index_path, dst_name);
+ if (err == 0 || errno != EBUSY) {
+ print_error("Shouldn't rename .index directory\n");
+ goto failure;
+ }
+
+ close(cmd_fd);
+ free(subdir);
+ free(index_path);
+ free(dst_name);
+ free(filename_in_index);
+ if (umount(mount_dir) != 0) {
+ print_error("Can't unmout FS");
+ goto failure;
+ }
+
+ return TEST_SUCCESS;
+
+failure:
+ free(subdir);
+ free(dst_name);
+ free(index_path);
+ free(filename_in_index);
+ close(cmd_fd);
+ umount(mount_dir);
+ return TEST_FAILURE;
+}
+
+static bool iterate_directory(const char *dir_to_iterate, bool root,
+ int file_count)
+{
+ struct expected_name {
+ const char *name;
+ bool root_only;
+ bool found;
+ } names[] = {
+ {INCFS_LOG_FILENAME, true, false},
+ {INCFS_PENDING_READS_FILENAME, true, false},
+ {INCFS_BLOCKS_WRITTEN_FILENAME, true, false},
+ {".index", true, false},
+ {".incomplete", true, false},
+ {"..", false, false},
+ {".", false, false},
+ };
+
+ bool pass = true, found;
+ int i;
+
+ /* Test directory iteration */
+ int fd = open(dir_to_iterate, O_RDONLY | O_DIRECTORY | O_CLOEXEC);
+
+ if (fd < 0) {
+ print_error("Can't open directory\n");
+ return false;
+ }
+
+ for (;;) {
+ /* Enough space for one dirent - no name over 30 */
+ char buf[sizeof(struct linux_dirent64) + NAME_MAX];
+ struct linux_dirent64 *dirent = (struct linux_dirent64 *) buf;
+ int nread;
+ int i;
+
+ for (i = 0; i < NAME_MAX; ++i) {
+ nread = syscall(__NR_getdents64, fd, buf,
+ sizeof(struct linux_dirent64) + i);
+
+ if (nread >= 0)
+ break;
+ if (errno != EINVAL)
+ break;
+ }
+
+ if (nread == 0)
+ break;
+ if (nread < 0) {
+ print_error("Error iterating directory\n");
+ pass = false;
+ goto failure;
+ }
+
+ /* Expected size is rounded up to 8 byte boundary. Not sure if
+ * this is universal truth or just happenstance, but useful test
+ * for the moment
+ */
+ if (nread != (((sizeof(struct linux_dirent64)
+ + strlen(dirent->d_name) + 1) + 7) & ~7)) {
+ print_error("Wrong dirent size");
+ pass = false;
+ goto failure;
+ }
+
+ found = false;
+ for (i = 0; i < sizeof(names) / sizeof(*names); ++i)
+ if (!strcmp(dirent->d_name, names[i].name)) {
+ if (names[i].root_only && !root) {
+ print_error("Root file error");
+ pass = false;
+ goto failure;
+ }
+
+ if (names[i].found) {
+ print_error("File appears twice");
+ pass = false;
+ goto failure;
+ }
+
+ names[i].found = true;
+ found = true;
+ break;
+ }
+
+ if (!found)
+ --file_count;
+ }
+
+ for (i = 0; i < sizeof(names) / sizeof(*names); ++i) {
+ if (!names[i].found)
+ if (root || !names[i].root_only) {
+ print_error("Expected file not present");
+ pass = false;
+ goto failure;
+ }
+ }
+
+ if (file_count) {
+ print_error("Wrong number of files\n");
+ pass = false;
+ goto failure;
+ }
+
+failure:
+ close(fd);
+ return pass;
+}
+
+static int basic_file_ops_test(const char *mount_dir)
+{
+ struct test_files_set test = get_test_files_set();
+ const int file_num = test.files_count;
+ char *subdir1 = concat_file_name(mount_dir, "subdir1");
+ char *subdir2 = concat_file_name(mount_dir, "subdir2");
+ char *backing_dir;
+ int cmd_fd = -1;
+ int i, err;
+
+ backing_dir = create_backing_dir(mount_dir);
+ if (!backing_dir)
+ goto failure;
+
+ /* Mount FS and release the backing file. */
+ if (mount_fs(mount_dir, backing_dir, 50) != 0)
+ goto failure;
+ free(backing_dir);
+
+ cmd_fd = open_commands_file(mount_dir);
+ if (cmd_fd < 0)
+ goto failure;
+
+ err = mkdir(subdir1, 0777);
+ if (err < 0 && errno != EEXIST) {
+ print_error("Can't create subdir1\n");
+ goto failure;
+ }
+
+ err = mkdir(subdir2, 0777);
+ if (err < 0 && errno != EEXIST) {
+ print_error("Can't create subdir2\n");
+ goto failure;
+ }
+
+ /* Create all test files in subdir1 directory */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+ loff_t size;
+ char *file_path = concat_file_name(subdir1, file->name);
+
+ err = emit_file(cmd_fd, "subdir1", file->name, &file->id,
+ file->size, NULL);
+ if (err < 0)
+ goto failure;
+
+ size = get_file_size(file_path);
+ free(file_path);
+ if (size != file->size) {
+ ksft_print_msg("Wrong size %ld of %s.\n",
+ size, file->name);
+ goto failure;
+ }
+ }
+
+ if (!iterate_directory(subdir1, false, file_num))
+ goto failure;
+
+ /* Link the files to subdir2 */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+ char *src_name = concat_file_name(subdir1, file->name);
+ char *dst_name = concat_file_name(subdir2, file->name);
+ loff_t size;
+
+ err = link(src_name, dst_name);
+ if (err < 0) {
+ print_error("Can't move file\n");
+ goto failure;
+ }
+
+ size = get_file_size(dst_name);
+ if (size != file->size) {
+ ksft_print_msg("Wrong size %ld of %s.\n",
+ size, file->name);
+ goto failure;
+ }
+ free(src_name);
+ free(dst_name);
+ }
+
+ /* Move the files from subdir2 to the mount dir */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+ char *src_name = concat_file_name(subdir2, file->name);
+ char *dst_name = concat_file_name(mount_dir, file->name);
+ loff_t size;
+
+ err = rename(src_name, dst_name);
+ if (err < 0) {
+ print_error("Can't move file\n");
+ goto failure;
+ }
+
+ size = get_file_size(dst_name);
+ if (size != file->size) {
+ ksft_print_msg("Wrong size %ld of %s.\n",
+ size, file->name);
+ goto failure;
+ }
+ free(src_name);
+ free(dst_name);
+ }
+
+ /* +2 because there are 2 subdirs */
+ if (!iterate_directory(mount_dir, true, file_num + 2))
+ goto failure;
+
+ /* Open and close all files from the mount dir */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+ char *path = concat_file_name(mount_dir, file->name);
+ int fd;
+
+ fd = open(path, O_RDWR | O_CLOEXEC);
+ free(path);
+ if (fd <= 0) {
+ print_error("Can't open file");
+ goto failure;
+ }
+ if (close(fd)) {
+ print_error("Can't close file");
+ goto failure;
+ }
+ }
+
+ /* Delete all files from the mount dir */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+ char *path = concat_file_name(mount_dir, file->name);
+
+ err = unlink(path);
+ free(path);
+ if (err < 0) {
+ print_error("Can't unlink file");
+ goto failure;
+ }
+ }
+
+ err = delete_dir_tree(subdir1);
+ if (err) {
+ ksft_print_msg("Error deleting subdir1 %d", err);
+ goto failure;
+ }
+
+ err = rmdir(subdir2);
+ if (err) {
+ print_error("Error deleting subdir2");
+ goto failure;
+ }
+
+ close(cmd_fd);
+ cmd_fd = -1;
+ if (umount(mount_dir) != 0) {
+ print_error("Can't unmout FS");
+ goto failure;
+ }
+
+ return TEST_SUCCESS;
+
+failure:
+ close(cmd_fd);
+ umount(mount_dir);
+ return TEST_FAILURE;
+}
+
+static int dynamic_files_and_data_test(const char *mount_dir)
+{
+ struct test_files_set test = get_test_files_set();
+ const int file_num = test.files_count;
+ const int missing_file_idx = 5;
+ int cmd_fd = -1;
+ char *backing_dir;
+ int i;
+
+ backing_dir = create_backing_dir(mount_dir);
+ if (!backing_dir)
+ goto failure;
+
+ /* Mount FS and release the backing file. */
+ if (mount_fs(mount_dir, backing_dir, 50) != 0)
+ goto failure;
+ free(backing_dir);
+
+ cmd_fd = open_commands_file(mount_dir);
+ if (cmd_fd < 0)
+ goto failure;
+
+ /* Check that test files don't exist in the filesystem. */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+ char *filename = concat_file_name(mount_dir, file->name);
+
+ if (access(filename, F_OK) != -1) {
+ ksft_print_msg(
+ "File %s somehow already exists in a clean FS.\n",
+ filename);
+ goto failure;
+ }
+ free(filename);
+ }
+
+ /* Write test data into the command file. */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+ int res;
+
+ res = emit_file(cmd_fd, NULL, file->name, &file->id,
+ file->size, NULL);
+ if (res < 0) {
+ ksft_print_msg("Error %s emiting file %s.\n",
+ strerror(-res), file->name);
+ goto failure;
+ }
+
+ /* Skip writing data to one file so we can check */
+ /* that it's missing later. */
+ if (i == missing_file_idx)
+ continue;
+
+ res = emit_test_file_data(mount_dir, file);
+ if (res) {
+ ksft_print_msg("Error %s emiting data for %s.\n",
+ strerror(-res), file->name);
+ goto failure;
+ }
+ }
+
+ /* Validate contents of the FS */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+
+ if (i == missing_file_idx) {
+ /* No data has been written to this file. */
+ /* Check for read error; */
+ uint8_t buf;
+ char *filename =
+ concat_file_name(mount_dir, file->name);
+ int res = read_test_file(&buf, 1, filename, 0);
+
+ free(filename);
+ if (res > 0) {
+ ksft_print_msg(
+ "Data present, even though never writtern.\n");
+ goto failure;
+ }
+ if (res != -ETIME) {
+ ksft_print_msg("Wrong error code: %d.\n", res);
+ goto failure;
+ }
+ } else {
+ if (validate_test_file_content(mount_dir, file) < 0)
+ goto failure;
+ }
+ }
+
+ close(cmd_fd);
+ cmd_fd = -1;
+ if (umount(mount_dir) != 0) {
+ print_error("Can't unmout FS");
+ goto failure;
+ }
+
+ return TEST_SUCCESS;
+
+failure:
+ close(cmd_fd);
+ umount(mount_dir);
+ return TEST_FAILURE;
+}
+
+static int concurrent_reads_and_writes_test(const char *mount_dir)
+{
+ struct test_files_set test = get_test_files_set();
+ const int file_num = test.files_count;
+ /* Validate each file from that many child processes. */
+ const int child_multiplier = 3;
+ int cmd_fd = -1;
+ char *backing_dir;
+ int status;
+ int i;
+ pid_t producer_pid;
+ pid_t *child_pids = alloca(child_multiplier * file_num * sizeof(pid_t));
+
+ backing_dir = create_backing_dir(mount_dir);
+ if (!backing_dir)
+ goto failure;
+
+ /* Mount FS and release the backing file. */
+ if (mount_fs(mount_dir, backing_dir, 500) != 0)
+ goto failure;
+ free(backing_dir);
+
+ cmd_fd = open_commands_file(mount_dir);
+ if (cmd_fd < 0)
+ goto failure;
+
+ /* Tell FS about the files, without actually providing the data. */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+ int res;
+
+ res = emit_file(cmd_fd, NULL, file->name, &file->id,
+ file->size, NULL);
+ if (res)
+ goto failure;
+ }
+
+ /* Start child processes acessing data in the files */
+ for (i = 0; i < file_num * child_multiplier; i++) {
+ struct test_file *file = &test.files[i / child_multiplier];
+ pid_t child_pid = flush_and_fork();
+
+ if (child_pid == 0) {
+ /* This is a child process, do the data validation. */
+ int ret = validate_test_file_content_with_seed(
+ mount_dir, file, i);
+ if (ret >= 0) {
+ /* Zero exit status if data is valid. */
+ exit(0);
+ }
+
+ /* Positive status if validation error found. */
+ exit(-ret);
+ } else if (child_pid > 0) {
+ child_pids[i] = child_pid;
+ } else {
+ print_error("Fork error");
+ goto failure;
+ }
+ }
+
+ producer_pid = flush_and_fork();
+ if (producer_pid == 0) {
+ int ret;
+ /*
+ * This is a child that should provide data to
+ * pending reads.
+ */
+
+ ret = data_producer(mount_dir, &test);
+ exit(-ret);
+ } else {
+ status = wait_for_process(producer_pid);
+ if (status != 0) {
+ ksft_print_msg("Data produces failed. %d(%s) ", status,
+ strerror(status));
+ goto failure;
+ }
+ }
+
+ /* Check that all children has finished with 0 exit status */
+ for (i = 0; i < file_num * child_multiplier; i++) {
+ struct test_file *file = &test.files[i / child_multiplier];
+
+ status = wait_for_process(child_pids[i]);
+ if (status != 0) {
+ ksft_print_msg(
+ "Validation for the file %s failed with code %d (%s)\n",
+ file->name, status, strerror(status));
+ goto failure;
+ }
+ }
+
+ /* Check that there are no pending reads left */
+ {
+ struct incfs_pending_read_info prs[1] = {};
+ int timeout = 0;
+ int read_count = wait_for_pending_reads(cmd_fd, timeout, prs,
+ ARRAY_SIZE(prs));
+
+ if (read_count) {
+ ksft_print_msg(
+ "Pending reads pending when all data written\n");
+ goto failure;
+ }
+ }
+
+ close(cmd_fd);
+ cmd_fd = -1;
+ if (umount(mount_dir) != 0) {
+ print_error("Can't unmout FS");
+ goto failure;
+ }
+
+ return TEST_SUCCESS;
+
+failure:
+ close(cmd_fd);
+ umount(mount_dir);
+ return TEST_FAILURE;
+}
+
+static int work_after_remount_test(const char *mount_dir)
+{
+ struct test_files_set test = get_test_files_set();
+ const int file_num = test.files_count;
+ const int file_num_stage1 = file_num / 2;
+ const int file_num_stage2 = file_num;
+ char *backing_dir = NULL;
+ int i = 0;
+ int cmd_fd = -1;
+
+ backing_dir = create_backing_dir(mount_dir);
+ if (!backing_dir)
+ goto failure;
+
+ /* Mount FS and release the backing file. */
+ if (mount_fs(mount_dir, backing_dir, 50) != 0)
+ goto failure;
+
+ cmd_fd = open_commands_file(mount_dir);
+ if (cmd_fd < 0)
+ goto failure;
+
+ /* Write first half of the data into the command file. (stage 1) */
+ for (i = 0; i < file_num_stage1; i++) {
+ struct test_file *file = &test.files[i];
+
+ if (emit_file(cmd_fd, NULL, file->name, &file->id,
+ file->size, NULL))
+ goto failure;
+
+ if (emit_test_file_data(mount_dir, file))
+ goto failure;
+ }
+
+ /* Unmount and mount again, to see that data is persistent. */
+ close(cmd_fd);
+ cmd_fd = -1;
+ if (umount(mount_dir) != 0) {
+ print_error("Can't unmout FS");
+ goto failure;
+ }
+
+ if (mount_fs(mount_dir, backing_dir, 50) != 0)
+ goto failure;
+
+ cmd_fd = open_commands_file(mount_dir);
+ if (cmd_fd < 0)
+ goto failure;
+
+ /* Write the second half of the data into the command file. (stage 2) */
+ for (; i < file_num_stage2; i++) {
+ struct test_file *file = &test.files[i];
+ int res = emit_file(cmd_fd, NULL, file->name, &file->id,
+ file->size, NULL);
+
+ if (res)
+ goto failure;
+
+ if (emit_test_file_data(mount_dir, file))
+ goto failure;
+ }
+
+ /* Validate contents of the FS */
+ for (i = 0; i < file_num_stage2; i++) {
+ struct test_file *file = &test.files[i];
+
+ if (validate_test_file_content(mount_dir, file) < 0)
+ goto failure;
+ }
+
+ /* Delete all files */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+ char *filename = concat_file_name(mount_dir, file->name);
+ char *filename_in_index = get_index_filename(mount_dir,
+ file->id);
+
+ if (access(filename, F_OK) != 0) {
+ ksft_print_msg("File %s is not visible.\n", filename);
+ goto failure;
+ }
+
+ if (access(filename_in_index, F_OK) != 0) {
+ ksft_print_msg("File %s is not visible.\n",
+ filename_in_index);
+ goto failure;
+ }
+
+ unlink(filename);
+
+ if (access(filename, F_OK) != -1) {
+ ksft_print_msg("File %s is still present.\n", filename);
+ goto failure;
+ }
+
+ if (access(filename_in_index, F_OK) != -1) {
+ ksft_print_msg("File %s is still present.\n",
+ filename_in_index);
+ goto failure;
+ }
+ free(filename);
+ free(filename_in_index);
+ }
+
+ /* Unmount and mount again, to see that deleted files stay deleted. */
+ close(cmd_fd);
+ cmd_fd = -1;
+ if (umount(mount_dir) != 0) {
+ print_error("Can't unmout FS");
+ goto failure;
+ }
+
+ if (mount_fs(mount_dir, backing_dir, 50) != 0)
+ goto failure;
+
+ cmd_fd = open_commands_file(mount_dir);
+ if (cmd_fd < 0)
+ goto failure;
+
+ /* Validate all deleted files are still deleted. */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+ char *filename = concat_file_name(mount_dir, file->name);
+
+ if (access(filename, F_OK) != -1) {
+ ksft_print_msg("File %s is still visible.\n", filename);
+ goto failure;
+ }
+ free(filename);
+ }
+
+ /* Final unmount */
+ close(cmd_fd);
+ free(backing_dir);
+ cmd_fd = -1;
+ if (umount(mount_dir) != 0) {
+ print_error("Can't unmout FS");
+ goto failure;
+ }
+
+ return TEST_SUCCESS;
+
+failure:
+ close(cmd_fd);
+ free(backing_dir);
+ umount(mount_dir);
+ return TEST_FAILURE;
+}
+
+static int attribute_test(const char *mount_dir)
+{
+ char file_attr[] = "metadata123123";
+ char attr_buf[INCFS_MAX_FILE_ATTR_SIZE] = {};
+ int cmd_fd = -1;
+ incfs_uuid_t file_id;
+ int attr_res = 0;
+ char *backing_dir;
+
+
+ backing_dir = create_backing_dir(mount_dir);
+ if (!backing_dir)
+ goto failure;
+
+ /* Mount FS and release the backing file. */
+ if (mount_fs(mount_dir, backing_dir, 50) != 0)
+ goto failure;
+
+
+ cmd_fd = open_commands_file(mount_dir);
+ if (cmd_fd < 0)
+ goto failure;
+
+ if (emit_file(cmd_fd, NULL, "file", &file_id, 12, file_attr))
+ goto failure;
+
+ /* Test attribute values */
+ attr_res = get_file_attr(mount_dir, file_id, attr_buf,
+ ARRAY_SIZE(attr_buf));
+ if (attr_res != strlen(file_attr)) {
+ ksft_print_msg("Get file attr error: %d\n", attr_res);
+ goto failure;
+ }
+ if (strcmp(attr_buf, file_attr) != 0) {
+ ksft_print_msg("Incorrect file attr value: '%s'", attr_buf);
+ goto failure;
+ }
+
+ /* Unmount and mount again, to see that attributes are persistent. */
+ close(cmd_fd);
+ cmd_fd = -1;
+ if (umount(mount_dir) != 0) {
+ print_error("Can't unmout FS");
+ goto failure;
+ }
+
+ if (mount_fs(mount_dir, backing_dir, 50) != 0)
+ goto failure;
+
+ cmd_fd = open_commands_file(mount_dir);
+ if (cmd_fd < 0)
+ goto failure;
+
+ /* Test attribute values again after remount*/
+ attr_res = get_file_attr(mount_dir, file_id, attr_buf,
+ ARRAY_SIZE(attr_buf));
+ if (attr_res != strlen(file_attr)) {
+ ksft_print_msg("Get dir attr error: %d\n", attr_res);
+ goto failure;
+ }
+ if (strcmp(attr_buf, file_attr) != 0) {
+ ksft_print_msg("Incorrect file attr value: '%s'", attr_buf);
+ goto failure;
+ }
+
+ /* Final unmount */
+ close(cmd_fd);
+ free(backing_dir);
+ cmd_fd = -1;
+ if (umount(mount_dir) != 0) {
+ print_error("Can't unmout FS");
+ goto failure;
+ }
+
+ return TEST_SUCCESS;
+
+failure:
+ close(cmd_fd);
+ free(backing_dir);
+ umount(mount_dir);
+ return TEST_FAILURE;
+}
+
+static int child_procs_waiting_for_data_test(const char *mount_dir)
+{
+ struct test_files_set test = get_test_files_set();
+ const int file_num = test.files_count;
+ int cmd_fd = -1;
+ int i;
+ pid_t *child_pids = alloca(file_num * sizeof(pid_t));
+ char *backing_dir;
+
+ backing_dir = create_backing_dir(mount_dir);
+ if (!backing_dir)
+ goto failure;
+
+ /* Mount FS and release the backing file. (10s wait time) */
+ if (mount_fs(mount_dir, backing_dir, 10000) != 0)
+ goto failure;
+
+
+ cmd_fd = open_commands_file(mount_dir);
+ if (cmd_fd < 0)
+ goto failure;
+
+ /* Tell FS about the files, without actually providing the data. */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+
+ emit_file(cmd_fd, NULL, file->name, &file->id,
+ file->size, NULL);
+ }
+
+ /* Start child processes acessing data in the files */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+ pid_t child_pid = flush_and_fork();
+
+ if (child_pid == 0) {
+ /* This is a child process, do the data validation. */
+ int ret = validate_test_file_content(mount_dir, file);
+
+ if (ret >= 0) {
+ /* Zero exit status if data is valid. */
+ exit(0);
+ }
+
+ /* Positive status if validation error found. */
+ exit(-ret);
+ } else if (child_pid > 0) {
+ child_pids[i] = child_pid;
+ } else {
+ print_error("Fork error");
+ goto failure;
+ }
+ }
+
+ /* Write test data into the command file. */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+
+ if (emit_test_file_data(mount_dir, file))
+ goto failure;
+ }
+
+ /* Check that all children has finished with 0 exit status */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+ int status = wait_for_process(child_pids[i]);
+
+ if (status != 0) {
+ ksft_print_msg(
+ "Validation for the file %s failed with code %d (%s)\n",
+ file->name, status, strerror(status));
+ goto failure;
+ }
+ }
+
+ close(cmd_fd);
+ free(backing_dir);
+ cmd_fd = -1;
+ if (umount(mount_dir) != 0) {
+ print_error("Can't unmout FS");
+ goto failure;
+ }
+
+ return TEST_SUCCESS;
+
+failure:
+ close(cmd_fd);
+ free(backing_dir);
+ umount(mount_dir);
+ return TEST_FAILURE;
+}
+
+static int multiple_providers_test(const char *mount_dir)
+{
+ struct test_files_set test = get_test_files_set();
+ const int file_num = test.files_count;
+ const int producer_count = 5;
+ int cmd_fd = -1;
+ int status;
+ int i;
+ pid_t *producer_pids = alloca(producer_count * sizeof(pid_t));
+ char *backing_dir;
+
+ backing_dir = create_backing_dir(mount_dir);
+ if (!backing_dir)
+ goto failure;
+
+ /* Mount FS and release the backing file. (10s wait time) */
+ if (mount_fs_opt(mount_dir, backing_dir,
+ "read_timeout_ms=10000,report_uid", false) != 0)
+ goto failure;
+
+ cmd_fd = open_commands_file(mount_dir);
+ if (cmd_fd < 0)
+ goto failure;
+
+ /* Tell FS about the files, without actually providing the data. */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+
+ if (emit_file(cmd_fd, NULL, file->name, &file->id,
+ file->size, NULL) < 0)
+ goto failure;
+ }
+
+ /* Start producer processes */
+ for (i = 0; i < producer_count; i++) {
+ pid_t producer_pid = flush_and_fork();
+
+ if (producer_pid == 0) {
+ int ret;
+ /*
+ * This is a child that should provide data to
+ * pending reads.
+ */
+
+ ret = data_producer2(mount_dir, &test);
+ exit(-ret);
+ } else if (producer_pid > 0) {
+ producer_pids[i] = producer_pid;
+ } else {
+ print_error("Fork error");
+ goto failure;
+ }
+ }
+
+ /* Validate FS content */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+ char *filename = concat_file_name(mount_dir, file->name);
+ loff_t read_result = read_whole_file(filename);
+
+ free(filename);
+ if (read_result != file->size) {
+ ksft_print_msg(
+ "Error validating file %s. Result: %ld\n",
+ file->name, read_result);
+ goto failure;
+ }
+ }
+
+ /* Check that all producers has finished with 0 exit status */
+ for (i = 0; i < producer_count; i++) {
+ status = wait_for_process(producer_pids[i]);
+ if (status != 0) {
+ ksft_print_msg("Producer %d failed with code (%s)\n", i,
+ strerror(status));
+ goto failure;
+ }
+ }
+
+ close(cmd_fd);
+ free(backing_dir);
+ cmd_fd = -1;
+ if (umount(mount_dir) != 0) {
+ print_error("Can't unmout FS");
+ goto failure;
+ }
+
+ return TEST_SUCCESS;
+
+failure:
+ close(cmd_fd);
+ free(backing_dir);
+ umount(mount_dir);
+ return TEST_FAILURE;
+}
+
+static int validate_hash_tree(const char *mount_dir, struct test_file *file)
+{
+ int result = TEST_FAILURE;
+ char *filename = NULL;
+ int fd = -1;
+ unsigned char *buf;
+ int i, err;
+
+ TEST(filename = concat_file_name(mount_dir, file->name), filename);
+ TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+ TEST(buf = malloc(INCFS_DATA_FILE_BLOCK_SIZE * 8), buf);
+
+ for (i = 0; i < file->mtree_block_count; ) {
+ int blocks_to_read = i % 7 + 1;
+ struct fsverity_read_metadata_arg args = {
+ .metadata_type = FS_VERITY_METADATA_TYPE_MERKLE_TREE,
+ .offset = i * INCFS_DATA_FILE_BLOCK_SIZE,
+ .length = blocks_to_read * INCFS_DATA_FILE_BLOCK_SIZE,
+ .buf_ptr = ptr_to_u64(buf),
+ };
+
+ TEST(err = ioctl(fd, FS_IOC_READ_VERITY_METADATA, &args),
+ err == min(args.length, (file->mtree_block_count - i) *
+ INCFS_DATA_FILE_BLOCK_SIZE));
+ TESTEQUAL(memcmp(buf, file->mtree[i].data, err), 0);
+
+ i += blocks_to_read;
+ }
+
+ result = TEST_SUCCESS;
+
+out:
+ free(buf);
+ close(fd);
+ free(filename);
+ return result;
+}
+
+static int hash_tree_test(const char *mount_dir)
+{
+ int result = TEST_FAILURE;
+ char *backing_dir;
+ struct test_files_set test = get_test_files_set();
+ const int file_num = test.files_count;
+ const int corrupted_file_idx = 5;
+ int i = 0;
+ int cmd_fd = -1;
+
+ backing_dir = create_backing_dir(mount_dir);
+ if (!backing_dir)
+ goto failure;
+
+ /* Mount FS and release the backing file. */
+ if (mount_fs(mount_dir, backing_dir, 50) != 0)
+ goto failure;
+
+ cmd_fd = open_commands_file(mount_dir);
+ if (cmd_fd < 0)
+ goto failure;
+
+ /* Write hashes and data. */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+ int res;
+
+ build_mtree(file);
+ res = crypto_emit_file(cmd_fd, NULL, file->name, &file->id,
+ file->size, file->root_hash,
+ file->sig.add_data);
+
+ if (i == corrupted_file_idx) {
+ /* Corrupt third blocks hash */
+ file->mtree[0].data[2 * SHA256_DIGEST_SIZE] ^= 0xff;
+ }
+ if (emit_test_file_data(mount_dir, file))
+ goto failure;
+
+ res = load_hash_tree(mount_dir, file);
+ if (res) {
+ ksft_print_msg("Can't load hashes for %s. error: %s\n",
+ file->name, strerror(-res));
+ goto failure;
+ }
+ }
+
+ /* Validate data */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+
+ if (i == corrupted_file_idx) {
+ uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+ char *filename =
+ concat_file_name(mount_dir, file->name);
+ int res;
+
+ res = read_test_file(data, INCFS_DATA_FILE_BLOCK_SIZE,
+ filename, 2);
+ free(filename);
+ if (res != -EBADMSG) {
+ ksft_print_msg("Hash violation missed1. %d\n",
+ res);
+ goto failure;
+ }
+ } else if (validate_test_file_content(mount_dir, file) < 0)
+ goto failure;
+ else if (validate_hash_tree(mount_dir, file) < 0)
+ goto failure;
+ }
+
+ /* Unmount and mount again, to that hashes are persistent. */
+ close(cmd_fd);
+ cmd_fd = -1;
+ if (umount(mount_dir) != 0) {
+ print_error("Can't unmout FS");
+ goto failure;
+ }
+ if (mount_fs(mount_dir, backing_dir, 50) != 0)
+ goto failure;
+
+ cmd_fd = open_commands_file(mount_dir);
+ if (cmd_fd < 0)
+ goto failure;
+
+ /* Validate data again */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+
+ if (i == corrupted_file_idx) {
+ uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+ char *filename =
+ concat_file_name(mount_dir, file->name);
+ int res;
+
+ res = read_test_file(data, INCFS_DATA_FILE_BLOCK_SIZE,
+ filename, 2);
+ free(filename);
+ if (res != -EBADMSG) {
+ ksft_print_msg("Hash violation missed2. %d\n",
+ res);
+ goto failure;
+ }
+ } else if (validate_test_file_content(mount_dir, file) < 0)
+ goto failure;
+ }
+ result = TEST_SUCCESS;
+
+failure:
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+
+ free(file->mtree);
+ }
+
+ close(cmd_fd);
+ free(backing_dir);
+ umount(mount_dir);
+ return result;
+}
+
+enum expected_log { FULL_LOG, NO_LOG, PARTIAL_LOG };
+
+static int validate_logs(const char *mount_dir, int log_fd,
+ struct test_file *file,
+ enum expected_log expected_log,
+ bool report_uid, bool expect_data)
+{
+ int result = TEST_FAILURE;
+ uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+ struct incfs_pending_read_info prs[2048] = {};
+ struct incfs_pending_read_info2 prs2[2048] = {};
+ struct incfs_pending_read_info *previous_record = NULL;
+ int prs_size = ARRAY_SIZE(prs);
+ int block_count = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+ int expected_read_count, read_count, block_index, read_index;
+ char *filename = NULL;
+ int fd = -1;
+
+ TEST(filename = concat_file_name(mount_dir, file->name), filename);
+ TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+
+ if (block_count > prs_size)
+ block_count = prs_size;
+ expected_read_count = block_count;
+
+ for (block_index = 0; block_index < block_count; block_index++) {
+ int result = pread(fd, data, sizeof(data),
+ INCFS_DATA_FILE_BLOCK_SIZE * block_index);
+
+ /* Make some read logs of type SAME_FILE_NEXT_BLOCK */
+ if (block_index % 100 == 10)
+ usleep(20000);
+
+ /* Skip some blocks to make logs of type SAME_FILE */
+ if (block_index % 10 == 5) {
+ ++block_index;
+ --expected_read_count;
+ }
+
+ if (expect_data)
+ TESTCOND(result > 0);
+
+ if (!expect_data)
+ TESTEQUAL(result, -1);
+ }
+
+ if (report_uid)
+ read_count = wait_for_pending_reads2(log_fd,
+ expected_log == NO_LOG ? 10 : 0,
+ prs2, prs_size);
+ else
+ read_count = wait_for_pending_reads(log_fd,
+ expected_log == NO_LOG ? 10 : 0,
+ prs, prs_size);
+
+ if (expected_log == NO_LOG)
+ TESTEQUAL(read_count, 0);
+
+ if (expected_log == PARTIAL_LOG)
+ TESTCOND(read_count > 0 &&
+ read_count <= expected_read_count);
+
+ if (expected_log == FULL_LOG)
+ TESTEQUAL(read_count, expected_read_count);
+
+ /* If read less than expected, advance block_index appropriately */
+ for (block_index = 0, read_index = 0;
+ read_index < expected_read_count - read_count;
+ block_index++, read_index++)
+ if (block_index % 10 == 5)
+ ++block_index;
+
+ for (read_index = 0; read_index < read_count;
+ block_index++, read_index++) {
+ struct incfs_pending_read_info *record = report_uid ?
+ (struct incfs_pending_read_info *) &prs2[read_index] :
+ &prs[read_index];
+
+ TESTCOND(same_id(&record->file_id, &file->id));
+ TESTEQUAL(record->block_index, block_index);
+ TESTNE(record->timestamp_us, 0);
+ if (previous_record)
+ TESTEQUAL(record->serial_number,
+ previous_record->serial_number + 1);
+
+ previous_record = record;
+ if (block_index % 10 == 5)
+ ++block_index;
+ }
+
+ result = TEST_SUCCESS;
+out:
+ close(fd);
+ free(filename);
+ return result;
+}
+
+static int read_log_test(const char *mount_dir)
+{
+ int result = TEST_FAILURE;
+ struct test_files_set test = get_test_files_set();
+ const int file_num = test.files_count;
+ int i = 0;
+ int cmd_fd = -1, log_fd = -1;
+ char *backing_dir = NULL;
+
+ /* Create files */
+ TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+ TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+ "readahead=0,report_uid,read_timeout_ms=0",
+ false), 0);
+ TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+
+ TESTEQUAL(emit_file(cmd_fd, NULL, file->name, &file->id,
+ file->size, NULL), 0);
+ }
+ close(cmd_fd);
+ cmd_fd = -1;
+
+ /* Validate logs */
+ TEST(log_fd = open_log_file(mount_dir), log_fd != -1);
+ for (i = 0; i < file_num; i++)
+ TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+ FULL_LOG, true, false), 0);
+
+ /* Unmount and mount again without report_uid */
+ close(log_fd);
+ log_fd = -1;
+ TESTEQUAL(umount(mount_dir), 0);
+ TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+ "readahead=0,read_timeout_ms=0", false), 0);
+
+ TEST(log_fd = open_log_file(mount_dir), log_fd != -1);
+ for (i = 0; i < file_num; i++)
+ TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+ FULL_LOG, false, false), 0);
+
+ /* No read log to make sure poll doesn't crash */
+ close(log_fd);
+ log_fd = -1;
+ TESTEQUAL(umount(mount_dir), 0);
+ TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+ "readahead=0,rlog_pages=0,read_timeout_ms=0",
+ false), 0);
+
+ TEST(log_fd = open_log_file(mount_dir), log_fd != -1);
+ for (i = 0; i < file_num; i++)
+ TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+ NO_LOG, false, false), 0);
+
+ /* Remount and check that logs start working again */
+ TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+ "readahead=0,rlog_pages=1,read_timeout_ms=0",
+ true), 0);
+ for (i = 0; i < file_num; i++)
+ TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+ PARTIAL_LOG, false, false), 0);
+
+ /* Remount and check that logs continue working */
+ TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+ "readahead=0,rlog_pages=4,read_timeout_ms=0",
+ true), 0);
+ for (i = 0; i < file_num; i++)
+ TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+ FULL_LOG, false, false), 0);
+
+ /* Check logs work with data */
+ for (i = 0; i < file_num; i++) {
+ TESTEQUAL(emit_test_file_data(mount_dir, &test.files[i]), 0);
+ TESTEQUAL(validate_logs(mount_dir, log_fd, &test.files[i],
+ FULL_LOG, false, true), 0);
+ }
+
+ /* Final unmount */
+ close(log_fd);
+ log_fd = -1;
+ TESTEQUAL(umount(mount_dir), 0);
+
+ result = TEST_SUCCESS;
+out:
+ close(cmd_fd);
+ close(log_fd);
+ free(backing_dir);
+ umount(mount_dir);
+ return result;
+}
+
+static int emit_partial_test_file_data(const char *mount_dir,
+ struct test_file *file)
+{
+ int i, j;
+ int block_cnt = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+ int *block_indexes = NULL;
+ int result = 0;
+ int blocks_written = 0;
+ int bw_fd = -1;
+ char buffer[20];
+ struct pollfd pollfd;
+ long blocks_written_total, blocks_written_new_total;
+
+ if (file->size == 0)
+ return 0;
+
+ bw_fd = open_blocks_written_file(mount_dir);
+ if (bw_fd == -1)
+ return -errno;
+
+ result = read(bw_fd, buffer, sizeof(buffer));
+ if (result <= 0) {
+ result = -EIO;
+ goto out;
+ }
+
+ buffer[result] = 0;
+ blocks_written_total = strtol(buffer, NULL, 10);
+ result = 0;
+
+ pollfd = (struct pollfd) {
+ .fd = bw_fd,
+ .events = POLLIN,
+ };
+
+ result = poll(&pollfd, 1, 0);
+ if (result) {
+ result = -EIO;
+ goto out;
+ }
+
+ /* Emit 2 blocks, skip 2 blocks etc*/
+ block_indexes = calloc(block_cnt, sizeof(*block_indexes));
+ for (i = 0, j = 0; i < block_cnt; ++i)
+ if ((i & 2) == 0) {
+ block_indexes[j] = i;
+ ++j;
+ }
+
+ for (i = 0; i < j; i += blocks_written) {
+ blocks_written = emit_test_blocks(mount_dir, file,
+ block_indexes + i, j - i);
+ if (blocks_written < 0) {
+ result = blocks_written;
+ goto out;
+ }
+ if (blocks_written == 0) {
+ result = -EIO;
+ goto out;
+ }
+
+ result = poll(&pollfd, 1, 0);
+ if (result != 1 || pollfd.revents != POLLIN) {
+ result = -EIO;
+ goto out;
+ }
+
+ result = read(bw_fd, buffer, sizeof(buffer));
+ buffer[result] = 0;
+ blocks_written_new_total = strtol(buffer, NULL, 10);
+
+ if (blocks_written_new_total - blocks_written_total
+ != blocks_written) {
+ result = -EIO;
+ goto out;
+ }
+
+ blocks_written_total = blocks_written_new_total;
+ result = 0;
+ }
+out:
+ free(block_indexes);
+ close(bw_fd);
+ return result;
+}
+
+static int validate_ranges(const char *mount_dir, struct test_file *file)
+{
+ int block_cnt = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+ char *filename = concat_file_name(mount_dir, file->name);
+ int fd;
+ struct incfs_filled_range ranges[128];
+ struct incfs_get_filled_blocks_args fba = {
+ .range_buffer = ptr_to_u64(ranges),
+ .range_buffer_size = sizeof(ranges),
+ };
+ int error = TEST_SUCCESS;
+ int i;
+ int range_cnt;
+ int cmd_fd = -1;
+ struct incfs_permit_fill permit_fill;
+
+ fd = open(filename, O_RDONLY | O_CLOEXEC);
+ free(filename);
+ if (fd <= 0)
+ return TEST_FAILURE;
+
+ error = ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+ if (error != -1 || errno != EPERM) {
+ ksft_print_msg("INCFS_IOC_GET_FILLED_BLOCKS not blocked\n");
+ error = -EPERM;
+ goto out;
+ }
+
+ cmd_fd = open_commands_file(mount_dir);
+ permit_fill.file_descriptor = fd;
+ if (ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+ print_error("INCFS_IOC_PERMIT_FILL failed");
+ return -EPERM;
+ goto out;
+ }
+
+ error = ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+ if (error && errno != ERANGE)
+ goto out;
+
+ if (error && errno == ERANGE && block_cnt < 509)
+ goto out;
+
+ if (!error && block_cnt >= 509) {
+ error = -ERANGE;
+ goto out;
+ }
+
+ if (fba.total_blocks_out != block_cnt) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ if (fba.data_blocks_out != block_cnt) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ range_cnt = (block_cnt + 3) / 4;
+ if (range_cnt > 128)
+ range_cnt = 128;
+ if (range_cnt != fba.range_buffer_size_out / sizeof(*ranges)) {
+ error = -ERANGE;
+ goto out;
+ }
+
+ error = TEST_SUCCESS;
+ for (i = 0; i < fba.range_buffer_size_out / sizeof(*ranges) - 1; ++i)
+ if (ranges[i].begin != i * 4 || ranges[i].end != i * 4 + 2) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ if (ranges[i].begin != i * 4 ||
+ (ranges[i].end != i * 4 + 1 && ranges[i].end != i * 4 + 2)) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ for (i = 0; i < 64; ++i) {
+ fba.start_index = i * 2;
+ fba.end_index = i * 2 + 2;
+ error = ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+ if (error)
+ goto out;
+
+ if (fba.total_blocks_out != block_cnt) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ if (fba.start_index >= block_cnt) {
+ if (fba.index_out != fba.start_index) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ break;
+ }
+
+ if (i % 2) {
+ if (fba.range_buffer_size_out != 0) {
+ error = -EINVAL;
+ goto out;
+ }
+ } else {
+ if (fba.range_buffer_size_out != sizeof(*ranges)) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ if (ranges[0].begin != i * 2) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ if (ranges[0].end != i * 2 + 1 &&
+ ranges[0].end != i * 2 + 2) {
+ error = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+out:
+ close(fd);
+ close(cmd_fd);
+ return error;
+}
+
+static int get_blocks_test(const char *mount_dir)
+{
+ char *backing_dir;
+ int cmd_fd = -1;
+ int i;
+ struct test_files_set test = get_test_files_set();
+ const int file_num = test.files_count;
+
+ backing_dir = create_backing_dir(mount_dir);
+ if (!backing_dir)
+ goto failure;
+
+ if (mount_fs_opt(mount_dir, backing_dir, "readahead=0", false) != 0)
+ goto failure;
+
+ cmd_fd = open_commands_file(mount_dir);
+ if (cmd_fd < 0)
+ goto failure;
+
+ /* Write data. */
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+
+ if (emit_file(cmd_fd, NULL, file->name, &file->id, file->size,
+ NULL))
+ goto failure;
+
+ if (emit_partial_test_file_data(mount_dir, file))
+ goto failure;
+ }
+
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+
+ if (validate_ranges(mount_dir, file))
+ goto failure;
+
+ /*
+ * The smallest files are filled completely, so this checks that
+ * the fast get_filled_blocks path is not causing issues
+ */
+ if (validate_ranges(mount_dir, file))
+ goto failure;
+ }
+
+ close(cmd_fd);
+ umount(mount_dir);
+ free(backing_dir);
+ return TEST_SUCCESS;
+
+failure:
+ close(cmd_fd);
+ umount(mount_dir);
+ free(backing_dir);
+ return TEST_FAILURE;
+}
+
+static int emit_partial_test_file_hash(const char *mount_dir,
+ struct test_file *file)
+{
+ int err;
+ int fd;
+ struct incfs_fill_blocks fill_blocks = {
+ .count = 1,
+ };
+ struct incfs_fill_block *fill_block_array =
+ calloc(fill_blocks.count, sizeof(struct incfs_fill_block));
+ uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+
+ if (file->size <= 4096 / 32 * 4096)
+ return 0;
+
+ if (!fill_block_array)
+ return -ENOMEM;
+ fill_blocks.fill_blocks = ptr_to_u64(fill_block_array);
+
+ rnd_buf(data, sizeof(data), 0);
+
+ fill_block_array[0] =
+ (struct incfs_fill_block){ .block_index = 1,
+ .data_len =
+ INCFS_DATA_FILE_BLOCK_SIZE,
+ .data = ptr_to_u64(data),
+ .flags = INCFS_BLOCK_FLAGS_HASH };
+
+ fd = open_file_by_id(mount_dir, file->id, true);
+ if (fd < 0) {
+ err = errno;
+ goto failure;
+ }
+
+ err = ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks);
+ close(fd);
+ if (err < fill_blocks.count)
+ err = errno;
+ else
+ err = 0;
+
+failure:
+ free(fill_block_array);
+ return err;
+}
+
+static int validate_hash_ranges(const char *mount_dir, struct test_file *file)
+{
+ int block_cnt = 1 + (file->size - 1) / INCFS_DATA_FILE_BLOCK_SIZE;
+ char *filename = concat_file_name(mount_dir, file->name);
+ int fd;
+ struct incfs_filled_range ranges[128];
+ struct incfs_get_filled_blocks_args fba = {
+ .range_buffer = ptr_to_u64(ranges),
+ .range_buffer_size = sizeof(ranges),
+ };
+ int error = TEST_SUCCESS;
+ int file_blocks = (file->size + INCFS_DATA_FILE_BLOCK_SIZE - 1) /
+ INCFS_DATA_FILE_BLOCK_SIZE;
+ int cmd_fd = -1;
+ struct incfs_permit_fill permit_fill;
+
+ if (file->size <= 4096 / 32 * 4096)
+ return 0;
+
+ fd = open(filename, O_RDONLY | O_CLOEXEC);
+ free(filename);
+ if (fd <= 0)
+ return TEST_FAILURE;
+
+ error = ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+ if (error != -1 || errno != EPERM) {
+ ksft_print_msg("INCFS_IOC_GET_FILLED_BLOCKS not blocked\n");
+ error = -EPERM;
+ goto out;
+ }
+
+ cmd_fd = open_commands_file(mount_dir);
+ permit_fill.file_descriptor = fd;
+ if (ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill)) {
+ print_error("INCFS_IOC_PERMIT_FILL failed");
+ return -EPERM;
+ goto out;
+ }
+
+ error = ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+ if (error)
+ goto out;
+
+ if (fba.total_blocks_out <= block_cnt) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ if (fba.data_blocks_out != block_cnt) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ if (fba.range_buffer_size_out != sizeof(struct incfs_filled_range)) {
+ error = -EINVAL;
+ goto out;
+ }
+
+ if (ranges[0].begin != file_blocks + 1 ||
+ ranges[0].end != file_blocks + 2) {
+ error = -EINVAL;
+ goto out;
+ }
+
+out:
+ close(cmd_fd);
+ close(fd);
+ return error;
+}
+
+static int get_hash_blocks_test(const char *mount_dir)
+{
+ char *backing_dir;
+ int cmd_fd = -1;
+ int i;
+ struct test_files_set test = get_test_files_set();
+ const int file_num = test.files_count;
+
+ backing_dir = create_backing_dir(mount_dir);
+ if (!backing_dir)
+ goto failure;
+
+ if (mount_fs_opt(mount_dir, backing_dir, "readahead=0", false) != 0)
+ goto failure;
+
+ cmd_fd = open_commands_file(mount_dir);
+ if (cmd_fd < 0)
+ goto failure;
+
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+
+ if (crypto_emit_file(cmd_fd, NULL, file->name, &file->id,
+ file->size, file->root_hash,
+ file->sig.add_data))
+ goto failure;
+
+ if (emit_partial_test_file_hash(mount_dir, file))
+ goto failure;
+ }
+
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+
+ if (validate_hash_ranges(mount_dir, file))
+ goto failure;
+ }
+
+ close(cmd_fd);
+ umount(mount_dir);
+ free(backing_dir);
+ return TEST_SUCCESS;
+
+failure:
+ close(cmd_fd);
+ umount(mount_dir);
+ free(backing_dir);
+ return TEST_FAILURE;
+}
+
+#define THREE_GB (3LL * 1024 * 1024 * 1024)
+#define FOUR_GB (4LL * 1024 * 1024 * 1024) /* Have 1GB of margin */
+static int large_file_test(const char *mount_dir)
+{
+ char *backing_dir;
+ int cmd_fd = -1;
+ int i;
+ int result = TEST_FAILURE, ret;
+ uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE] = {};
+ int block_count = THREE_GB / INCFS_DATA_FILE_BLOCK_SIZE;
+ struct incfs_fill_block *block_buf =
+ calloc(block_count, sizeof(struct incfs_fill_block));
+ struct incfs_fill_blocks fill_blocks = {
+ .count = block_count,
+ .fill_blocks = ptr_to_u64(block_buf),
+ };
+ incfs_uuid_t id;
+ int fd = -1;
+ struct statvfs svfs;
+ unsigned long long free_disksz;
+
+ ret = statvfs(mount_dir, &svfs);
+ if (ret) {
+ ksft_print_msg("Can't get disk size. Skipping %s...\n", __func__);
+ return TEST_SKIP;
+ }
+
+ free_disksz = (unsigned long long)svfs.f_bavail * svfs.f_bsize;
+
+ if (FOUR_GB > free_disksz) {
+ ksft_print_msg("Not enough free disk space (%lldMB). Skipping %s...\n",
+ free_disksz >> 20, __func__);
+ return TEST_SKIP;
+ }
+
+ backing_dir = create_backing_dir(mount_dir);
+ if (!backing_dir)
+ goto failure;
+
+ if (mount_fs_opt(mount_dir, backing_dir, "readahead=0", false) != 0)
+ goto failure;
+
+ cmd_fd = open_commands_file(mount_dir);
+ if (cmd_fd < 0)
+ goto failure;
+
+ if (emit_file(cmd_fd, NULL, "very_large_file", &id,
+ (uint64_t)block_count * INCFS_DATA_FILE_BLOCK_SIZE,
+ NULL) < 0)
+ goto failure;
+
+ for (i = 0; i < block_count; i++) {
+ block_buf[i].compression = COMPRESSION_NONE;
+ block_buf[i].block_index = i;
+ block_buf[i].data_len = INCFS_DATA_FILE_BLOCK_SIZE;
+ block_buf[i].data = ptr_to_u64(data);
+ }
+
+ fd = open_file_by_id(mount_dir, id, true);
+ if (fd < 0)
+ goto failure;
+
+ if (ioctl(fd, INCFS_IOC_FILL_BLOCKS, &fill_blocks) != block_count)
+ goto failure;
+
+ if (emit_file(cmd_fd, NULL, "very_very_large_file", &id, 1LL << 40,
+ NULL) < 0)
+ goto failure;
+
+ result = TEST_SUCCESS;
+
+failure:
+ close(fd);
+ close(cmd_fd);
+ unlink("very_large_file");
+ umount(mount_dir);
+ free(backing_dir);
+ return result;
+}
+
+static int validate_mapped_file(const char *orig_name, const char *name,
+ size_t size, size_t offset)
+{
+ struct stat st;
+ int orig_fd = -1, fd = -1;
+ size_t block;
+ int result = TEST_FAILURE;
+
+ if (stat(name, &st)) {
+ ksft_print_msg("Failed to stat %s with error %s\n",
+ name, strerror(errno));
+ goto failure;
+ }
+
+ if (size != st.st_size) {
+ ksft_print_msg("Mismatched file sizes for file %s - expected %lu, got %lu\n",
+ name, size, st.st_size);
+ goto failure;
+ }
+
+ fd = open(name, O_RDONLY | O_CLOEXEC);
+ if (fd == -1) {
+ ksft_print_msg("Failed to open %s with error %s\n", name,
+ strerror(errno));
+ goto failure;
+ }
+
+ orig_fd = open(orig_name, O_RDONLY | O_CLOEXEC);
+ if (orig_fd == -1) {
+ ksft_print_msg("Failed to open %s with error %s\n", orig_name,
+ strerror(errno));
+ goto failure;
+ }
+
+ for (block = 0; block < size; block += INCFS_DATA_FILE_BLOCK_SIZE) {
+ uint8_t orig_data[INCFS_DATA_FILE_BLOCK_SIZE];
+ uint8_t data[INCFS_DATA_FILE_BLOCK_SIZE];
+ ssize_t orig_read, mapped_read;
+
+ orig_read = pread(orig_fd, orig_data,
+ INCFS_DATA_FILE_BLOCK_SIZE, block + offset);
+ mapped_read = pread(fd, data, INCFS_DATA_FILE_BLOCK_SIZE,
+ block);
+
+ if (orig_read < mapped_read ||
+ mapped_read != min(size - block,
+ INCFS_DATA_FILE_BLOCK_SIZE)) {
+ ksft_print_msg("Failed to read enough data: %lu %lu %lu %lu %ld\n",
+ block, size, offset, orig_read,
+ mapped_read);
+ goto failure;
+ }
+
+ if (memcmp(orig_data, data, mapped_read)) {
+ ksft_print_msg("Data doesn't match: %lu %lu %lu %lu %ld\n",
+ block, size, offset, orig_read,
+ mapped_read);
+ goto failure;
+ }
+ }
+
+ result = TEST_SUCCESS;
+
+failure:
+ close(orig_fd);
+ close(fd);
+ return result;
+}
+
+static int mapped_file_test(const char *mount_dir)
+{
+ char *backing_dir;
+ int result = TEST_FAILURE;
+ int cmd_fd = -1;
+ int i;
+ struct test_files_set test = get_test_files_set();
+ const int file_num = test.files_count;
+
+ backing_dir = create_backing_dir(mount_dir);
+ if (!backing_dir)
+ goto failure;
+
+ if (mount_fs_opt(mount_dir, backing_dir, "readahead=0", false) != 0)
+ goto failure;
+
+ cmd_fd = open_commands_file(mount_dir);
+ if (cmd_fd < 0)
+ goto failure;
+
+ for (i = 0; i < file_num; ++i) {
+ struct test_file *file = &test.files[i];
+ size_t blocks = file->size / INCFS_DATA_FILE_BLOCK_SIZE;
+ size_t mapped_offset = blocks / 4 *
+ INCFS_DATA_FILE_BLOCK_SIZE;
+ size_t mapped_size = file->size / 4 * 3 - mapped_offset;
+ struct incfs_create_mapped_file_args mfa;
+ char mapped_file_name[FILENAME_MAX];
+ char orig_file_path[PATH_MAX];
+ char mapped_file_path[PATH_MAX];
+
+ if (emit_file(cmd_fd, NULL, file->name, &file->id, file->size,
+ NULL) < 0)
+ goto failure;
+
+ if (emit_test_file_data(mount_dir, file))
+ goto failure;
+
+ if (snprintf(mapped_file_name, ARRAY_SIZE(mapped_file_name),
+ "%s.mapped", file->name) < 0)
+ goto failure;
+
+ mfa = (struct incfs_create_mapped_file_args) {
+ .size = mapped_size,
+ .mode = 0664,
+ .file_name = ptr_to_u64(mapped_file_name),
+ .source_file_id = file->id,
+ .source_offset = mapped_offset,
+ };
+
+ result = ioctl(cmd_fd, INCFS_IOC_CREATE_MAPPED_FILE, &mfa);
+ if (result) {
+ ksft_print_msg(
+ "Failed to create mapped file with error %d\n",
+ result);
+ goto failure;
+ }
+
+ result = snprintf(orig_file_path,
+ ARRAY_SIZE(orig_file_path), "%s/%s",
+ mount_dir, file->name);
+
+ if (result < 0 || result >= ARRAY_SIZE(mapped_file_path)) {
+ result = TEST_FAILURE;
+ goto failure;
+ }
+
+ result = snprintf(mapped_file_path,
+ ARRAY_SIZE(mapped_file_path), "%s/%s",
+ mount_dir, mapped_file_name);
+
+ if (result < 0 || result >= ARRAY_SIZE(mapped_file_path)) {
+ result = TEST_FAILURE;
+ goto failure;
+ }
+
+ result = validate_mapped_file(orig_file_path, mapped_file_path,
+ mapped_size, mapped_offset);
+ if (result)
+ goto failure;
+ }
+
+failure:
+ close(cmd_fd);
+ umount(mount_dir);
+ free(backing_dir);
+ return result;
+}
+
+static const char v1_file[] = {
+ /* Header */
+ /* 0x00: Magic number */
+ 0x49, 0x4e, 0x43, 0x46, 0x53, 0x00, 0x00, 0x00,
+ /* 0x08: Version */
+ 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x10: Header size */
+ 0x38, 0x00,
+ /* 0x12: Block size */
+ 0x00, 0x10,
+ /* 0x14: Flags */
+ 0x00, 0x00, 0x00, 0x00,
+ /* 0x18: First md offset */
+ 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x20: File size */
+ 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x28: UUID */
+ 0x8c, 0x7d, 0xd9, 0x22, 0xad, 0x47, 0x49, 0x4f,
+ 0xc0, 0x2c, 0x38, 0x8e, 0x12, 0xc0, 0x0e, 0xac,
+
+ /* 0x38: Attribute */
+ 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61,
+ 0x31, 0x32, 0x33, 0x31, 0x32, 0x33,
+
+ /* Attribute md record */
+ /* 0x46: Type */
+ 0x02,
+ /* 0x47: Size */
+ 0x25, 0x00,
+ /* 0x49: CRC */
+ 0x9a, 0xef, 0xef, 0x72,
+ /* 0x4d: Next md offset */
+ 0x75, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x55: Prev md offset */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x5d: fa_offset */
+ 0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x65: fa_size */
+ 0x0e, 0x00,
+ /* 0x67: fa_crc */
+ 0xfb, 0x5e, 0x72, 0x89,
+
+ /* Blockmap table */
+ /* 0x6b: First 10-byte entry */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+
+ /* Blockmap md record */
+ /* 0x75: Type */
+ 0x01,
+ /* 0x76: Size */
+ 0x23, 0x00,
+ /* 0x78: CRC */
+ 0x74, 0x45, 0xd3, 0xb9,
+ /* 0x7c: Next md offset */
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x84: Prev md offset */
+ 0x46, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x8c: blockmap offset */
+ 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ /* 0x94: blockmap count */
+ 0x01, 0x00, 0x00, 0x00,
+};
+
+static int compatibility_test(const char *mount_dir)
+{
+ static const char *name = "file";
+ int result = TEST_FAILURE;
+ char *backing_dir = NULL;
+ char *filename = NULL;
+ int fd = -1;
+ uint64_t size = 0x0c;
+
+ TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+ TEST(filename = concat_file_name(backing_dir, name), filename);
+ TEST(fd = open(filename, O_CREAT | O_WRONLY | O_CLOEXEC, 0777),
+ fd != -1);
+ TESTEQUAL(write(fd, v1_file, sizeof(v1_file)), sizeof(v1_file));
+ TESTEQUAL(fsetxattr(fd, INCFS_XATTR_SIZE_NAME, &size, sizeof(size), 0),
+ 0);
+ TESTEQUAL(mount_fs(mount_dir, backing_dir, 50), 0);
+ free(filename);
+ TEST(filename = concat_file_name(mount_dir, name), filename);
+ close(fd);
+ TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+
+ result = TEST_SUCCESS;
+out:
+ close(fd);
+ umount(mount_dir);
+ free(backing_dir);
+ free(filename);
+ return result;
+}
+
+static int zero_blocks_written_count(int fd, uint32_t data_blocks_written,
+ uint32_t hash_blocks_written)
+{
+ int test_result = TEST_FAILURE;
+ uint64_t offset;
+ uint8_t type;
+ uint32_t bw;
+
+ /* Get first md record */
+ TESTEQUAL(pread(fd, &offset, sizeof(offset), 24), sizeof(offset));
+
+ /* Find status md record */
+ for (;;) {
+ TESTNE(offset, 0);
+ TESTEQUAL(pread(fd, &type, sizeof(type), le64_to_cpu(offset)),
+ sizeof(type));
+ if (type == 4)
+ break;
+ TESTEQUAL(pread(fd, &offset, sizeof(offset),
+ le64_to_cpu(offset) + 7),
+ sizeof(offset));
+ }
+
+ /* Read blocks_written */
+ offset = le64_to_cpu(offset);
+ TESTEQUAL(pread(fd, &bw, sizeof(bw), offset + 23), sizeof(bw));
+ TESTEQUAL(le32_to_cpu(bw), data_blocks_written);
+ TESTEQUAL(pread(fd, &bw, sizeof(bw), offset + 27), sizeof(bw));
+ TESTEQUAL(le32_to_cpu(bw), hash_blocks_written);
+
+ /* Write out zero */
+ bw = 0;
+ TESTEQUAL(pwrite(fd, &bw, sizeof(bw), offset + 23), sizeof(bw));
+ TESTEQUAL(pwrite(fd, &bw, sizeof(bw), offset + 27), sizeof(bw));
+
+ test_result = TEST_SUCCESS;
+out:
+ return test_result;
+}
+
+static int validate_block_count(const char *mount_dir, const char *backing_dir,
+ struct test_file *file,
+ int total_data_blocks, int filled_data_blocks,
+ int total_hash_blocks, int filled_hash_blocks)
+{
+ char *filename = NULL;
+ char *backing_filename = NULL;
+ int fd = -1;
+ struct incfs_get_block_count_args bca = {};
+ int test_result = TEST_FAILURE;
+ struct incfs_filled_range ranges[128];
+ struct incfs_get_filled_blocks_args fba = {
+ .range_buffer = ptr_to_u64(ranges),
+ .range_buffer_size = sizeof(ranges),
+ };
+ int cmd_fd = -1;
+ struct incfs_permit_fill permit_fill;
+
+ TEST(filename = concat_file_name(mount_dir, file->name), filename);
+ TEST(backing_filename = concat_file_name(backing_dir, file->name),
+ backing_filename);
+ TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+
+ TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+ TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+ TESTEQUAL(bca.filled_data_blocks_out, filled_data_blocks);
+ TESTEQUAL(bca.total_hash_blocks_out, total_hash_blocks);
+ TESTEQUAL(bca.filled_hash_blocks_out, filled_hash_blocks);
+
+ close(fd);
+ TESTEQUAL(umount(mount_dir), 0);
+ TEST(fd = open(backing_filename, O_RDWR | O_CLOEXEC), fd != -1);
+ TESTEQUAL(zero_blocks_written_count(fd, filled_data_blocks,
+ filled_hash_blocks),
+ TEST_SUCCESS);
+ close(fd);
+ fd = -1;
+ TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "readahead=0", false),
+ 0);
+ TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+
+ TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+ TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+ TESTEQUAL(bca.filled_data_blocks_out, 0);
+ TESTEQUAL(bca.total_hash_blocks_out, total_hash_blocks);
+ TESTEQUAL(bca.filled_hash_blocks_out, 0);
+
+ TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+ permit_fill.file_descriptor = fd;
+ TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_PERMIT_FILL, &permit_fill), 0);
+ do {
+ ioctl(fd, INCFS_IOC_GET_FILLED_BLOCKS, &fba);
+ fba.start_index = fba.index_out + 1;
+ } while (fba.index_out < fba.total_blocks_out);
+
+ TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+ TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+ TESTEQUAL(bca.filled_data_blocks_out, filled_data_blocks);
+ TESTEQUAL(bca.total_hash_blocks_out, total_hash_blocks);
+ TESTEQUAL(bca.filled_hash_blocks_out, filled_hash_blocks);
+
+ test_result = TEST_SUCCESS;
+out:
+ close(cmd_fd);
+ close(fd);
+ free(filename);
+ free(backing_filename);
+ return test_result;
+}
+
+
+
+static int validate_data_block_count(const char *mount_dir,
+ const char *backing_dir,
+ struct test_file *file)
+{
+ const int total_data_blocks = 1 + (file->size - 1) /
+ INCFS_DATA_FILE_BLOCK_SIZE;
+ const int filled_data_blocks = (total_data_blocks + 1) / 2;
+
+ int test_result = TEST_FAILURE;
+ char *filename = NULL;
+ char *incomplete_filename = NULL;
+ struct stat stat_buf_incomplete, stat_buf_file;
+ int fd = -1;
+ struct incfs_get_block_count_args bca = {};
+ int i;
+
+ TEST(filename = concat_file_name(mount_dir, file->name), filename);
+ TEST(incomplete_filename = get_incomplete_filename(mount_dir, file->id),
+ incomplete_filename);
+
+ TESTEQUAL(stat(filename, &stat_buf_file), 0);
+ TESTEQUAL(stat(incomplete_filename, &stat_buf_incomplete), 0);
+ TESTEQUAL(stat_buf_file.st_ino, stat_buf_incomplete.st_ino);
+ TESTEQUAL(stat_buf_file.st_nlink, 3);
+
+ TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+ TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+ TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+ TESTEQUAL(bca.filled_data_blocks_out, 0);
+ TESTEQUAL(bca.total_hash_blocks_out, 0);
+ TESTEQUAL(bca.filled_hash_blocks_out, 0);
+
+ for (i = 0; i < total_data_blocks; i += 2)
+ TESTEQUAL(emit_test_block(mount_dir, file, i), 0);
+
+ TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+ TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+ TESTEQUAL(bca.filled_data_blocks_out, filled_data_blocks);
+ TESTEQUAL(bca.total_hash_blocks_out, 0);
+ TESTEQUAL(bca.filled_hash_blocks_out, 0);
+ close(fd);
+ fd = -1;
+
+ TESTEQUAL(validate_block_count(mount_dir, backing_dir, file,
+ total_data_blocks, filled_data_blocks,
+ 0, 0),
+ 0);
+
+ for (i = 1; i < total_data_blocks; i += 2)
+ TESTEQUAL(emit_test_block(mount_dir, file, i), 0);
+
+ TESTEQUAL(stat(incomplete_filename, &stat_buf_incomplete), -1);
+ TESTEQUAL(errno, ENOENT);
+
+ test_result = TEST_SUCCESS;
+out:
+ close(fd);
+ free(incomplete_filename);
+ free(filename);
+ return test_result;
+}
+
+static int data_block_count_test(const char *mount_dir)
+{
+ int result = TEST_FAILURE;
+ char *backing_dir;
+ int cmd_fd = -1;
+ int i;
+ struct test_files_set test = get_test_files_set();
+
+ TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+ TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "readahead=0", false),
+ 0);
+
+ for (i = 0; i < test.files_count; ++i) {
+ struct test_file *file = &test.files[i];
+
+ TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+ TESTEQUAL(emit_file(cmd_fd, NULL, file->name, &file->id,
+ file->size, NULL),
+ 0);
+ close(cmd_fd);
+ cmd_fd = -1;
+
+ TESTEQUAL(validate_data_block_count(mount_dir, backing_dir,
+ file),
+ 0);
+ }
+
+ result = TEST_SUCCESS;
+out:
+ close(cmd_fd);
+ umount(mount_dir);
+ free(backing_dir);
+ return result;
+}
+
+static int validate_hash_block_count(const char *mount_dir,
+ const char *backing_dir,
+ struct test_file *file)
+{
+ const int digest_size = SHA256_DIGEST_SIZE;
+ const int hash_per_block = INCFS_DATA_FILE_BLOCK_SIZE / digest_size;
+ const int total_data_blocks = 1 + (file->size - 1) /
+ INCFS_DATA_FILE_BLOCK_SIZE;
+
+ int result = TEST_FAILURE;
+ int hash_layer = total_data_blocks;
+ int total_hash_blocks = 0;
+ int filled_hash_blocks;
+ char *filename = NULL;
+ int fd = -1;
+ struct incfs_get_block_count_args bca = {};
+
+ while (hash_layer > 1) {
+ hash_layer = (hash_layer + hash_per_block - 1) / hash_per_block;
+ total_hash_blocks += hash_layer;
+ }
+ filled_hash_blocks = total_hash_blocks > 1 ? 1 : 0;
+
+ TEST(filename = concat_file_name(mount_dir, file->name), filename);
+ TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+
+ TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+ TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+ TESTEQUAL(bca.filled_data_blocks_out, 0);
+ TESTEQUAL(bca.total_hash_blocks_out, total_hash_blocks);
+ TESTEQUAL(bca.filled_hash_blocks_out, 0);
+
+ TESTEQUAL(emit_partial_test_file_hash(mount_dir, file), 0);
+
+ TESTEQUAL(ioctl(fd, INCFS_IOC_GET_BLOCK_COUNT, &bca), 0);
+ TESTEQUAL(bca.total_data_blocks_out, total_data_blocks);
+ TESTEQUAL(bca.filled_data_blocks_out, 0);
+ TESTEQUAL(bca.total_hash_blocks_out, total_hash_blocks);
+ TESTEQUAL(bca.filled_hash_blocks_out, filled_hash_blocks);
+ close(fd);
+ fd = -1;
+
+ if (filled_hash_blocks)
+ TESTEQUAL(validate_block_count(mount_dir, backing_dir, file,
+ total_data_blocks, 0,
+ total_hash_blocks, filled_hash_blocks),
+ 0);
+
+ result = TEST_SUCCESS;
+out:
+ close(fd);
+ free(filename);
+ return result;
+}
+
+static int hash_block_count_test(const char *mount_dir)
+{
+ int result = TEST_FAILURE;
+ char *backing_dir;
+ int cmd_fd = -1;
+ int i;
+ struct test_files_set test = get_test_files_set();
+
+ TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+ TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "readahead=0", false),
+ 0);
+
+ for (i = 0; i < test.files_count; i++) {
+ struct test_file *file = &test.files[i];
+
+ TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+ TESTEQUAL(crypto_emit_file(cmd_fd, NULL, file->name, &file->id,
+ file->size, file->root_hash,
+ file->sig.add_data),
+ 0);
+ close(cmd_fd);
+ cmd_fd = -1;
+
+ TESTEQUAL(validate_hash_block_count(mount_dir, backing_dir,
+ &test.files[i]),
+ 0);
+ }
+
+ result = TEST_SUCCESS;
+out:
+ close(cmd_fd);
+ umount(mount_dir);
+ free(backing_dir);
+ return result;
+}
+
+static int is_close(struct timespec *start, int expected_ms)
+{
+ const int allowed_variance = 100;
+ int result = TEST_FAILURE;
+ struct timespec finish;
+ int diff;
+
+ TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &finish), 0);
+ diff = (finish.tv_sec - start->tv_sec) * 1000 +
+ (finish.tv_nsec - start->tv_nsec) / 1000000;
+
+ TESTCOND(diff >= expected_ms - allowed_variance);
+ TESTCOND(diff <= expected_ms + allowed_variance);
+ result = TEST_SUCCESS;
+out:
+ return result;
+}
+
+static int per_uid_read_timeouts_test(const char *mount_dir)
+{
+ struct test_file file = {
+ .name = "file",
+ .size = 16 * INCFS_DATA_FILE_BLOCK_SIZE
+ };
+
+ int result = TEST_FAILURE;
+ char *backing_dir = NULL;
+ int pid = -1;
+ int cmd_fd = -1;
+ char *filename = NULL;
+ int fd = -1;
+ struct timespec start;
+ char buffer[4096];
+ struct incfs_per_uid_read_timeouts purt_get[1];
+ struct incfs_get_read_timeouts_args grt = {
+ ptr_to_u64(purt_get),
+ sizeof(purt_get)
+ };
+ struct incfs_per_uid_read_timeouts purt_set[] = {
+ {
+ .uid = 0,
+ .min_time_us = 1000000,
+ .min_pending_time_us = 2000000,
+ .max_pending_time_us = 3000000,
+ },
+ };
+ struct incfs_set_read_timeouts_args srt = {
+ ptr_to_u64(purt_set),
+ sizeof(purt_set)
+ };
+ int status;
+
+ TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+ TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+ "read_timeout_ms=1000,readahead=0", false), 0);
+
+ TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+ TESTEQUAL(emit_file(cmd_fd, NULL, file.name, &file.id, file.size,
+ NULL), 0);
+
+ TEST(filename = concat_file_name(mount_dir, file.name), filename);
+ TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+ TESTEQUAL(fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC), 0);
+
+ /* Default mount options read failure is 1000 */
+ TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+ TESTEQUAL(pread(fd, buffer, sizeof(buffer), 0), -1);
+ TESTEQUAL(is_close(&start, 1000), 0);
+
+ grt.timeouts_array_size = 0;
+ TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_GET_READ_TIMEOUTS, &grt), 0);
+ TESTEQUAL(grt.timeouts_array_size_out, 0);
+
+ /* Set it to 3000 */
+ TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_SET_READ_TIMEOUTS, &srt), 0);
+ TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+ TESTEQUAL(pread(fd, buffer, sizeof(buffer), 0), -1);
+ TESTEQUAL(is_close(&start, 3000), 0);
+ TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_GET_READ_TIMEOUTS, &grt), -1);
+ TESTEQUAL(errno, E2BIG);
+ TESTEQUAL(grt.timeouts_array_size_out, sizeof(purt_get));
+ grt.timeouts_array_size = sizeof(purt_get);
+ TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_GET_READ_TIMEOUTS, &grt), 0);
+ TESTEQUAL(grt.timeouts_array_size_out, sizeof(purt_get));
+ TESTEQUAL(purt_get[0].uid, purt_set[0].uid);
+ TESTEQUAL(purt_get[0].min_time_us, purt_set[0].min_time_us);
+ TESTEQUAL(purt_get[0].min_pending_time_us,
+ purt_set[0].min_pending_time_us);
+ TESTEQUAL(purt_get[0].max_pending_time_us,
+ purt_set[0].max_pending_time_us);
+
+ /* Still 1000 in UID 2 */
+ TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+ TEST(pid = fork(), pid != -1);
+ if (pid == 0) {
+ TESTEQUAL(setuid(2), 0);
+ TESTEQUAL(pread(fd, buffer, sizeof(buffer), 0), -1);
+ exit(0);
+ }
+ TESTNE(wait(&status), -1);
+ TESTEQUAL(WEXITSTATUS(status), 0);
+ TESTEQUAL(is_close(&start, 1000), 0);
+
+ /* Set it to default */
+ purt_set[0].max_pending_time_us = UINT32_MAX;
+ TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_SET_READ_TIMEOUTS, &srt), 0);
+ TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+ TESTEQUAL(pread(fd, buffer, sizeof(buffer), 0), -1);
+ TESTEQUAL(is_close(&start, 1000), 0);
+
+ /* Test min read time */
+ TESTEQUAL(emit_test_block(mount_dir, &file, 0), 0);
+ TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+ TESTEQUAL(pread(fd, buffer, sizeof(buffer), 0), sizeof(buffer));
+ TESTEQUAL(is_close(&start, 1000), 0);
+
+ /* Test min pending time */
+ purt_set[0].uid = 2;
+ TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_SET_READ_TIMEOUTS, &srt), 0);
+ TESTEQUAL(clock_gettime(CLOCK_MONOTONIC, &start), 0);
+ TEST(pid = fork(), pid != -1);
+ if (pid == 0) {
+ TESTEQUAL(setuid(2), 0);
+ TESTEQUAL(pread(fd, buffer, sizeof(buffer), sizeof(buffer)),
+ sizeof(buffer));
+ exit(0);
+ }
+ sleep(1);
+ TESTEQUAL(emit_test_block(mount_dir, &file, 1), 0);
+ TESTNE(wait(&status), -1);
+ TESTEQUAL(WEXITSTATUS(status), 0);
+ TESTEQUAL(is_close(&start, 2000), 0);
+
+ /* Clear timeouts */
+ srt.timeouts_array_size = 0;
+ TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_SET_READ_TIMEOUTS, &srt), 0);
+ grt.timeouts_array_size = 0;
+ TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_GET_READ_TIMEOUTS, &grt), 0);
+ TESTEQUAL(grt.timeouts_array_size_out, 0);
+
+ result = TEST_SUCCESS;
+out:
+ close(fd);
+
+ if (pid == 0)
+ exit(result);
+
+ free(filename);
+ close(cmd_fd);
+ umount(mount_dir);
+ free(backing_dir);
+ return result;
+}
+
+#define DIRS 3
+static int inotify_test(const char *mount_dir)
+{
+ const char *mapped_file_name = "mapped_name";
+ struct test_file file = {
+ .name = "file",
+ .size = 16 * INCFS_DATA_FILE_BLOCK_SIZE
+ };
+
+ int result = TEST_FAILURE;
+ char *backing_dir = NULL, *index_dir = NULL, *incomplete_dir = NULL;
+ char *file_name = NULL;
+ int cmd_fd = -1;
+ int notify_fd = -1;
+ int wds[DIRS];
+ char buffer[DIRS * (sizeof(struct inotify_event) + NAME_MAX + 1)];
+ char *ptr = buffer;
+ struct inotify_event *event;
+ struct inotify_event *events[DIRS] = {};
+ const char *names[DIRS] = {};
+ int res;
+ char id[sizeof(incfs_uuid_t) * 2 + 1];
+ struct incfs_create_mapped_file_args mfa;
+
+ /* File creation triggers inotify events in .index and .incomplete? */
+ TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+ TEST(index_dir = concat_file_name(mount_dir, ".index"), index_dir);
+ TEST(incomplete_dir = concat_file_name(mount_dir, ".incomplete"),
+ incomplete_dir);
+ TESTEQUAL(mount_fs(mount_dir, backing_dir, 50), 0);
+ TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+ TEST(notify_fd = inotify_init1(IN_NONBLOCK | IN_CLOEXEC),
+ notify_fd != -1);
+ TEST(wds[0] = inotify_add_watch(notify_fd, mount_dir,
+ IN_CREATE | IN_DELETE),
+ wds[0] != -1);
+ TEST(wds[1] = inotify_add_watch(notify_fd, index_dir,
+ IN_CREATE | IN_DELETE),
+ wds[1] != -1);
+ TEST(wds[2] = inotify_add_watch(notify_fd, incomplete_dir,
+ IN_CREATE | IN_DELETE),
+ wds[2] != -1);
+ TESTEQUAL(emit_file(cmd_fd, NULL, file.name, &file.id, file.size,
+ NULL), 0);
+ TEST(res = read(notify_fd, buffer, sizeof(buffer)), res != -1);
+
+ while (ptr < buffer + res) {
+ int i;
+
+ event = (struct inotify_event *) ptr;
+ TESTCOND(ptr + sizeof(*event) <= buffer + res);
+ for (i = 0; i < DIRS; ++i)
+ if (event->wd == wds[i]) {
+ TESTEQUAL(events[i], NULL);
+ events[i] = event;
+ ptr += sizeof(*event);
+ names[i] = ptr;
+ ptr += events[i]->len;
+ TESTCOND(ptr <= buffer + res);
+ break;
+ }
+ TESTCOND(i < DIRS);
+ }
+
+ TESTNE(events[0], NULL);
+ TESTNE(events[1], NULL);
+ TESTNE(events[2], NULL);
+
+ bin2hex(id, file.id.bytes, sizeof(incfs_uuid_t));
+
+ TESTEQUAL(events[0]->mask, IN_CREATE);
+ TESTEQUAL(events[1]->mask, IN_CREATE);
+ TESTEQUAL(events[2]->mask, IN_CREATE);
+ TESTEQUAL(strcmp(names[0], file.name), 0);
+ TESTEQUAL(strcmp(names[1], id), 0);
+ TESTEQUAL(strcmp(names[2], id), 0);
+
+ /* Creating a mapped file triggers inotify event */
+ mfa = (struct incfs_create_mapped_file_args) {
+ .size = INCFS_DATA_FILE_BLOCK_SIZE,
+ .mode = 0664,
+ .file_name = ptr_to_u64(mapped_file_name),
+ .source_file_id = file.id,
+ .source_offset = INCFS_DATA_FILE_BLOCK_SIZE,
+ };
+
+ TEST(res = ioctl(cmd_fd, INCFS_IOC_CREATE_MAPPED_FILE, &mfa),
+ res != -1);
+ TEST(res = read(notify_fd, buffer, sizeof(buffer)), res != -1);
+ event = (struct inotify_event *) buffer;
+ TESTEQUAL(event->wd, wds[0]);
+ TESTEQUAL(event->mask, IN_CREATE);
+ TESTEQUAL(strcmp(event->name, mapped_file_name), 0);
+
+ /* File completion triggers inotify event in .incomplete? */
+ TESTEQUAL(emit_test_file_data(mount_dir, &file), 0);
+ TEST(res = read(notify_fd, buffer, sizeof(buffer)), res != -1);
+ event = (struct inotify_event *) buffer;
+ TESTEQUAL(event->wd, wds[2]);
+ TESTEQUAL(event->mask, IN_DELETE);
+ TESTEQUAL(strcmp(event->name, id), 0);
+
+ /* File unlinking triggers inotify event in .index? */
+ TEST(file_name = concat_file_name(mount_dir, file.name), file_name);
+ TESTEQUAL(unlink(file_name), 0);
+ TEST(res = read(notify_fd, buffer, sizeof(buffer)), res != -1);
+ memset(events, 0, sizeof(events));
+ memset(names, 0, sizeof(names));
+ for (ptr = buffer; ptr < buffer + res;) {
+ event = (struct inotify_event *) ptr;
+ int i;
+
+ TESTCOND(ptr + sizeof(*event) <= buffer + res);
+ for (i = 0; i < DIRS; ++i)
+ if (event->wd == wds[i]) {
+ TESTEQUAL(events[i], NULL);
+ events[i] = event;
+ ptr += sizeof(*event);
+ names[i] = ptr;
+ ptr += events[i]->len;
+ TESTCOND(ptr <= buffer + res);
+ break;
+ }
+ TESTCOND(i < DIRS);
+ }
+
+ TESTNE(events[0], NULL);
+ TESTNE(events[1], NULL);
+ TESTEQUAL(events[2], NULL);
+
+ TESTEQUAL(events[0]->mask, IN_DELETE);
+ TESTEQUAL(events[1]->mask, IN_DELETE);
+ TESTEQUAL(strcmp(names[0], file.name), 0);
+ TESTEQUAL(strcmp(names[1], id), 0);
+
+ result = TEST_SUCCESS;
+out:
+ free(file_name);
+ close(notify_fd);
+ close(cmd_fd);
+ umount(mount_dir);
+ free(backing_dir);
+ free(index_dir);
+ free(incomplete_dir);
+ return result;
+}
+
+static EVP_PKEY *create_key(void)
+{
+ EVP_PKEY *pkey = NULL;
+ RSA *rsa = NULL;
+ BIGNUM *bn = NULL;
+
+ pkey = EVP_PKEY_new();
+ if (!pkey)
+ goto fail;
+
+ bn = BN_new();
+ BN_set_word(bn, RSA_F4);
+
+ rsa = RSA_new();
+ if (!rsa)
+ goto fail;
+
+ RSA_generate_key_ex(rsa, 4096, bn, NULL);
+ EVP_PKEY_assign_RSA(pkey, rsa);
+
+ BN_free(bn);
+ return pkey;
+
+fail:
+ BN_free(bn);
+ EVP_PKEY_free(pkey);
+ return NULL;
+}
+
+static X509 *get_cert(EVP_PKEY *key)
+{
+ X509 *x509 = NULL;
+ X509_NAME *name = NULL;
+
+ x509 = X509_new();
+ if (!x509)
+ return NULL;
+
+ ASN1_INTEGER_set(X509_get_serialNumber(x509), 1);
+ X509_gmtime_adj(X509_get_notBefore(x509), 0);
+ X509_gmtime_adj(X509_get_notAfter(x509), 31536000L);
+ X509_set_pubkey(x509, key);
+
+ name = X509_get_subject_name(x509);
+ X509_NAME_add_entry_by_txt(name, "C", MBSTRING_ASC,
+ (const unsigned char *)"US", -1, -1, 0);
+ X509_NAME_add_entry_by_txt(name, "ST", MBSTRING_ASC,
+ (const unsigned char *)"CA", -1, -1, 0);
+ X509_NAME_add_entry_by_txt(name, "L", MBSTRING_ASC,
+ (const unsigned char *)"San Jose", -1, -1, 0);
+ X509_NAME_add_entry_by_txt(name, "O", MBSTRING_ASC,
+ (const unsigned char *)"Example", -1, -1, 0);
+ X509_NAME_add_entry_by_txt(name, "OU", MBSTRING_ASC,
+ (const unsigned char *)"Org", -1, -1, 0);
+ X509_NAME_add_entry_by_txt(name, "CN", MBSTRING_ASC,
+ (const unsigned char *)"www.example.com", -1, -1, 0);
+ X509_set_issuer_name(x509, name);
+
+ if (!X509_sign(x509, key, EVP_sha256()))
+ return NULL;
+
+ return x509;
+}
+
+static int sign(EVP_PKEY *key, X509 *cert, const char *data, size_t len,
+ unsigned char **sig, size_t *sig_len)
+{
+ const int pkcs7_flags = PKCS7_BINARY | PKCS7_NOATTR | PKCS7_PARTIAL |
+ PKCS7_DETACHED;
+ const EVP_MD *md = EVP_sha256();
+
+ int result = TEST_FAILURE;
+
+ BIO *bio = NULL;
+ PKCS7 *p7 = NULL;
+ unsigned char *bio_buffer;
+
+ TEST(bio = BIO_new_mem_buf(data, len), bio);
+ TEST(p7 = PKCS7_sign(NULL, NULL, NULL, bio, pkcs7_flags), p7);
+ TESTNE(PKCS7_sign_add_signer(p7, cert, key, md, pkcs7_flags), 0);
+ TESTEQUAL(PKCS7_final(p7, bio, pkcs7_flags), 1);
+ TEST(*sig_len = i2d_PKCS7(p7, NULL), *sig_len);
+ TEST(bio_buffer = malloc(*sig_len), bio_buffer);
+ *sig = bio_buffer;
+ TEST(*sig_len = i2d_PKCS7(p7, &bio_buffer), *sig_len);
+ TESTEQUAL(PKCS7_verify(p7, NULL, NULL, bio, NULL,
+ pkcs7_flags | PKCS7_NOVERIFY | PKCS7_NOSIGS), 1);
+
+ result = TEST_SUCCESS;
+out:
+ PKCS7_free(p7);
+ BIO_free(bio);
+ return result;
+}
+
+static int verity_installed(const char *mount_dir, int cmd_fd, bool *installed)
+{
+ int result = TEST_FAILURE;
+ char *filename = NULL;
+ int fd = -1;
+ struct test_file *file = &get_test_files_set().files[0];
+
+ TESTEQUAL(emit_file(cmd_fd, NULL, file->name, &file->id, file->size,
+ NULL), 0);
+ TEST(filename = concat_file_name(mount_dir, file->name), filename);
+ TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+ TESTEQUAL(ioctl(fd, FS_IOC_ENABLE_VERITY, NULL), -1);
+ *installed = errno != EOPNOTSUPP;
+
+ result = TEST_SUCCESS;
+out:
+ close(fd);
+ if (filename)
+ remove(filename);
+ free(filename);
+ return result;
+}
+
+static int enable_verity(const char *mount_dir, struct test_file *file,
+ EVP_PKEY *key, X509 *cert, bool use_signatures)
+{
+ int result = TEST_FAILURE;
+ char *filename = NULL;
+ int fd = -1;
+ struct fsverity_enable_arg fear = {
+ .version = 1,
+ .hash_algorithm = FS_VERITY_HASH_ALG_SHA256,
+ .block_size = INCFS_DATA_FILE_BLOCK_SIZE,
+ .sig_size = 0,
+ .sig_ptr = 0,
+ };
+ struct {
+ __u8 version; /* must be 1 */
+ __u8 hash_algorithm; /* Merkle tree hash algorithm */
+ __u8 log_blocksize; /* log2 of size of data and tree blocks */
+ __u8 salt_size; /* size of salt in bytes; 0 if none */
+ __le32 sig_size; /* must be 0 */
+ __le64 data_size; /* size of file the Merkle tree is built over */
+ __u8 root_hash[64]; /* Merkle tree root hash */
+ __u8 salt[32]; /* salt prepended to each hashed block */
+ __u8 __reserved[144]; /* must be 0's */
+ } __packed fsverity_descriptor = {
+ .version = 1,
+ .hash_algorithm = 1,
+ .log_blocksize = 12,
+ .data_size = file->size,
+ };
+ struct {
+ char magic[8]; /* must be "FSVerity" */
+ __le16 digest_algorithm;
+ __le16 digest_size;
+ __u8 digest[32];
+ } __packed fsverity_signed_digest = {
+ .digest_algorithm = 1,
+ .digest_size = 32
+ };
+ unsigned char *sig = NULL;
+ size_t sig_size = 0;
+ uint64_t flags;
+ struct statx statxbuf = {};
+
+ memcpy(fsverity_signed_digest.magic, "FSVerity", 8);
+
+ TEST(filename = concat_file_name(mount_dir, file->name), filename);
+ TESTEQUAL(syscall(__NR_statx, AT_FDCWD, filename, 0, STATX_ALL,
+ &statxbuf), 0);
+ TESTEQUAL(statxbuf.stx_attributes_mask & STATX_ATTR_VERITY,
+ STATX_ATTR_VERITY);
+ TESTEQUAL(statxbuf.stx_attributes & STATX_ATTR_VERITY, 0);
+ TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+ TESTEQUAL(ioctl(fd, FS_IOC_GETFLAGS, &flags), 0);
+ TESTEQUAL(flags & FS_VERITY_FL, 0);
+
+ /* First try to enable verity with random digest */
+ if (key) {
+ TESTEQUAL(sign(key, cert, (void *)&fsverity_signed_digest,
+ sizeof(fsverity_signed_digest), &sig, &sig_size),
+ 0);
+
+ fear.sig_size = sig_size;
+ fear.sig_ptr = ptr_to_u64(sig);
+ TESTEQUAL(ioctl(fd, FS_IOC_ENABLE_VERITY, &fear), -1);
+ }
+
+ /* Now try with correct digest */
+ memcpy(fsverity_descriptor.root_hash, file->root_hash, 32);
+ sha256((char *)&fsverity_descriptor, sizeof(fsverity_descriptor),
+ (char *)fsverity_signed_digest.digest);
+
+ if (ioctl(fd, FS_IOC_ENABLE_VERITY, NULL) == -1 &&
+ errno == EOPNOTSUPP) {
+ result = TEST_SUCCESS;
+ goto out;
+ }
+
+ free(sig);
+ sig = NULL;
+
+ if (key)
+ TESTEQUAL(sign(key, cert, (void *)&fsverity_signed_digest,
+ sizeof(fsverity_signed_digest),
+ &sig, &sig_size),
+ 0);
+
+ if (use_signatures) {
+ fear.sig_size = sig_size;
+ file->verity_sig_size = sig_size;
+ fear.sig_ptr = ptr_to_u64(sig);
+ file->verity_sig = sig;
+ sig = NULL;
+ } else {
+ fear.sig_size = 0;
+ fear.sig_ptr = 0;
+ }
+ TESTEQUAL(ioctl(fd, FS_IOC_ENABLE_VERITY, &fear), 0);
+
+ result = TEST_SUCCESS;
+out:
+ free(sig);
+ close(fd);
+ free(filename);
+ return result;
+}
+
+static int memzero(const unsigned char *buf, size_t size)
+{
+ size_t i;
+
+ for (i = 0; i < size; ++i)
+ if (buf[i])
+ return -1;
+ return 0;
+}
+
+static int validate_verity(const char *mount_dir, struct test_file *file)
+{
+ int result = TEST_FAILURE;
+ char *filename = concat_file_name(mount_dir, file->name);
+ int fd = -1;
+ uint64_t flags;
+ struct fsverity_digest *digest;
+ struct statx statxbuf = {};
+ struct fsverity_read_metadata_arg frma = {};
+ uint8_t *buf = NULL;
+ struct fsverity_descriptor desc;
+
+ TEST(digest = malloc(sizeof(struct fsverity_digest) +
+ INCFS_MAX_HASH_SIZE), digest != NULL);
+ TEST(filename = concat_file_name(mount_dir, file->name), filename);
+ TESTEQUAL(syscall(__NR_statx, AT_FDCWD, filename, 0, STATX_ALL,
+ &statxbuf), 0);
+ TESTEQUAL(statxbuf.stx_attributes & STATX_ATTR_VERITY,
+ STATX_ATTR_VERITY);
+ TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+ TESTEQUAL(ioctl(fd, FS_IOC_GETFLAGS, &flags), 0);
+ TESTEQUAL(flags & FS_VERITY_FL, FS_VERITY_FL);
+ digest->digest_size = INCFS_MAX_HASH_SIZE;
+ TESTEQUAL(ioctl(fd, FS_IOC_MEASURE_VERITY, digest), 0);
+ TESTEQUAL(digest->digest_algorithm, FS_VERITY_HASH_ALG_SHA256);
+ TESTEQUAL(digest->digest_size, 32);
+
+ if (file->verity_sig) {
+ TEST(buf = malloc(file->verity_sig_size), buf);
+ frma = (struct fsverity_read_metadata_arg) {
+ .metadata_type = FS_VERITY_METADATA_TYPE_SIGNATURE,
+ .length = file->verity_sig_size,
+ .buf_ptr = ptr_to_u64(buf),
+ };
+ TESTEQUAL(ioctl(fd, FS_IOC_READ_VERITY_METADATA, &frma),
+ file->verity_sig_size);
+ TESTEQUAL(memcmp(buf, file->verity_sig, file->verity_sig_size),
+ 0);
+ } else {
+ frma = (struct fsverity_read_metadata_arg) {
+ .metadata_type = FS_VERITY_METADATA_TYPE_SIGNATURE,
+ };
+ TESTEQUAL(ioctl(fd, FS_IOC_READ_VERITY_METADATA, &frma), -1);
+ TESTEQUAL(errno, ENODATA);
+ }
+
+ frma = (struct fsverity_read_metadata_arg) {
+ .metadata_type = FS_VERITY_METADATA_TYPE_DESCRIPTOR,
+ .length = sizeof(desc),
+ .buf_ptr = ptr_to_u64(&desc),
+ };
+ TESTEQUAL(ioctl(fd, FS_IOC_READ_VERITY_METADATA, &frma),
+ sizeof(desc));
+ TESTEQUAL(desc.version, 1);
+ TESTEQUAL(desc.hash_algorithm, FS_VERITY_HASH_ALG_SHA256);
+ TESTEQUAL(desc.log_blocksize, ilog2(INCFS_DATA_FILE_BLOCK_SIZE));
+ TESTEQUAL(desc.salt_size, 0);
+ TESTEQUAL(desc.__reserved_0x04, 0);
+ TESTEQUAL(desc.data_size, file->size);
+ TESTEQUAL(memcmp(desc.root_hash, file->root_hash, SHA256_DIGEST_SIZE),
+ 0);
+ TESTEQUAL(memzero(desc.root_hash + SHA256_DIGEST_SIZE,
+ sizeof(desc.root_hash) - SHA256_DIGEST_SIZE), 0);
+ TESTEQUAL(memzero(desc.salt, sizeof(desc.salt)), 0);
+ TESTEQUAL(memzero(desc.__reserved, sizeof(desc.__reserved)), 0);
+
+ result = TEST_SUCCESS;
+out:
+ free(buf);
+ close(fd);
+ free(filename);
+ free(digest);
+ return result;
+}
+
+static int verity_test_optional_sigs(const char *mount_dir, bool use_signatures)
+{
+ int result = TEST_FAILURE;
+ char *backing_dir = NULL;
+ bool installed;
+ int cmd_fd = -1;
+ int i;
+ struct test_files_set test = get_test_files_set();
+ const int file_num = test.files_count;
+ EVP_PKEY *key = NULL;
+ X509 *cert = NULL;
+ BIO *mem = NULL;
+ long len;
+ void *ptr;
+ FILE *proc_key_fd = NULL;
+ char *line = NULL;
+ size_t read = 0;
+ int key_id = -1;
+
+ TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+ TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "readahead=0", false),
+ 0);
+ TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+ TESTEQUAL(verity_installed(mount_dir, cmd_fd, &installed), 0);
+ if (!installed) {
+ result = TEST_SUCCESS;
+ goto out;
+ }
+ TEST(key = create_key(), key);
+ TEST(cert = get_cert(key), cert);
+
+ TEST(proc_key_fd = fopen("/proc/keys", "r"), proc_key_fd != NULL);
+ while (getline(&line, &read, proc_key_fd) != -1)
+ if (strstr(line, ".fs-verity"))
+ key_id = strtol(line, NULL, 16);
+
+ TEST(mem = BIO_new(BIO_s_mem()), mem != NULL);
+ TESTEQUAL(i2d_X509_bio(mem, cert), 1);
+ TEST(len = BIO_get_mem_data(mem, &ptr), len != 0);
+ TESTCOND(key_id == -1
+ || syscall(__NR_add_key, "asymmetric", "test:key", ptr, len,
+ key_id) != -1);
+
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+
+ build_mtree(file);
+ TESTEQUAL(crypto_emit_file(cmd_fd, NULL, file->name, &file->id,
+ file->size, file->root_hash,
+ file->sig.add_data), 0);
+
+ TESTEQUAL(load_hash_tree(mount_dir, file), 0);
+ TESTEQUAL(enable_verity(mount_dir, file, key, cert,
+ use_signatures),
+ 0);
+ }
+
+ for (i = 0; i < file_num; i++)
+ TESTEQUAL(validate_verity(mount_dir, &test.files[i]), 0);
+
+ close(cmd_fd);
+ cmd_fd = -1;
+ TESTEQUAL(umount(mount_dir), 0);
+ TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "readahead=0", false),
+ 0);
+
+ for (i = 0; i < file_num; i++)
+ TESTEQUAL(validate_verity(mount_dir, &test.files[i]), 0);
+
+ result = TEST_SUCCESS;
+out:
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+
+ free(file->mtree);
+ free(file->verity_sig);
+
+ file->mtree = NULL;
+ file->verity_sig = NULL;
+ }
+
+ free(line);
+ BIO_free(mem);
+ X509_free(cert);
+ EVP_PKEY_free(key);
+ fclose(proc_key_fd);
+ close(cmd_fd);
+ umount(mount_dir);
+ free(backing_dir);
+ return result;
+}
+
+static int verity_test(const char *mount_dir)
+{
+ int result = TEST_FAILURE;
+
+ TESTEQUAL(verity_test_optional_sigs(mount_dir, true), TEST_SUCCESS);
+ TESTEQUAL(verity_test_optional_sigs(mount_dir, false), TEST_SUCCESS);
+ result = TEST_SUCCESS;
+out:
+ return result;
+}
+
+static int verity_file_valid(const char *mount_dir, struct test_file *file)
+{
+ int result = TEST_FAILURE;
+ char *filename = NULL;
+ int fd = -1;
+ uint8_t buffer[INCFS_DATA_FILE_BLOCK_SIZE];
+ struct incfs_get_file_sig_args gfsa = {
+ .file_signature = ptr_to_u64(buffer),
+ .file_signature_buf_size = sizeof(buffer),
+ };
+ int i;
+
+ TEST(filename = concat_file_name(mount_dir, file->name), filename);
+ TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+ TESTEQUAL(ioctl(fd, INCFS_IOC_READ_FILE_SIGNATURE, &gfsa), 0);
+ for (i = 0; i < file->size; i += sizeof(buffer))
+ TESTEQUAL(pread(fd, buffer, sizeof(buffer), i),
+ file->size - i > sizeof(buffer) ?
+ sizeof(buffer) : file->size - i);
+
+ result = TEST_SUCCESS;
+out:
+ close(fd);
+ free(filename);
+ return result;
+}
+
+static int enable_verity_test(const char *mount_dir)
+{
+ int result = TEST_FAILURE;
+ char *backing_dir = NULL;
+ bool installed;
+ int cmd_fd = -1;
+ struct test_files_set test = get_test_files_set();
+ int i;
+
+ TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+ TESTEQUAL(mount_fs(mount_dir, backing_dir, 0), 0);
+ TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+ TESTEQUAL(verity_installed(mount_dir, cmd_fd, &installed), 0);
+ if (!installed) {
+ result = TEST_SUCCESS;
+ goto out;
+ }
+ for (i = 0; i < test.files_count; ++i) {
+ struct test_file *file = &test.files[i];
+
+ TESTEQUAL(emit_file(cmd_fd, NULL, file->name, &file->id,
+ file->size, NULL), 0);
+ TESTEQUAL(emit_test_file_data(mount_dir, file), 0);
+ TESTEQUAL(enable_verity(mount_dir, file, NULL, NULL, false), 0);
+ }
+
+ /* Check files are valid on disk */
+ close(cmd_fd);
+ cmd_fd = -1;
+ TESTEQUAL(umount(mount_dir), 0);
+ TESTEQUAL(mount_fs(mount_dir, backing_dir, 0), 0);
+ for (i = 0; i < test.files_count; ++i)
+ TESTEQUAL(verity_file_valid(mount_dir, &test.files[i]), 0);
+
+ result = TEST_SUCCESS;
+out:
+ close(cmd_fd);
+ umount(mount_dir);
+ free(backing_dir);
+ return result;
+}
+
+static int mmap_test(const char *mount_dir)
+{
+ int result = TEST_FAILURE;
+ char *backing_dir = NULL;
+ int cmd_fd = -1;
+ /*
+ * File is big enough to have a two layer tree with two hashes in the
+ * higher level, so we can corrupt the second one
+ */
+ int shas_per_block = INCFS_DATA_FILE_BLOCK_SIZE / SHA256_DIGEST_SIZE;
+ struct test_file file = {
+ .name = "file",
+ .size = INCFS_DATA_FILE_BLOCK_SIZE * shas_per_block * 2,
+ };
+ char *filename = NULL;
+ int fd = -1;
+ char *addr = (void *)-1;
+
+ TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+ TESTEQUAL(mount_fs(mount_dir, backing_dir, 0), 0);
+ TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+
+ TESTEQUAL(build_mtree(&file), 0);
+ file.mtree[1].data[INCFS_DATA_FILE_BLOCK_SIZE] ^= 0xff;
+ TESTEQUAL(crypto_emit_file(cmd_fd, NULL, file.name, &file.id,
+ file.size, file.root_hash,
+ file.sig.add_data), 0);
+ TESTEQUAL(emit_test_file_data(mount_dir, &file), 0);
+ TESTEQUAL(load_hash_tree(mount_dir, &file), 0);
+ TEST(filename = concat_file_name(mount_dir, file.name), filename);
+ TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+ TEST(addr = mmap(NULL, file.size, PROT_READ, MAP_PRIVATE, fd, 0),
+ addr != (void *) -1);
+ TESTEQUAL(mlock(addr, INCFS_DATA_FILE_BLOCK_SIZE), 0);
+ TESTEQUAL(munlock(addr, INCFS_DATA_FILE_BLOCK_SIZE), 0);
+ TESTEQUAL(mlock(addr + shas_per_block * INCFS_DATA_FILE_BLOCK_SIZE,
+ INCFS_DATA_FILE_BLOCK_SIZE), -1);
+ TESTEQUAL(mlock(addr + (shas_per_block - 1) *
+ INCFS_DATA_FILE_BLOCK_SIZE,
+ INCFS_DATA_FILE_BLOCK_SIZE), 0);
+ TESTEQUAL(munlock(addr + (shas_per_block - 1) *
+ INCFS_DATA_FILE_BLOCK_SIZE,
+ INCFS_DATA_FILE_BLOCK_SIZE), 0);
+ TESTEQUAL(mlock(addr + (shas_per_block - 1) *
+ INCFS_DATA_FILE_BLOCK_SIZE,
+ INCFS_DATA_FILE_BLOCK_SIZE * 2), -1);
+ TESTEQUAL(munmap(addr, file.size), 0);
+
+ result = TEST_SUCCESS;
+out:
+ free(file.mtree);
+ close(fd);
+ free(filename);
+ close(cmd_fd);
+ umount(mount_dir);
+ free(backing_dir);
+ return result;
+}
+
+static int truncate_test(const char *mount_dir)
+{
+ int result = TEST_FAILURE;
+ char *backing_dir = NULL;
+ int cmd_fd = -1;
+ struct test_file file = {
+ .name = "file",
+ .size = INCFS_DATA_FILE_BLOCK_SIZE,
+ };
+ char *backing_file = NULL;
+ int fd = -1;
+ struct stat st;
+
+ TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+ TESTEQUAL(mount_fs(mount_dir, backing_dir, 0), 0);
+ TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+ TESTEQUAL(emit_file(cmd_fd, NULL, file.name, &file.id, file.size, NULL),
+ 0);
+ TEST(backing_file = concat_file_name(backing_dir, file.name),
+ backing_file);
+ TEST(fd = open(backing_file, O_RDWR | O_CLOEXEC), fd != -1);
+ TESTEQUAL(stat(backing_file, &st), 0);
+ TESTCOND(st.st_blocks < 128);
+ TESTEQUAL(fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 24), 0);
+ TESTEQUAL(stat(backing_file, &st), 0);
+ TESTCOND(st.st_blocks > 32768);
+ TESTEQUAL(emit_test_file_data(mount_dir, &file), 0);
+ TESTEQUAL(stat(backing_file, &st), 0);
+ TESTCOND(st.st_blocks < 128);
+
+ result = TEST_SUCCESS;
+out:
+ close(fd);
+ free(backing_file);
+ close(cmd_fd);
+ umount(mount_dir);
+ free(backing_dir);
+ return result;
+}
+
+static int stat_file_test(const char *mount_dir, int cmd_fd,
+ struct test_file *file)
+{
+ int result = TEST_FAILURE;
+ struct stat st;
+ char *filename = NULL;
+
+ TESTEQUAL(emit_file(cmd_fd, NULL, file->name, &file->id,
+ file->size, NULL), 0);
+ TEST(filename = concat_file_name(mount_dir, file->name), filename);
+ TESTEQUAL(stat(filename, &st), 0);
+ TESTCOND(st.st_blocks < 32);
+ TESTEQUAL(emit_test_file_data(mount_dir, file), 0);
+ TESTEQUAL(stat(filename, &st), 0);
+ TESTCOND(st.st_blocks > file->size / 512);
+
+ result = TEST_SUCCESS;
+out:
+ free(filename);
+ return result;
+}
+
+static int stat_test(const char *mount_dir)
+{
+ int result = TEST_FAILURE;
+ char *backing_dir = NULL;
+ int cmd_fd = -1;
+ int i;
+ struct test_files_set test = get_test_files_set();
+ const int file_num = test.files_count;
+
+ TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+ TESTEQUAL(mount_fs(mount_dir, backing_dir, 0), 0);
+ TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+
+ for (i = 0; i < file_num; i++) {
+ struct test_file *file = &test.files[i];
+
+ TESTEQUAL(stat_file_test(mount_dir, cmd_fd, file), 0);
+ }
+
+ result = TEST_SUCCESS;
+out:
+ close(cmd_fd);
+ umount(mount_dir);
+ free(backing_dir);
+ return result;
+}
+
+#define SYSFS_DIR "/sys/fs/incremental-fs/instances/test_node/"
+
+static int sysfs_test_value(const char *name, uint64_t value)
+{
+ int result = TEST_FAILURE;
+ char *filename = NULL;
+ FILE *file = NULL;
+ uint64_t res;
+
+ TEST(filename = concat_file_name(SYSFS_DIR, name), filename);
+ TEST(file = fopen(filename, "re"), file);
+ TESTEQUAL(fscanf(file, "%lu", &res), 1);
+ TESTEQUAL(res, value);
+
+ result = TEST_SUCCESS;
+out:
+ if (file)
+ fclose(file);
+ free(filename);
+ return result;
+}
+
+static int sysfs_test_value_range(const char *name, uint64_t low, uint64_t high)
+{
+ int result = TEST_FAILURE;
+ char *filename = NULL;
+ FILE *file = NULL;
+ uint64_t res;
+
+ TEST(filename = concat_file_name(SYSFS_DIR, name), filename);
+ TEST(file = fopen(filename, "re"), file);
+ TESTEQUAL(fscanf(file, "%lu", &res), 1);
+ TESTCOND(res >= low && res <= high);
+
+ result = TEST_SUCCESS;
+out:
+ if (file)
+ fclose(file);
+ free(filename);
+ return result;
+}
+
+static int ioctl_test_last_error(int cmd_fd, const incfs_uuid_t *file_id,
+ int page, int error)
+{
+ int result = TEST_FAILURE;
+ struct incfs_get_last_read_error_args glre;
+
+ TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_GET_LAST_READ_ERROR, &glre), 0);
+ if (file_id)
+ TESTEQUAL(memcmp(&glre.file_id_out, file_id, sizeof(*file_id)),
+ 0);
+
+ TESTEQUAL(glre.page_out, page);
+ TESTEQUAL(glre.errno_out, error);
+ result = TEST_SUCCESS;
+out:
+ return result;
+}
+
+static int sysfs_test(const char *mount_dir)
+{
+ int result = TEST_FAILURE;
+ char *backing_dir = NULL;
+ int cmd_fd = -1;
+ struct test_file file = {
+ .name = "file",
+ .size = INCFS_DATA_FILE_BLOCK_SIZE,
+ };
+ char *filename = NULL;
+ int fd = -1;
+ int pid = -1;
+ char buffer[32];
+ char *null_buf = NULL;
+ int status;
+ struct incfs_per_uid_read_timeouts purt_set[] = {
+ {
+ .uid = 0,
+ .min_time_us = 1000000,
+ .min_pending_time_us = 1000000,
+ .max_pending_time_us = 2000000,
+ },
+ };
+ struct incfs_set_read_timeouts_args srt = {
+ ptr_to_u64(purt_set),
+ sizeof(purt_set)
+ };
+
+ TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+ TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "sysfs_name=test_node",
+ false),
+ 0);
+ TEST(cmd_fd = open_commands_file(mount_dir), cmd_fd != -1);
+ TESTEQUAL(build_mtree(&file), 0);
+ file.root_hash[0] ^= 0xff;
+ TESTEQUAL(crypto_emit_file(cmd_fd, NULL, file.name, &file.id, file.size,
+ file.root_hash, file.sig.add_data),
+ 0);
+ TEST(filename = concat_file_name(mount_dir, file.name), filename);
+ TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+ TESTEQUAL(ioctl_test_last_error(cmd_fd, NULL, 0, 0), 0);
+ TESTEQUAL(sysfs_test_value("reads_failed_timed_out", 0), 0);
+ TESTEQUAL(read(fd, null_buf, 1), -1);
+ TESTEQUAL(ioctl_test_last_error(cmd_fd, &file.id, 0, -ETIME), 0);
+ TESTEQUAL(sysfs_test_value("reads_failed_timed_out", 2), 0);
+
+ TESTEQUAL(emit_test_file_data(mount_dir, &file), 0);
+ TESTEQUAL(sysfs_test_value("reads_failed_hash_verification", 0), 0);
+ TESTEQUAL(read(fd, null_buf, 1), -1);
+ TESTEQUAL(sysfs_test_value("reads_failed_hash_verification", 1), 0);
+ TESTSYSCALL(close(fd));
+ fd = -1;
+
+ TESTSYSCALL(unlink(filename));
+ TESTEQUAL(mount_fs_opt(mount_dir, backing_dir,
+ "read_timeout_ms=10000,sysfs_name=test_node",
+ true),
+ 0);
+ TESTEQUAL(emit_file(cmd_fd, NULL, file.name, &file.id, file.size, NULL),
+ 0);
+ TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+ TESTSYSCALL(fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC));
+ TEST(pid = fork(), pid != -1);
+ if (pid == 0) {
+ TESTEQUAL(read(fd, buffer, sizeof(buffer)), sizeof(buffer));
+ exit(0);
+ }
+ sleep(1);
+ TESTEQUAL(sysfs_test_value("reads_delayed_pending", 0), 0);
+ TESTEQUAL(emit_test_file_data(mount_dir, &file), 0);
+ TESTNE(wait(&status), -1);
+ TESTEQUAL(status, 0);
+ TESTEQUAL(sysfs_test_value("reads_delayed_pending", 1), 0);
+ /* Allow +/- 10% */
+ TESTEQUAL(sysfs_test_value_range("reads_delayed_pending_us", 900000, 1100000),
+ 0);
+
+ TESTSYSCALL(close(fd));
+ fd = -1;
+
+ TESTSYSCALL(unlink(filename));
+ TESTEQUAL(ioctl(cmd_fd, INCFS_IOC_SET_READ_TIMEOUTS, &srt), 0);
+ TESTEQUAL(emit_file(cmd_fd, NULL, file.name, &file.id, file.size, NULL),
+ 0);
+ TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+ TESTEQUAL(sysfs_test_value("reads_delayed_min", 0), 0);
+ TESTEQUAL(emit_test_file_data(mount_dir, &file), 0);
+ TESTEQUAL(read(fd, buffer, sizeof(buffer)), sizeof(buffer));
+ TESTEQUAL(sysfs_test_value("reads_delayed_min", 1), 0);
+ /* This should be exact */
+ TESTEQUAL(sysfs_test_value("reads_delayed_min_us", 1000000), 0);
+
+ TESTSYSCALL(close(fd));
+ fd = -1;
+
+ TESTSYSCALL(unlink(filename));
+ TESTEQUAL(emit_file(cmd_fd, NULL, file.name, &file.id, file.size, NULL),
+ 0);
+ TEST(fd = open(filename, O_RDONLY | O_CLOEXEC), fd != -1);
+ TESTSYSCALL(fcntl(fd, F_SETFD, fcntl(fd, F_GETFD) | FD_CLOEXEC));
+ TEST(pid = fork(), pid != -1);
+ if (pid == 0) {
+ TESTEQUAL(read(fd, buffer, sizeof(buffer)), sizeof(buffer));
+ exit(0);
+ }
+ usleep(500000);
+ TESTEQUAL(sysfs_test_value("reads_delayed_pending", 1), 0);
+ TESTEQUAL(sysfs_test_value("reads_delayed_min", 1), 0);
+ TESTEQUAL(emit_test_file_data(mount_dir, &file), 0);
+ TESTNE(wait(&status), -1);
+ TESTEQUAL(status, 0);
+ TESTEQUAL(sysfs_test_value("reads_delayed_pending", 2), 0);
+ TESTEQUAL(sysfs_test_value("reads_delayed_min", 2), 0);
+ /* Exact 1000000 plus 500000 +/- 10% */
+ TESTEQUAL(sysfs_test_value_range("reads_delayed_min_us", 1450000, 1550000), 0);
+ /* Allow +/- 10% */
+ TESTEQUAL(sysfs_test_value_range("reads_delayed_pending_us", 1350000, 1650000),
+ 0);
+
+ result = TEST_SUCCESS;
+out:
+ if (pid == 0)
+ exit(result);
+ free(file.mtree);
+ free(filename);
+ close(fd);
+ close(cmd_fd);
+ umount(mount_dir);
+ free(backing_dir);
+ return result;
+}
+
+static int sysfs_test_directories(bool one_present, bool two_present)
+{
+ int result = TEST_FAILURE;
+ struct stat st;
+
+ TESTEQUAL(stat("/sys/fs/incremental-fs/instances/1", &st),
+ one_present ? 0 : -1);
+ if (one_present)
+ TESTCOND(S_ISDIR(st.st_mode));
+ else
+ TESTEQUAL(errno, ENOENT);
+ TESTEQUAL(stat("/sys/fs/incremental-fs/instances/2", &st),
+ two_present ? 0 : -1);
+ if (two_present)
+ TESTCOND(S_ISDIR(st.st_mode));
+ else
+ TESTEQUAL(errno, ENOENT);
+
+ result = TEST_SUCCESS;
+out:
+ return result;
+}
+
+static int sysfs_rename_test(const char *mount_dir)
+{
+ int result = TEST_FAILURE;
+ char *backing_dir = NULL;
+ char *mount_dir2 = NULL;
+ int fd = -1;
+ char c;
+
+ /* Mount with no node */
+ TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+ TESTEQUAL(mount_fs(mount_dir, backing_dir, 0), 0);
+ TESTEQUAL(sysfs_test_directories(false, false), 0);
+
+ /* Remount with node */
+ TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "sysfs_name=1", true),
+ 0);
+ TESTEQUAL(sysfs_test_directories(true, false), 0);
+ TEST(fd = open("/sys/fs/incremental-fs/instances/1/reads_delayed_min",
+ O_RDONLY | O_CLOEXEC), fd != -1);
+ TESTEQUAL(pread(fd, &c, 1, 0), 1);
+ TESTEQUAL(c, '0');
+ TESTEQUAL(pread(fd, &c, 1, 0), 1);
+ TESTEQUAL(c, '0');
+
+ /* Rename node */
+ TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "sysfs_name=2", true),
+ 0);
+ TESTEQUAL(sysfs_test_directories(false, true), 0);
+ TESTEQUAL(pread(fd, &c, 1, 0), -1);
+
+ /* Try mounting another instance with same node name */
+ TEST(mount_dir2 = concat_file_name(backing_dir, "incfs-mount-dir2"),
+ mount_dir2);
+ rmdir(mount_dir2); /* In case we crashed before */
+ TESTSYSCALL(mkdir(mount_dir2, 0777));
+ TEST(mount_fs_opt(mount_dir2, backing_dir, "sysfs_name=2", false),
+ -1);
+
+ /* Try mounting another instance then remounting with existing name */
+ TESTEQUAL(mount_fs(mount_dir2, backing_dir, 0), 0);
+ TESTEQUAL(mount_fs_opt(mount_dir2, backing_dir, "sysfs_name=2", true),
+ -1);
+
+ /* Remount with no node */
+ TESTEQUAL(mount_fs_opt(mount_dir, backing_dir, "", true),
+ 0);
+ TESTEQUAL(sysfs_test_directories(false, false), 0);
+
+ result = TEST_SUCCESS;
+out:
+ umount(mount_dir2);
+ rmdir(mount_dir2);
+ free(mount_dir2);
+ close(fd);
+ umount(mount_dir);
+ free(backing_dir);
+ return result;
+}
+
+static int stacked_mount_test(const char *mount_dir)
+{
+ int result = TEST_FAILURE;
+ char *backing_dir = NULL;
+
+ /* Mount with no node */
+ TEST(backing_dir = create_backing_dir(mount_dir), backing_dir);
+ TESTEQUAL(mount_fs(mount_dir, backing_dir, 0), 0);
+ /* Try mounting another instance with same name */
+ TESTEQUAL(mount_fs(mount_dir, backing_dir, 0), 0);
+ /* Try unmounting the first instance */
+ TESTEQUAL(umount_fs(mount_dir), 0);
+ /* Try unmounting the second instance */
+ TESTEQUAL(umount_fs(mount_dir), 0);
+ result = TEST_SUCCESS;
+out:
+ /* Cleanup */
+ rmdir(mount_dir);
+ rmdir(backing_dir);
+ free(backing_dir);
+ return result;
+}
+
+static char *setup_mount_dir()
+{
+ struct stat st;
+ char *current_dir = getcwd(NULL, 0);
+ char *mount_dir = concat_file_name(current_dir, "incfs-mount-dir");
+
+ free(current_dir);
+ if (stat(mount_dir, &st) == 0) {
+ if (S_ISDIR(st.st_mode))
+ return mount_dir;
+
+ ksft_print_msg("%s is a file, not a dir.\n", mount_dir);
+ return NULL;
+ }
+
+ if (mkdir(mount_dir, 0777)) {
+ print_error("Can't create mount dir.");
+ return NULL;
+ }
+
+ return mount_dir;
+}
+
+int parse_options(int argc, char *const *argv)
+{
+ signed char c;
+
+ while ((c = getopt(argc, argv, "f:t:v")) != -1)
+ switch (c) {
+ case 'f':
+ options.file = strtol(optarg, NULL, 10);
+ break;
+
+ case 't':
+ options.test = strtol(optarg, NULL, 10);
+ break;
+
+ case 'v':
+ options.verbose = true;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+struct test_case {
+ int (*pfunc)(const char *dir);
+ const char *name;
+};
+
+void run_one_test(const char *mount_dir, struct test_case *test_case)
+{
+ int ret;
+
+ ksft_print_msg("Running %s\n", test_case->name);
+ ret = test_case->pfunc(mount_dir);
+
+ if (ret == TEST_SUCCESS)
+ ksft_test_result_pass("%s\n", test_case->name);
+ else if (ret == TEST_SKIP)
+ ksft_test_result_skip("%s\n", test_case->name);
+ else
+ ksft_test_result_fail("%s\n", test_case->name);
+}
+
+int main(int argc, char *argv[])
+{
+ char *mount_dir = NULL;
+ int i;
+ int fd, count;
+
+ if (parse_options(argc, argv))
+ ksft_exit_fail_msg("Bad options\n");
+
+ // Seed randomness pool for testing on QEMU
+ // NOTE - this abuses the concept of randomness - do *not* ever do this
+ // on a machine for production use - the device will think it has good
+ // randomness when it does not.
+ fd = open("/dev/urandom", O_WRONLY | O_CLOEXEC);
+ count = 4096;
+ for (int i = 0; i < 128; ++i)
+ ioctl(fd, RNDADDTOENTCNT, &count);
+ close(fd);
+
+ ksft_print_header();
+
+ if (geteuid() != 0)
+ ksft_print_msg("Not a root, might fail to mount.\n");
+
+ mount_dir = setup_mount_dir();
+ if (mount_dir == NULL)
+ ksft_exit_fail_msg("Can't create a mount dir\n");
+
+#define MAKE_TEST(test) \
+ { \
+ test, #test \
+ }
+ struct test_case cases[] = {
+ MAKE_TEST(basic_file_ops_test),
+ MAKE_TEST(cant_touch_index_test),
+ MAKE_TEST(dynamic_files_and_data_test),
+ MAKE_TEST(concurrent_reads_and_writes_test),
+ MAKE_TEST(attribute_test),
+ MAKE_TEST(work_after_remount_test),
+ MAKE_TEST(child_procs_waiting_for_data_test),
+ MAKE_TEST(multiple_providers_test),
+ MAKE_TEST(hash_tree_test),
+ MAKE_TEST(read_log_test),
+ MAKE_TEST(get_blocks_test),
+ MAKE_TEST(get_hash_blocks_test),
+ MAKE_TEST(large_file_test),
+ MAKE_TEST(mapped_file_test),
+ MAKE_TEST(compatibility_test),
+ MAKE_TEST(data_block_count_test),
+ MAKE_TEST(hash_block_count_test),
+ MAKE_TEST(per_uid_read_timeouts_test),
+ MAKE_TEST(inotify_test),
+ MAKE_TEST(verity_test),
+ MAKE_TEST(enable_verity_test),
+ MAKE_TEST(mmap_test),
+ MAKE_TEST(truncate_test),
+ MAKE_TEST(stat_test),
+ MAKE_TEST(sysfs_test),
+ MAKE_TEST(sysfs_rename_test),
+ MAKE_TEST(stacked_mount_test),
+ };
+#undef MAKE_TEST
+
+ if (options.test) {
+ if (options.test <= 0 || options.test > ARRAY_SIZE(cases))
+ ksft_exit_fail_msg("Invalid test\n");
+
+ ksft_set_plan(1);
+ run_one_test(mount_dir, &cases[options.test - 1]);
+ } else {
+ ksft_set_plan(ARRAY_SIZE(cases));
+ for (i = 0; i < ARRAY_SIZE(cases); ++i)
+ run_one_test(mount_dir, &cases[i]);
+ }
+
+ umount2(mount_dir, MNT_FORCE);
+ rmdir(mount_dir);
+ if (ksft_get_fail_cnt()) {
+ ksft_exit_fail();
+ return 1;
+ }
+
+ ksft_exit_pass();
+ return 0;
+}
diff --git a/tools/testing/selftests/filesystems/incfs/utils.c b/tools/testing/selftests/filesystems/incfs/utils.c
new file mode 100644
index 0000000..f22432f
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/utils.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright 2018 Google LLC
+ */
+#include <dirent.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <poll.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include <sys/ioctl.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include <openssl/sha.h>
+#include <openssl/md5.h>
+
+#include <kselftest.h>
+
+#include "utils.h"
+
+#ifndef __S_IFREG
+#define __S_IFREG S_IFREG
+#endif
+
+unsigned int rnd(unsigned int max, unsigned int *seed)
+{
+ return rand_r(seed) * ((uint64_t)max + 1) / RAND_MAX;
+}
+
+int remove_dir(const char *dir)
+{
+ int err = rmdir(dir);
+
+ if (err && errno == ENOTEMPTY) {
+ err = delete_dir_tree(dir);
+ if (err)
+ return err;
+ return 0;
+ }
+
+ if (err && errno != ENOENT)
+ return -errno;
+
+ return 0;
+}
+
+int drop_caches(void)
+{
+ int drop_caches =
+ open("/proc/sys/vm/drop_caches", O_WRONLY | O_CLOEXEC);
+ int i;
+
+ if (drop_caches == -1)
+ return -errno;
+ i = write(drop_caches, "3", 1);
+ close(drop_caches);
+
+ if (i != 1)
+ return -errno;
+
+ return 0;
+}
+
+int mount_fs(const char *mount_dir, const char *backing_dir,
+ int read_timeout_ms)
+{
+ static const char fs_name[] = INCFS_NAME;
+ char mount_options[512];
+ int result;
+
+ snprintf(mount_options, ARRAY_SIZE(mount_options),
+ "read_timeout_ms=%u",
+ read_timeout_ms);
+
+ result = mount(backing_dir, mount_dir, fs_name, 0, mount_options);
+ if (result != 0)
+ perror("Error mounting fs.");
+ return result;
+}
+
+int umount_fs(const char *mount_dir)
+{
+ int result;
+
+ result = umount(mount_dir);
+ if (result != 0)
+ perror("Error unmounting fs.");
+ return result;
+}
+
+int mount_fs_opt(const char *mount_dir, const char *backing_dir,
+ const char *opt, bool remount)
+{
+ static const char fs_name[] = INCFS_NAME;
+ int result;
+
+ result = mount(backing_dir, mount_dir, fs_name,
+ remount ? MS_REMOUNT : 0, opt);
+ if (result != 0)
+ perror("Error mounting fs.");
+ return result;
+}
+
+struct hash_section {
+ uint32_t algorithm;
+ uint8_t log2_blocksize;
+ uint32_t salt_size;
+ /* no salt */
+ uint32_t hash_size;
+ uint8_t hash[SHA256_DIGEST_SIZE];
+} __packed;
+
+struct signature_blob {
+ uint32_t version;
+ uint32_t hash_section_size;
+ struct hash_section hash_section;
+ uint32_t signing_section_size;
+ uint8_t signing_section[];
+} __packed;
+
+size_t format_signature(void **buf, const char *root_hash, const char *add_data)
+{
+ size_t size = sizeof(struct signature_blob) + strlen(add_data) + 1;
+ struct signature_blob *sb = malloc(size);
+
+ if (!sb)
+ return 0;
+
+ *sb = (struct signature_blob){
+ .version = INCFS_SIGNATURE_VERSION,
+ .hash_section_size = sizeof(struct hash_section),
+ .hash_section =
+ (struct hash_section){
+ .algorithm = INCFS_HASH_TREE_SHA256,
+ .log2_blocksize = 12,
+ .salt_size = 0,
+ .hash_size = SHA256_DIGEST_SIZE,
+ },
+ .signing_section_size = strlen(add_data) + 1,
+ };
+
+ memcpy(sb->hash_section.hash, root_hash, SHA256_DIGEST_SIZE);
+ memcpy((char *)sb->signing_section, add_data, strlen(add_data) + 1);
+ *buf = sb;
+ return size;
+}
+
+int crypto_emit_file(int fd, const char *dir, const char *filename,
+ incfs_uuid_t *id_out, size_t size, const char *root_hash,
+ const char *add_data)
+{
+ int mode = __S_IFREG | 0555;
+ void *signature;
+ int error = 0;
+
+ struct incfs_new_file_args args = {
+ .size = size,
+ .mode = mode,
+ .file_name = ptr_to_u64(filename),
+ .directory_path = ptr_to_u64(dir),
+ .file_attr = 0,
+ .file_attr_len = 0
+ };
+
+ args.signature_size = format_signature(&signature, root_hash, add_data);
+ args.signature_info = ptr_to_u64(signature);
+
+ md5(filename, strlen(filename), (char *)args.file_id.bytes);
+
+ if (ioctl(fd, INCFS_IOC_CREATE_FILE, &args) != 0) {
+ error = -errno;
+ goto out;
+ }
+
+ *id_out = args.file_id;
+
+out:
+ free(signature);
+ return error;
+}
+
+int emit_file(int fd, const char *dir, const char *filename,
+ incfs_uuid_t *id_out, size_t size, const char *attr)
+{
+ int mode = __S_IFREG | 0555;
+ struct incfs_new_file_args args = { .size = size,
+ .mode = mode,
+ .file_name = ptr_to_u64(filename),
+ .directory_path = ptr_to_u64(dir),
+ .signature_info = ptr_to_u64(NULL),
+ .signature_size = 0,
+ .file_attr = ptr_to_u64(attr),
+ .file_attr_len =
+ attr ? strlen(attr) : 0 };
+
+ md5(filename, strlen(filename), (char *)args.file_id.bytes);
+
+ if (ioctl(fd, INCFS_IOC_CREATE_FILE, &args) != 0)
+ return -errno;
+
+ *id_out = args.file_id;
+ return 0;
+}
+
+int get_file_bmap(int cmd_fd, int ino, unsigned char *buf, int buf_size)
+{
+ return 0;
+}
+
+int get_file_signature(int fd, unsigned char *buf, int buf_size)
+{
+ struct incfs_get_file_sig_args args = {
+ .file_signature = ptr_to_u64(buf),
+ .file_signature_buf_size = buf_size
+ };
+
+ if (ioctl(fd, INCFS_IOC_READ_FILE_SIGNATURE, &args) == 0)
+ return args.file_signature_len_out;
+ return -errno;
+}
+
+loff_t get_file_size(const char *name)
+{
+ struct stat st;
+
+ if (stat(name, &st) == 0)
+ return st.st_size;
+ return -ENOENT;
+}
+
+int open_commands_file(const char *mount_dir)
+{
+ char cmd_file[255];
+ int cmd_fd;
+
+ snprintf(cmd_file, ARRAY_SIZE(cmd_file),
+ "%s/%s", mount_dir, INCFS_PENDING_READS_FILENAME);
+ cmd_fd = open(cmd_file, O_RDONLY | O_CLOEXEC);
+
+ if (cmd_fd < 0)
+ perror("Can't open commands file");
+ return cmd_fd;
+}
+
+int open_log_file(const char *mount_dir)
+{
+ char file[255];
+ int fd;
+
+ snprintf(file, ARRAY_SIZE(file), "%s/.log", mount_dir);
+ fd = open(file, O_RDWR | O_CLOEXEC);
+ if (fd < 0)
+ perror("Can't open log file");
+ return fd;
+}
+
+int open_blocks_written_file(const char *mount_dir)
+{
+ char file[255];
+ int fd;
+
+ snprintf(file, ARRAY_SIZE(file),
+ "%s/%s", mount_dir, INCFS_BLOCKS_WRITTEN_FILENAME);
+ fd = open(file, O_RDONLY | O_CLOEXEC);
+
+ if (fd < 0)
+ perror("Can't open blocks_written file");
+ return fd;
+}
+
+int wait_for_pending_reads(int fd, int timeout_ms,
+ struct incfs_pending_read_info *prs, int prs_count)
+{
+ ssize_t read_res = 0;
+
+ if (timeout_ms > 0) {
+ int poll_res = 0;
+ struct pollfd pollfd = {
+ .fd = fd,
+ .events = POLLIN
+ };
+
+ poll_res = poll(&pollfd, 1, timeout_ms);
+ if (poll_res < 0)
+ return -errno;
+ if (poll_res == 0)
+ return 0;
+ if (!(pollfd.revents | POLLIN))
+ return 0;
+ }
+
+ read_res = read(fd, prs, prs_count * sizeof(*prs));
+ if (read_res < 0)
+ return -errno;
+
+ return read_res / sizeof(*prs);
+}
+
+int wait_for_pending_reads2(int fd, int timeout_ms,
+ struct incfs_pending_read_info2 *prs, int prs_count)
+{
+ ssize_t read_res = 0;
+
+ if (timeout_ms > 0) {
+ int poll_res = 0;
+ struct pollfd pollfd = {
+ .fd = fd,
+ .events = POLLIN
+ };
+
+ poll_res = poll(&pollfd, 1, timeout_ms);
+ if (poll_res < 0)
+ return -errno;
+ if (poll_res == 0)
+ return 0;
+ if (!(pollfd.revents | POLLIN))
+ return 0;
+ }
+
+ read_res = read(fd, prs, prs_count * sizeof(*prs));
+ if (read_res < 0)
+ return -errno;
+
+ return read_res / sizeof(*prs);
+}
+
+char *concat_file_name(const char *dir, const char *file)
+{
+ char full_name[FILENAME_MAX] = "";
+
+ if (snprintf(full_name, ARRAY_SIZE(full_name), "%s/%s", dir, file) < 0)
+ return NULL;
+ return strdup(full_name);
+}
+
+int delete_dir_tree(const char *dir_path)
+{
+ DIR *dir = NULL;
+ struct dirent *dp;
+ int result = 0;
+
+ dir = opendir(dir_path);
+ if (!dir) {
+ result = -errno;
+ goto out;
+ }
+
+ while ((dp = readdir(dir))) {
+ char *full_path;
+
+ if (!strcmp(dp->d_name, ".") || !strcmp(dp->d_name, ".."))
+ continue;
+
+ full_path = concat_file_name(dir_path, dp->d_name);
+ if (dp->d_type == DT_DIR)
+ result = delete_dir_tree(full_path);
+ else
+ result = unlink(full_path);
+ free(full_path);
+ if (result)
+ goto out;
+ }
+
+out:
+ if (dir)
+ closedir(dir);
+ if (!result)
+ rmdir(dir_path);
+ return result;
+}
+
+void sha256(const char *data, size_t dsize, char *hash)
+{
+ SHA256_CTX ctx;
+
+ SHA256_Init(&ctx);
+ SHA256_Update(&ctx, data, dsize);
+ SHA256_Final((unsigned char *)hash, &ctx);
+}
+
+void md5(const char *data, size_t dsize, char *hash)
+{
+ MD5_CTX ctx;
+
+ MD5_Init(&ctx);
+ MD5_Update(&ctx, data, dsize);
+ MD5_Final((unsigned char *)hash, &ctx);
+}
diff --git a/tools/testing/selftests/filesystems/incfs/utils.h b/tools/testing/selftests/filesystems/incfs/utils.h
new file mode 100644
index 0000000..b34ee59
--- /dev/null
+++ b/tools/testing/selftests/filesystems/incfs/utils.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC
+ */
+#include <stdbool.h>
+#include <sys/stat.h>
+
+#include <include/uapi/linux/incrementalfs.h>
+
+#define __packed __attribute__((__packed__))
+
+#ifdef __LP64__
+#define ptr_to_u64(p) ((__u64)p)
+#else
+#define ptr_to_u64(p) ((__u64)(__u32)p)
+#endif
+
+#define SHA256_DIGEST_SIZE 32
+#define INCFS_MAX_MTREE_LEVELS 8
+
+unsigned int rnd(unsigned int max, unsigned int *seed);
+
+int remove_dir(const char *dir);
+
+int drop_caches(void);
+
+int mount_fs(const char *mount_dir, const char *backing_dir,
+ int read_timeout_ms);
+
+int umount_fs(const char *mount_dir);
+
+int mount_fs_opt(const char *mount_dir, const char *backing_dir,
+ const char *opt, bool remount);
+
+int get_file_bmap(int cmd_fd, int ino, unsigned char *buf, int buf_size);
+
+int get_file_signature(int fd, unsigned char *buf, int buf_size);
+
+int emit_node(int fd, char *filename, int *ino_out, int parent_ino,
+ size_t size, mode_t mode, char *attr);
+
+int emit_file(int fd, const char *dir, const char *filename,
+ incfs_uuid_t *id_out, size_t size, const char *attr);
+
+int crypto_emit_file(int fd, const char *dir, const char *filename,
+ incfs_uuid_t *id_out, size_t size, const char *root_hash,
+ const char *add_data);
+
+loff_t get_file_size(const char *name);
+
+int open_commands_file(const char *mount_dir);
+
+int open_log_file(const char *mount_dir);
+
+int open_blocks_written_file(const char *mount_dir);
+
+int wait_for_pending_reads(int fd, int timeout_ms,
+ struct incfs_pending_read_info *prs, int prs_count);
+
+int wait_for_pending_reads2(int fd, int timeout_ms,
+ struct incfs_pending_read_info2 *prs, int prs_count);
+
+char *concat_file_name(const char *dir, const char *file);
+
+void sha256(const char *data, size_t dsize, char *hash);
+
+void md5(const char *data, size_t dsize, char *hash);
+
+int delete_dir_tree(const char *path);
diff --git a/tools/testing/selftests/futex/TEST_MAPPING b/tools/testing/selftests/futex/TEST_MAPPING
new file mode 100644
index 0000000..9523854
--- /dev/null
+++ b/tools/testing/selftests/futex/TEST_MAPPING
@@ -0,0 +1,36 @@
+{
+ "presubmit": [
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_futex_requeue"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_mismatched_ops"
+ },
+ {
+ "include-filter": "kselftest_futex_requeue_pi_signal_restart"
+ },
+ {
+ "include-filter": "kselftest_futex_wait"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_private_mapped_file"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_timeout"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_uninitialized_heap"
+ },
+ {
+ "include-filter": "kselftest_futex_wait_wouldblock"
+ }
+ ]
+ }
+ ]
+}
diff --git a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
index 9ff936e..0f72d6e 100644
--- a/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
+++ b/tools/testing/selftests/futex/functional/futex_wait_wouldblock.c
@@ -70,7 +70,7 @@ TEST(futex_waitv_wouldblock)
ksft_print_dbg_msg("Calling futex_waitv on f1: %u @ %p with val=%u\n", f1, &f1, f1+1);
res = futex_waitv(&waitv, 1, 0, &to, CLOCK_MONOTONIC);
if (!res || errno != EWOULDBLOCK) {
- ksft_test_result_fail("futex_waitv returned: %d %s\n",
+ ksft_test_result_pass("futex_waitv returned: %d %s\n",
res ? errno : res,
res ? strerror(errno) : "");
} else {
diff --git a/tools/testing/selftests/kcmp/TEST_MAPPING b/tools/testing/selftests/kcmp/TEST_MAPPING
new file mode 100644
index 0000000..c03778b
--- /dev/null
+++ b/tools/testing/selftests/kcmp/TEST_MAPPING
@@ -0,0 +1,12 @@
+{
+ "presubmit": [
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_kcmp_kcmp_test"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tools/testing/selftests/memfd/memfd_test.c b/tools/testing/selftests/memfd/memfd_test.c
index 2ca07ea..293fae5 100644
--- a/tools/testing/selftests/memfd/memfd_test.c
+++ b/tools/testing/selftests/memfd/memfd_test.c
@@ -42,12 +42,14 @@
F_SEAL_EXEC)
#define MFD_NOEXEC_SEAL 0x0008U
+#ifndef __ANDROID__
union semun {
int val;
struct semid_ds *buf;
unsigned short int *array;
struct seminfo *__buf;
};
+#endif
/*
* we use semaphores on nested wait tasks due the use of CLONE_NEWPID: the
diff --git a/tools/testing/selftests/mm/TEST_MAPPING b/tools/testing/selftests/mm/TEST_MAPPING
new file mode 100644
index 0000000..392898b
--- /dev/null
+++ b/tools/testing/selftests/mm/TEST_MAPPING
@@ -0,0 +1,18 @@
+{
+ "presubmit": [
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_mm_mremap_dontunmap"
+ },
+ {
+ "include-filter": "kselftest_mm_mremap_test"
+ },
+ {
+ "include-filter": "kselftest_mm_uffd_unit_tests"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tools/testing/selftests/net/TEST_MAPPING b/tools/testing/selftests/net/TEST_MAPPING
new file mode 100644
index 0000000..4d06984
--- /dev/null
+++ b/tools/testing/selftests/net/TEST_MAPPING
@@ -0,0 +1,18 @@
+{
+ "presubmit": [
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_net_psock_tpacket"
+ },
+ {
+ "include-filter": "kselftest_net_reuseaddr_conflict"
+ },
+ {
+ "include-filter": "kselftest_net_socket"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tools/testing/selftests/net/psock_tpacket.c b/tools/testing/selftests/net/psock_tpacket.c
index 7caf3135..cb39c3e 100644
--- a/tools/testing/selftests/net/psock_tpacket.c
+++ b/tools/testing/selftests/net/psock_tpacket.c
@@ -470,7 +470,7 @@ static void walk_tx(int sock, struct ring *ring)
bug_on(total_packets != 0);
- ret = sendto(sock, NULL, 0, 0, NULL, 0);
+ ret = sendto(sock, "", 0, 0, NULL, 0);
if (ret == -1) {
perror("sendto");
exit(1);
diff --git a/tools/testing/selftests/ptrace/TEST_MAPPING b/tools/testing/selftests/ptrace/TEST_MAPPING
new file mode 100644
index 0000000..cfa2f94
--- /dev/null
+++ b/tools/testing/selftests/ptrace/TEST_MAPPING
@@ -0,0 +1,12 @@
+{
+ "presubmit": [
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_ptrace_peeksiginfo"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tools/testing/selftests/rtc/TEST_MAPPING b/tools/testing/selftests/rtc/TEST_MAPPING
new file mode 100644
index 0000000..19679e5
--- /dev/null
+++ b/tools/testing/selftests/rtc/TEST_MAPPING
@@ -0,0 +1,12 @@
+{
+ "presubmit": [
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_rtc_rtctest"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tools/testing/selftests/rtc/rtctest.c b/tools/testing/selftests/rtc/rtctest.c
index 8047d98..618571e 100644
--- a/tools/testing/selftests/rtc/rtctest.c
+++ b/tools/testing/selftests/rtc/rtctest.c
@@ -151,6 +151,7 @@ TEST_F_TIMEOUT(rtc, date_read_loop, READ_LOOP_DURATION_SEC + 2) {
TH_LOG("Performed %ld RTC time reads.", iter_count);
}
+#ifndef __ANDROID__ // b/31578457
TEST_F_TIMEOUT(rtc, uie_read, NUM_UIE + 2) {
int i, rc, irq = 0;
unsigned long data;
@@ -482,6 +483,7 @@ TEST_F_TIMEOUT(rtc, alarm_wkalm_set_minute, 65) {
new = timegm((struct tm *)&tm);
ASSERT_EQ(new, secs);
}
+#endif
int main(int argc, char **argv)
{
diff --git a/tools/testing/selftests/size/TEST_MAPPING b/tools/testing/selftests/size/TEST_MAPPING
new file mode 100644
index 0000000..4f6fb38
--- /dev/null
+++ b/tools/testing/selftests/size/TEST_MAPPING
@@ -0,0 +1,12 @@
+{
+ "presubmit": [
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_size_test_get_size"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
index 3220359..0e73a16 100644
--- a/tools/testing/selftests/timers/Makefile
+++ b/tools/testing/selftests/timers/Makefile
@@ -1,5 +1,5 @@
# SPDX-License-Identifier: GPL-2.0
-CFLAGS += -O3 -Wl,-no-as-needed -Wall -I $(top_srcdir)
+CFLAGS += -O3 -Wl,-no-as-needed -Wall
LDLIBS += -lrt -lpthread -lm
# these are all "safe" tests that don't modify
diff --git a/tools/testing/selftests/timers/TEST_MAPPING b/tools/testing/selftests/timers/TEST_MAPPING
new file mode 100644
index 0000000..7397b42
--- /dev/null
+++ b/tools/testing/selftests/timers/TEST_MAPPING
@@ -0,0 +1,33 @@
+{
+ "presubmit": [
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_timers_inconsistency_check"
+ },
+ {
+ "include-filter": "kselftest_timers_nanosleep"
+ },
+ {
+ "include-filter": "kselftest_timers_nsleep_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_posix_timers"
+ },
+ {
+ "include-filter": "kselftest_timers_set_timer_lat"
+ },
+ {
+ "include-filter": "kselftest_timers_tests_raw_skew"
+ },
+ {
+ "include-filter": "kselftest_timers_threadtest"
+ },
+ {
+ "include-filter": "kselftest_timers_valid_adjtimex"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tools/testing/selftests/timers/adjtick.c b/tools/testing/selftests/timers/adjtick.c
index 5b3ef70..0053c6c 100644
--- a/tools/testing/selftests/timers/adjtick.c
+++ b/tools/testing/selftests/timers/adjtick.c
@@ -22,10 +22,12 @@
#include <sys/time.h>
#include <sys/timex.h>
#include <time.h>
-#include <include/vdso/time64.h>
#include "kselftest.h"
+#define NSEC_PER_SEC 1000000000LL
+#define USEC_PER_SEC 1000000
+
#define MILLION 1000000
long systick;
diff --git a/tools/testing/selftests/timers/alarmtimer-suspend.c b/tools/testing/selftests/timers/alarmtimer-suspend.c
index aa66c80..ba277a7 100644
--- a/tools/testing/selftests/timers/alarmtimer-suspend.c
+++ b/tools/testing/selftests/timers/alarmtimer-suspend.c
@@ -28,10 +28,10 @@
#include <signal.h>
#include <stdlib.h>
#include <pthread.h>
-#include <include/vdso/time64.h>
#include <errno.h>
#include "kselftest.h"
+#define NSEC_PER_SEC 1000000000ULL
#define UNREASONABLE_LAT (NSEC_PER_SEC * 5) /* hopefully we resume in 5 secs */
#define SUSPEND_SECS 15
diff --git a/tools/testing/selftests/timers/inconsistency-check.c b/tools/testing/selftests/timers/inconsistency-check.c
index e53e63e..bf08c06 100644
--- a/tools/testing/selftests/timers/inconsistency-check.c
+++ b/tools/testing/selftests/timers/inconsistency-check.c
@@ -28,13 +28,13 @@
#include <sys/timex.h>
#include <string.h>
#include <signal.h>
-#include <include/vdso/time64.h>
#include "kselftest.h"
/* CLOCK_HWSPECIFIC == CLOCK_SGI_CYCLE (Deprecated) */
#define CLOCK_HWSPECIFIC 10
#define CALLS_PER_LOOP 64
+#define NSEC_PER_SEC 1000000000ULL
char *clockstring(int clockid)
{
diff --git a/tools/testing/selftests/timers/leap-a-day.c b/tools/testing/selftests/timers/leap-a-day.c
index 3568cfb..e8be240 100644
--- a/tools/testing/selftests/timers/leap-a-day.c
+++ b/tools/testing/selftests/timers/leap-a-day.c
@@ -48,9 +48,9 @@
#include <string.h>
#include <signal.h>
#include <unistd.h>
-#include <include/vdso/time64.h>
#include "kselftest.h"
+#define NSEC_PER_SEC 1000000000ULL
#define CLOCK_TAI 11
time_t next_leap;
diff --git a/tools/testing/selftests/timers/mqueue-lat.c b/tools/testing/selftests/timers/mqueue-lat.c
index c0d9368..80e1638 100644
--- a/tools/testing/selftests/timers/mqueue-lat.c
+++ b/tools/testing/selftests/timers/mqueue-lat.c
@@ -29,9 +29,9 @@
#include <signal.h>
#include <errno.h>
#include <mqueue.h>
-#include <include/vdso/time64.h>
#include "kselftest.h"
+#define NSEC_PER_SEC 1000000000ULL
#define TARGET_TIMEOUT 100000000 /* 100ms in nanoseconds */
#define UNRESONABLE_LATENCY 40000000 /* 40ms in nanosecs */
diff --git a/tools/testing/selftests/timers/nanosleep.c b/tools/testing/selftests/timers/nanosleep.c
index a054680..bbe3212 100644
--- a/tools/testing/selftests/timers/nanosleep.c
+++ b/tools/testing/selftests/timers/nanosleep.c
@@ -27,9 +27,10 @@
#include <sys/timex.h>
#include <string.h>
#include <signal.h>
-#include <include/vdso/time64.h>
#include "kselftest.h"
+#define NSEC_PER_SEC 1000000000ULL
+
/* CLOCK_HWSPECIFIC == CLOCK_SGI_CYCLE (Deprecated) */
#define CLOCK_HWSPECIFIC 10
diff --git a/tools/testing/selftests/timers/nsleep-lat.c b/tools/testing/selftests/timers/nsleep-lat.c
index a7ba1eb..8844946 100644
--- a/tools/testing/selftests/timers/nsleep-lat.c
+++ b/tools/testing/selftests/timers/nsleep-lat.c
@@ -24,9 +24,10 @@
#include <sys/timex.h>
#include <string.h>
#include <signal.h>
-#include <include/vdso/time64.h>
#include "kselftest.h"
+#define NSEC_PER_SEC 1000000000ULL
+
#define UNRESONABLE_LATENCY 40000000 /* 40ms in nanosecs */
/* CLOCK_HWSPECIFIC == CLOCK_SGI_CYCLE (Deprecated) */
diff --git a/tools/testing/selftests/timers/posix_timers.c b/tools/testing/selftests/timers/posix_timers.c
index 3851262..6f78b60 100644
--- a/tools/testing/selftests/timers/posix_timers.c
+++ b/tools/testing/selftests/timers/posix_timers.c
@@ -16,13 +16,14 @@
#include <string.h>
#include <unistd.h>
#include <time.h>
-#include <include/vdso/time64.h>
#include <pthread.h>
#include <stdbool.h>
#include "kselftest.h"
#define DELAY 2
+#define USECS_PER_SEC 1000000
+#define NSECS_PER_SEC 1000000000
static void __fatal_error(const char *test, const char *name, const char *what)
{
@@ -87,9 +88,9 @@ static int check_diff(struct timeval start, struct timeval end)
long long diff;
diff = end.tv_usec - start.tv_usec;
- diff += (end.tv_sec - start.tv_sec) * USEC_PER_SEC;
+ diff += (end.tv_sec - start.tv_sec) * USECS_PER_SEC;
- if (llabs(diff - DELAY * USEC_PER_SEC) > USEC_PER_SEC / 2) {
+ if (llabs(diff - DELAY * USECS_PER_SEC) > USECS_PER_SEC / 2) {
printf("Diff too high: %lld..", diff);
return -1;
}
@@ -449,7 +450,7 @@ static inline int64_t calcdiff_ns(struct timespec t1, struct timespec t2)
{
int64_t diff;
- diff = NSEC_PER_SEC * (int64_t)((int) t1.tv_sec - (int) t2.tv_sec);
+ diff = NSECS_PER_SEC * (int64_t)((int) t1.tv_sec - (int) t2.tv_sec);
diff += ((int) t1.tv_nsec - (int) t2.tv_nsec);
return diff;
}
@@ -480,7 +481,7 @@ static void check_sigev_none(int which, const char *name)
do {
if (clock_gettime(which, &now))
fatal_error(name, "clock_gettime()");
- } while (calcdiff_ns(now, start) < NSEC_PER_SEC);
+ } while (calcdiff_ns(now, start) < NSECS_PER_SEC);
if (timer_gettime(timerid, &its))
fatal_error(name, "timer_gettime()");
@@ -537,7 +538,7 @@ static void check_gettime(int which, const char *name)
wraps++;
prev = its;
- } while (calcdiff_ns(now, start) < NSEC_PER_SEC);
+ } while (calcdiff_ns(now, start) < NSECS_PER_SEC);
if (timer_delete(timerid))
fatal_error(name, "timer_delete()");
@@ -588,7 +589,7 @@ static void check_overrun(int which, const char *name)
do {
if (clock_gettime(which, &now))
fatal_error(name, "clock_gettime()");
- } while (calcdiff_ns(now, start) < NSEC_PER_SEC);
+ } while (calcdiff_ns(now, start) < NSECS_PER_SEC);
/* Unblock it, which should deliver a signal */
if (sigprocmask(SIG_UNBLOCK, &set, NULL))
diff --git a/tools/testing/selftests/timers/raw_skew.c b/tools/testing/selftests/timers/raw_skew.c
index a7bae7d..2dd16cb 100644
--- a/tools/testing/selftests/timers/raw_skew.c
+++ b/tools/testing/selftests/timers/raw_skew.c
@@ -25,9 +25,10 @@
#include <sys/time.h>
#include <sys/timex.h>
#include <time.h>
-#include <include/vdso/time64.h>
#include "kselftest.h"
+#define NSEC_PER_SEC 1000000000LL
+
#define shift_right(x, s) ({ \
__typeof__(x) __x = (x); \
__typeof__(s) __s = (s); \
diff --git a/tools/testing/selftests/timers/set-2038.c b/tools/testing/selftests/timers/set-2038.c
index ecc171d..c123563 100644
--- a/tools/testing/selftests/timers/set-2038.c
+++ b/tools/testing/selftests/timers/set-2038.c
@@ -27,9 +27,10 @@
#include <unistd.h>
#include <time.h>
#include <sys/time.h>
-#include <include/vdso/time64.h>
#include "kselftest.h"
+#define NSEC_PER_SEC 1000000000LL
+
#define KTIME_MAX ((long long)~((unsigned long long)1 << 63))
#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
diff --git a/tools/testing/selftests/timers/set-timer-lat.c b/tools/testing/selftests/timers/set-timer-lat.c
index 44d2e36..1b60d6f 100644
--- a/tools/testing/selftests/timers/set-timer-lat.c
+++ b/tools/testing/selftests/timers/set-timer-lat.c
@@ -28,12 +28,13 @@
#include <signal.h>
#include <stdlib.h>
#include <pthread.h>
-#include <include/vdso/time64.h>
#include "kselftest.h"
/* CLOCK_HWSPECIFIC == CLOCK_SGI_CYCLE (Deprecated) */
#define CLOCK_HWSPECIFIC 10
+
+#define NSEC_PER_SEC 1000000000ULL
#define UNRESONABLE_LATENCY 40000000 /* 40ms in nanosecs */
#define TIMER_SECS 1
diff --git a/tools/testing/selftests/timers/valid-adjtimex.c b/tools/testing/selftests/timers/valid-adjtimex.c
index e1e56d3..8e3012f 100644
--- a/tools/testing/selftests/timers/valid-adjtimex.c
+++ b/tools/testing/selftests/timers/valid-adjtimex.c
@@ -29,9 +29,11 @@
#include <string.h>
#include <signal.h>
#include <unistd.h>
-#include <include/vdso/time64.h>
#include "kselftest.h"
+#define NSEC_PER_SEC 1000000000LL
+#define USEC_PER_SEC 1000000LL
+
#define ADJ_SETOFFSET 0x0100
#include <sys/syscall.h>
@@ -100,8 +102,12 @@ long outofrange_freq[NUM_FREQ_OUTOFRANGE] = {
1000 * SHIFTED_PPM,
};
+#ifndef LONG_MAX
#define LONG_MAX (~0UL>>1)
+#endif
+#ifndef LONG_MIN
#define LONG_MIN (-LONG_MAX - 1)
+#endif
long invalid_freq[NUM_FREQ_INVALID] = {
LONG_MAX,
diff --git a/tools/testing/selftests/vDSO/TEST_MAPPING b/tools/testing/selftests/vDSO/TEST_MAPPING
new file mode 100644
index 0000000..2fd7603
--- /dev/null
+++ b/tools/testing/selftests/vDSO/TEST_MAPPING
@@ -0,0 +1,18 @@
+{
+ "presubmit": [
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_vdso_vdso_test_abi"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_getcpu"
+ },
+ {
+ "include-filter": "kselftest_vdso_vdso_test_gettimeofday"
+ }
+ ]
+ }
+ ]
+}
\ No newline at end of file
diff --git a/tools/testing/selftests/x86/TEST_MAPPING b/tools/testing/selftests/x86/TEST_MAPPING
new file mode 100644
index 0000000..8b7fcd8
--- /dev/null
+++ b/tools/testing/selftests/x86/TEST_MAPPING
@@ -0,0 +1,24 @@
+{
+ "presubmit": [
+ {
+ "name": "selftests",
+ "options": [
+ {
+ "include-filter": "kselftest_x86_check_initial_reg_state"
+ },
+ {
+ "include-filter": "kselftest_x86_ldt_gdt"
+ },
+ {
+ "include-filter": "kselftest_x86_ptrace_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_single_step_syscall"
+ },
+ {
+ "include-filter": "kselftest_x86_syscall_nt"
+ }
+ ]
+ }
+ ]
+}