From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Sami Tolvanen <samitolvanen@google.com>
Date: Fri, 4 Oct 2019 09:14:05 -0700
Subject: ANDROID: arm64: add __pa_function

We use non-canonical CFI jump tables with CONFIG_CFI_CLANG, which
means the compiler replaces function address references with the
address of the function's CFI jump table entry. This results in
__pa_symbol(function) returning the physical address of the jump
table entry, which can lead to address space confusion since the
jump table points to a virtual address.

This change adds a __pa_function macro, which uses inline assembly
to take the actual function address instead.
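
For illustration, a minimal sketch of the failure mode (not part of
this change; secondary_entry is one of the call sites converted
below):

	/*
	 * With CONFIG_CFI_CLANG, the reference to secondary_entry is
	 * rewritten to its CFI jump table entry, so this returns the
	 * physical address of the jump table slot rather than of the
	 * function itself:
	 */
	phys_addr_t wrong = __pa_symbol(secondary_entry);

	/* adrp/add resolve the symbol directly, bypassing the rewrite: */
	phys_addr_t right = __pa_function(secondary_entry);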

Bug: 145210207
Change-Id: I674e5ed386b282a7ed32eeb1f070fb39b5c4b19c
Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
---
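A rough sketch, for illustration only, of what __pa_function(foo)
expands to (foo stands in for any function symbol):

	unsigned long addr;

	/*
	 * adrp/add compute the address of foo itself; because the
	 * reference lives in inline assembly, the compiler cannot
	 * substitute the CFI jump table entry for it.
	 */
	asm("adrp %0, foo\n\t"
	    "add %0, %0, :lo12:foo" : "=r" (addr));

	/* addr now holds foo's kernel virtual address; convert it. */
	phys_addr_t pa = __pa_symbol(addr);
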
 arch/arm64/include/asm/memory.h      | 16 ++++++++++++++++
 arch/arm64/include/asm/mmu_context.h |  2 +-
 arch/arm64/kernel/cpu-reset.h        |  2 +-
 arch/arm64/kernel/cpufeature.c       |  2 +-
 arch/arm64/kernel/psci.c             |  3 ++-
 arch/arm64/kernel/smp_spin_table.c   |  2 +-
 6 files changed, 22 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index c23c47360664..b66f0631d30a 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -309,6 +309,22 @@ static inline void *phys_to_virt(phys_addr_t x)
 #define virt_to_pfn(x)	__phys_to_pfn(__virt_to_phys((unsigned long)(x)))
 #define sym_to_pfn(x)	__phys_to_pfn(__pa_symbol(x))
 
+/*
+ * With non-canonical CFI jump tables, the compiler replaces function
+ * address references with the address of the function's CFI jump
+ * table entry. This results in __pa_symbol(function) returning the
+ * physical address of the jump table entry, which can lead to address
+ * space confusion since the jump table points to the function's
+ * virtual address. Therefore, use inline assembly to ensure we are
+ * always taking the address of the actual function.
+ */
+#define __pa_function(x) ({ \
+	unsigned long addr; \
+	asm("adrp %0, " __stringify(x) "\n\t" \
+	    "add %0, %0, :lo12:" __stringify(x) : "=r" (addr)); \
+	__pa_symbol(addr); \
+})
+
 /*
  * virt_to_page(x)	convert a _valid_ virtual address to struct page *
  * virt_addr_valid(x)	indicates whether a virtual address is valid
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 2de2b464d991..1a6f5102b865 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -156,7 +156,7 @@ static inline void __nocfi cpu_replace_ttbr1(pgd_t *pgdp)
 		ttbr1 |= TTBR_CNP_BIT;
 	}
 
-	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
+	replace_phys = (void *)__pa_function(idmap_cpu_replace_ttbr1);
 
 	cpu_install_idmap();
 	replace_phys(ttbr1);
diff --git a/arch/arm64/kernel/cpu-reset.h b/arch/arm64/kernel/cpu-reset.h
index 054285f8d858..a05bda363272 100644
--- a/arch/arm64/kernel/cpu-reset.h
+++ b/arch/arm64/kernel/cpu-reset.h
@@ -22,7 +22,7 @@ static inline void __noreturn __nocfi cpu_soft_restart(unsigned long entry,
 	unsigned long el2_switch = !is_kernel_in_hyp_mode() &&
 		is_hyp_mode_available();
 
-	restart = (void *)__pa_symbol(__cpu_soft_restart);
+	restart = (void *)__pa_function(__cpu_soft_restart);
 
 	cpu_install_idmap();
 	restart(el2_switch, entry, arg0, arg1, arg2);
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index b7db6e25a197..336f95fafe03 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1053,7 +1053,7 @@ kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
 	if (kpti_applied || kaslr_offset() > 0)
 		return;
 
-	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
+	remap_fn = (void *)__pa_function(idmap_kpti_install_ng_mappings);
 
 	cpu_install_idmap();
 	remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 43ae4e0c968f..5fadf6bb358e 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -38,7 +38,8 @@ static int __init cpu_psci_cpu_prepare(unsigned int cpu)
 
 static int cpu_psci_cpu_boot(unsigned int cpu)
 {
-	int err = psci_ops.cpu_on(cpu_logical_map(cpu), __pa_symbol(secondary_entry));
+	int err = psci_ops.cpu_on(cpu_logical_map(cpu),
+				  __pa_function(secondary_entry));
 	if (err)
 		pr_err("failed to boot CPU%d (%d)\n", cpu, err);
 
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index c8a3fee00c11..d6c818333419 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -88,7 +88,7 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
 	 * boot-loader's endianess before jumping. This is mandated by
 	 * the boot protocol.
 	 */
-	writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);
+	writeq_relaxed(__pa_function(secondary_holding_pen), release_addr);
 
 	__flush_dcache_area((__force void *)release_addr,
 			    sizeof(*release_addr));