Merge remote-tracking branch 'origin/android-msm-bullhead-3.10-security-next' into android-msm-bullhead-3.10

September 2018.2

Bug: 110909354
Bug: 111785512
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index f19506e..0ec9bdd 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -564,23 +564,6 @@
 	default "14" if (ARM64_64K_PAGES && TRANSPARENT_HUGEPAGE)
 	default "11"
 
-config HARDEN_BRANCH_PREDICTOR
-	bool "Harden the branch predictor against aliasing attacks" if EXPERT
-	default y
-	help
-	  Speculation attacks against some high-performance processors rely on
-	  being able to manipulate the branch predictor for a victim context by
-	  executing aliasing branches in the attacker context.  Such attacks
-	  can be partially mitigated against by clearing internal branch
-	  predictor state and limiting the prediction logic in some situations.
-
-	  This config option will take CPU-specific actions to harden the
-	  branch predictor against aliasing attacks and may rely on specific
-	  instruction sequences or control bits being set by the system
-	  firmware.
-
-	  If unsure, say Y.
-
 endmenu
 
 menu "Boot options"
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index f61d50b..cd4ac05 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -26,11 +26,4 @@
 	return elf_hwcap & (1UL << num);
 }
 
-void __init setup_cpu_features(void);
-
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
-extern bool sys_psci_bp_hardening_initialised;
-extern void enable_psci_bp_hardening(void *data);
-#endif
-
 #endif
diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h
index 9183cfe..159743c 100644
--- a/arch/arm64/include/asm/cputype.h
+++ b/arch/arm64/include/asm/cputype.h
@@ -48,7 +48,6 @@
 #define ARM_CPU_PART_FOUNDATION	0xD000
 #define ARM_CPU_PART_CORTEX_A53	0xD030
 #define ARM_CPU_PART_CORTEX_A57	0xD070
-#define ARM_CPU_PART_CORTEX_A72	0xD080
 
 #define APM_CPU_PART_POTENZA	0x0000
 
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 25b1116..c552c16 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -16,11 +16,6 @@
 #ifndef __ASM_MMU_H
 #define __ASM_MMU_H
 
-#include <linux/smp.h>
-
-#include <asm/cpufeature.h>
-#include <asm/percpu.h>
-
 typedef struct {
 	unsigned int id;
 	raw_spinlock_t id_lock;
@@ -35,38 +30,6 @@
 #define INIT_MM_CONTEXT(name)	\
 	.context.id_lock    = __RAW_SPIN_LOCK_UNLOCKED(name.context.id_lock),
 
-typedef void (*bp_hardening_cb_t)(void);
-
-struct bp_hardening_data {
-	bp_hardening_cb_t	fn;
-};
-
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
-
-DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
-
-static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
-{
-	return this_cpu_ptr(&bp_hardening_data);
-}
-
-static inline void arm64_apply_bp_hardening(void)
-{
-	struct bp_hardening_data *d;
-
-	d = arm64_get_bp_hardening_data();
-	if (d->fn)
-		d->fn();
-}
-#else
-static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
-{
-	return NULL;
-}
-
-static inline void arm64_apply_bp_hardening(void)	{ }
-#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
-
 extern void paging_init(void);
 extern void setup_mm_for_reboot(void);
 extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
diff --git a/arch/arm64/include/asm/psci.h b/arch/arm64/include/asm/psci.h
index 1287865..e5312ea 100644
--- a/arch/arm64/include/asm/psci.h
+++ b/arch/arm64/include/asm/psci.h
@@ -16,27 +16,4 @@
 
 int psci_init(void);
 
-struct psci_power_state {
-	u16	id;
-	u8	type;
-	u8	affinity_level;
-};
-
-struct psci_operations {
-	int (*get_version)(void);
-	int (*cpu_suspend)(struct psci_power_state state,
-			   unsigned long entry_point);
-	int (*cpu_off)(struct psci_power_state state);
-	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
-	int (*migrate)(unsigned long cpuid);
-	int (*affinity_info)(unsigned long target_affinity,
-			unsigned long lowest_affinity_level);
-	int (*migrate_info_type)(void);
-};
-
-extern struct psci_operations psci_ops;
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
-int psci_apply_bp_hardening(void);
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
-
 #endif /* __ASM_PSCI_H */
diff --git a/arch/arm64/include/asm/suspend.h b/arch/arm64/include/asm/suspend.h
index 1e09b0d..e9c149c 100644
--- a/arch/arm64/include/asm/suspend.h
+++ b/arch/arm64/include/asm/suspend.h
@@ -1,7 +1,7 @@
 #ifndef __ASM_SUSPEND_H
 #define __ASM_SUSPEND_H
 
-#define NR_CTX_REGS 12
+#define NR_CTX_REGS 11
 
 /*
  * struct cpu_suspend_ctx must be 16-byte aligned since it is allocated on
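
NR_CTX_REGS sizes the register array in struct cpu_suspend_ctx, which has to
stay in step with the list of system registers saved and restored by
cpu_do_suspend()/cpu_do_resume() in the mm/proc.S hunk at the end of this
patch; dropping tpidr_el1 from that list is what shrinks the count from 12 to
11. For context, the definition this constant feeds into looks roughly like
the following (reconstructed for illustration, not part of the change):

    struct cpu_suspend_ctx {
            /* Layout must match what cpu_do_suspend() stores in mm/proc.S. */
            u64 ctx_regs[NR_CTX_REGS];
            u64 sp;
    } __aligned(16);
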
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 5887275..2e32bb0 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -451,13 +451,11 @@
 	 * Instruction abort handling
 	 */
 	mrs	x0, far_el1
-	enable_dbg
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bl	trace_hardirqs_off
-#endif
+	// enable interrupts before calling the main handler
+	enable_dbg_and_irq
 	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
 	mov	x2, sp
-	b	do_el0_ia_bp_hardening
+	b	do_mem_abort
 el0_fpsimd_acc:
 	/*
 	 * Floating Point or Advanced SIMD access
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 0146603..ec55f46 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -33,7 +33,24 @@
 #define PSCI_POWER_STATE_TYPE_STANDBY		0
 #define PSCI_POWER_STATE_TYPE_POWER_DOWN	1
 
-struct psci_operations psci_ops;
+struct psci_power_state {
+	u16	id;
+	u8	type;
+	u8	affinity_level;
+};
+
+struct psci_operations {
+	int (*cpu_suspend)(struct psci_power_state state,
+			   unsigned long entry_point);
+	int (*cpu_off)(struct psci_power_state state);
+	int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
+	int (*migrate)(unsigned long cpuid);
+	int (*affinity_info)(unsigned long target_affinity,
+			unsigned long lowest_affinity_level);
+	int (*migrate_info_type)(void);
+};
+
+static struct psci_operations psci_ops;
 
 static int (*invoke_psci_fn)(u64, u64, u64, u64);
 typedef int (*psci_initcall_t)(const struct device_node *);
@@ -110,16 +127,6 @@
 	return function_id;
 }
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
-noinline int psci_apply_bp_hardening(void)
-{
-	int err;
-
-	err = __invoke_psci_fn_smc(PSCI_0_2_FN_PSCI_VERSION, 0, 0, 0);
-	return err;
-}
-#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
-
 static int psci_get_version(void)
 {
 	int err;
@@ -258,7 +265,6 @@
 	}
 
 	pr_info("Using standard PSCI v0.2 function IDs\n");
-	psci_ops.get_version = psci_get_version;
 	psci_function_id[PSCI_FN_CPU_SUSPEND] = PSCI_0_2_FN64_CPU_SUSPEND;
 	psci_ops.cpu_suspend = psci_cpu_suspend;
 
@@ -301,7 +307,6 @@
 		goto out_put_node;
 
 	pr_info("Using PSCI v0.1 Function IDs from DT\n");
-	psci_ops.get_version = psci_get_version;
 
 	if (!of_property_read_u32(np, "cpu_suspend", &id)) {
 		psci_function_id[PSCI_FN_CPU_SUSPEND] = id;
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index e81eb53..efa2123 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -50,7 +50,6 @@
 #include <asm/fixmap.h>
 #include <asm/cputype.h>
 #include <asm/elf.h>
-#include <asm/cpufeature.h>
 #include <asm/cputable.h>
 #include <asm/cpu_ops.h>
 #include <asm/sections.h>
@@ -91,9 +90,6 @@
 
 static const char *cpu_name;
 static const char *machine_name;
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
-bool sys_psci_bp_hardening_initialised;
-#endif
 phys_addr_t __fdt_pointer __initdata;
 
 /*
@@ -226,46 +222,27 @@
 }
 #endif
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
-#include <asm/mmu_context.h>
-
-DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
-
-static void __maybe_unused __install_bp_hardening_cb(bp_hardening_cb_t fn)
+static void __init setup_processor(void)
 {
-	__this_cpu_write(bp_hardening_data.fn, fn);
-}
-
-static void __maybe_unused install_bp_hardening_cb(bp_hardening_cb_t fn)
-{
-	__install_bp_hardening_cb(fn);
-}
-
-void enable_psci_bp_hardening(void *data)
-{
-	switch(read_cpuid_part_number()) {
-	case ARM_CPU_PART_CORTEX_A57:
-	case ARM_CPU_PART_CORTEX_A72:
-		if (psci_ops.get_version)
-			install_bp_hardening_cb(
-				(bp_hardening_cb_t)psci_ops.get_version);
-		else
-			install_bp_hardening_cb(
-				(bp_hardening_cb_t)psci_apply_bp_hardening);
-	}
-}
-#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
-
-void __init setup_cpu_features(void)
-{
+	struct cpu_info *cpu_info;
 	u64 features, block;
 	u32 cwg;
 	int cls;
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
-	on_each_cpu(enable_psci_bp_hardening, NULL, true);
-	sys_psci_bp_hardening_initialised = true;
-#endif
+	cpu_info = lookup_processor_type(read_cpuid_id());
+	if (!cpu_info) {
+		printk("CPU configuration botched (ID %08x), unable to continue.\n",
+		       read_cpuid_id());
+		while (1);
+	}
+
+	cpu_name = cpu_info->cpu_name;
+
+	printk("CPU: %s [%08x] revision %d\n",
+	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15);
+
+	sprintf(init_utsname()->machine, ELF_PLATFORM);
+	elf_hwcap = 0;
 
 	/*
 	 * Check for sane CTR_EL0.CWG value.
@@ -343,25 +320,6 @@
 #endif
 }
 
-static void __init setup_processor(void)
-{
-	struct cpu_info *cpu_info;
-
-	cpu_info = lookup_processor_type(read_cpuid_id());
-	if (!cpu_info) {
-		printk("CPU configuration botched (ID %08x), unable to continue.\n",
-		       read_cpuid_id());
-		while (1);
-	}
-
-	cpu_name = cpu_info->cpu_name;
-
-	printk("CPU: %s [%08x] revision %d\n",
-	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15);
-
-	sprintf(init_utsname()->machine, ELF_PLATFORM);
-}
-
 static void __init setup_machine_fdt(phys_addr_t dt_phys)
 {
 	cpuinfo_store_cpu();
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index c690c89..797585e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -39,7 +39,6 @@
 
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
-#include <asm/cpufeature.h>
 #include <asm/cputype.h>
 #include <asm/cpu_ops.h>
 #include <asm/mmu_context.h>
@@ -162,11 +161,6 @@
 	if (cpu_ops[cpu]->cpu_postboot)
 		cpu_ops[cpu]->cpu_postboot();
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
-	if (sys_psci_bp_hardening_initialised)
-		enable_psci_bp_hardening(NULL);
-#endif
-
 	/*
 	* Log the CPU info before it is marked online and might get read.
 	*/
@@ -327,7 +321,6 @@
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
-	setup_cpu_features();
 }
 
 void __init smp_prepare_boot_cpu(void)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index d058aaf..bb7db36 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -104,6 +104,12 @@
 		flush_tlb_all();
 
 		/*
+		 * Restore per-cpu offset before any kernel
+		 * subsystem relying on it has a chance to run.
+		 */
+		set_my_cpu_offset(per_cpu_offset(cpu));
+
+		/*
 		 * Restore HW breakpoint registers to sane values
 		 * before debug exceptions are possibly reenabled
 		 * through local_dbg_restore.
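
The comment added above is the flip side of the mm/proc.S change at the end of
this patch: tpidr_el1, which holds the per-CPU offset on arm64, is no longer
part of the saved suspend context, so the C resume path has to re-establish it
before anything dereferences per-CPU data. What the added call boils down to
(kernel-internal helpers, shown only to make the ordering constraint concrete):

    /* arm64's set_my_cpu_offset(): the per-CPU base register is TPIDR_EL1. */
    static inline void set_my_cpu_offset(unsigned long off)
    {
            asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
    }

    /* In cpu_suspend(), once the MMU is back on and the TLBs are flushed: */
    set_my_cpu_offset(per_cpu_offset(cpu));

Only after this point do this-CPU accessors (this_cpu_ptr() and friends)
resolve to this CPU's data again, which is why the new call is placed ahead of
the HW breakpoint and debug state restore that follows it in cpu_suspend().
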
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index a5b64c1..baa758d 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -157,9 +157,3 @@
 	set_mm_context(mm, asid);
 	raw_spin_unlock(&cpu_asid_lock);
 }
-
-/* Errata workaround post TTBRx_EL1 update. */
-asmlinkage void post_ttbr_update_workaround(void)
-{
-	arm64_apply_bp_hardening();
-}
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 07b3528..3d1c63e 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -476,22 +476,6 @@
 	arm64_notify_die("", regs, &info, esr);
 }
 
-asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
-						   unsigned int esr,
-						   struct pt_regs *regs)
-{
-	/*
-	 * We've taken an instruction abort from userspace and not yet
-	 * re-enabled IRQs. If the address is a kernel address, apply
-	 * BP hardening prior to enabling IRQs and pre-emption.
-	 */
-	if (addr > TASK_SIZE)
-		arm64_apply_bp_hardening();
-
-	local_irq_enable();
-	do_mem_abort(addr, esr, regs);
-}
-
 /*
  * Handle stack alignment exceptions.
  */
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 8820b86..f663349 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -105,13 +105,12 @@
 	mrs	x10, mdscr_el1
 	mrs	x11, oslsr_el1
 	mrs	x12, sctlr_el1
-	mrs	x13, tpidr_el1
 	stp	x2, x3, [x0]
 	stp	x4, x5, [x0, #16]
 	stp	x6, x7, [x0, #32]
 	stp	x8, x9, [x0, #48]
 	stp	x10, x11, [x0, #64]
-	stp	x12, x13, [x0, #80]
+	str	x12, [x0, #80]
 	ret
 ENDPROC(cpu_do_suspend)
 
@@ -134,7 +133,7 @@
 	ldp	x6, x7, [x0, #32]
 	ldp	x8, x9, [x0, #48]
 	ldp	x10, x11, [x0, #64]
-	ldp	x12, x13, [x0, #80]
+	ldr	x12, [x0, #80]
 	msr	tpidr_el0, x2
 	msr	tpidrro_el0, x3
 	msr	contextidr_el1, x4
@@ -145,7 +144,6 @@
 	msr	tcr_el1, x8
 	msr	vbar_el1, x9
 	msr	mdscr_el1, x10
-	msr	tpidr_el1, x13
 	/*
 	 * Restore oslsr_el1 by writing oslar_el1
 	 */
@@ -170,7 +168,7 @@
 	bfi	x0, x1, #48, #16		// set the ASID
 	msr	ttbr0_el1, x0			// set TTBR0
 	isb
-	b	post_ttbr_update_workaround	// Back to C code...
+	ret
 ENDPROC(cpu_do_switch_mm)
 
 	.section ".text.init", #alloc, #execinstr