ANDROID: mm: handle SPF using a copy of the original vma

When introducing vma refcounting to stabilize the faulting vma, the
change also stopped copying the original vma. While refcounting
protects the vma from being destroyed, it does not protect it from
concurrent modification, so the vma can still mutate unexpectedly
while the page fault is being handled. To prevent that, revert to
handling speculative page faults using a stable copy of the original
vma, which cannot be changed concurrently (see the sketch below).

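A sketch of the resulting pattern, following the arm64 hunk below
(vma lookup, seq sampling and abort paths elided; illustrative, not a
compilable excerpt):

	/* vma was looked up and refcounted earlier; seq was sampled */
	pvma = *vma;	/* snapshot the vma fields into a private copy */
	if (!mmap_seq_read_check(mm, seq, SPF_ABORT_VMA_COPY)) {
		put_vma(vma);	/* mm changed while copying: abort */
		goto spf_abort;	/* fall back to the regular path */
	}
	/* later checks and the fault itself use only the stable copy */
	fault = do_handle_mm_fault(&pvma, addr & PAGE_MASK,
			mm_flags | FAULT_FLAG_SPECULATIVE, seq, regs);
	put_vma(vma);	/* drop the refcount pinning the original */
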
Fixes: d9d7f843da68 ("ANDROID: mm: introduce vma refcounting to protect vma during SPF")
Bug: 271817185
Bug: 277700087
Change-Id: If4ec5a6282ddc037c96addb19510cc83ccd7b2ea
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
(cherry picked from commit 7ba7908a8d5207a00fca5170d183f868a3a9903e)

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 0218230..afdd8ea 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -543,6 +543,7 @@
 	unsigned long addr = untagged_addr(far);
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
 	struct vm_area_struct *vma;
+	struct vm_area_struct pvma;
 	unsigned long seq;
 #endif
 
@@ -626,17 +627,17 @@
 		count_vm_spf_event(SPF_ABORT_NO_SPECULATE);
 		goto spf_abort;
 	}
-
+	pvma = *vma;
 	if (!mmap_seq_read_check(mm, seq, SPF_ABORT_VMA_COPY)) {
 		put_vma(vma);
 		goto spf_abort;
 	}
-	if (!(vma->vm_flags & vm_flags)) {
+	if (!(pvma.vm_flags & vm_flags)) {
 		put_vma(vma);
 		count_vm_spf_event(SPF_ABORT_ACCESS_ERROR);
 		goto spf_abort;
 	}
-	fault = do_handle_mm_fault(vma, addr & PAGE_MASK,
+	fault = do_handle_mm_fault(&pvma, addr & PAGE_MASK,
 			mm_flags | FAULT_FLAG_SPECULATIVE, seq, regs);
 	put_vma(vma);
 
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 888c12f..fc98393 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -395,6 +395,7 @@
 	vm_fault_t fault, major = 0;
 	bool kprobe_fault = kprobe_page_fault(regs, 11);
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	struct vm_area_struct pvma;
 	unsigned long seq;
 #endif
 
@@ -477,25 +478,25 @@
 		count_vm_spf_event(SPF_ABORT_NO_SPECULATE);
 		goto spf_abort;
 	}
-
+	pvma = *vma;
 	if (!mmap_seq_read_check(mm, seq, SPF_ABORT_VMA_COPY)) {
 		put_vma(vma);
 		goto spf_abort;
 	}
 #ifdef CONFIG_PPC_MEM_KEYS
 	if (unlikely(access_pkey_error(is_write, is_exec,
-				       (error_code & DSISR_KEYFAULT), vma))) {
+				       (error_code & DSISR_KEYFAULT), &pvma))) {
 		put_vma(vma);
 		count_vm_spf_event(SPF_ABORT_ACCESS_ERROR);
 		goto spf_abort;
 	}
 #endif /* CONFIG_PPC_MEM_KEYS */
-	if (unlikely(access_error(is_write, is_exec, vma))) {
+	if (unlikely(access_error(is_write, is_exec, &pvma))) {
 		put_vma(vma);
 		count_vm_spf_event(SPF_ABORT_ACCESS_ERROR);
 		goto spf_abort;
 	}
-	fault = do_handle_mm_fault(vma, address,
+	fault = do_handle_mm_fault(&pvma, address,
 			flags | FAULT_FLAG_SPECULATIVE, seq, regs);
 	put_vma(vma);
 	major |= fault & VM_FAULT_MAJOR;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 83e07cb..b643180 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1227,6 +1227,7 @@
 	vm_fault_t fault;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
 #ifdef CONFIG_SPECULATIVE_PAGE_FAULT
+	struct vm_area_struct pvma;
 	unsigned long seq;
 #endif
 
@@ -1351,17 +1352,17 @@
 		count_vm_spf_event(SPF_ABORT_NO_SPECULATE);
 		goto spf_abort;
 	}
-
+	pvma = *vma;
 	if (!mmap_seq_read_check(mm, seq, SPF_ABORT_VMA_COPY)) {
 		put_vma(vma);
 		goto spf_abort;
 	}
-	if (unlikely(access_error(error_code, vma))) {
+	if (unlikely(access_error(error_code, &pvma))) {
 		put_vma(vma);
 		count_vm_spf_event(SPF_ABORT_ACCESS_ERROR);
 		goto spf_abort;
 	}
-	fault = do_handle_mm_fault(vma, address,
+	fault = do_handle_mm_fault(&pvma, address,
 			flags | FAULT_FLAG_SPECULATIVE, seq, regs);
 	put_vma(vma);