ANDROID: mm: allow limited speculative page faulting in do_swap_page()
Speculative page handling was disabled in do_swap_page() because it was
unsafe to call migration_entry_wait(). Other calls which are not safe
without taking mmap_lock are ksm_might_need_to_copy(), because it relies
on the VMA being stable, and swap readahead. However, if we avoid these cases,
the rest seems to be safe. Relax the check to avoid only these unsafe
cases and allow speculation otherwise.
Bug: 322762567
Bug: 329781167
Change-Id: Ic1fda0a5549088d5f37004dbacf3193116a5f868
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
(cherry picked from commit d47a714fa7cee9671216da007ef562cad6e75c61)
diff --git a/mm/memory.c b/mm/memory.c
index 857cfc9..85bf4cc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3623,8 +3623,11 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
void *shadow = NULL;
if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
+ /* ksm_might_need_to_copy() needs a stable VMA, spf can't be used */
+#ifdef CONFIG_KSM
pte_unmap(vmf->pte);
return VM_FAULT_RETRY;
+#endif
}
ret = pte_unmap_same(vmf);
@@ -3641,6 +3644,10 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
entry = pte_to_swp_entry(vmf->orig_pte);
if (unlikely(non_swap_entry(entry))) {
+ if (vmf->flags & FAULT_FLAG_SPECULATIVE) {
+ ret = VM_FAULT_RETRY;
+ goto out;
+ }
if (is_migration_entry(entry)) {
migration_entry_wait(vma->vm_mm, vmf->pmd,
vmf->address);