Revert "FROMGIT: mm: improve mprotect(R|W) efficiency on pages referenced once"

This reverts commit b44e46bb047d136bc8977497b6fc2a9f08740321.

Reason for revert:

Following feedback from Linus, the patch has not yet landed upstream:
https://lore.kernel.org/all/CAHk-=wj4KCujAH_oPh40Bkp48amM4MXr+8AcbZ=qd5LF4Q+TDg@mail.gmail.com/#t

Bug: 213339151
Signed-off-by: Peter Collingbourne <pcc@google.com>
Change-Id: I81c2cef4076487df1dd0ee75449dcb2371ac1dbc
(cherry picked from commit ac4488815518c236e60c0048833c51a76404b1b6)

diff --git a/mm/mprotect.c b/mm/mprotect.c
index 05073d7..d95115a 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -35,51 +35,6 @@
 
 #include "internal.h"
 
-/* Determine whether we can avoid taking write faults for known dirty pages. */
-static bool may_avoid_write_fault(pte_t pte, struct vm_area_struct *vma,
-				  unsigned long cp_flags)
-{
-	/*
-	 * The dirty accountable bit indicates that we can always make the page
-	 * writable regardless of the number of references.
-	 */
-	if (!(cp_flags & MM_CP_DIRTY_ACCT)) {
-		/* Otherwise, we must have exclusive access to the page. */
-		if (!(vma_is_anonymous(vma) && (vma->vm_flags & VM_WRITE)))
-			return false;
-
-		if (page_count(pte_page(pte)) != 1)
-			return false;
-	}
-
-	/*
-	 * Don't do this optimization for clean pages as we need to be notified
-	 * of the transition from clean to dirty.
-	 */
-	if (!pte_dirty(pte))
-		return false;
-
-	/* Same for softdirty. */
-	if (!pte_soft_dirty(pte) && (vma->vm_flags & VM_SOFTDIRTY))
-		return false;
-
-	/*
-	 * For userfaultfd the user program needs to monitor write faults so we
-	 * can't do this optimization.
-	 */
-	if (pte_uffd_wp(pte))
-		return false;
-
-	/*
-	 * It is unclear whether this optimization can be done safely for NUMA
-	 * pages.
-	 */
-	if (cp_flags & MM_CP_PROT_NUMA)
-		return false;
-
-	return true;
-}
-
 static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long addr, unsigned long end, pgprot_t newprot,
 		unsigned long cp_flags)
@@ -88,6 +43,7 @@
 	spinlock_t *ptl;
 	unsigned long pages = 0;
 	int target_node = NUMA_NO_NODE;
+	bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT;
 	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
@@ -175,8 +131,12 @@
 				ptent = pte_clear_uffd_wp(ptent);
 			}
 
-			if (may_avoid_write_fault(ptent, vma, cp_flags))
+			/* Avoid taking write faults for known dirty pages */
+			if (dirty_accountable && pte_dirty(ptent) &&
+					(pte_soft_dirty(ptent) ||
+					 !(vma->vm_flags & VM_SOFTDIRTY))) {
 				ptent = pte_mkwrite(ptent);
+			}
 			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
 			pages++;
 		} else if (is_swap_pte(oldpte)) {
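
For reference, the two predicates differ only in when a PTE may be mapped
writable immediately instead of taking a later write fault. The user-space
sketch below mirrors both checks from the diff using stand-in structs in
place of the kernel's pte/vma state; every name in it is illustrative, not
kernel API.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for kernel state; purely illustrative. */
struct pte_state {
	bool dirty;		/* pte_dirty() */
	bool soft_dirty;	/* pte_soft_dirty() */
	bool uffd_wp;		/* pte_uffd_wp() */
	int page_count;		/* page_count(pte_page(pte)) */
};

struct vma_state {
	bool anonymous;		/* vma_is_anonymous() */
	bool vm_write;		/* vma->vm_flags & VM_WRITE */
	bool vm_softdirty;	/* vma->vm_flags & VM_SOFTDIRTY */
};

/*
 * Restored upstream check: only dirty-accountable, dirty,
 * softdirty-consistent PTEs are made writable immediately.
 */
static bool restored_check(bool dirty_accountable,
			   const struct pte_state *pte,
			   const struct vma_state *vma)
{
	return dirty_accountable && pte->dirty &&
	       (pte->soft_dirty || !vma->vm_softdirty);
}

/*
 * Reverted optimization: additionally covered exclusively owned
 * (page_count == 1) anonymous pages even without MM_CP_DIRTY_ACCT,
 * with extra bail-outs for uffd-wp and NUMA protection changes.
 */
static bool reverted_check(bool dirty_accountable, bool prot_numa,
			   const struct pte_state *pte,
			   const struct vma_state *vma)
{
	if (!dirty_accountable) {
		if (!(vma->anonymous && vma->vm_write))
			return false;
		if (pte->page_count != 1)
			return false;
	}
	if (!pte->dirty)
		return false;
	if (!pte->soft_dirty && vma->vm_softdirty)
		return false;
	if (pte->uffd_wp)
		return false;
	if (prot_numa)
		return false;
	return true;
}

int main(void)
{
	/* Dirty, exclusively owned anonymous page, no MM_CP_DIRTY_ACCT. */
	struct pte_state pte = { .dirty = true, .page_count = 1 };
	struct vma_state vma = { .anonymous = true, .vm_write = true };

	printf("restored: %d\n", restored_check(false, &pte, &vma));
	printf("reverted: %d\n", reverted_check(false, false, &pte, &vma));
	return 0;
}

This prints restored: 0 and reverted: 1: the exclusively owned dirty
anonymous page is exactly the case the reverted patch stopped
write-faulting on, and this revert reinstates the write fault for it.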