Revert "FROMLIST: kasan: allow sampling page_alloc allocations for HW_TAGS"

This reverts commit a2a9e34d164e90fc08d35fd097a164b9101d72ef.

Reason for revert:
Observed frequent boot crashes on a device with KASAN page_alloc
sampling enabled.

Bug: 265863271
Change-Id: Ib7860295065ed7aaa36d9e47d8aaa97918c7bc57
Signed-off-by: Peter Collingbourne <pcc@google.com>
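
For reference, the reverted change made __kasan_unpoison_pages() return
a bool indicating whether the pages were actually tagged, driven by a
per-CPU skip counter in kasan_sample_page_alloc() (see the
mm/kasan/kasan.h hunk below). A minimal userspace sketch of that counter
logic follows; the this_cpu_dec_return()/this_cpu_write() pair is
modeled as a plain pre-decrement on a global, and a sampling interval of
4 is chosen purely for illustration (the removed default was 1, i.e. tag
every allocation):

  #include <stdbool.h>
  #include <stdio.h>

  /* Sampling interval: tag one in every N sufficiently large allocations. */
  static unsigned long kasan_page_alloc_sample = 4;
  /* Minimum order affected by sampling (removed default: 3). */
  static unsigned int kasan_page_alloc_sample_order = 3;
  /* Per-CPU in the kernel; a plain global in this sketch. */
  static long kasan_page_alloc_skip;

  static bool kasan_sample_page_alloc(unsigned int order)
  {
          /* Fast path: sampling disabled, tag every allocation. */
          if (kasan_page_alloc_sample == 1)
                  return true;

          /* Allocations below the minimum order are always tagged. */
          if (order < kasan_page_alloc_sample_order)
                  return true;

          /* Tag one allocation per interval, skip the rest. */
          if (--kasan_page_alloc_skip < 0) {
                  kasan_page_alloc_skip = kasan_page_alloc_sample - 1;
                  return true;
          }
          return false;
  }

  int main(void)
  {
          for (int i = 0; i < 8; i++)
                  printf("order-3 alloc %d: %s\n", i,
                         kasan_sample_page_alloc(3) ? "tagged" : "skipped");
          return 0;
  }

Run as-is, this prints "tagged" for one out of every four order-3
allocations; the "skipped" allocations are exactly the ones whose bad
accesses HW_TAGS KASAN would miss, which is the checking gap this revert
removes.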
diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
index e66916a..5c93ab9 100644
--- a/Documentation/dev-tools/kasan.rst
+++ b/Documentation/dev-tools/kasan.rst
@@ -140,23 +140,6 @@
 - ``kasan.vmalloc=off`` or ``=on`` disables or enables tagging of vmalloc
   allocations (default: ``on``).
 
-- ``kasan.page_alloc.sample=<sampling interval>`` makes KASAN tag only every
-  Nth page_alloc allocation with the order equal or greater than
-  ``kasan.page_alloc.sample.order``, where N is the value of the ``sample``
-  parameter (default: ``1``, or tag every such allocation).
-  This parameter is intended to mitigate the performance overhead introduced
-  by KASAN.
-  Note that enabling this parameter makes Hardware Tag-Based KASAN skip checks
-  of allocations chosen by sampling and thus miss bad accesses to these
-  allocations. Use the default value for accurate bug detection.
-
-- ``kasan.page_alloc.sample.order=<minimum page order>`` specifies the minimum
-  order of allocations that are affected by sampling (default: ``3``).
-  Only applies when ``kasan.page_alloc.sample`` is set to a value greater
-  than ``1``.
-  This parameter is intended to allow sampling only large page_alloc
-  allocations, which is the biggest source of the performance overhead.
-
 Error reports
 ~~~~~~~~~~~~~
 
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 6293091..d811b3d7 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -120,13 +120,12 @@
 		__kasan_poison_pages(page, order, init);
 }
 
-bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
-static __always_inline bool kasan_unpoison_pages(struct page *page,
+void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_unpoison_pages(struct page *page,
 						 unsigned int order, bool init)
 {
 	if (kasan_enabled())
-		return __kasan_unpoison_pages(page, order, init);
-	return false;
+		__kasan_unpoison_pages(page, order, init);
 }
 
 void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
@@ -250,11 +249,8 @@
 static inline void kasan_unpoison_range(const void *address, size_t size) {}
 static inline void kasan_poison_pages(struct page *page, unsigned int order,
 				      bool init) {}
-static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
-					bool init)
-{
-	return false;
-}
+static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
+					bool init) {}
 static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
 static inline void kasan_poison_slab(struct slab *slab) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 1d0008e..833bf2c 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -95,24 +95,19 @@
 }
 #endif /* CONFIG_KASAN_STACK */
 
-bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
+void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
 {
 	u8 tag;
 	unsigned long i;
 
 	if (unlikely(PageHighMem(page)))
-		return false;
-
-	if (!kasan_sample_page_alloc(order))
-		return false;
+		return;
 
 	tag = kasan_random_tag();
 	kasan_unpoison(set_tag(page_address(page), tag),
 		       PAGE_SIZE << order, init);
 	for (i = 0; i < (1 << order); i++)
 		page_kasan_tag_set(page + i, tag);
-
-	return true;
 }
 
 void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index d1bcb02..b22c4f4 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -59,24 +59,6 @@
 /* Whether to enable vmalloc tagging. */
 DEFINE_STATIC_KEY_TRUE(kasan_flag_vmalloc);
 
-#define PAGE_ALLOC_SAMPLE_DEFAULT	1
-#define PAGE_ALLOC_SAMPLE_ORDER_DEFAULT	3
-
-/*
- * Sampling interval of page_alloc allocation (un)poisoning.
- * Defaults to no sampling.
- */
-unsigned long kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;
-
-/*
- * Minimum order of page_alloc allocations to be affected by sampling.
- * The default value is chosen to match both
- * PAGE_ALLOC_COSTLY_ORDER and SKB_FRAG_PAGE_ORDER.
- */
-unsigned int kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;
-
-DEFINE_PER_CPU(long, kasan_page_alloc_skip);
-
 /* kasan=off/on */
 static int __init early_kasan_flag(char *arg)
 {
@@ -140,48 +122,6 @@
 		return "sync";
 }
 
-/* kasan.page_alloc.sample=<sampling interval> */
-static int __init early_kasan_flag_page_alloc_sample(char *arg)
-{
-	int rv;
-
-	if (!arg)
-		return -EINVAL;
-
-	rv = kstrtoul(arg, 0, &kasan_page_alloc_sample);
-	if (rv)
-		return rv;
-
-	if (!kasan_page_alloc_sample || kasan_page_alloc_sample > LONG_MAX) {
-		kasan_page_alloc_sample = PAGE_ALLOC_SAMPLE_DEFAULT;
-		return -EINVAL;
-	}
-
-	return 0;
-}
-early_param("kasan.page_alloc.sample", early_kasan_flag_page_alloc_sample);
-
-/* kasan.page_alloc.sample.order=<minimum page order> */
-static int __init early_kasan_flag_page_alloc_sample_order(char *arg)
-{
-	int rv;
-
-	if (!arg)
-		return -EINVAL;
-
-	rv = kstrtouint(arg, 0, &kasan_page_alloc_sample_order);
-	if (rv)
-		return rv;
-
-	if (kasan_page_alloc_sample_order > INT_MAX) {
-		kasan_page_alloc_sample_order = PAGE_ALLOC_SAMPLE_ORDER_DEFAULT;
-		return -EINVAL;
-	}
-
-	return 0;
-}
-early_param("kasan.page_alloc.sample.order", early_kasan_flag_page_alloc_sample_order);
-
 /*
  * kasan_init_hw_tags_cpu() is called for each CPU.
  * Not marked as __init as a CPU can be hot-plugged after boot.
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 8b6e0983..abbcc1b 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -42,10 +42,6 @@
 
 extern enum kasan_mode kasan_mode __ro_after_init;
 
-extern unsigned long kasan_page_alloc_sample;
-extern unsigned int kasan_page_alloc_sample_order;
-DECLARE_PER_CPU(long, kasan_page_alloc_skip);
-
 static inline bool kasan_vmalloc_enabled(void)
 {
 	return static_branch_likely(&kasan_flag_vmalloc);
@@ -61,24 +57,6 @@
 	return kasan_mode == KASAN_MODE_SYNC || kasan_mode == KASAN_MODE_ASYMM;
 }
 
-static inline bool kasan_sample_page_alloc(unsigned int order)
-{
-	/* Fast-path for when sampling is disabled. */
-	if (kasan_page_alloc_sample == 1)
-		return true;
-
-	if (order < kasan_page_alloc_sample_order)
-		return true;
-
-	if (this_cpu_dec_return(kasan_page_alloc_skip) < 0) {
-		this_cpu_write(kasan_page_alloc_skip,
-			       kasan_page_alloc_sample - 1);
-		return true;
-	}
-
-	return false;
-}
-
 #else /* CONFIG_KASAN_HW_TAGS */
 
 static inline bool kasan_async_fault_possible(void)
@@ -91,11 +69,6 @@
 	return true;
 }
 
-static inline bool kasan_sample_page_alloc(unsigned int order)
-{
-	return true;
-}
-
 #endif /* CONFIG_KASAN_HW_TAGS */
 
 #ifdef CONFIG_KASAN_GENERIC
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 0876589..285628e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1385,8 +1385,6 @@
  *    see the comment next to it.
  * 3. Skipping poisoning is requested via __GFP_SKIP_KASAN_POISON,
  *    see the comment next to it.
- * 4. The allocation is excluded from being checked due to sampling,
- *    see the call to kasan_unpoison_pages.
  *
  * Poisoning pages during deferred memory init will greatly lengthen the
  * process and cause problem in large memory systems as the deferred pages
@@ -2496,8 +2494,7 @@
 {
 	bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags) &&
 			!should_skip_init(gfp_flags);
-	bool zero_tags = init && (gfp_flags & __GFP_ZEROTAGS);
-	bool reset_tags = !zero_tags;
+	bool init_tags = init && (gfp_flags & __GFP_ZEROTAGS);
 	int i;
 
 	set_page_private(page, 0);
@@ -2520,42 +2517,30 @@
 	 */
 
 	/*
-	 * If memory tags should be zeroed
-	 * (which happens only when memory should be initialized as well).
+	 * If memory tags should be zeroed (which happens only when memory
+	 * should be initialized as well).
 	 */
-	if (zero_tags) {
+	if (init_tags) {
 		/* Initialize both memory and tags. */
 		for (i = 0; i != 1 << order; ++i)
 			tag_clear_highpage(page + i);
 
-		/* Take note that memory was initialized by the loop above. */
+		/* Note that memory is already initialized by the loop above. */
 		init = false;
 	}
 	if (!should_skip_kasan_unpoison(gfp_flags)) {
-		/* Try unpoisoning (or setting tags) and initializing memory. */
-		if (kasan_unpoison_pages(page, order, init)) {
-			/* Take note that memory was initialized by KASAN. */
-			if (kasan_has_integrated_init())
-				init = false;
-			/* Take note that memory tags were set by KASAN. */
-			reset_tags = false;
-		} else {
-			/*
-			 * KASAN decided to exclude this allocation from being
-			 * poisoned due to sampling. Skip poisoning as well.
-			 */
-			SetPageSkipKASanPoison(page);
-		}
-	}
-	/*
-	 * If memory tags have not been set, reset the page tags to ensure
-	 * page_address() dereferencing does not fault.
-	 */
-	if (reset_tags) {
+		/* Unpoison shadow memory or set memory tags. */
+		kasan_unpoison_pages(page, order, init);
+
+		/* Note that memory is already initialized by KASAN. */
+		if (kasan_has_integrated_init())
+			init = false;
+	} else {
+		/* Ensure page_address() dereferencing does not fault. */
 		for (i = 0; i != 1 << order; ++i)
 			page_kasan_tag_reset(page + i);
 	}
-	/* If memory is still not initialized, initialize it now. */
+	/* If memory is still not initialized, do it now. */
 	if (init)
 		kernel_init_pages(page, 1 << order);
 	/* Propagate __GFP_SKIP_KASAN_POISON to page flags. */
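
Note: with this revert, the kasan.page_alloc.sample and
kasan.page_alloc.sample.order early parameters are no longer recognized.
A command line that previously enabled sampling, for example

  kasan.page_alloc.sample=16 kasan.page_alloc.sample.order=4

(tag only one in every sixteen page_alloc allocations of order 4 or
higher), has no effect after this change.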