ANDROID: dm kcopyd: Use reserved memory for the copy buffer

MediaTek needs to use a specific reserved memory region as the merge
buffer to ensure data correctness during the merge period.

This path is enabled only when the bootarg "mtk_kcopyd_quirk" is
defined, so that it stays isolated to MediaTek platforms.

The bootarg format is: mtk_kcopyd_quirk=mediatek,dm_ota
where "dm_ota" is exactly the name of the reserved memory region.

Bug: 223346425
Change-Id: I2b295ca8c0cea65146077324c58ac17c05fe0099
Signed-off-by: Will Shiu <Will.Shiu@mediatek.com>
Signed-off-by: Stanley Chu <stanley.chu@mediatek.com>
Signed-off-by: Akilesh Kailash <akailash@google.com>
(cherry picked from commit 8dbcaf63b2bfaeef5592a94d5070ef3693e4db01)
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 1bbe4a3..9594367 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -17,6 +17,8 @@
 #include <linux/list.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/of_reserved_mem.h>
 #include <linux/pagemap.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -39,6 +41,105 @@
 module_param(kcopyd_subjob_size_kb, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(kcopyd_subjob_size_kb, "Sub-job size for dm-kcopyd clients");
 
+static bool rsm_enabled;
+static phys_addr_t rsm_mem_base, rsm_mem_size;
+
+#ifndef MODULE
+static DEFINE_SPINLOCK(rsm_lock);
+static int *rsm_mem;
+static int rsm_page_cnt;
+static int rsm_tbl_idx;
+static struct reserved_mem *rmem;
+
+static void __init kcopyd_rsm_init(void)
+{
+	static struct device_node *rsm_node;
+	int ret = 0;
+
+	if (!rsm_enabled)
+		return;
+
+	rsm_node = of_find_compatible_node(NULL, NULL, "mediatek,dm_ota");
+	if (!rsm_node) {
+		ret = -ENODEV;
+		goto out;
+	}
+
+	rmem = of_reserved_mem_lookup(rsm_node);
+	if (!rmem) {
+		ret = -EINVAL;
+		goto out_put_node;
+	}
+
+	rsm_mem_base = rmem->base;
+	rsm_mem_size = rmem->size;
+	rsm_page_cnt = rsm_mem_size / PAGE_SIZE;
+	rsm_mem = kcalloc(rsm_page_cnt, sizeof(int), GFP_KERNEL);
+	if (!rsm_mem)
+		ret = -ENOMEM;
+
+out_put_node:
+	of_node_put(rsm_node);
+out:
+	if (ret)
+		pr_warn("kcopyd: failed to init rsm: %d\n", ret);
+}
+
+static int __init kcopyd_rsm_enable(char *str)
+{
+	rsm_enabled = true;
+
+	return 0;
+}
+early_param("mtk_kcopyd_quirk", kcopyd_rsm_enable);
+
+static void kcopyd_rsm_get_page(struct page **p)
+{
+	int i;
+	unsigned long flags;
+
+	*p = NULL;
+	spin_lock_irqsave(&rsm_lock, flags);
+	for (i = 0 ; i < rsm_page_cnt ; i++) {
+		rsm_tbl_idx = (rsm_tbl_idx + 1 == rsm_page_cnt) ? 0 : rsm_tbl_idx + 1;
+
+		if (rsm_mem[rsm_tbl_idx] == 0) {
+			rsm_mem[rsm_tbl_idx] = 1;
+			*p = virt_to_page(phys_to_virt(rsm_mem_base + PAGE_SIZE
+						       * rsm_tbl_idx));
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&rsm_lock, flags);
+}
+
+static void kcopyd_rsm_drop_page(struct page **p)
+{
+	u64 off;
+	unsigned long flags;
+
+	if (*p) {
+		off = page_to_phys(*p) - rsm_mem_base;
+		spin_lock_irqsave(&rsm_lock, flags);
+		rsm_mem[off >> PAGE_SHIFT] = 0;
+		spin_unlock_irqrestore(&rsm_lock, flags);
+		*p = NULL;
+	}
+}
+
+static void kcopyd_rsm_destroy(void)
+{
+	if (rsm_enabled)
+		kfree(rsm_mem);
+}
+
+#else
+#define kcopyd_rsm_destroy(...)
+#define kcopyd_rsm_drop_page(...)
+#define kcopyd_rsm_get_page(...)
+#define kcopyd_rsm_init(...)
+#endif
+
 static unsigned dm_get_kcopyd_subjob_size(void)
 {
 	unsigned sub_job_size_kb;
@@ -211,7 +312,7 @@
 /*
  * Obtain one page for the use of kcopyd.
  */
-static struct page_list *alloc_pl(gfp_t gfp)
+static struct page_list *alloc_pl(gfp_t gfp, unsigned long job_flags)
 {
 	struct page_list *pl;
 
@@ -219,7 +320,12 @@
 	if (!pl)
 		return NULL;
 
-	pl->page = alloc_page(gfp);
+	if (rsm_enabled && test_bit(DM_KCOPYD_SNAP_MERGE, &job_flags)) {
+		kcopyd_rsm_get_page(&pl->page);
+	} else {
+		pl->page = alloc_page(gfp);
+	}
+
 	if (!pl->page) {
 		kfree(pl);
 		return NULL;
@@ -230,7 +336,14 @@
 
 static void free_pl(struct page_list *pl)
 {
-	__free_page(pl->page);
+	struct page *p = pl->page;
+	phys_addr_t pa = page_to_phys(p);
+
+	if (rsm_enabled && pa >= rsm_mem_base && pa < rsm_mem_base + rsm_mem_size)
+		kcopyd_rsm_drop_page(&pl->page);
+	else
+		__free_page(pl->page);
+
 	kfree(pl);
 }
 
@@ -258,14 +371,15 @@
 }
 
 static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
-			    unsigned int nr, struct page_list **pages)
+			    unsigned int nr, struct page_list **pages,
+			    unsigned long job_flags)
 {
 	struct page_list *pl;
 
 	*pages = NULL;
 
 	do {
-		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM);
+		pl = alloc_pl(__GFP_NOWARN | __GFP_NORETRY | __GFP_KSWAPD_RECLAIM, job_flags);
 		if (unlikely(!pl)) {
 			/* Use reserved pages */
 			pl = kc->pages;
@@ -309,7 +423,7 @@
 	struct page_list *pl = NULL, *next;
 
 	for (i = 0; i < nr_pages; i++) {
-		next = alloc_pl(GFP_KERNEL);
+		next = alloc_pl(GFP_KERNEL, 0);
 		if (!next) {
 			if (pl)
 				drop_pages(pl);
@@ -395,6 +509,8 @@
 	zero_page_list.next = &zero_page_list;
 	zero_page_list.page = ZERO_PAGE(0);
 
+	kcopyd_rsm_init();
+
 	return 0;
 }
 
@@ -402,6 +518,7 @@
 {
 	kmem_cache_destroy(_job_cache);
 	_job_cache = NULL;
+	kcopyd_rsm_destroy();
 }
 
 /*
@@ -586,7 +703,7 @@
 	int r;
 	unsigned nr_pages = dm_div_up(job->dests[0].count, PAGE_SIZE >> 9);
 
-	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages);
+	r = kcopyd_get_pages(job->kc, nr_pages, &job->pages, job->flags);
 	if (!r) {
 		/* this job is ready for io */
 		push(&job->kc->io_jobs, job);
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 41735a2..b2d8b37 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1117,7 +1117,8 @@
 	for (i = 0; i < linear_chunks; i++)
 		__check_for_conflicting_io(s, old_chunk + i);
 
-	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s);
+	dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 1 << DM_KCOPYD_SNAP_MERGE,
+		       merge_callback, s);
 	return;
 
 shut:
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
index e42de77..3a594ab 100644
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -21,6 +21,7 @@
 
 #define DM_KCOPYD_IGNORE_ERROR 1
 #define DM_KCOPYD_WRITE_SEQ    2
+#define DM_KCOPYD_SNAP_MERGE   3
 
 struct dm_kcopyd_throttle {
 	unsigned throttle;