mnh_pcie_ep: dynamically allocate transfer list entries

Also change the size of a transfer list entry so that it fits within a
single page.
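
With the fixed-size ll_element[]/dma[] arrays gone, callers obtain the
ring base through the new mnh_ll_base_addr() accessor instead of reading
ll->dma[0] directly. A minimal usage sketch (mirroring the callers
updated below; src_sg/dst_sg stand in for the caller's scatterlist
arrays):

	struct mnh_dma_ll *ll = kmalloc(sizeof(*ll), GFP_KERNEL);

	if (!ll)
		return -ENOMEM;
	/* Caller initializes the list head before building,
	 * as in the google-easel-comm-hw.c hunk below.
	 */
	INIT_LIST_HEAD(&ll->entries);
	if (mnh_ll_build(src_sg, dst_sg, ll) == 0)
		mnh_set_rb_base(mnh_ll_base_addr(ll));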

Bug: 111833476
Change-Id: Ifd2a99b7d8e41f27630c651e3f207263a234842e
Signed-off-by: Sean Howarth <showarth@google.com>
diff --git a/drivers/misc/google-easel-comm-hw.c b/drivers/misc/google-easel-comm-hw.c
index 149f90b..1c30d71 100644
--- a/drivers/misc/google-easel-comm-hw.c
+++ b/drivers/misc/google-easel-comm-hw.c
@@ -541,6 +541,7 @@
 	mnh_ll = kmalloc(sizeof(struct mnh_dma_ll), GFP_KERNEL);
 	if (!mnh_ll)
 		return -ENOMEM;
+	INIT_LIST_HEAD(&mnh_ll->entries);
 	ret = mnh_ll_build(src_sg, dest_sg, mnh_ll);
 	if (ret) {
 		kfree(mnh_ll);
@@ -562,7 +563,7 @@
 uint64_t easelcomm_hw_easel_ll_addr(void *ll_data)
 {
 #ifdef EASELCOMM_EASEL
-	return (uint64_t)((struct mnh_dma_ll *)ll_data)->dma[0];
+	return mnh_ll_base_addr((struct mnh_dma_ll *)ll_data);
 #else
 	return 0;
 #endif
diff --git a/drivers/pcie_ep/mnh_pcie_ep.c b/drivers/pcie_ep/mnh_pcie_ep.c
index 8427e76..5a7adc4 100755
--- a/drivers/pcie_ep/mnh_pcie_ep.c
+++ b/drivers/pcie_ep/mnh_pcie_ep.c
@@ -1317,23 +1317,44 @@
 	return 0;
 }
 
+static struct mnh_dma_ll_element *add_ll_entry(struct mnh_dma_ll *ll,
+						dma_addr_t *dma)
+{
+	struct mnh_dma_ll_entry *entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		dev_err(pcie_ep_dev->dev,
+			"Failed to allocate LL entry for PCIe DMA scatterlist; kmalloc failed.\n");
+		return NULL;
+	}
+
+	entry->elements = mnh_alloc_coherent(
+		DMA_LL_LENGTH * sizeof(*entry->elements), &entry->dma);
+	if (!entry->elements) {
+		dev_err(pcie_ep_dev->dev,
+			"Failed to allocate LL elements for PCIe DMA scatterlist; mnh_alloc_coherent failed to allocate %zu bytes.\n",
+			DMA_LL_LENGTH * sizeof(*entry->elements));
+		kfree(entry);
+		return NULL;
+	}
+
+	list_add_tail(&entry->entry, &ll->entries);
+	*dma = entry->dma;
+
+	return entry->elements;
+}
+
 static int pcie_ll_build(struct mnh_sg_entry *src_sg,
 			struct mnh_sg_entry *dst_sg, struct mnh_dma_ll *ll)
 {
 	struct mnh_dma_ll_element *ll_element, *tmp_element;
 	struct mnh_sg_entry sg_dst, sg_src;
-	dma_addr_t dma;
+	dma_addr_t dma, dma_base;
 	int i, s, u;
 
-	ll_element = dma_alloc_coherent(pcie_ep_dev->dev,
-		DMA_LL_LENGTH *sizeof(struct mnh_dma_ll_element), &dma, GFP_KERNEL);
-	ll->size = 0;
-	ll->ll_element[0] = ll_element;
-	ll->dma[0] = dma;
-	if (!ll_element) {
-		dev_err(pcie_ep_dev->dev, "LL alloc failed \n");
-		return -EINVAL;
-		}
+	ll_element = add_ll_entry(ll, &dma_base);
+	if (!ll_element)
+		return -ENOMEM;
+
 	i = 0;
 	s = 0;
 	u = 0;
@@ -1342,6 +1363,7 @@
 	dev_dbg(pcie_ep_dev->dev, "LL checkpoint 2\n");
 	if ((sg_src.paddr == 0x0) || (sg_dst.paddr == 0x0)) {
 		dev_err(pcie_ep_dev->dev, "Input lists invalid\n");
+		pcie_ll_destroy(ll);
 		return -EINVAL;
 	}
 	while ((sg_src.paddr != 0x0) && (sg_dst.paddr != 0x0)) {
@@ -1386,35 +1408,15 @@
 				ll_element[u-1].header = LL_IRQ_DATA_ELEMENT;
 				ll_element[u].header = LL_LAST_LINK_ELEMENT;
 				ll_element[u].sar_low =
-					LOWER((uint64_t) ll->dma[0]);
+					LOWER((uint64_t) dma_base);
 				ll_element[u].sar_high =
-					UPPER((uint64_t) ll->dma[0]);
+					UPPER((uint64_t) dma_base);
 				return 0;
 			}
-			if (ll->size >= (MNH_MAX_LL_ELEMENT-1)) {
-				dev_err(pcie_ep_dev->dev, "Out of dma elements\n");
-				ll_element[u-1].header = LL_IRQ_DATA_ELEMENT;
-				ll_element[u].header = LL_LAST_LINK_ELEMENT;
-				ll_element[u].sar_low =
-					LOWER((uint64_t) ll->dma[0]);
-				ll_element[u].sar_high =
-					UPPER((uint64_t) ll->dma[0]);
-				pcie_ll_destroy(ll);
-				return -EINVAL;
-			}
-			tmp_element = dma_alloc_coherent(pcie_ep_dev->dev,
-			DMA_LL_LENGTH * sizeof(struct mnh_dma_ll_element),
-				&dma, GFP_KERNEL);
+			tmp_element = add_ll_entry(ll, &dma);
 			if (!tmp_element) {
-				dev_err(pcie_ep_dev->dev, "Element allcation failed\n");
-				ll_element[u-1].header = LL_IRQ_DATA_ELEMENT;
-				ll_element[u].header = LL_LAST_LINK_ELEMENT;
-				ll_element[u].sar_low =
-					LOWER((uint64_t) ll->dma[0]);
-				ll_element[u].sar_high =
-					UPPER((uint64_t) ll->dma[0]);
 				pcie_ll_destroy(ll);
-				return -EINVAL;
+				return -ENOMEM;
 			}
 			ll_element[u].sar_low =
 				LOWER((uint64_t) dma);
@@ -1422,29 +1424,26 @@
 				UPPER((uint64_t) dma);
 			ll_element = tmp_element;
 			u = 0;
-			ll->size++;
-			ll->ll_element[ll->size] = ll_element;
-			ll->dma[ll->size] = dma;
 		}
 	}
 	ll_element[u-1].header = LL_IRQ_DATA_ELEMENT;
 	ll_element[u].header = LL_LAST_LINK_ELEMENT;
-	ll_element[u].sar_low = LOWER((uint64_t) ll->dma[0]);
-	ll_element[u].sar_high = UPPER((uint64_t) ll->dma[0]);
+	ll_element[u].sar_low = LOWER((uint64_t) dma_base);
+	ll_element[u].sar_high = UPPER((uint64_t) dma_base);
 	return 0;
 }
 
 static int pcie_ll_destroy(struct mnh_dma_ll *ll)
 {
-	int i;
+	struct mnh_dma_ll_entry *entry, *tmp;
 
-	i = 0;
-	while (i <= ll->size) {
-		mnh_free_coherent(DMA_LL_LENGTH
-			* sizeof(struct mnh_dma_ll_element),
-			ll->ll_element[i], ll->dma[i]);
-		i++;
+	list_for_each_entry_safe(entry, tmp, &ll->entries, entry) {
+		list_del(&entry->entry);
+		mnh_free_coherent(DMA_LL_LENGTH * sizeof(*entry->elements),
+					entry->elements, entry->dma);
+		kfree(entry);
 	}
+
 	return 0;
 }
 
@@ -1584,6 +1583,19 @@
 }
 EXPORT_SYMBOL(mnh_ll_build);
 
+uint64_t mnh_ll_base_addr(struct mnh_dma_ll *ll)
+{
+	struct mnh_dma_ll_entry *entry;
+	entry = list_first_entry_or_null(&ll->entries, typeof(*entry), entry);
+	if (!entry) {
+		dev_err(pcie_ep_dev->dev, "PCIe DMA linked list is empty.\n");
+		return 0;
+	}
+
+	return (uint64_t)entry->dma;
+}
+EXPORT_SYMBOL(mnh_ll_base_addr);
+
 int mnh_ll_destroy(struct mnh_dma_ll *ll)
 {
 	return pcie_ll_destroy(ll);
diff --git a/drivers/pcie_ep/mnh_pcie_test.c b/drivers/pcie_ep/mnh_pcie_test.c
index 5121289..f0b0624 100755
--- a/drivers/pcie_ep/mnh_pcie_test.c
+++ b/drivers/pcie_ep/mnh_pcie_test.c
@@ -73,10 +73,10 @@
 	dev_err(pcie_ep_tst_device, "Start LL build \n");
 	if (dma_dir == 1) {
 		if (mnh_ll_build(sg2, sg1, ll_adr) == 0)
-			dev_err(pcie_ep_tst_device, "LL build succesfully %d  %pad\n", ll_adr->size, &ll_adr->dma[0]);
+			dev_err(pcie_ep_tst_device, "LL built successfully\n");
 	} else if (mnh_ll_build(sg1, sg2, ll_adr) == 0)
-			dev_err(pcie_ep_tst_device, "LL build succesfully %d  %pad\n", ll_adr->size, &ll_adr->dma[0]);
-	mnh_set_rb_base(ll_adr->dma[0]);
+			dev_err(pcie_ep_tst_device, "LL built successfully\n");
+	mnh_set_rb_base(mnh_ll_base_addr(ll_adr));
 	status = 2;
 	//status = 3;
 	return 0;
diff --git a/include/linux/mnh_pcie_ep.h b/include/linux/mnh_pcie_ep.h
index 9a2dd08..9ab4f1e 100755
--- a/include/linux/mnh_pcie_ep.h
+++ b/include/linux/mnh_pcie_ep.h
@@ -17,12 +17,11 @@
  * @version 1.0
  */
 
+#include <linux/list.h>
 #include <linux/types.h>
 #ifndef __LINUX_MNH_PCIE_EP_H
 #define __LINUX_MNH_PCIE_EP_H
 #define MNH_PCIE_DEBUG_ENABLE 1
-#define MNH_MAX_LL 256
-#define MNH_MAX_LL_ELEMENT 64
 /* TODO implement to mask sysfs and other code */
 
 /*****************************************************************************
@@ -145,10 +144,14 @@
 	uint32_t dar_high;
 };
 
+struct mnh_dma_ll_entry {
+	struct mnh_dma_ll_element *elements;
+	dma_addr_t dma;
+	struct list_head entry;
+};
+
 struct mnh_dma_ll {
-	uint32_t size;
-	struct mnh_dma_ll_element *ll_element[MNH_MAX_LL_ELEMENT];
-	dma_addr_t dma[MNH_MAX_LL_ELEMENT];
+	struct list_head entries;
 };
 
 typedef int (*pm_callback_t)(enum mnh_ep_pm_event_t event, void *param);
@@ -241,6 +244,8 @@
 int mnh_ll_build(struct mnh_sg_entry *src_sg, struct mnh_sg_entry *dst_sg,
 					struct mnh_dma_ll *ll);
 
+uint64_t mnh_ll_base_addr(struct mnh_dma_ll *ll);
+
 int mnh_ll_destroy(struct mnh_dma_ll *ll);
 
 void *mnh_alloc_coherent(size_t size, dma_addr_t *dma_adr);
diff --git a/include/linux/mnh_pcie_str.h b/include/linux/mnh_pcie_str.h
index 65319ef..dd7ad04 100755
--- a/include/linux/mnh_pcie_str.h
+++ b/include/linux/mnh_pcie_str.h
@@ -34,8 +34,11 @@
 #define REGION_1		1
 #define REGION_2		2
 
-//#define DMA_LL_LENGTH			20 /*TODO Needs to be optimized */
-#define DMA_LL_LENGTH			256 /*TODO Needs to be optimized */
+/* Size of a single DMA linked list element/descriptor (which is represented in
+ * code by mnh_dma_ll_element).
+ */
+#define DMA_LL_ELEMENT_SIZE	24
+#define DMA_LL_LENGTH		(PAGE_SIZE / DMA_LL_ELEMENT_SIZE)
 
 #define LL_DATA_ELEMENT			0x1
 #define LL_IRQ_DATA_ELEMENT		0x19