From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Sandeep Patil <sspatil@google.com>
Date: Wed, 7 Aug 2019 15:01:24 -0700
Subject: ANDROID: staging: ion: refactor ion's dmabuf manipulators into a
separate file.

This patch is preparatory work for making ION heaps modular. It makes
no significant changes beyond consolidating the dma-buf manipulator
functions into a single file, which will make it easier to export some
of these functions later for use by a heap module.

Bug: 133508579
Test: ion-unit-tests
Change-Id: I52ecea78a179c936006a21beb7630009984cb22f
Co-developed-by: Isaac J. Manjarres <isaacm@codeaurora.org>
Signed-off-by: Sandeep Patil <sspatil@google.com>
---
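
For reference, a minimal sketch of how an in-kernel client allocates a
buffer with this interface (the heap id and flags below are
illustrative; any heap id mask and ION_FLAG_* combination works the
same way):

	struct dma_buf *dmabuf;

	/* Allocate a 4K cached buffer from heap id 0. */
	dmabuf = ion_alloc(SZ_4K, 1 << 0, ION_FLAG_CACHED);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* ... attach devices and map the buffer via the dma-buf API ... */

	/* The final reference drop ends up in ion_dma_buf_release(). */
	dma_buf_put(dmabuf);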
drivers/staging/android/ion/Makefile | 2 +-
drivers/staging/android/ion/ion.c | 254 +---------------------
drivers/staging/android/ion/ion_buffer.c | 6 +-
drivers/staging/android/ion/ion_dma_buf.c | 252 +++++++++++++++++++++
drivers/staging/android/ion/ion_private.h | 11 +-
include/linux/ion.h | 16 +-
6 files changed, 289 insertions(+), 252 deletions(-)
create mode 100644 drivers/staging/android/ion/ion_dma_buf.c
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index da386dbf3ffd..b6fab2816781 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_ION) += ion.o ion_buffer.o ion_heap.o
+obj-$(CONFIG_ION) += ion.o ion_buffer.o ion_dma_buf.o ion_heap.o
obj-$(CONFIG_ION_SYSTEM_HEAP) += ion_system_heap.o ion_page_pool.o
obj-$(CONFIG_ION_CMA_HEAP) += ion_cma_heap.o
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 19d3e82b2f5b..c80a31da371b 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -17,276 +17,38 @@
#include <linux/fs.h>
#include <linux/kthread.h>
#include <linux/list.h>
-#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include <linux/vmalloc.h>
#include "ion_private.h"
static struct ion_device *internal_dev;
static int heap_id;
-static struct sg_table *dup_sg_table(struct sg_table *table)
-{
- struct sg_table *new_table;
- int ret, i;
- struct scatterlist *sg, *new_sg;
-
- new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
- if (!new_table)
- return ERR_PTR(-ENOMEM);
-
- ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
- if (ret) {
- kfree(new_table);
- return ERR_PTR(-ENOMEM);
- }
-
- new_sg = new_table->sgl;
- for_each_sg(table->sgl, sg, table->nents, i) {
- memcpy(new_sg, sg, sizeof(*sg));
- new_sg->dma_address = 0;
- new_sg = sg_next(new_sg);
- }
-
- return new_table;
-}
-
-static void free_duped_table(struct sg_table *table)
-{
- sg_free_table(table);
- kfree(table);
-}
-
-struct ion_dma_buf_attachment {
- struct device *dev;
- struct sg_table *table;
- struct list_head list;
-};
-
-static int ion_dma_buf_attach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attachment)
-{
- struct ion_dma_buf_attachment *a;
- struct sg_table *table;
- struct ion_buffer *buffer = dmabuf->priv;
-
- a = kzalloc(sizeof(*a), GFP_KERNEL);
- if (!a)
- return -ENOMEM;
-
- table = dup_sg_table(buffer->sg_table);
- if (IS_ERR(table)) {
- kfree(a);
- return -ENOMEM;
- }
-
- a->table = table;
- a->dev = attachment->dev;
- INIT_LIST_HEAD(&a->list);
-
- attachment->priv = a;
-
- mutex_lock(&buffer->lock);
- list_add(&a->list, &buffer->attachments);
- mutex_unlock(&buffer->lock);
-
- return 0;
-}
-
-static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
- struct dma_buf_attachment *attachment)
-{
- struct ion_dma_buf_attachment *a = attachment->priv;
- struct ion_buffer *buffer = dmabuf->priv;
-
- mutex_lock(&buffer->lock);
- list_del(&a->list);
- mutex_unlock(&buffer->lock);
- free_duped_table(a->table);
-
- kfree(a);
-}
-
-static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
- enum dma_data_direction direction)
-{
- struct ion_dma_buf_attachment *a = attachment->priv;
- struct sg_table *table;
-
- table = a->table;
-
- if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
- direction))
- return ERR_PTR(-ENOMEM);
-
- return table;
-}
-
-static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
- struct sg_table *table,
- enum dma_data_direction direction)
-{
- dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
-}
-
-static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
-{
- struct ion_buffer *buffer = dmabuf->priv;
- int ret = 0;
-
- if (!buffer->heap->ops->map_user) {
- pr_err("%s: this heap does not define a method for mapping to userspace\n",
- __func__);
- return -EINVAL;
- }
-
- if (!(buffer->flags & ION_FLAG_CACHED))
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- mutex_lock(&buffer->lock);
- /* now map it to userspace */
- ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
- mutex_unlock(&buffer->lock);
-
- if (ret)
- pr_err("%s: failure mapping buffer to userspace\n",
- __func__);
-
- return ret;
-}
-
-static void ion_dma_buf_release(struct dma_buf *dmabuf)
-{
- struct ion_buffer *buffer = dmabuf->priv;
-
- ion_buffer_destroy(internal_dev, buffer);
-}
-
-static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
-{
- struct ion_buffer *buffer = dmabuf->priv;
-
- return buffer->vaddr + offset * PAGE_SIZE;
-}
-
-static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
- void *ptr)
-{
-}
-
-static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
- enum dma_data_direction direction)
-{
- struct ion_buffer *buffer = dmabuf->priv;
- void *vaddr;
- struct ion_dma_buf_attachment *a;
- int ret = 0;
-
- /*
- * TODO: Move this elsewhere because we don't always need a vaddr
- */
- if (buffer->heap->ops->map_kernel) {
- mutex_lock(&buffer->lock);
- vaddr = ion_buffer_kmap_get(buffer);
- if (IS_ERR(vaddr)) {
- ret = PTR_ERR(vaddr);
- goto unlock;
- }
- mutex_unlock(&buffer->lock);
- }
-
- mutex_lock(&buffer->lock);
- list_for_each_entry(a, &buffer->attachments, list) {
- dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
- direction);
- }
-
-unlock:
- mutex_unlock(&buffer->lock);
- return ret;
-}
-
-static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
- enum dma_data_direction direction)
-{
- struct ion_buffer *buffer = dmabuf->priv;
- struct ion_dma_buf_attachment *a;
-
- if (buffer->heap->ops->map_kernel) {
- mutex_lock(&buffer->lock);
- ion_buffer_kmap_put(buffer);
- mutex_unlock(&buffer->lock);
- }
-
- mutex_lock(&buffer->lock);
- list_for_each_entry(a, &buffer->attachments, list) {
- dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
- direction);
- }
- mutex_unlock(&buffer->lock);
-
- return 0;
-}
-
-static const struct dma_buf_ops dma_buf_ops = {
- .map_dma_buf = ion_map_dma_buf,
- .unmap_dma_buf = ion_unmap_dma_buf,
- .mmap = ion_mmap,
- .release = ion_dma_buf_release,
- .attach = ion_dma_buf_attach,
- .detach = ion_dma_buf_detatch,
- .begin_cpu_access = ion_dma_buf_begin_cpu_access,
- .end_cpu_access = ion_dma_buf_end_cpu_access,
- .map = ion_dma_buf_kmap,
- .unmap = ion_dma_buf_kunmap,
-};
-
-static struct dma_buf *ion_alloc_dmabuf(size_t len, unsigned int heap_id_mask,
- unsigned int flags)
-{
- struct ion_buffer *buffer;
- DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
- struct dma_buf *dmabuf;
-
- pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
- len, heap_id_mask, flags);
-
- buffer = ion_buffer_alloc(internal_dev, len, heap_id_mask, flags);
- if (IS_ERR(buffer))
- return ERR_CAST(buffer);
-
- exp_info.ops = &dma_buf_ops;
- exp_info.size = buffer->size;
- exp_info.flags = O_RDWR;
- exp_info.priv = buffer;
-
- dmabuf = dma_buf_export(&exp_info);
- if (IS_ERR(dmabuf))
- ion_buffer_destroy(internal_dev, buffer);
-
- return dmabuf;
-}
-
+/* Entry into the ION allocator for the rest of the kernel */
struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
unsigned int flags)
{
- return ion_alloc_dmabuf(len, heap_id_mask, flags);
+ return ion_dmabuf_alloc(internal_dev, len, heap_id_mask, flags);
}
EXPORT_SYMBOL_GPL(ion_alloc);
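+/* Free an ion buffer; called from the dmabuf release path */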
+int ion_free(struct ion_buffer *buffer)
+{
+ return ion_buffer_destroy(internal_dev, buffer);
+}
+
static int ion_alloc_fd(size_t len, unsigned int heap_id_mask,
unsigned int flags)
{
int fd;
struct dma_buf *dmabuf;
- dmabuf = ion_alloc_dmabuf(len, heap_id_mask, flags);
+ dmabuf = ion_dmabuf_alloc(internal_dev, len, heap_id_mask, flags);
if (IS_ERR(dmabuf))
return PTR_ERR(dmabuf);
diff --git a/drivers/staging/android/ion/ion_buffer.c b/drivers/staging/android/ion/ion_buffer.c
index 8b8dd1e216d4..c10ea6672f1a 100644
--- a/drivers/staging/android/ion/ion_buffer.c
+++ b/drivers/staging/android/ion/ion_buffer.c
@@ -149,13 +149,13 @@ void ion_buffer_release(struct ion_buffer *buffer)
kfree(buffer);
}
-void ion_buffer_destroy(struct ion_device *dev, struct ion_buffer *buffer)
+int ion_buffer_destroy(struct ion_device *dev, struct ion_buffer *buffer)
{
struct ion_heap *heap;
if (!dev || !buffer) {
pr_warn("%s: invalid argument\n", __func__);
- return;
+ return -EINVAL;
}
heap = buffer->heap;
@@ -167,6 +167,8 @@ void ion_buffer_destroy(struct ion_device *dev, struct ion_buffer *buffer)
ion_heap_freelist_add(heap, buffer);
else
ion_buffer_release(buffer);
+
+ return 0;
}
void *ion_buffer_kmap_get(struct ion_buffer *buffer)
diff --git a/drivers/staging/android/ion/ion_dma_buf.c b/drivers/staging/android/ion/ion_dma_buf.c
new file mode 100644
index 000000000000..0de08e6f1af5
--- /dev/null
+++ b/drivers/staging/android/ion/ion_dma_buf.c
@@ -0,0 +1,252 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ION Memory Allocator - dmabuf interface
+ *
+ * Copyright (c) 2019, Google, Inc.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/mm.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include "ion_private.h"
+
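+/*
+ * Duplicate the buffer's sg_table so that each attachment gets its own
+ * copy, with a private dma_address per entry, leaving the buffer's
+ * original table untouched.
+ */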
+static struct sg_table *dup_sg_table(struct sg_table *table)
+{
+ struct sg_table *new_table;
+ int ret, i;
+ struct scatterlist *sg, *new_sg;
+
+ new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
+ if (!new_table)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
+ if (ret) {
+ kfree(new_table);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ new_sg = new_table->sgl;
+ for_each_sg(table->sgl, sg, table->nents, i) {
+ memcpy(new_sg, sg, sizeof(*sg));
+ new_sg->dma_address = 0;
+ new_sg = sg_next(new_sg);
+ }
+
+ return new_table;
+}
+
+static void free_duped_table(struct sg_table *table)
+{
+ sg_free_table(table);
+ kfree(table);
+}
+
+struct ion_dma_buf_attachment {
+ struct device *dev;
+ struct sg_table *table;
+ struct list_head list;
+};
+
+static int ion_dma_buf_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct ion_dma_buf_attachment *a;
+ struct sg_table *table;
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ table = dup_sg_table(buffer->sg_table);
+ if (IS_ERR(table)) {
+ kfree(a);
+ return -ENOMEM;
+ }
+
+ a->table = table;
+ a->dev = attachment->dev;
+ INIT_LIST_HEAD(&a->list);
+
+ attachment->priv = a;
+
+ mutex_lock(&buffer->lock);
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void ion_dma_buf_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct ion_dma_buf_attachment *a = attachment->priv;
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ list_del(&a->list);
+ mutex_unlock(&buffer->lock);
+ free_duped_table(a->table);
+
+ kfree(a);
+}
+
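+/*
+ * Map the attachment's private sg_table into the attached device's
+ * DMA address space.
+ */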
+static struct sg_table *ion_dma_buf_map(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct ion_dma_buf_attachment *a = attachment->priv;
+ struct sg_table *table;
+
+ table = a->table;
+
+ if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
+ direction))
+ return ERR_PTR(-ENOMEM);
+
+ return table;
+}
+
+static void ion_dma_buf_unmap(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
+}
+
+static int ion_dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ int ret = 0;
+
+ if (!buffer->heap->ops->map_user) {
+ pr_err("%s: this heap does not define a method for mapping to userspace\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (!(buffer->flags & ION_FLAG_CACHED))
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ mutex_lock(&buffer->lock);
+ /* now map it to userspace */
+ ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
+ mutex_unlock(&buffer->lock);
+
+ if (ret)
+ pr_err("%s: failure mapping buffer to userspace\n",
+ __func__);
+
+ return ret;
+}
+
+static void ion_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ ion_free(buffer);
+}
+
+static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ return buffer->vaddr + offset * PAGE_SIZE;
+}
+
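+/*
+ * Sync the buffer for CPU access: grab a kernel mapping if the heap
+ * provides one, then sync every attached device's mapping for the CPU.
+ */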
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+ struct ion_dma_buf_attachment *a;
+ int ret = 0;
+
+ /*
+ * TODO: Move this elsewhere because we don't always need a vaddr
+ */
+ if (buffer->heap->ops->map_kernel) {
+ mutex_lock(&buffer->lock);
+ vaddr = ion_buffer_kmap_get(buffer);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto unlock;
+ }
+ mutex_unlock(&buffer->lock);
+ }
+
+ mutex_lock(&buffer->lock);
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
+ direction);
+ }
+
+unlock:
+ mutex_unlock(&buffer->lock);
+ return ret;
+}
+
+static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+ struct ion_dma_buf_attachment *a;
+
+ if (buffer->heap->ops->map_kernel) {
+ mutex_lock(&buffer->lock);
+ ion_buffer_kmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+ }
+
+ mutex_lock(&buffer->lock);
+ list_for_each_entry(a, &buffer->attachments, list) {
+ dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
+ direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static const struct dma_buf_ops dma_buf_ops = {
+ .map_dma_buf = ion_dma_buf_map,
+ .unmap_dma_buf = ion_dma_buf_unmap,
+ .mmap = ion_dma_buf_mmap,
+ .release = ion_dma_buf_release,
+ .attach = ion_dma_buf_attach,
+ .detach = ion_dma_buf_detach,
+ .begin_cpu_access = ion_dma_buf_begin_cpu_access,
+ .end_cpu_access = ion_dma_buf_end_cpu_access,
+ .map = ion_dma_buf_kmap,
+};
+
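+/*
+ * Allocate an ion buffer and export it as a dma_buf; the buffer is
+ * destroyed again if the export fails.
+ */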
+struct dma_buf *ion_dmabuf_alloc(struct ion_device *dev, size_t len,
+ unsigned int heap_id_mask,
+ unsigned int flags)
+{
+ struct ion_buffer *buffer;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct dma_buf *dmabuf;
+
+ pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
+ len, heap_id_mask, flags);
+
+ buffer = ion_buffer_alloc(dev, len, heap_id_mask, flags);
+ if (IS_ERR(buffer))
+ return ERR_CAST(buffer);
+
+ exp_info.ops = &dma_buf_ops;
+ exp_info.size = buffer->size;
+ exp_info.flags = O_RDWR;
+ exp_info.priv = buffer;
+
+ dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dmabuf))
+ ion_buffer_destroy(dev, buffer);
+
+ return dmabuf;
+}
diff --git a/drivers/staging/android/ion/ion_private.h b/drivers/staging/android/ion/ion_private.h
index e80692c885b4..b3c8b55788db 100644
--- a/drivers/staging/android/ion/ion_private.h
+++ b/drivers/staging/android/ion/ion_private.h
@@ -9,6 +9,7 @@
#define _ION_PRIVATE_H
#include <linux/dcache.h>
+#include <linux/dma-buf.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/plist.h>
@@ -40,8 +41,16 @@ extern struct ion_buffer *ion_buffer_alloc(struct ion_device *dev, size_t len,
unsigned int heap_id_mask,
unsigned int flags);
extern void ion_buffer_release(struct ion_buffer *buffer);
-extern void ion_buffer_destroy(struct ion_device *dev, struct ion_buffer *buffer);
+extern int ion_buffer_destroy(struct ion_device *dev,
+ struct ion_buffer *buffer);
extern void *ion_buffer_kmap_get(struct ion_buffer *buffer);
extern void ion_buffer_kmap_put(struct ion_buffer *buffer);
+/* ion dmabuf allocator */
+extern struct dma_buf *ion_dmabuf_alloc(struct ion_device *dev, size_t len,
+ unsigned int heap_id_mask,
+ unsigned int flags);
+extern int ion_free(struct ion_buffer *buffer);
+
#endif /* _ION_PRIVATE_H */
diff --git a/include/linux/ion.h b/include/linux/ion.h
index 7bec77d98224..66a694b875ca 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -10,8 +10,18 @@
#include <linux/err.h>
#ifdef CONFIG_ION
-/*
- * Allocates an ION buffer.
+
+/**
+ * ion_alloc - Allocates an ion buffer of given size from given heap
+ *
+ * @len: size of the buffer to be allocated.
+ * @heap_id_mask: a bitwise mask of heap ids to allocate from
+ * @flags: ION_FLAG_* flags for the new buffer.
+ *
+ * The function allocates a buffer, exports it as a dma_buf object and
+ * returns that object to the caller, so the buffer is immediately
+ * ready for use by other drivers. Returns an ERR_PTR on failure.
*/
struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
unsigned int flags);
@@ -22,5 +32,7 @@ static inline struct dma_buf *ion_alloc(size_t len, unsigned int heap_id_mask,
{
return ERR_PTR(-ENOMEM);
}
+
#endif /* CONFIG_ION */
#endif /* _ION_KERNEL_H */