ANDROID: staging: ion: move buffer kmap from begin/end_cpu_access()

Since dma_buf_begin/end_cpu_access() calls always used to bracket
dma_buf_kmap/kunmap() calls, ION mapped and unmapped the buffer for
the kernel during dma_buf_begin/end_cpu_access() and cached the
result with a kmap counter.

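For context, ion_buffer_kmap_get()/ion_buffer_kmap_put() behave
roughly like the following simplified sketch (the actual source also
handles NULL vaddr and adds WARN checks):

	static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
	{
		void *vaddr;

		/* Reuse the cached mapping and bump the counter. */
		if (buffer->kmap_cnt) {
			buffer->kmap_cnt++;
			return buffer->vaddr;
		}

		/* First user: create the kernel mapping via the heap. */
		vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
		if (IS_ERR(vaddr))
			return vaddr;

		buffer->vaddr = vaddr;
		buffer->kmap_cnt++;
		return vaddr;
	}

	static void ion_buffer_kmap_put(struct ion_buffer *buffer)
	{
		/* The last user tears the mapping down. */
		if (!--buffer->kmap_cnt) {
			buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
			buffer->vaddr = NULL;
		}
	}
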
However, dma_buf_begin/end_cpu_access() can also be triggered from
userspace via the DMA_BUF_IOCTL_SYNC ioctl. This means that a mapping
created by a device driver through a dma_buf_kmap() or
ion_map_kernel() call could be unmapped from userspace: if a client
accidentally (or maliciously) invoked the DMA_BUF_IOCTL_SYNC ioctl
with the DMA_BUF_SYNC_END argument, it would in turn invoke
dma_buf_end_cpu_access(), which decrements the kmap counter and calls
kunmap() when the counter reaches 0.

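For illustration, reaching the problematic path from userspace takes
nothing more than the following (minimal sketch; 'fd' stands for any
dma-buf file descriptor the client happens to hold, and
drop_one_kmap_ref() is a hypothetical helper name):

	#include <linux/dma-buf.h>
	#include <sys/ioctl.h>

	static void drop_one_kmap_ref(int fd)
	{
		struct dma_buf_sync sync = {
			.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW,
		};

		/*
		 * Ends up in dma_buf_end_cpu_access(): before this patch
		 * that decremented the ION kmap counter and kunmap()ed
		 * the buffer once the counter hit 0, even if a driver
		 * still relied on the mapping.
		 */
		ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync);
	}
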
To prevent this, move the kmap/kunmap operations from the
begin/end_cpu_access() DMA-BUF ops to the map/unmap DMA-BUF ops.

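After the move, an in-kernel user's mapping is pinned by its own kmap
reference, so userspace SYNC ioctls no longer touch it; hypothetical
driver-side usage:

	/* Takes a kmap reference on page 0 of the buffer. */
	void *vaddr = dma_buf_kmap(dmabuf, 0);

	if (!IS_ERR_OR_NULL(vaddr)) {
		/* ... access the page ... */
		dma_buf_kunmap(dmabuf, 0, vaddr); /* drops the reference */
	}
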
Bug: 187527909
Change-Id: I00dc8eefefb1f3aab99e770f90d624011f7740f0
[hridya: minor conflicts during cherry-picking]
Signed-off-by: Hridya Valsaraju <hridya@google.com>

diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 3af862c..86729db 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -969,42 +969,45 @@
static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
struct ion_buffer *buffer = dmabuf->priv;
-
- return buffer->vaddr + offset * PAGE_SIZE;
-}
-
-static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
- void *ptr)
-{
-}
-
-static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
- enum dma_data_direction direction)
-{
- struct ion_buffer *buffer = dmabuf->priv;
void *vaddr;

if (!buffer->heap->ops->map_kernel) {
pr_err("%s: map kernel is not implemented by this heap.\n",
__func__);
- return -ENODEV;
+ return ERR_PTR(-ENOTTY);
}
-
mutex_lock(&buffer->lock);
vaddr = ion_buffer_kmap_get(buffer);
mutex_unlock(&buffer->lock);
- return PTR_ERR_OR_ZERO(vaddr);
+
+ if (IS_ERR(vaddr))
+ return vaddr;
+
+ return vaddr + offset * PAGE_SIZE;
+}
+
+static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
+ void *ptr)
+{
+ struct ion_buffer *buffer = dmabuf->priv;
+
+ if (buffer->heap->ops->map_kernel) {
+ mutex_lock(&buffer->lock);
+ ion_buffer_kmap_put(buffer);
+ mutex_unlock(&buffer->lock);
+ }
+
+}
+
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ return 0;
}

static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
enum dma_data_direction direction)
{
- struct ion_buffer *buffer = dmabuf->priv;
-
- mutex_lock(&buffer->lock);
- ion_buffer_kmap_put(buffer);
- mutex_unlock(&buffer->lock);
-
return 0;
}