Merge changes I01907c10,I25b71054,I61d607a4,I6ff1f602,If252350d, ...
* changes:
libdmabufheap: Add a C test for libdmabufheap
libdmabufheap: Add a C wrapper for BufferAllocator class
libdmabufheap: Add some tests for CpuSyncStart() and CpuSyncEnd()
libdmabufheap: Implement CpuSyncStart() and CpuSyncEnd().
libdmabufheap: Add some tests for Alloc()
libdmabufheap: Add Alloc()
libdmabufheap: Add IonAlloc()
libdmabufheap: Add MapNameToIonHeap()
diff --git a/Android.bp b/Android.bp
index 678cecf..ffe877a 100644
--- a/Android.bp
+++ b/Android.bp
@@ -17,6 +17,7 @@
cc_library {
srcs: [
"BufferAllocator.cpp",
+ "BufferAllocatorWrapper.cpp",
],
name: "libdmabufheap",
cflags: [
diff --git a/BufferAllocator.cpp b/BufferAllocator.cpp
index b2933fc..5f9abc6 100644
--- a/BufferAllocator.cpp
+++ b/BufferAllocator.cpp
@@ -21,6 +21,7 @@
#include <errno.h>
#include <fcntl.h>
#include <ion/ion.h>
+#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/ion_4.12.h>
#include <stdlib.h>
@@ -34,6 +35,14 @@
static constexpr char kDmaHeapRoot[] = "/dev/dma_heap/";
static constexpr char kIonDevice[] = "/dev/ion";
+static constexpr char kIonSystemHeapName[] = "ion_system_heap";
+
+void BufferAllocator::LogInterface(const std::string& interface) {
+ if (!logged_interface_) {
+ LOG(INFO) << "Using : " << interface;
+ logged_interface_ = true;
+ }
+}
int BufferAllocator::GetDmabufHeapFd(const std::string& heap_name) {
/* check if we have this dmabuf heap open and if so return the fd for it. */
@@ -62,7 +71,8 @@
void BufferAllocator::QueryIonHeaps() {
uses_legacy_ion_iface_ = ion_is_legacy(ion_fd_);
if (uses_legacy_ion_iface_) {
- LOG(INFO) << "Using legacy ION heaps";
+ LogInterface("Legacy ion heaps");
+ MapNameToIonMask(kDmabufSystemHeapName, ION_HEAP_SYSTEM_MASK);
return;
}
@@ -76,7 +86,13 @@
// Abort if heap query fails
CHECK(ret == 0)
<< "Non-legacy ION implementation must support heap information queries";
- LOG(INFO) << "Using non-legacy ION heaps";
+ LogInterface("Non-legacy ION heaps");
+
+ /*
+ * No error checking here, it is possible that devices may have used another name for
+ * the ion system heap.
+ */
+ MapNameToIonName(kDmabufSystemHeapName, kIonSystemHeapName);
}
BufferAllocator::BufferAllocator() {
@@ -90,6 +106,195 @@
CHECK(ion_fd_ >= 0) << "Either dmabuf heaps or ion must be supported";
QueryIonHeaps();
} else {
- LOG(INFO) << "Using DMABUF Heaps";
+ LogInterface("DMABUF Heaps");
}
}
+
+int BufferAllocator::MapNameToIonMask(const std::string& heap_name, unsigned int ion_heap_mask,
+ unsigned int ion_heap_flags) {
+ if (!ion_heap_mask)
+ return -EINVAL;
+ IonHeapConfig heap_config = { ion_heap_mask, ion_heap_flags };
+ heap_name_to_config_[heap_name] = heap_config;
+ return 0;
+}
+
+int BufferAllocator::GetIonHeapIdByName(const std::string& heap_name, unsigned int* heap_id) {
+ for (auto& it : ion_heap_info_) {
+ if (heap_name == it.name) {
+ *heap_id = it.heap_id;
+ return 0;
+ }
+ }
+
+ LOG(ERROR) << "No ion heap of name " << heap_name << " exists";
+ return -EINVAL;
+}
+
+int BufferAllocator::MapNameToIonName(const std::string& heap_name,
+ const std::string& ion_heap_name,
+ unsigned int ion_heap_flags) {
+ unsigned int ion_heap_id = 0;
+ auto ret = GetIonHeapIdByName(ion_heap_name, &ion_heap_id);
+ if (ret < 0)
+ return ret;
+
+ unsigned int ion_heap_mask = 1 << ion_heap_id;
+ IonHeapConfig heap_config = { ion_heap_mask, ion_heap_flags };
+ heap_name_to_config_[heap_name] = heap_config;
+
+ return 0;
+}
+
+int BufferAllocator::MapNameToIonHeap(const std::string& heap_name,
+ const std::string& ion_heap_name,
+ unsigned int ion_heap_flags,
+ unsigned int legacy_ion_heap_mask,
+ unsigned int legacy_ion_heap_flags) {
+ int ret = 0;
+
+ if (uses_legacy_ion_iface_) {
+ ret = MapNameToIonMask(heap_name, legacy_ion_heap_mask, legacy_ion_heap_flags);
+ } else if (!DmabufHeapsSupported() && !ion_heap_name.empty()) {
+ ret = MapNameToIonName(heap_name, ion_heap_name, ion_heap_flags);
+ }
+
+ return ret;
+}
+
+int BufferAllocator::GetIonConfig(const std::string& heap_name, IonHeapConfig& heap_config) {
+ int ret = 0;
+ auto it = heap_name_to_config_.find(heap_name);
+ if (it != heap_name_to_config_.end()) {
+ heap_config = it->second;
+ } else {
+ if (uses_legacy_ion_iface_) {
+ ret = -EINVAL;
+ } else {
+ unsigned int heap_id;
+ ret = GetIonHeapIdByName(heap_name, &heap_id);
+ if (ret == 0) {
+ heap_config.mask = 1 << heap_id;
+ heap_config.flags = 0;
+ /* save it so that this lookup does not need to happen again */
+ heap_name_to_config_[heap_name] = heap_config;
+ }
+ }
+ }
+
+ if (ret)
+ LOG(ERROR) << "No ion heap of name " << heap_name << " exists";
+ return ret;
+}
+
+int BufferAllocator::DmabufAlloc(const std::string& heap_name, size_t len) {
+ int fd = OpenDmabufHeap(heap_name);
+ if (fd < 0) {
+ LOG(ERROR) << "Unsupported dmabuf heap: " << heap_name << " error: " << fd;
+ return fd;
+ }
+
+ struct dma_heap_allocation_data heap_data{
+ .len = len, // length of data to be allocated in bytes
+ .fd_flags = O_RDWR | O_CLOEXEC, // permissions for the memory to be allocated
+ };
+
+ auto ret = TEMP_FAILURE_RETRY(ioctl(fd, DMA_HEAP_IOCTL_ALLOC, &heap_data));
+ if (ret < 0)
+ return ret;
+
+ return heap_data.fd;
+}
+
+int BufferAllocator::IonAlloc(const std::string& heap_name, size_t len, unsigned int heap_flags) {
+ IonHeapConfig heap_config;
+ auto ret = GetIonConfig(heap_name, heap_config);
+ if (ret)
+ return ret;
+
+ int alloc_fd = -1;
+ unsigned int flags = heap_config.flags | heap_flags;
+ ret = ion_alloc_fd(ion_fd_, len, 0, heap_config.mask, flags, &alloc_fd);
+ if (ret) {
+ PLOG(ERROR) << "allocation fails for ion heap with mask: " << heap_config.mask
+ << " and flags: " << flags;
+ return ret;
+ }
+ return alloc_fd;
+}
+
+int BufferAllocator::Alloc(const std::string& heap_name, size_t len, unsigned int heap_flags) {
+ if (DmabufHeapsSupported()) {
+ return DmabufAlloc(heap_name, len);
+ }
+
+ return IonAlloc(heap_name, len, heap_flags);
+}
+
+int BufferAllocator::LegacyIonCpuSync(unsigned int dmabuf_fd,
+ const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom) {
+ if (!legacy_ion_cpu_sync_custom)
+ return ion_sync_fd(ion_fd_, dmabuf_fd);
+
+ // dup ion_fd_ so that we retain its ownership.
+ int new_ion_fd = TEMP_FAILURE_RETRY(dup(ion_fd_.get()));
+ if (new_ion_fd < 0) {
+ PLOG(ERROR) << "Unable to dup ion fd. error: " << new_ion_fd;
+ return new_ion_fd;
+ }
+
+ int ret = legacy_ion_cpu_sync_custom(new_ion_fd);
+
+ close(new_ion_fd);
+ return ret;
+}
+
+int BufferAllocator::DoSync(unsigned int dmabuf_fd, bool start, SyncType sync_type,
+ const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom) {
+ if (uses_legacy_ion_iface_) {
+ return LegacyIonCpuSync(dmabuf_fd, legacy_ion_cpu_sync_custom);
+ }
+
+ struct dma_buf_sync sync = {
+ .flags = (start ? DMA_BUF_SYNC_START : DMA_BUF_SYNC_END) |
+ static_cast<uint64_t>(sync_type),
+ };
+ return TEMP_FAILURE_RETRY(ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync));
+}
+
+int BufferAllocator::CpuSyncStart(unsigned int dmabuf_fd, SyncType sync_type,
+ const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom) {
+ auto it = fd_last_sync_type_.find(dmabuf_fd);
+ if (it != fd_last_sync_type_.end()) {
+ LOG(ERROR) << "CpuSyncEnd needs to be invoked for this fd first";
+ return -EINVAL;
+ }
+
+ int ret = DoSync(dmabuf_fd, true /* start */, sync_type, legacy_ion_cpu_sync_custom);
+
+ if (ret) {
+ PLOG(ERROR) << "CpuSyncStart() failure";
+ } else {
+ fd_last_sync_type_[dmabuf_fd] = sync_type;
+ }
+ return ret;
+}
+
+int BufferAllocator::CpuSyncEnd(unsigned int dmabuf_fd,
+ const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom) {
+ auto it = fd_last_sync_type_.find(dmabuf_fd);
+ if (it == fd_last_sync_type_.end()) {
+ LOG(ERROR) << "CpuSyncStart() must be called before CpuSyncEnd()";
+ return -EINVAL;
+ }
+
+ int ret = DoSync(dmabuf_fd, false /* start */, it->second /* sync_type */,
+ legacy_ion_cpu_sync_custom);
+ if (ret) {
+ PLOG(ERROR) << "CpuSyncEnd() failure";
+ } else {
+ fd_last_sync_type_.erase(it);
+ }
+
+ return ret;
+}
diff --git a/BufferAllocatorWrapper.cpp b/BufferAllocatorWrapper.cpp
new file mode 100644
index 0000000..5e0bc61
--- /dev/null
+++ b/BufferAllocatorWrapper.cpp
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <BufferAllocator/BufferAllocator.h>
+#include <BufferAllocator/BufferAllocatorWrapper.h>
+#include <errno.h>
+#include <sys/types.h>
+
+extern "C" {
+
+BufferAllocator* CreateDmabufHeapBufferAllocator() {
+ return new BufferAllocator();
+}
+
+void FreeDmabufHeapBufferAllocator(BufferAllocator* buffer_allocator) {
+ delete buffer_allocator;
+};
+
+int DmabufHeapAlloc(BufferAllocator* buffer_allocator, const char* heap_name, size_t len,
+ unsigned int heap_flags) {
+ if (!buffer_allocator)
+ return -EINVAL;
+ return buffer_allocator->Alloc(heap_name, len, heap_flags);
+}
+
+int MapDmabufHeapNameToIonHeap(BufferAllocator* buffer_allocator, const char* heap_name,
+ const char* ion_heap_name, unsigned int ion_heap_flags,
+ unsigned int legacy_ion_heap_mask,
+ unsigned int legacy_ion_heap_flags) {
+ if (!buffer_allocator)
+ return -EINVAL;
+ return buffer_allocator->MapNameToIonHeap(heap_name, ion_heap_name, ion_heap_flags,
+ legacy_ion_heap_mask, legacy_ion_heap_flags);
+}
+
+int DmabufHeapCpuSyncStart(BufferAllocator* buffer_allocator, unsigned int dmabuf_fd,
+ SyncType sync_type, int (*legacy_ion_cpu_sync)(int)) {
+ if (!buffer_allocator)
+ return -EINVAL;
+ return buffer_allocator->CpuSyncStart(dmabuf_fd, sync_type, legacy_ion_cpu_sync);
+}
+
+int DmabufHeapCpuSyncEnd(BufferAllocator* buffer_allocator, unsigned int dmabuf_fd,
+ int (*legacy_ion_cpu_sync)(int)) {
+ if (!buffer_allocator)
+ return -EINVAL;
+ return buffer_allocator->CpuSyncEnd(dmabuf_fd, legacy_ion_cpu_sync);
+}
+}
diff --git a/include/BufferAllocator/BufferAllocator.h b/include/BufferAllocator/BufferAllocator.h
index 5c27ec8..e63acf4 100644
--- a/include/BufferAllocator/BufferAllocator.h
+++ b/include/BufferAllocator/BufferAllocator.h
@@ -16,19 +16,22 @@
#pragma once
+#include <BufferAllocator/dmabufheap-defs.h>
+
+#include <android-base/unique_fd.h>
#include <linux/ion_4.12.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
+#include <cstdint>
#include <string>
#include <unordered_map>
#include <vector>
-#include <android-base/unique_fd.h>
-class BufferAllocator{
+class BufferAllocator {
public:
BufferAllocator();
~BufferAllocator() {}
@@ -37,10 +40,116 @@
BufferAllocator(const BufferAllocator&) = delete;
BufferAllocator& operator=(const BufferAllocator&) = delete;
+ /**
+ * Maps a dmabuf heap to an equivalent ion heap configuration. This method is required since
+ * dmabuf heaps do not support heap flags. This means that a single ion heap may encompass the
+ * functionality of multiple dmabuf heaps by using heap flags. This method will check the
+ * interface being used and only create the required mappings. For example,
+ * if the interface being used is dmabuf heaps, the method will not do
+ * anything. If the interface being used is non-legacy ion, the mapping from
+ * dmabuf heap name to non-legacy ion heap name will be created and the
+ * legacy parameters will be ignored.
+ * The method can be deprecated once all devices have
+ * migrated to dmabuf heaps from ion. Returns an error code when the
+ * interface used is non-legacy ion and the @ion_heap_name parameter is
+ * invalid or if the interface used is legacy ion and @legacy_ion_heap_mask
+ * is invalid (0).
+ * @heap_name: dmabuf heap name.
+ * @ion_heap_name: name of the equivalent ion heap.
+ * @ion_heap_flags: flags to be passed to the ion heap @ion_heap_name for it to function
+ * equivalently to the dmabuf heap @heap_name.
+ * @legacy_ion_heap_mask: heap mask for the equivalent legacy ion heap.
+ * @legacy_ion_heap_flags: flags to be passed to the legacy ion heap for it
+ * to function equivalently to dmabuf heap @heap_name.
+ */
+ int MapNameToIonHeap(const std::string& heap_name, const std::string& ion_heap_name,
+ unsigned int ion_heap_flags = 0, unsigned int legacy_ion_heap_mask = 0,
+ unsigned int legacy_ion_heap_flags = 0);
+
+ /**
+ * Returns a dmabuf fd if the allocation in one of the specified heaps is successful and
+ * an error code otherwise. If dmabuf heaps are supported, tries to allocate in the
+ * specified dmabuf heap. If dmabuf heaps are not supported and if ion_fd is a valid fd,
+ * go through saved heap data to find a heap ID/mask to match the specified heap names and
+ * allocate memory as per the specified parameters. For vendor defined heaps with a legacy
+ * ION interface(no heap query support), MapNameToIonMask() must be called prior to invocation
+ * of Alloc() to map a heap name to an equivalent heap mask and heap flag configuration.
+ * @heap_name: name of the heap to allocate in.
+ * @len: size of the allocation.
+ * @heap_flags: flags passed to heap.
+ */
+ int Alloc(const std::string& heap_name, size_t len, unsigned int heap_flags = 0);
+
+ /**
+ * Optional custom callback for legacy ion implementation that can be specified as a
+ * parameter to CpuSyncStart() and CpuSyncEnd(). It takes an fd to /dev/ion
+ * as its argument. The callback MUST NOT assume ownership of the fd.
+ * The fd will be closed once the callback returns.
+ * If specified, the callback will be used for syncing a shared dmabuf fd with
+ * memory(instead of ion_sync_fd()). It will be invoked with a dup of
+ * ion_fd_ as its argument. Return 0 on success and error code otherwise
+ * which will become the return value for CpuSyncStart() and CpuSyncEnd().
+ */
+ typedef std::function<int(int)> CustomCpuSyncLegacyIon;
+
+ /**
+ * Must be invoked before CPU access of the allocated memory.
+ * For a legacy ion interface, syncs a shared dmabuf fd with memory either using
+ * ION_IOC_SYNC ioctl or using callback @legacy_ion_cpu_sync if specified. For
+ * non-legacy ION and dmabuf heap interfaces, DMA_BUF_IOCTL_SYNC is used.
+ * @fd: dmabuf fd
+ * @sync_type: specifies if the sync is for read, write or read/write.
+ * @legacy_ion_cpu_sync: optional callback for legacy ion interfaces. If
+ * specified, will be invoked instead of ion_sync_fd()
+ * to sync dmabuf_fd with memory. The parameter will be ignored if the interface being
+ * used is not legacy ion.
+ *
+ * Returns 0 on success and an error code otherwise.
+ */
+ int CpuSyncStart(unsigned int dmabuf_fd, SyncType sync_type = kSyncRead,
+ const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync = nullptr);
+
+ /**
+ * Must be invoked once CPU is done accessing the allocated memory.
+ * For a legacy ion interface, syncs a shared dmabuf fd with memory using
+ * either ION_IOC_SYNC ioctl or using callback @legacy_ion_cpu_sync if
+ * specified. For non-legacy ION and dmabuf heap interfaces,
+ * DMA_BUF_IOCTL_SYNC is used. The type of sync (read, write or rw) done will
+ * be the same as the type with which CpuSyncStart() was invoked.
+ * @fd: dmabuf fd
+ * @legacy_ion_cpu_sync: optional callback for legacy ion interfaces. If
+ * specified, will be invoked instead of ion_sync_fd with a dup of ion_fd_ as its
+ * argument. The parameter will be ignored if the interface being used is
+ * not legacy ion.
+ *
+ * Returns 0 on success and an error code otherwise.
+ */
+ int CpuSyncEnd(unsigned int dmabuf_fd,
+ const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync = nullptr);
+
private:
int OpenDmabufHeap(const std::string& name);
void QueryIonHeaps();
int GetDmabufHeapFd(const std::string& name);
+ bool DmabufHeapsSupported() { return !dmabuf_heap_fds_.empty(); }
+ int GetIonHeapIdByName(const std::string& heap_name, unsigned int* heap_id);
+ int MapNameToIonMask(const std::string& heap_name, unsigned int ion_heap_mask,
+ unsigned int ion_heap_flags = 0);
+ int MapNameToIonName(const std::string& heap_name, const std::string& ion_heap_name,
+ unsigned int ion_heap_flags = 0);
+ void LogInterface(const std::string& interface);
+ int IonAlloc(const std::string& heap_name, size_t len, unsigned int heap_flags = 0);
+ int DmabufAlloc(const std::string& heap_name, size_t len);
+
+ struct IonHeapConfig {
+ unsigned int mask;
+ unsigned int flags;
+ };
+ int GetIonConfig(const std::string& heap_name, IonHeapConfig& heap_config);
+ int LegacyIonCpuSync(unsigned int fd, const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom);
+ int DmabufFdSync(unsigned int dmabuf_fd, bool start, SyncType sync_type);
+ int DoSync(unsigned int dmabuf_fd, bool start, SyncType sync_type,
+ const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom);
/* Stores all open dmabuf_heap handles. */
std::unordered_map<std::string, android::base::unique_fd> dmabuf_heap_fds_;
@@ -61,4 +170,12 @@
*/
bool uses_legacy_ion_iface_ = false;
std::vector<struct ion_heap_data> ion_heap_info_;
+ inline static bool logged_interface_ = false;
+ /* stores a map of dmabuf heap names to equivalent ion heap configurations. */
+ std::unordered_map<std::string, struct IonHeapConfig> heap_name_to_config_;
+ /**
+ * stores a map of dmabuf fds to the type of their last known CpuSyncStart()
+ * call. The entry will be cleared when CpuSyncEnd() is invoked.
+ */
+ std::unordered_map<int, SyncType> fd_last_sync_type_;
};
diff --git a/include/BufferAllocator/BufferAllocatorWrapper.h b/include/BufferAllocator/BufferAllocatorWrapper.h
new file mode 100644
index 0000000..ea442cf
--- /dev/null
+++ b/include/BufferAllocator/BufferAllocatorWrapper.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef BUFFER_ALLOCATOR_H_
+#define BUFFER_ALLOCATOR_H_
+
+#include <BufferAllocator/dmabufheap-defs.h>
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#else
+typedef struct BufferAllocator BufferAllocator;
+#endif
+
+BufferAllocator* CreateDmabufHeapBufferAllocator();
+
+void FreeDmabufHeapBufferAllocator(BufferAllocator* buffer_allocator);
+
+int DmabufHeapAlloc(BufferAllocator* buffer_allocator, const char* heap_name, size_t len,
+ unsigned int heap_flags);
+
+int MapDmabufHeapNameToIonHeap(BufferAllocator* buffer_allocator, const char* heap_name,
+ const char* ion_heap_name, unsigned int ion_heap_flags,
+ unsigned int legacy_ion_heap_mask, unsigned legacy_ion_heap_flags);
+
+int DmabufHeapCpuSyncStart(BufferAllocator* buffer_allocator, unsigned int dmabuf_fd,
+ SyncType sync_type, int (*legacy_ion_cpu_sync)(int));
+
+int DmabufHeapCpuSyncEnd(BufferAllocator* buffer_allocator, unsigned int dmabuf_fd,
+ int (*legacy_ion_cpu_sync)(int));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/include/BufferAllocator/dmabufheap-defs.h b/include/BufferAllocator/dmabufheap-defs.h
new file mode 100644
index 0000000..7b52598
--- /dev/null
+++ b/include/BufferAllocator/dmabufheap-defs.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2020 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef DMABUFHEAP_DEF_H_
+#define DMABUFHEAP_DEF_H_
+
+#include <linux/dma-buf.h>
+
+static const char kDmabufSystemHeapName[] = "system";
+
+typedef enum {
+ kSyncRead = DMA_BUF_SYNC_READ,
+ kSyncWrite = DMA_BUF_SYNC_WRITE,
+ kSyncReadWrite = DMA_BUF_SYNC_RW,
+} SyncType;
+
+#endif
diff --git a/tests/Android.bp b/tests/Android.bp
index ecc6466..01141e7 100644
--- a/tests/Android.bp
+++ b/tests/Android.bp
@@ -21,12 +21,28 @@
"-Werror",
],
static_libs: [
+ "libbase",
"libdmabufheap",
"libion",
"liblog",
- "libbase"
],
srcs: [
"dmabuf_heap_test.cpp",
],
}
+
+cc_test {
+ name: "dmabufheaptest",
+ srcs: ["dmabuf_heap_test.c"],
+ static_libs: [
+ "libion",
+ "libdmabufheap",
+ "liblog",
+ "libbase"
+ ],
+ cflags: [
+ "-Werror",
+ "-Wall",
+ "-Wextra",
+ ],
+}
diff --git a/tests/dmabuf_heap_test.c b/tests/dmabuf_heap_test.c
new file mode 100644
index 0000000..aac811e
--- /dev/null
+++ b/tests/dmabuf_heap_test.c
@@ -0,0 +1,119 @@
+/*
+ * Copyright 2020 Google, Inc
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <BufferAllocator/BufferAllocatorWrapper.h>
+#include <errno.h>
+#include <ion/ion.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+int legacy_ion_custom_callback(int ion_fd) {
+ int ret = 0;
+ if (!ion_is_legacy(ion_fd)) {
+ perror("error in legacy ion custom callback");
+ ret = errno;
+ } else {
+ printf("in custom legacy ion cpu sync callback\n");
+ }
+
+ return ret;
+}
+
+void libdmabufheaptest(bool use_custom_callback) {
+ const size_t len = 1024 * 1024;
+ int fd = -1, ret = 0;
+ size_t i = 0;
+ unsigned char* ptr = NULL;
+
+ BufferAllocator* bufferAllocator = CreateDmabufHeapBufferAllocator();
+ if (!bufferAllocator) {
+ printf("unable to get allocator\n");
+ return;
+ }
+
+ /*
+ * Legacy ion devices may have hardcoded heap IDs that do not
+ * match the ion UAPI header. Map heap name 'system' to a heap mask
+ * of all 1s so that these devices will allocate from the first
+ * available heap when asked to allocate from a heap of name 'system'.
+ */
+ ret = MapDmabufHeapNameToIonHeap(bufferAllocator, kDmabufSystemHeapName,
+ "" /* no mapping for non-legacy */,
+ 0 /* no mapping for non-legacy ion */,
+ ~0 /* legacy ion heap mask */, 0 /* legacy ion heap flag */);
+ if (ret < 0) {
+ printf("MapDmabufHeapNameToIonHeap failed: %d\n", ret);
+ return;
+ }
+
+ fd = DmabufHeapAlloc(bufferAllocator, kDmabufSystemHeapName, len, 0);
+ if (fd < 0) {
+ printf("Alloc failed: %d\n", fd);
+ return;
+ }
+
+ ptr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if (ptr == MAP_FAILED) {
+ perror("mmap failed\n");
+ return;
+ }
+
+ ret = DmabufHeapCpuSyncStart(bufferAllocator, fd, kSyncReadWrite,
+ use_custom_callback ? legacy_ion_custom_callback : NULL);
+ if (ret) {
+ printf("DmabufHeapCpuSyncStart failed: %d\n", ret);
+ return;
+ }
+
+ for (i = 0; i < len; i++) {
+ ptr[i] = (unsigned char)i;
+ }
+ for (i = 0; i < len; i++) {
+ if (ptr[i] != (unsigned char)i) {
+ printf("%s failed wrote %zu read %d from mapped "
+ "memory\n",
+ __func__, i, ptr[i]);
+ return;
+ }
+ }
+
+ ret = DmabufHeapCpuSyncEnd(bufferAllocator, fd,
+ use_custom_callback ? legacy_ion_custom_callback : NULL);
+ if (ret) {
+ printf("DmabufHeapCpuSyncEnd failed: %d\n", ret);
+ return;
+ }
+
+ munmap(ptr, len);
+ close(fd);
+
+ FreeDmabufHeapBufferAllocator(bufferAllocator);
+ printf("PASSED\n");
+}
+
+int main(int argc, char* argv[]) {
+ (void)argc;
+ (void)argv;
+ printf("*****running with custom legacy ion cpu sync callback****\n");
+ libdmabufheaptest(true);
+ printf("****running without custom legacy ion cpu sync callback****\n");
+ libdmabufheaptest(false);
+ return 0;
+}
diff --git a/tests/dmabuf_heap_test.cpp b/tests/dmabuf_heap_test.cpp
index 813a3c9..8dd330d 100644
--- a/tests/dmabuf_heap_test.cpp
+++ b/tests/dmabuf_heap_test.cpp
@@ -17,10 +17,238 @@
#include <BufferAllocator/BufferAllocator.h>
#include "dmabuf_heap_test.h"
+#include <ion/ion.h>
+#include <sys/mman.h>
+
#include <gtest/gtest.h>
-DmaBufHeapTest::DmaBufHeapTest() : allocator(new BufferAllocator()) {}
+#include <android-base/logging.h>
+#include <android-base/unique_fd.h>
-TEST_F(DmaBufHeapTest, TestSetup) {
- /* No public APIs to test yet */
+DmaBufHeapTest::DmaBufHeapTest() : allocator(new BufferAllocator()) {
+ /*
+ * Legacy ion devices may have hardcoded heap IDs that do not
+ * match the ion UAPI header. Map heap name 'system' to a heap mask
+ * of all 1s so that these devices will allocate from the first
+ * available heap when asked to allocate from a heap of name 'system'.
+ */
+ allocator->MapNameToIonHeap(kDmabufSystemHeapName, "" /* no mapping for non-legacy */,
+ 0 /* no mapping for non-legacy ion */,
+ ~0 /* legacy ion heap mask */);
+}
+
+TEST_F(DmaBufHeapTest, Allocate) {
+ static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
+ for (size_t size : allocationSizes) {
+ SCOPED_TRACE(::testing::Message()
+ << "heap: " << kDmabufSystemHeapName << " size: " << size);
+ int fd = allocator->Alloc(kDmabufSystemHeapName, size);
+ ASSERT_GE(fd, 0);
+ ASSERT_EQ(close(fd), 0); // free the buffer
+ }
+}
+
+TEST_F(DmaBufHeapTest, AllocateCached) {
+ static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
+ for (size_t size : allocationSizes) {
+ SCOPED_TRACE(::testing::Message()
+ << "heap: " << kDmabufSystemHeapName << " size: " << size);
+ int fd = allocator->Alloc(kDmabufSystemHeapName, size, ION_FLAG_CACHED
+ /* ion heap flags will be ignored if using dmabuf heaps */);
+ ASSERT_GE(fd, 0);
+ ASSERT_EQ(close(fd), 0); // free the buffer
+ }
+}
+
+TEST_F(DmaBufHeapTest, AllocateCachedNeedsSync) {
+ static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
+ for (size_t size : allocationSizes) {
+ SCOPED_TRACE(::testing::Message()
+ << "heap: " << kDmabufSystemHeapName << " size: " << size);
+ int fd = allocator->Alloc(kDmabufSystemHeapName, size, ION_FLAG_CACHED_NEEDS_SYNC
+ /* ion heap flags will be ignored if using dmabuf heaps */);
+ ASSERT_GE(fd, 0);
+ ASSERT_EQ(close(fd), 0); // free the buffer
+ }
+}
+
+TEST_F(DmaBufHeapTest, RepeatedAllocate) {
+ static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
+ for (size_t size : allocationSizes) {
+ SCOPED_TRACE(::testing::Message()
+ << "heap: " << kDmabufSystemHeapName << " size: " << size);
+ for (unsigned int i = 0; i < 1024; i++) {
+ SCOPED_TRACE(::testing::Message() << "iteration " << i);
+ int fd = allocator->Alloc(kDmabufSystemHeapName, size);
+ ASSERT_GE(fd, 0);
+ ASSERT_EQ(close(fd), 0); // free the buffer
+ }
+ }
+}
+
+/*
+ * Make sure all heaps always return zeroed pages
+ */
+TEST_F(DmaBufHeapTest, Zeroed) {
+ static const size_t kAllocSizeInBytes = 4096;
+ static const size_t kNumFds = 16;
+
+ auto zeroes_ptr = std::make_unique<char[]>(kAllocSizeInBytes);
+ int fds[kNumFds];
+ int ret = 0, map_fd = -1;
+ for (unsigned int i = 0; i < kNumFds; i++) {
+ map_fd = allocator->Alloc(kDmabufSystemHeapName, kAllocSizeInBytes);
+ ASSERT_GE(map_fd, 0);
+
+ void* ptr = NULL;
+
+ ptr = mmap(NULL, kAllocSizeInBytes, PROT_WRITE, MAP_SHARED, map_fd, 0);
+ ASSERT_TRUE(ptr != NULL);
+
+ ret = allocator->CpuSyncStart(map_fd, kSyncWrite);
+ ASSERT_EQ(0, ret);
+
+ memset(ptr, 0xaa, kAllocSizeInBytes);
+
+ ret = allocator->CpuSyncEnd(map_fd);
+ ASSERT_EQ(0, ret);
+
+ ASSERT_EQ(0, munmap(ptr, kAllocSizeInBytes));
+ fds[i] = map_fd;
+ }
+
+ for (unsigned int i = 0; i < kNumFds; i++) {
+ ASSERT_EQ(0, close(fds[i]));
+ }
+
+ map_fd = allocator->Alloc(kDmabufSystemHeapName, kAllocSizeInBytes);
+ ASSERT_GE(map_fd, 0);
+
+ void* ptr = NULL;
+ ptr = mmap(NULL, kAllocSizeInBytes, PROT_READ, MAP_SHARED, map_fd, 0);
+ ASSERT_TRUE(ptr != NULL);
+
+ ret = allocator->CpuSyncStart(map_fd);
+ ASSERT_EQ(0, ret);
+
+ ASSERT_EQ(0, memcmp(ptr, zeroes_ptr.get(), kAllocSizeInBytes));
+
+ ret = allocator->CpuSyncEnd(map_fd);
+ ASSERT_EQ(0, ret);
+
+ ASSERT_EQ(0, munmap(ptr, kAllocSizeInBytes));
+ ASSERT_EQ(0, close(map_fd));
+}
+
+TEST_F(DmaBufHeapTest, TestCpuSync) {
+ static const size_t kAllocSizeInBytes = 4096;
+ auto vec_sync_type = {kSyncRead, kSyncWrite, kSyncReadWrite};
+ for (auto sync_type : vec_sync_type) {
+ int map_fd = allocator->Alloc(kDmabufSystemHeapName, kAllocSizeInBytes);
+ ASSERT_GE(map_fd, 0);
+
+ void* ptr;
+ ptr = mmap(NULL, kAllocSizeInBytes, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
+ ASSERT_TRUE(ptr != NULL);
+
+ int ret = allocator->CpuSyncStart(map_fd, sync_type);
+ ASSERT_EQ(0, ret);
+
+ ret = allocator->CpuSyncEnd(map_fd);
+ ASSERT_EQ(0, ret);
+
+ ASSERT_EQ(0, munmap(ptr, kAllocSizeInBytes));
+ ASSERT_EQ(0, close(map_fd));
+ }
+}
+
+TEST_F(DmaBufHeapTest, TestCpuSyncMismatched) {
+ static const size_t kAllocSizeInBytes = 4096;
+ auto vec_sync_type = {kSyncRead, kSyncWrite, kSyncReadWrite};
+ for (auto sync_type : vec_sync_type) {
+ int map_fd1 = allocator->Alloc(kDmabufSystemHeapName, kAllocSizeInBytes);
+ ASSERT_GE(map_fd1, 0);
+
+ int map_fd2 = allocator->Alloc(kDmabufSystemHeapName, kAllocSizeInBytes);
+ ASSERT_GE(map_fd2, 0);
+
+ int ret = allocator->CpuSyncStart(map_fd1, sync_type);
+ ASSERT_EQ(0, ret);
+
+ ret = allocator->CpuSyncEnd(map_fd2);
+ ASSERT_EQ(-EINVAL, ret);
+
+ ret = allocator->CpuSyncEnd(map_fd1);
+ ASSERT_EQ(0, ret);
+
+ ASSERT_EQ(0, close(map_fd1));
+ ASSERT_EQ(0, close(map_fd2));
+ }
+}
+
+TEST_F(DmaBufHeapTest, TestCpuSyncMismatched2) {
+ static const size_t kAllocSizeInBytes = 4096;
+ auto vec_sync_type = {kSyncRead, kSyncWrite, kSyncReadWrite};
+ for (auto sync_type : vec_sync_type) {
+ int map_fd = allocator->Alloc(kDmabufSystemHeapName, kAllocSizeInBytes);
+ ASSERT_GE(map_fd, 0);
+
+ int ret = allocator->CpuSyncStart(map_fd, sync_type);
+ ASSERT_EQ(0, ret);
+
+ ret = allocator->CpuSyncEnd(map_fd);
+ ASSERT_EQ(0, ret);
+
+ /* Should fail since it is missing a CpuSyncStart() */
+ ret = allocator->CpuSyncEnd(map_fd);
+ ASSERT_EQ(-EINVAL, ret);
+
+ ret = allocator->CpuSyncStart(map_fd, sync_type);
+ ASSERT_EQ(0, ret);
+
+ /* Should fail since it is missing a CpuSyncEnd() */
+ ret = allocator->CpuSyncStart(map_fd, sync_type);
+ ASSERT_EQ(-EINVAL, ret);
+
+ ret = allocator->CpuSyncEnd(map_fd);
+ ASSERT_EQ(0, ret);
+
+ ASSERT_EQ(0, close(map_fd));
+ }
+}
+
+int CustomCpuSyncStart(int /* ion_fd */) {
+ LOG(INFO) << "In custom cpu sync start callback";
+ return 0;
+}
+
+int CustomCpuSyncEnd(int /* ion_fd */) {
+ LOG(INFO) << "In custom cpu sync end callback";
+ return 0;
+}
+
+TEST_F(DmaBufHeapTest, TestCustomLegacyIonSyncCallback) {
+ static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
+ for (size_t size : allocationSizes) {
+ SCOPED_TRACE(::testing::Message()
+ << "heap: " << kDmabufSystemHeapName << " size: " << size);
+
+ int map_fd = allocator->Alloc(kDmabufSystemHeapName, size);
+ ASSERT_GE(map_fd, 0);
+
+ void* ptr;
+ ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
+ ASSERT_TRUE(ptr != NULL);
+
+ int ret = allocator->CpuSyncStart(map_fd, kSyncWrite, CustomCpuSyncStart);
+ ASSERT_EQ(0, ret);
+
+ memset(ptr, 0xaa, size);
+
+ ret = allocator->CpuSyncEnd(map_fd, CustomCpuSyncEnd);
+ ASSERT_EQ(0, ret);
+
+ ASSERT_EQ(0, munmap(ptr, size));
+ ASSERT_EQ(0, close(map_fd));
+ }
}