libdmabufheap: Make libdmabufheap threadsafe

libdmabufheap has a couple of private members that are not thread safe.
This patch adds synchronization to protect them from concurrent access.
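
The change uses a read-mostly pattern: lookups in the shared maps take a
std::shared_lock, and only the slow path that has to insert an entry takes a
std::unique_lock and re-checks the key after reacquiring the lock. A minimal
standalone sketch of the same idea (FdCache and GetOrOpen are illustrative
names, not part of the library):

    #include <fcntl.h>
    #include <unistd.h>

    #include <cerrno>
    #include <shared_mutex>
    #include <string>
    #include <unordered_map>

    // Caches open file descriptors keyed by device path.
    class FdCache {
      public:
        int GetOrOpen(const std::string& path) {
            {
                // Fast path: concurrent readers share the lock.
                std::shared_lock<std::shared_mutex> slock(mutex_);
                auto it = fds_.find(path);
                if (it != fds_.end()) return it->second;
            }
            // Slow path: take the lock exclusively, then re-check so racing
            // threads cannot open the same device twice.
            std::unique_lock<std::shared_mutex> ulock(mutex_);
            auto it = fds_.find(path);
            if (it != fds_.end()) return it->second;
            int fd = TEMP_FAILURE_RETRY(open(path.c_str(), O_RDONLY | O_CLOEXEC));
            if (fd < 0) return -errno;
            fds_.emplace(path, fd);
            return fd;
        }

      private:
        std::shared_mutex mutex_;
        std::unordered_map<std::string, int> fds_;
    };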

Test: atest DmabufHeapTest
Bug: 153786620

Change-Id: Iadc8aaf4796a11f55f1149928e719cf5155431c0
diff --git a/BufferAllocator.cpp b/BufferAllocator.cpp
index 8c0056c..37fc67f 100644
--- a/BufferAllocator.cpp
+++ b/BufferAllocator.cpp
@@ -28,6 +28,7 @@
 #include <sys/types.h>
 #include <unistd.h>
 
+#include <shared_mutex>
 #include <string>
 #include <unordered_set>
 
@@ -45,25 +46,37 @@
     }
 }
 
-int BufferAllocator::GetDmabufHeapFd(const std::string& heap_name) {
-    /* check if we have this dmabuf heap open and if so return the fd for it. */
+int BufferAllocator::OpenDmabufHeap(const std::string& heap_name) {
+    std::shared_lock<std::shared_mutex> slock(dmabuf_heap_fd_mutex_);
+
+    /* Check if the heap has already been opened. */
     auto it = dmabuf_heap_fds_.find(heap_name);
     if (it != dmabuf_heap_fds_.end())
         return it->second;
-    return -1;
-}
 
-int BufferAllocator::OpenDmabufHeap(const std::string& heap_name) {
-    /* Check if we have already opened this heap. */
-    auto fd = GetDmabufHeapFd(heap_name);
-    if (fd < 0) {
-        std::string heap_path = kDmaHeapRoot + heap_name;
-        fd = TEMP_FAILURE_RETRY(open(heap_path.c_str(), O_RDONLY | O_CLOEXEC));
-        if (fd < 0)
-            return -errno;
-        LOG(INFO) << "Using DMA-BUF heap named: " << heap_name;
-        dmabuf_heap_fds_.insert({heap_name, android::base::unique_fd(fd)});
-    }
+    slock.unlock();
+
+    /*
+     * The heap device needs to be opened; use a unique_lock since
+     * dmabuf_heap_fds_ needs to be modified.
+     */
+    std::unique_lock<std::shared_mutex> ulock(dmabuf_heap_fd_mutex_);
+
+    /*
+     * Check again whether the heap has already been opened: a racing thread
+     * may have opened the heap device while the lock was dropped.
+     */
+    it = dmabuf_heap_fds_.find(heap_name);
+    if (it != dmabuf_heap_fds_.end()) return it->second;
+
+    std::string heap_path = kDmaHeapRoot + heap_name;
+    int fd = TEMP_FAILURE_RETRY(open(heap_path.c_str(), O_RDONLY | O_CLOEXEC));
+    if (fd < 0) return -errno;
+
+    LOG(INFO) << "Using DMA-BUF heap named: " << heap_name;
+
+    auto ret = dmabuf_heap_fds_.insert({heap_name, android::base::unique_fd(fd)});
+    CHECK(ret.second);
     return fd;
 }
 
@@ -107,6 +120,8 @@
     if (!ion_heap_mask)
         return -EINVAL;
     IonHeapConfig heap_config = { ion_heap_mask, ion_heap_flags };
+
+    std::unique_lock<std::shared_mutex> ulock(heap_name_to_config_mutex_);
     heap_name_to_config_[heap_name] = heap_config;
     return 0;
 }
@@ -133,6 +148,8 @@
 
     unsigned int ion_heap_mask = 1 << ion_heap_id;
     IonHeapConfig heap_config = { ion_heap_mask, ion_heap_flags };
+
+    std::unique_lock<std::shared_mutex> ulock(heap_name_to_config_mutex_);
     heap_name_to_config_[heap_name] = heap_config;
 
     return 0;
@@ -159,21 +176,28 @@
 
 int BufferAllocator::GetIonConfig(const std::string& heap_name, IonHeapConfig& heap_config) {
     int ret = 0;
+
+    std::shared_lock<std::shared_mutex> slock(heap_name_to_config_mutex_);
+
     auto it = heap_name_to_config_.find(heap_name);
     if (it != heap_name_to_config_.end()) {
         heap_config = it->second;
+        return ret;
+    }
+
+    slock.unlock();
+
+    if (uses_legacy_ion_iface_) {
+        ret = -EINVAL;
     } else {
-        if (uses_legacy_ion_iface_) {
-            ret = -EINVAL;
-        } else {
-            unsigned int heap_id;
-            ret = GetIonHeapIdByName(heap_name, &heap_id);
-            if (ret == 0) {
-                heap_config.mask = 1 << heap_id;
-                heap_config.flags = 0;
-                /* save it so that this lookup does not need to happen again */
-                heap_name_to_config_[heap_name] = heap_config;
-            }
+        unsigned int heap_id;
+        ret = GetIonHeapIdByName(heap_name, &heap_id);
+        if (ret == 0) {
+            heap_config.mask = 1 << heap_id;
+            heap_config.flags = 0;
+            /* save it so that this lookup does not need to happen again */
+            std::unique_lock<std::shared_mutex> ulock(heap_name_to_config_mutex_);
+            heap_name_to_config_[heap_name] = heap_config;
         }
     }
 
@@ -184,8 +208,7 @@
 
 int BufferAllocator::DmabufAlloc(const std::string& heap_name, size_t len) {
     int fd = OpenDmabufHeap(heap_name);
-    if (fd < 0)
-        return fd;
+    if (fd < 0) return fd;
 
     struct dma_heap_allocation_data heap_data{
         .len = len,  // length of data to be allocated in bytes
@@ -194,7 +217,7 @@
 
     auto ret = TEMP_FAILURE_RETRY(ioctl(fd, DMA_HEAP_IOCTL_ALLOC, &heap_data));
     if (ret < 0) {
-        LOG(ERROR) << "Unable to allocate from DMA-BUF heap of name: " << heap_name;
+        PLOG(ERROR) << "Unable to allocate from DMA-BUF heap: " << heap_name;
         return ret;
     }
 
diff --git a/include/BufferAllocator/BufferAllocator.h b/include/BufferAllocator/BufferAllocator.h
index 89534d3..7c9b92d 100644
--- a/include/BufferAllocator/BufferAllocator.h
+++ b/include/BufferAllocator/BufferAllocator.h
@@ -26,6 +26,7 @@
 #include <sys/types.h>
 
 #include <cstdint>
+#include <shared_mutex>
 #include <string>
 #include <unordered_map>
 #include <unordered_set>
@@ -173,6 +174,8 @@
 
     /* Stores all open dmabuf_heap handles. */
     std::unordered_map<std::string, android::base::unique_fd> dmabuf_heap_fds_;
+    /* Protects dmabuf_heap_fds_ from concurrent access */
+    std::shared_mutex dmabuf_heap_fd_mutex_;
 
     /* saved handle to /dev/ion. */
     android::base::unique_fd ion_fd_;
@@ -193,6 +196,9 @@
     inline static bool logged_interface_ = false;
     /* stores a map of dmabuf heap names to equivalent ion heap configurations. */
     std::unordered_map<std::string, struct IonHeapConfig> heap_name_to_config_;
+    /* protects heap_name_to_config_ from concurrent access */
+    std::shared_mutex heap_name_to_config_mutex_;
+
     /**
      * stores a map of dmabuf fds to the type of their last known CpuSyncStart()
      * call. The entry will be cleared when CpuSyncEnd() is invoked.