Revert "[gfxstream] Use aemu SubAllocator for coherent memory"

Avoid the dependency on the AEMU SubAllocator: coherent memory can be
sub-allocated from the mapped blob with Mesa's util/u_mm heap instead.
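
For reference, a rough sketch of the util/u_mm pattern this change
restores (API per Mesa's util/u_mm.h as used in the diff below; the
helper names, heap lifetime, and error handling shown are illustrative,
not the exact CoherentMemory code):

    #include <stdint.h>
    #include "util/u_mm.h"

    // Sub-allocate from a heap that tracks offsets into a mapped blob.
    // alignment2 = 0 requests byte alignment; startSearch = 0 scans the
    // whole heap.
    static uint8_t* suballoc(struct mem_block* heap, uint8_t* base,
                             uint64_t size, uint64_t* offsetOut) {
        struct mem_block* blk = u_mmAllocMem(heap, (int)size, 0, 0);
        if (!blk) return nullptr;
        *offsetOut = blk->ofs;
        return base + blk->ofs;
    }

    // Release by looking the block up via its offset within the mapping.
    static bool release(struct mem_block* heap, uint8_t* base, uint8_t* ptr) {
        struct mem_block* blk = u_mmFindBlock(heap, (int)(ptr - base));
        if (!blk) return false;
        u_mmFreeMem(blk);
        return true;
    }

    // Lifetime: one heap per coherent mapping.
    //   struct mem_block* heap = u_mmInit(0, heapSize);
    //   ... suballoc() / release() ...
    //   u_mmDestroy(heap);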

This reverts commit 16c7e8427376984ed87017cf91857d182c73967a.

BUG=421934218
TEST=CI

Change-Id: I04bbbd01713d0c1f5f4368d28c5e37ec9a46ce36
diff --git a/src/gfxstream/aemu/Android.bp b/src/gfxstream/aemu/Android.bp
index 62ae930..92a32d7 100644
--- a/src/gfxstream/aemu/Android.bp
+++ b/src/gfxstream/aemu/Android.bp
@@ -33,15 +33,10 @@
     vendor: true,
     host_supported: true,
     header_libs: [
-        "mesa_common_headers",
         "mesa_gfxstream_aemu_headers",
     ],
-    shared_libs: [
-        "liblog",
-    ],
     srcs: [
         "ring_buffer.cpp",
         "Stream.cpp",
-        "SubAllocator.cpp",
     ],
 }
diff --git a/src/gfxstream/aemu/SubAllocator.cpp b/src/gfxstream/aemu/SubAllocator.cpp
deleted file mode 100644
index bb613b2..0000000
--- a/src/gfxstream/aemu/SubAllocator.cpp
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * Copyright 2019 Google
- * SPDX-License-Identifier: MIT
- */
-
-#include "Stream.h"
-#include "SubAllocator.h"
-#include "address_space.h"
-#include "util/log.h"
-
-namespace gfxstream {
-namespace aemu {
-
-class SubAllocator::Impl {
-  public:
-   Impl(void* _buffer, uint64_t _totalSize, uint64_t _pageSize)
-       : buffer(_buffer),
-         totalSize(_totalSize),
-         pageSize(_pageSize),
-         startAddr((uintptr_t)buffer),
-         endAddr(startAddr + totalSize) {
-      address_space_allocator_init(&addr_alloc, totalSize, 32);
-   }
-
-   ~Impl() { address_space_allocator_destroy_nocleanup(&addr_alloc); }
-
-   void clear() {
-      address_space_allocator_destroy_nocleanup(&addr_alloc);
-      address_space_allocator_init(&addr_alloc, totalSize, 32);
-   }
-
-   bool save(Stream* stream) {
-      address_space_allocator_iter_func_t allocatorSaver =
-          [](void* context, struct address_space_allocator* allocator) {
-             Stream* stream = reinterpret_cast<Stream*>(context);
-             stream->putBe32(allocator->size);
-             stream->putBe32(allocator->capacity);
-             stream->putBe64(allocator->total_bytes);
-          };
-      address_block_iter_func_t allocatorBlockSaver =
-          [](void* context, struct address_block* block) {
-             Stream* stream = reinterpret_cast<Stream*>(context);
-             stream->putBe64(block->offset);
-             stream->putBe64(block->size_available);
-          };
-      address_space_allocator_run(&addr_alloc, (void*)stream, allocatorSaver,
-                                  allocatorBlockSaver);
-
-      stream->putBe64(pageSize);
-      stream->putBe64(totalSize);
-      stream->putBe32(allocCount);
-
-      return true;
-   }
-
-   bool load(Stream* stream) {
-      clear();
-      address_space_allocator_iter_func_t allocatorLoader =
-          [](void* context, struct address_space_allocator* allocator) {
-             Stream* stream = reinterpret_cast<Stream*>(context);
-             allocator->size = stream->getBe32();
-             allocator->capacity = stream->getBe32();
-             allocator->total_bytes = stream->getBe64();
-          };
-      address_block_iter_func_t allocatorBlockLoader =
-          [](void* context, struct address_block* block) {
-             Stream* stream = reinterpret_cast<Stream*>(context);
-             block->offset = stream->getBe64();
-             block->size_available = stream->getBe64();
-          };
-      address_space_allocator_run(&addr_alloc, (void*)stream, allocatorLoader,
-                                  allocatorBlockLoader);
-
-      pageSize = stream->getBe64();
-      totalSize = stream->getBe64();
-      allocCount = stream->getBe32();
-
-      return true;
-   }
-
-   bool postLoad(void* postLoadBuffer) {
-      buffer = postLoadBuffer;
-      startAddr = (uint64_t)(uintptr_t)postLoadBuffer;
-      return true;
-   }
-
-   void rangeCheck(const char* task, void* ptr) {
-      uint64_t addr = (uintptr_t)ptr;
-      if (addr < startAddr || addr > endAddr) {
-         mesa_loge(
-            "FATAL in SubAllocator: Task:%s ptr '0x%llx' is out of range! "
-            "Range:[0x%llx - 0x%llx]", task, addr, startAddr, endAddr);
-      }
-   }
-
-   uint64_t getOffset(void* checkedPtr) {
-      uint64_t addr = (uintptr_t)checkedPtr;
-      return addr - startAddr;
-   }
-
-   bool free(void* ptr) {
-      if (!ptr) return false;
-
-      rangeCheck("free", ptr);
-      if (EINVAL ==
-          address_space_allocator_deallocate(&addr_alloc, getOffset(ptr))) {
-         return false;
-      }
-
-      --allocCount;
-      return true;
-   }
-
-   void freeAll() {
-      address_space_allocator_reset(&addr_alloc);
-      allocCount = 0;
-   }
-
-   void* alloc(size_t wantedSize) {
-      if (wantedSize == 0) return nullptr;
-
-      uint64_t wantedSize64 = (uint64_t)wantedSize;
-
-      size_t toPageSize = pageSize * ((wantedSize + pageSize - 1) / pageSize);
-
-      uint64_t offset =
-          address_space_allocator_allocate(&addr_alloc, toPageSize);
-
-      if (offset == ANDROID_EMU_ADDRESS_SPACE_BAD_OFFSET) {
-         return nullptr;
-      }
-
-      ++allocCount;
-      return (void*)(uintptr_t)(startAddr + offset);
-   }
-
-   bool empty() const { return allocCount == 0; }
-
-   void* buffer;
-   uint64_t totalSize;
-   uint64_t pageSize;
-   uint64_t startAddr;
-   uint64_t endAddr;
-   struct address_space_allocator addr_alloc;
-   uint32_t allocCount = 0;
-};
-
-SubAllocator::SubAllocator(void* buffer, uint64_t totalSize, uint64_t pageSize)
-    : mImpl(new SubAllocator::Impl(buffer, totalSize, pageSize)) {}
-
-SubAllocator::~SubAllocator() { delete mImpl; }
-
-// Snapshotting
-bool SubAllocator::save(Stream* stream) { return mImpl->save(stream); }
-
-bool SubAllocator::load(Stream* stream) { return mImpl->load(stream); }
-
-bool SubAllocator::postLoad(void* postLoadBuffer) {
-   return mImpl->postLoad(postLoadBuffer);
-}
-
-void* SubAllocator::alloc(size_t wantedSize) {
-   return mImpl->alloc(wantedSize);
-}
-
-bool SubAllocator::free(void* ptr) { return mImpl->free(ptr); }
-
-void SubAllocator::freeAll() { mImpl->freeAll(); }
-
-uint64_t SubAllocator::getOffset(void* ptr) { return mImpl->getOffset(ptr); }
-
-bool SubAllocator::empty() const { return mImpl->empty(); }
-
-}  // namespace aemu
-}  // namespace gfxstream
diff --git a/src/gfxstream/aemu/include/SubAllocator.h b/src/gfxstream/aemu/include/SubAllocator.h
deleted file mode 100644
index c376664..0000000
--- a/src/gfxstream/aemu/include/SubAllocator.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright 2021 Google
- * SPDX-License-Identifier: MIT
- */
-#pragma once
-
-#include <inttypes.h>
-#include <stddef.h>
-#include <string.h>
-
-namespace gfxstream {
-namespace aemu {
-
-class Stream;
-
-// Class to create sub-allocations in an existing buffer. Similar interface to
-// Pool, but underlying mechanism is different as it's difficult to combine
-// same-size heaps in Pool with a preallocated buffer.
-class SubAllocator {
-  public:
-   // |pageSize| determines both the alignment of pointers returned
-   // and the multiples of space occupied.
-   SubAllocator(void* buffer, uint64_t totalSize, uint64_t pageSize);
-
-   // Memory is freed from the perspective of the user of
- * SubAllocator, but the preallocated buffer is not freed.
-   ~SubAllocator();
-
-   // Snapshotting
-   bool save(Stream* stream);
-   bool load(Stream* stream);
-   bool postLoad(void* postLoadBuffer);
-
-   // returns null if the allocation cannot be satisfied.
-   void* alloc(size_t wantedSize);
-   // returns true if |ptr| came from alloc(), false otherwise
-   bool free(void* ptr);
-   void freeAll();
-   uint64_t getOffset(void* ptr);
-
-   bool empty() const;
-
-   // Convenience function to allocate an array
-   // of objects of type T.
-   template <class T>
-   T* allocArray(size_t count) {
-      size_t bytes = sizeof(T) * count;
-      void* res = alloc(bytes);
-      return (T*)res;
-   }
-
-   char* strDup(const char* toCopy) {
-      size_t bytes = strlen(toCopy) + 1;
-      void* res = alloc(bytes);
-      memset(res, 0x0, bytes);
-      memcpy(res, toCopy, bytes);
-      return (char*)res;
-   }
-
-   char** strDupArray(const char* const* arrayToCopy, size_t count) {
-      char** res = allocArray<char*>(count);
-
-      for (size_t i = 0; i < count; i++) {
-         res[i] = strDup(arrayToCopy[i]);
-      }
-
-      return res;
-   }
-
-   void* dupArray(const void* buf, size_t bytes) {
-      void* res = alloc(bytes);
-      memcpy(res, buf, bytes);
-      return res;
-   }
-
-  private:
-   class Impl;
-   Impl* mImpl = nullptr;
-};
-
-}  // namespace aemu
-}  // namespace gfxstream
\ No newline at end of file
diff --git a/src/gfxstream/aemu/include/address_space.h b/src/gfxstream/aemu/include/address_space.h
deleted file mode 100644
index 1bdfd91..0000000
--- a/src/gfxstream/aemu/include/address_space.h
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright 2021 Google
- * SPDX-License-Identifier: MIT
- */
-#pragma once
-
-#include <assert.h>
-#include <errno.h>
-#include <inttypes.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-namespace gfxstream {
-namespace aemu {
-
-// This is ported from goldfish_address_space, allowing it to be used for
-// general sub-allocations of any buffer range.
-// It is also a pure header library, so there are no compiler tricks needed
-// to use this in a particular implementation. Please don't include this
-// in a file that is included everywhere else, though.
-
-/* Represents a continuous range of addresses and a flag if this block is
- * available
- */
-struct address_block {
-   uint64_t offset;
-   union {
-      uint64_t size_available; /* VMSTATE_x does not support bit fields */
-      struct {
-         uint64_t size : 63;
-         uint64_t available : 1;
-      };
-   };
-};
-
-/* A dynamic array of address blocks, with the following invariant:
- * blocks[i].size > 0
- * blocks[i+1].offset = blocks[i].offset + blocks[i].size
- */
-struct address_space_allocator {
-   struct address_block *blocks;
-   int size;
-   int capacity;
-   uint64_t total_bytes;
-};
-
-#define ANDROID_EMU_ADDRESS_SPACE_BAD_OFFSET (~(uint64_t)0)
-
-/* The assert function to abort if something goes wrong. */
-static void address_space_assert(bool condition) {
-#ifdef ANDROID_EMU_ADDRESS_SPACE_ASSERT_FUNC
-   ANDROID_EMU_ADDRESS_SPACE_ASSERT_FUNC(condition);
-#else
-   (void)condition;
-   assert(condition);
-#endif
-}
-
-static void *address_space_malloc0(size_t size) {
-#ifdef ANDROID_EMU_ADDRESS_SPACE_MALLOC0_FUNC
-   return ANDROID_EMU_ADDRESS_SPACE_MALLOC0_FUNC(size);
-#else
-   void *res = malloc(size);
-   memset(res, 0, size);
-   return res;
-#endif
-}
-
-static void *address_space_realloc(void *ptr, size_t size) {
-#ifdef ANDROID_EMU_ADDRESS_SPACE_REALLOC_FUNC
-   return ANDROID_EMU_ADDRESS_SPACE_REALLOC_FUNC(ptr, size);
-#else
-   void *res = realloc(ptr, size);
-   return res;
-#endif
-}
-
-static void address_space_free(void *ptr) {
-#ifdef ANDROID_EMU_ADDRESS_SPACE_FREE_FUNC
-   return ANDROID_EMU_ADDRESS_SPACE_FREE_FUNC(ptr);
-#else
-   free(ptr);
-#endif
-}
-
-/* Looks for the smallest (to reduce fragmentation) available block with size to
- * fit the requested amount and returns its index or -1 if none is available.
- */
-static int address_space_allocator_find_available_block(
-    struct address_block *block, int n_blocks, uint64_t size_at_least) {
-   int index = -1;
-   uint64_t size_at_index = 0;
-   int i;
-
-   address_space_assert(n_blocks >= 1);
-
-   for (i = 0; i < n_blocks; ++i, ++block) {
-      uint64_t this_size = block->size;
-      address_space_assert(this_size > 0);
-
-      if (this_size >= size_at_least && block->available &&
-          (index < 0 || this_size < size_at_index)) {
-         index = i;
-         size_at_index = this_size;
-      }
-   }
-
-   return index;
-}
-
-static int address_space_allocator_grow_capacity(int old_capacity) {
-   address_space_assert(old_capacity >= 1);
-
-   return old_capacity + old_capacity;
-}
-
-/* Inserts one more address block right after i'th (by borrowing i'th size) and
- * adjusts sizes:
- * pre:
- *   size > blocks[i].size
- *
- * post:
- *   * might reallocate allocator->blocks if there is no capacity to insert one
- *   * blocks[i].size -= size;
- *   * blocks[i+1].size = size;
- */
-static struct address_block *address_space_allocator_split_block(
-    struct address_space_allocator *allocator, int i, uint64_t size) {
-   address_space_assert(allocator->capacity >= 1);
-   address_space_assert(allocator->size >= 1);
-   address_space_assert(allocator->size <= allocator->capacity);
-   address_space_assert(i >= 0);
-   address_space_assert(i < allocator->size);
-   address_space_assert(size < allocator->blocks[i].size);
-
-   if (allocator->size == allocator->capacity) {
-      int new_capacity =
-          address_space_allocator_grow_capacity(allocator->capacity);
-      allocator->blocks = (struct address_block *)address_space_realloc(
-          allocator->blocks, sizeof(struct address_block) * new_capacity);
-      address_space_assert(allocator->blocks);
-      allocator->capacity = new_capacity;
-   }
-
-   struct address_block *blocks = allocator->blocks;
-
-   /*   size = 5, i = 1
-    *   [ 0 | 1 |  2  |  3  | 4 ]  =>  [ 0 | 1 | new |  2  | 3 | 4 ]
-    *         i  (i+1) (i+2)                 i  (i+1) (i+2)
-    */
-   memmove(&blocks[i + 2], &blocks[i + 1],
-           sizeof(struct address_block) * (allocator->size - i - 1));
-
-   struct address_block *to_borrow_from = &blocks[i];
-   struct address_block *new_block = to_borrow_from + 1;
-
-   uint64_t new_size = to_borrow_from->size - size;
-
-   to_borrow_from->size = new_size;
-
-   new_block->offset = to_borrow_from->offset + new_size;
-   new_block->size = size;
-   new_block->available = 1;
-
-   ++allocator->size;
-
-   return new_block;
-}
-
-/* Marks i'th block as available. If adjacent ((i-1) and (i+1)) blocks are also
- * available, it merges i'th block with them.
- * pre:
- *   i < allocator->size
- * post:
- *   i'th block is merged with adjacent ones if they are available, blocks that
- *   were merged from are removed. allocator->size is updated if blocks were
- *   removed.
- */
-static void address_space_allocator_release_block(
-    struct address_space_allocator *allocator, int i) {
-   struct address_block *blocks = allocator->blocks;
-   int before = i - 1;
-   int after = i + 1;
-   int size = allocator->size;
-
-   address_space_assert(i >= 0);
-   address_space_assert(i < size);
-
-   blocks[i].available = 1;
-
-   if (before >= 0 && blocks[before].available) {
-      if (after < size && blocks[after].available) {
-         // merge (before, i, after) into before
-         blocks[before].size += (blocks[i].size + blocks[after].size);
-
-         size -= 2;
-         memmove(&blocks[i], &blocks[i + 2],
-                 sizeof(struct address_block) * (size - i));
-         allocator->size = size;
-      } else {
-         // merge (before, i) into before
-         blocks[before].size += blocks[i].size;
-
-         --size;
-         memmove(&blocks[i], &blocks[i + 1],
-                 sizeof(struct address_block) * (size - i));
-         allocator->size = size;
-      }
-   } else if (after < size && blocks[after].available) {
-      // merge (i, after) into i
-      blocks[i].size += blocks[after].size;
-
-      --size;
-      memmove(&blocks[after], &blocks[after + 1],
-              sizeof(struct address_block) * (size - after));
-      allocator->size = size;
-   }
-}
-
-/* Takes a size to allocate an address block and returns an offset where this
- * block is allocated. This block will not be available for other callers unless
- * it is explicitly deallocated (see address_space_allocator_deallocate below).
- */
-static uint64_t address_space_allocator_allocate(
-    struct address_space_allocator *allocator, uint64_t size) {
-   int i = address_space_allocator_find_available_block(allocator->blocks,
-                                                        allocator->size, size);
-   if (i < 0) {
-      return ANDROID_EMU_ADDRESS_SPACE_BAD_OFFSET;
-   } else {
-      address_space_assert(i < allocator->size);
-
-      struct address_block *block = &allocator->blocks[i];
-      address_space_assert(block->size >= size);
-
-      if (block->size > size) {
-         block = address_space_allocator_split_block(allocator, i, size);
-      }
-
-      address_space_assert(block->size == size);
-      block->available = 0;
-
-      return block->offset;
-   }
-}
-
-/* Takes an offset returned from address_space_allocator_allocate earlier
- * (see above) and marks this block as available for further allocation.
- */
-static uint32_t address_space_allocator_deallocate(
-    struct address_space_allocator *allocator, uint64_t offset) {
-   struct address_block *block = allocator->blocks;
-   int size = allocator->size;
-   int i;
-
-   address_space_assert(size >= 1);
-
-   for (i = 0; i < size; ++i, ++block) {
-      if (block->offset == offset) {
-         if (block->available) {
-            return EINVAL;
-         } else {
-            address_space_allocator_release_block(allocator, i);
-            return 0;
-         }
-      }
-   }
-
-   return EINVAL;
-}
-
-/* Creates a seed block. */
-static void address_space_allocator_init(
-    struct address_space_allocator *allocator, uint64_t size,
-    int initial_capacity) {
-   address_space_assert(initial_capacity >= 1);
-
-   allocator->blocks = (struct address_block *)malloc(
-       sizeof(struct address_block) * initial_capacity);
-   memset(allocator->blocks, 0,
-          sizeof(struct address_block) * initial_capacity);
-   address_space_assert(allocator->blocks);
-
-   struct address_block *block = allocator->blocks;
-
-   block->offset = 0;
-   block->size = size;
-   block->available = 1;
-
-   allocator->size = 1;
-   allocator->capacity = initial_capacity;
-   allocator->total_bytes = size;
-}
-
-/* At this point there should be no used blocks and all available blocks must
- * have been merged into one block.
- */
-static void address_space_allocator_destroy(
-    struct address_space_allocator *allocator) {
-   address_space_assert(allocator->size == 1);
-   address_space_assert(allocator->capacity >= allocator->size);
-   address_space_assert(allocator->blocks[0].available);
-   address_space_free(allocator->blocks);
-}
-
-/* Destroy function for when we don't care what was previously allocated;
- * frees the blocks array without requiring it to be merged into one block.
- */
-static void address_space_allocator_destroy_nocleanup(
-    struct address_space_allocator *allocator) {
-   address_space_free(allocator->blocks);
-}
-
-/* Resets the state of the allocator to the initial state without
- * performing any dynamic memory management. */
-static void address_space_allocator_reset(
-    struct address_space_allocator *allocator) {
-   address_space_assert(allocator->size >= 1);
-
-   allocator->size = 1;
-
-   struct address_block *block = allocator->blocks;
-   block->offset = 0;
-   block->size = allocator->total_bytes;
-   block->available = 1;
-}
-
-typedef void (*address_block_iter_func_t)(void *context,
-                                          struct address_block *);
-typedef void (*address_space_allocator_iter_func_t)(
-    void *context, struct address_space_allocator *);
-
-static void address_space_allocator_run(
-    struct address_space_allocator *allocator, void *context,
-    address_space_allocator_iter_func_t allocator_func,
-    address_block_iter_func_t block_func) {
-   struct address_block *block = 0;
-   int size;
-   int i;
-
-   allocator_func(context, allocator);
-
-   block = allocator->blocks;
-   size = allocator->size;
-
-   address_space_assert(size >= 1);
-
-   for (i = 0; i < size; ++i, ++block) {
-      block_func(context, block);
-   }
-}
-
-}  // namespace aemu
-}  // namespace gfxstream
\ No newline at end of file
diff --git a/src/gfxstream/aemu/meson.build b/src/gfxstream/aemu/meson.build
index a47e40c..36727a0 100644
--- a/src/gfxstream/aemu/meson.build
+++ b/src/gfxstream/aemu/meson.build
@@ -6,7 +6,6 @@
 files_libaemu = files(
   'ring_buffer.cpp',
   'Stream.cpp',
-  'SubAllocator.cpp',
 )
 
 libaemu = static_library(
diff --git a/src/gfxstream/guest/vulkan_enc/HostVisibleMemoryVirtualization.cpp b/src/gfxstream/guest/vulkan_enc/HostVisibleMemoryVirtualization.cpp
index f6c6bf6..c564899 100644
--- a/src/gfxstream/guest/vulkan_enc/HostVisibleMemoryVirtualization.cpp
+++ b/src/gfxstream/guest/vulkan_enc/HostVisibleMemoryVirtualization.cpp
@@ -17,38 +17,45 @@
 CoherentMemory::CoherentMemory(VirtGpuResourceMappingPtr blobMapping, uint64_t size,
                                VkDevice device, VkDeviceMemory memory)
     : mSize(size), mBlobMapping(blobMapping), mDevice(device), mMemory(memory) {
-    mAllocator =
-        std::make_unique<gfxstream::aemu::SubAllocator>(blobMapping->asRawPtr(), mSize, 4096);
+    mHeap = u_mmInit(0, kHostVisibleHeapSize);
+    mBaseAddr = blobMapping->asRawPtr();
 }
 
 #if DETECT_OS_ANDROID
 CoherentMemory::CoherentMemory(GoldfishAddressSpaceBlockPtr block, uint64_t gpuAddr, uint64_t size,
                                VkDevice device, VkDeviceMemory memory)
     : mSize(size), mBlock(block), mDevice(device), mMemory(memory) {
-    void* address = block->mmap(gpuAddr);
-    mAllocator = std::make_unique<gfxstream::aemu::SubAllocator>(address, mSize, kLargestPageSize);
+    mHeap = u_mmInit(0, kHostVisibleHeapSize);
+    mBaseAddr = (uint8_t*)block->mmap(gpuAddr);
 }
 #endif  // DETECT_OS_ANDROID
 
 CoherentMemory::~CoherentMemory() {
     ResourceTracker::getThreadLocalEncoder()->vkFreeMemorySyncGOOGLE(mDevice, mMemory, nullptr,
                                                                      false);
+    u_mmDestroy(mHeap);
 }
 
 VkDeviceMemory CoherentMemory::getDeviceMemory() const { return mMemory; }
 
 bool CoherentMemory::subAllocate(uint64_t size, uint8_t** ptr, uint64_t& offset) {
-    auto address = mAllocator->alloc(size);
-    if (!address) return false;
+    auto block = u_mmAllocMem(mHeap, (int)size, 0, 0);
+    if (!block) return false;
 
-    *ptr = (uint8_t*)address;
-    offset = mAllocator->getOffset(address);
+    *ptr = mBaseAddr + block->ofs;
+    offset = block->ofs;
     return true;
 }
 
 bool CoherentMemory::release(uint8_t* ptr) {
-    mAllocator->free(ptr);
-    return true;
+    int offset = ptr - mBaseAddr;
+    auto block = u_mmFindBlock(mHeap, offset);
+    if (block) {
+        u_mmFreeMem(block);
+        return true;
+    }
+
+    return false;
 }
 
 }  // namespace vk
diff --git a/src/gfxstream/guest/vulkan_enc/HostVisibleMemoryVirtualization.h b/src/gfxstream/guest/vulkan_enc/HostVisibleMemoryVirtualization.h
index 812862f..f6cf9e9 100644
--- a/src/gfxstream/guest/vulkan_enc/HostVisibleMemoryVirtualization.h
+++ b/src/gfxstream/guest/vulkan_enc/HostVisibleMemoryVirtualization.h
@@ -8,7 +8,7 @@
 
 #include "VirtGpu.h"
 #include "goldfish_address_space.h"
-#include "SubAllocator.h"
+#include "util/u_mm.h"
 #include "util/detect_os.h"
 
 constexpr uint64_t kMegaByte = 1048576;
@@ -26,7 +26,6 @@
 namespace vk {
 
 using GoldfishAddressSpaceBlockPtr = std::shared_ptr<GoldfishAddressSpaceBlock>;
-using SubAllocatorPtr = std::unique_ptr<gfxstream::aemu::SubAllocator>;
 
 class CoherentMemory {
    public:
@@ -54,7 +53,9 @@
     GoldfishAddressSpaceBlockPtr mBlock;
     VkDevice mDevice;
     VkDeviceMemory mMemory;
-    SubAllocatorPtr mAllocator;
+
+    uint8_t* mBaseAddr = nullptr;
+    struct mem_block* mHeap = nullptr;
 };
 
 using CoherentMemoryPtr = std::shared_ptr<CoherentMemory>;