// Copyright (C) 2018 The Android Open Source Project
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ResourceTracker.h"
#include "android/base/Optional.h"
#include "android/base/threads/AndroidWorkPool.h"
#include "goldfish_vk_private_defs.h"
#include "../OpenglSystemCommon/EmulatorFeatureInfo.h"
#include "../OpenglSystemCommon/HostConnection.h"
#ifdef VK_USE_PLATFORM_ANDROID_KHR
#include "../egl/goldfish_sync.h"
typedef uint32_t zx_handle_t;
#define ZX_HANDLE_INVALID ((zx_handle_t)0)
void zx_handle_close(zx_handle_t) { }
void zx_event_create(int, zx_handle_t*) { }
#include "AndroidHardwareBuffer.h"
#ifndef HOST_BUILD
#include <drm/virtgpu_drm.h>
#include <xf86drm.h>
#endif
#include "VirtioGpuNext.h"
#endif // VK_USE_PLATFORM_ANDROID_KHR
#ifdef VK_USE_PLATFORM_FUCHSIA
#include <cutils/native_handle.h>
#include <fuchsia/hardware/goldfish/cpp/fidl.h>
#include <fuchsia/sysmem/cpp/fidl.h>
#include <lib/zx/channel.h>
#include <lib/zx/vmo.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>
#include <zircon/syscalls/object.h>
#include "services/service_connector.h"
struct AHardwareBuffer;
void AHardwareBuffer_release(AHardwareBuffer*) { }
native_handle_t *AHardwareBuffer_getNativeHandle(AHardwareBuffer*) { return NULL; }
uint64_t getAndroidHardwareBufferUsageFromVkUsage(
const VkImageCreateFlags vk_create,
const VkImageUsageFlags vk_usage) {
return AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
}
VkResult importAndroidHardwareBuffer(
Gralloc *grallocHelper,
const VkImportAndroidHardwareBufferInfoANDROID* info,
struct AHardwareBuffer **importOut) {
return VK_SUCCESS;
}
VkResult createAndroidHardwareBuffer(
bool hasDedicatedImage,
bool hasDedicatedBuffer,
const VkExtent3D& imageExtent,
uint32_t imageLayers,
VkFormat imageFormat,
VkImageUsageFlags imageUsage,
VkImageCreateFlags imageCreateFlags,
VkDeviceSize bufferSize,
VkDeviceSize allocationInfoAllocSize,
struct AHardwareBuffer **out) {
return VK_SUCCESS;
}
namespace goldfish_vk {
struct HostVisibleMemoryVirtualizationInfo;
}
VkResult getAndroidHardwareBufferPropertiesANDROID(
Gralloc *grallocHelper,
const goldfish_vk::HostVisibleMemoryVirtualizationInfo*,
VkDevice,
const AHardwareBuffer*,
VkAndroidHardwareBufferPropertiesANDROID*) { return VK_SUCCESS; }
VkResult getMemoryAndroidHardwareBufferANDROID(struct AHardwareBuffer **) { return VK_SUCCESS; }
#endif // VK_USE_PLATFORM_FUCHSIA
#include "HostVisibleMemoryVirtualization.h"
#include "Resources.h"
#include "VkEncoder.h"
#include "android/base/AlignedBuf.h"
#include "android/base/synchronization/AndroidLock.h"
#include "goldfish_address_space.h"
#include "goldfish_vk_private_defs.h"
#include "vk_format_info.h"
#include "vk_util.h"
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vndk/hardware_buffer.h>
#include <log/log.h>
#include <stdlib.h>
#include <sync/sync.h>
#ifdef VK_USE_PLATFORM_ANDROID_KHR
#include <sys/mman.h>
#include <sys/syscall.h>
#ifdef HOST_BUILD
#include "android/utils/tempfile.h"
#endif
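// memfd_create shim: host builds emulate memfd with a temporary file, while
// device builds issue the raw SYS_memfd_create syscall directly.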
static inline int
inline_memfd_create(const char *name, unsigned int flags) {
#ifdef HOST_BUILD
TempFile* tmpFile = tempfile_create();
return open(tempfile_path(tmpFile), O_RDWR);
// TODO: Windows is not supposed to support VkSemaphoreGetFdInfoKHR
#else
return syscall(SYS_memfd_create, name, flags);
#endif
}
#define memfd_create inline_memfd_create
#endif // VK_USE_PLATFORM_ANDROID_KHR
#define RESOURCE_TRACKER_DEBUG 0
#if RESOURCE_TRACKER_DEBUG
#undef D
#define D(fmt,...) ALOGD("%s: " fmt, __func__, ##__VA_ARGS__);
#else
#ifndef D
#define D(fmt,...)
#endif
#endif
using android::aligned_buf_alloc;
using android::aligned_buf_free;
using android::base::Optional;
using android::base::guest::AutoLock;
using android::base::guest::Lock;
using android::base::guest::WorkPool;
namespace goldfish_vk {
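// Handle mapping machinery. MAKE_HANDLE_MAPPING_FOREACH expands to the three
// mapHandles_* overrides (in place, to u64, from u64) that VulkanHandleMapping
// requires for a given handle type. The mappings built from it are:
//   CreateMapping:  wrap freshly created host handles in guest-side goldfish
//                   objects and register them with the ResourceTracker.
//   UnwrapMapping:  translate guest-side handles back to the underlying host
//                   handles for encoding.
//   DestroyMapping: unregister and delete the guest-side wrapper objects.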
#define MAKE_HANDLE_MAPPING_FOREACH(type_name, map_impl, map_to_u64_impl, map_from_u64_impl) \
void mapHandles_##type_name(type_name* handles, size_t count) override { \
for (size_t i = 0; i < count; ++i) { \
map_impl; \
} \
} \
void mapHandles_##type_name##_u64(const type_name* handles, uint64_t* handle_u64s, size_t count) override { \
for (size_t i = 0; i < count; ++i) { \
map_to_u64_impl; \
} \
} \
void mapHandles_u64_##type_name(const uint64_t* handle_u64s, type_name* handles, size_t count) override { \
for (size_t i = 0; i < count; ++i) { \
map_from_u64_impl; \
} \
}
#define DEFINE_RESOURCE_TRACKING_CLASS(class_name, impl) \
class class_name : public VulkanHandleMapping { \
public: \
virtual ~class_name() { } \
GOLDFISH_VK_LIST_HANDLE_TYPES(impl) \
};
#define CREATE_MAPPING_IMPL_FOR_TYPE(type_name) \
MAKE_HANDLE_MAPPING_FOREACH(type_name, \
handles[i] = new_from_host_##type_name(handles[i]); ResourceTracker::get()->register_##type_name(handles[i]);, \
handle_u64s[i] = (uint64_t)new_from_host_##type_name(handles[i]), \
handles[i] = (type_name)new_from_host_u64_##type_name(handle_u64s[i]); ResourceTracker::get()->register_##type_name(handles[i]);)
#define UNWRAP_MAPPING_IMPL_FOR_TYPE(type_name) \
MAKE_HANDLE_MAPPING_FOREACH(type_name, \
handles[i] = get_host_##type_name(handles[i]), \
handle_u64s[i] = (uint64_t)get_host_u64_##type_name(handles[i]), \
handles[i] = (type_name)get_host_##type_name((type_name)handle_u64s[i]))
#define DESTROY_MAPPING_IMPL_FOR_TYPE(type_name) \
MAKE_HANDLE_MAPPING_FOREACH(type_name, \
ResourceTracker::get()->unregister_##type_name(handles[i]); delete_goldfish_##type_name(handles[i]), \
(void)handle_u64s[i]; delete_goldfish_##type_name(handles[i]), \
(void)handles[i]; delete_goldfish_##type_name((type_name)handle_u64s[i]))
DEFINE_RESOURCE_TRACKING_CLASS(CreateMapping, CREATE_MAPPING_IMPL_FOR_TYPE)
DEFINE_RESOURCE_TRACKING_CLASS(UnwrapMapping, UNWRAP_MAPPING_IMPL_FOR_TYPE)
DEFINE_RESOURCE_TRACKING_CLASS(DestroyMapping, DESTROY_MAPPING_IMPL_FOR_TYPE)
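// ResourceTracker::Impl holds all guest-side bookkeeping: one info_<type> map
// per Vulkan handle type (guarded by mLock), the handle mappings above, and
// the host feature / memory virtualization state consulted by the on_vk* hooks.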
class ResourceTracker::Impl {
public:
Impl() = default;
CreateMapping createMapping;
UnwrapMapping unwrapMapping;
DestroyMapping destroyMapping;
DefaultHandleMapping defaultMapping;
#define HANDLE_DEFINE_TRIVIAL_INFO_STRUCT(type) \
struct type##_Info { \
uint32_t unused; \
};
GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(HANDLE_DEFINE_TRIVIAL_INFO_STRUCT)
struct VkInstance_Info {
uint32_t highestApiVersion;
std::set<std::string> enabledExtensions;
// Fodder for vkEnumeratePhysicalDevices.
std::vector<VkPhysicalDevice> physicalDevices;
};
using HostMemBlocks = std::vector<HostMemAlloc>;
using HostMemBlockIndex = size_t;
#define INVALID_HOST_MEM_BLOCK (-1)
struct VkDevice_Info {
VkPhysicalDevice physdev;
VkPhysicalDeviceProperties props;
VkPhysicalDeviceMemoryProperties memProps;
std::vector<HostMemBlocks> hostMemBlocks { VK_MAX_MEMORY_TYPES };
uint32_t apiVersion;
std::set<std::string> enabledExtensions;
};
struct VirtioGpuHostmemResourceInfo {
uint32_t resourceId = 0;
int primeFd = -1;
};
struct VkDeviceMemory_Info {
VkDeviceSize allocationSize = 0;
VkDeviceSize mappedSize = 0;
uint8_t* mappedPtr = nullptr;
uint32_t memoryTypeIndex = 0;
bool virtualHostVisibleBacking = false;
bool directMapped = false;
GoldfishAddressSpaceBlock*
goldfishAddressSpaceBlock = nullptr;
VirtioGpuHostmemResourceInfo resInfo;
SubAlloc subAlloc;
AHardwareBuffer* ahw = nullptr;
zx_handle_t vmoHandle = ZX_HANDLE_INVALID;
};
struct VkCommandBuffer_Info {
VkEncoder** lastUsedEncoderPtr = nullptr;
uint32_t sequenceNumber = 0;
};
// custom guest-side structs for images/buffers because of AHardwareBuffer :((
struct VkImage_Info {
VkDevice device;
VkImageCreateInfo createInfo;
bool external = false;
VkExternalMemoryImageCreateInfo externalCreateInfo;
VkDeviceMemory currentBacking = VK_NULL_HANDLE;
VkDeviceSize currentBackingOffset = 0;
VkDeviceSize currentBackingSize = 0;
bool baseRequirementsKnown = false;
VkMemoryRequirements baseRequirements;
#ifdef VK_USE_PLATFORM_FUCHSIA
bool isSysmemBackedMemory = false;
#endif
};
struct VkBuffer_Info {
VkDevice device;
VkBufferCreateInfo createInfo;
bool external = false;
VkExternalMemoryBufferCreateInfo externalCreateInfo;
VkDeviceMemory currentBacking = VK_NULL_HANDLE;
VkDeviceSize currentBackingOffset = 0;
VkDeviceSize currentBackingSize = 0;
bool baseRequirementsKnown = false;
VkMemoryRequirements baseRequirements;
#ifdef VK_USE_PLATFORM_FUCHSIA
bool isSysmemBackedMemory = false;
#endif
};
struct VkSemaphore_Info {
VkDevice device;
zx_handle_t eventHandle = ZX_HANDLE_INVALID;
int syncFd = -1;
};
struct VkDescriptorUpdateTemplate_Info {
std::vector<VkDescriptorUpdateTemplateEntry> templateEntries;
// Flattened versions
std::vector<uint32_t> imageInfoEntryIndices;
std::vector<uint32_t> bufferInfoEntryIndices;
std::vector<uint32_t> bufferViewEntryIndices;
std::vector<VkDescriptorImageInfo> imageInfos;
std::vector<VkDescriptorBufferInfo> bufferInfos;
std::vector<VkBufferView> bufferViews;
};
struct VkFence_Info {
VkDevice device;
bool external = false;
VkExportFenceCreateInfo exportFenceCreateInfo;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
int syncFd = -1;
#endif
};
struct VkDescriptorPool_Info {
std::unordered_set<VkDescriptorSet> allocedSets;
VkDescriptorPoolCreateFlags createFlags;
};
struct VkDescriptorSet_Info {
VkDescriptorPool pool;
std::vector<bool> bindingIsImmutableSampler;
};
struct VkDescriptorSetLayout_Info {
std::vector<VkDescriptorSetLayoutBinding> bindings;
};
#define HANDLE_REGISTER_IMPL_IMPL(type) \
std::unordered_map<type, type##_Info> info_##type; \
void register_##type(type obj) { \
AutoLock lock(mLock); \
info_##type[obj] = type##_Info(); \
}
#define HANDLE_UNREGISTER_IMPL_IMPL(type) \
void unregister_##type(type obj) { \
AutoLock lock(mLock); \
info_##type.erase(obj); \
}
GOLDFISH_VK_LIST_HANDLE_TYPES(HANDLE_REGISTER_IMPL_IMPL)
GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(HANDLE_UNREGISTER_IMPL_IMPL)
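// Handle types whose info owns extra resources get explicit unregister_*
// definitions below instead of the trivial macro version, so that encoders,
// AHardwareBuffers, VMO handles, sync fds and sub-allocations are released.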
void unregister_VkInstance(VkInstance instance) {
AutoLock lock(mLock);
auto it = info_VkInstance.find(instance);
if (it == info_VkInstance.end()) return;
auto info = it->second;
info_VkInstance.erase(instance);
lock.unlock();
}
void unregister_VkDevice(VkDevice device) {
AutoLock lock(mLock);
auto it = info_VkDevice.find(device);
if (it == info_VkDevice.end()) return;
auto info = it->second;
info_VkDevice.erase(device);
lock.unlock();
}
void unregister_VkCommandBuffer(VkCommandBuffer commandBuffer) {
AutoLock lock(mLock);
auto it = info_VkCommandBuffer.find(commandBuffer);
if (it == info_VkCommandBuffer.end()) return;
auto& info = it->second;
auto lastUsedEncoder =
info.lastUsedEncoderPtr ?
*(info.lastUsedEncoderPtr) : nullptr;
if (lastUsedEncoder) {
lastUsedEncoder->unregisterCleanupCallback(commandBuffer);
delete info.lastUsedEncoderPtr;
}
info_VkCommandBuffer.erase(commandBuffer);
}
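// Memory teardown order: release any AHardwareBuffer, close any VMO handle,
// free the guest shadow buffer when the memory was neither host-virtualized
// nor direct-mapped, return direct-mapped sub-allocations to their block, and
// finally delete the address space block.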
void unregister_VkDeviceMemory(VkDeviceMemory mem) {
AutoLock lock(mLock);
auto it = info_VkDeviceMemory.find(mem);
if (it == info_VkDeviceMemory.end()) return;
auto& memInfo = it->second;
if (memInfo.ahw) {
AHardwareBuffer_release(memInfo.ahw);
}
if (memInfo.vmoHandle != ZX_HANDLE_INVALID) {
zx_handle_close(memInfo.vmoHandle);
}
if (memInfo.mappedPtr &&
!memInfo.virtualHostVisibleBacking &&
!memInfo.directMapped) {
aligned_buf_free(memInfo.mappedPtr);
}
if (memInfo.directMapped) {
subFreeHostMemory(&memInfo.subAlloc);
}
delete memInfo.goldfishAddressSpaceBlock;
info_VkDeviceMemory.erase(mem);
}
void unregister_VkImage(VkImage img) {
AutoLock lock(mLock);
auto it = info_VkImage.find(img);
if (it == info_VkImage.end()) return;
auto& imageInfo = it->second;
info_VkImage.erase(img);
}
void unregister_VkBuffer(VkBuffer buf) {
AutoLock lock(mLock);
auto it = info_VkBuffer.find(buf);
if (it == info_VkBuffer.end()) return;
info_VkBuffer.erase(buf);
}
void unregister_VkSemaphore(VkSemaphore sem) {
AutoLock lock(mLock);
auto it = info_VkSemaphore.find(sem);
if (it == info_VkSemaphore.end()) return;
auto& semInfo = it->second;
if (semInfo.eventHandle != ZX_HANDLE_INVALID) {
zx_handle_close(semInfo.eventHandle);
}
info_VkSemaphore.erase(sem);
}
void unregister_VkDescriptorUpdateTemplate(VkDescriptorUpdateTemplate templ) {
AutoLock lock(mLock);
info_VkDescriptorUpdateTemplate.erase(templ);
}
void unregister_VkFence(VkFence fence) {
AutoLock lock(mLock);
auto it = info_VkFence.find(fence);
if (it == info_VkFence.end()) return;
auto& fenceInfo = it->second;
(void)fenceInfo;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
if (fenceInfo.syncFd >= 0) {
close(fenceInfo.syncFd);
}
#endif
info_VkFence.erase(fence);
}
void unregister_VkDescriptorSet_locked(VkDescriptorSet set) {
auto it = info_VkDescriptorSet.find(set);
if (it == info_VkDescriptorSet.end()) return;
const auto& setInfo = it->second;
auto poolIt = info_VkDescriptorPool.find(setInfo.pool);
info_VkDescriptorSet.erase(set);
if (poolIt == info_VkDescriptorPool.end()) return;
auto& poolInfo = poolIt->second;
poolInfo.allocedSets.erase(set);
}
void unregister_VkDescriptorSet(VkDescriptorSet set) {
AutoLock lock(mLock);
unregister_VkDescriptorSet_locked(set);
}
void unregister_VkDescriptorSetLayout(VkDescriptorSetLayout setLayout) {
AutoLock lock(mLock);
info_VkDescriptorSetLayout.erase(setLayout);
}
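// Records which pool each newly allocated descriptor set belongs to and
// precomputes, per binding, whether that binding uses immutable samplers;
// the flag is consulted when filtering descriptor writes below.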
void initDescriptorSetStateLocked(const VkDescriptorSetAllocateInfo* ci, const VkDescriptorSet* sets) {
auto it = info_VkDescriptorPool.find(ci->descriptorPool);
if (it == info_VkDescriptorPool.end()) return;
auto& info = it->second;
for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
info.allocedSets.insert(sets[i]);
auto setIt = info_VkDescriptorSet.find(sets[i]);
if (setIt == info_VkDescriptorSet.end()) continue;
auto& setInfo = setIt->second;
setInfo.pool = ci->descriptorPool;
VkDescriptorSetLayout setLayout = ci->pSetLayouts[i];
auto layoutIt = info_VkDescriptorSetLayout.find(setLayout);
if (layoutIt == info_VkDescriptorSetLayout.end()) continue;
const auto& layoutInfo = layoutIt->second;
for (size_t i = 0; i < layoutInfo.bindings.size(); ++i) {
// Bindings can be sparsely defined
const auto& binding = layoutInfo.bindings[i];
uint32_t bindingIndex = binding.binding;
if (setInfo.bindingIsImmutableSampler.size() <= bindingIndex) {
setInfo.bindingIsImmutableSampler.resize(bindingIndex + 1, false);
}
setInfo.bindingIsImmutableSampler[bindingIndex] =
binding.descriptorCount > 0 &&
(binding.descriptorType == VK_DESCRIPTOR_TYPE_SAMPLER ||
binding.descriptorType ==
VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) &&
binding.pImmutableSamplers;
}
}
}
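// For sampler / combined-image-sampler writes that target an immutable-sampler
// binding, copy the image infos into imageInfoArray with the sampler handle
// zeroed, since the immutable sampler is already baked into the set layout.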
VkWriteDescriptorSet
createImmutableSamplersFilteredWriteDescriptorSetLocked(
const VkWriteDescriptorSet* descriptorWrite,
std::vector<VkDescriptorImageInfo>* imageInfoArray) {
VkWriteDescriptorSet res = *descriptorWrite;
if (descriptorWrite->descriptorCount == 0) return res;
if (descriptorWrite->descriptorType != VK_DESCRIPTOR_TYPE_SAMPLER &&
descriptorWrite->descriptorType != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) return res;
VkDescriptorSet set = descriptorWrite->dstSet;
auto descSetIt = info_VkDescriptorSet.find(set);
if (descSetIt == info_VkDescriptorSet.end()) {
ALOGE("%s: error: descriptor set 0x%llx not found\n", __func__,
(unsigned long long)set);
return res;
}
const auto& descInfo = descSetIt->second;
uint32_t binding = descriptorWrite->dstBinding;
bool immutableSampler = descInfo.bindingIsImmutableSampler[binding];
if (!immutableSampler) return res;
for (uint32_t i = 0; i < descriptorWrite->descriptorCount; ++i) {
VkDescriptorImageInfo imageInfo = descriptorWrite->pImageInfo[i];
imageInfo.sampler = 0;
imageInfoArray->push_back(imageInfo);
}
res.pImageInfo = imageInfoArray->data();
return res;
}
// Also unregisters underlying descriptor sets
// and deletes their guest-side wrapped handles.
void clearDescriptorPoolLocked(VkDescriptorPool pool) {
auto it = info_VkDescriptorPool.find(pool);
if (it == info_VkDescriptorPool.end()) return;
std::vector<VkDescriptorSet> toClear;
for (auto set : it->second.allocedSets) {
toClear.push_back(set);
}
for (auto set : toClear) {
unregister_VkDescriptorSet_locked(set);
delete_goldfish_VkDescriptorSet(set);
}
}
void unregister_VkDescriptorPool(VkDescriptorPool pool) {
AutoLock lock(mLock);
clearDescriptorPoolLocked(pool);
info_VkDescriptorPool.erase(pool);
}
bool descriptorPoolSupportsIndividualFreeLocked(VkDescriptorPool pool) {
auto it = info_VkDescriptorPool.find(pool);
if (it == info_VkDescriptorPool.end()) return false;
const auto& info = it->second;
return VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT &
info.createFlags;
}
bool descriptorSetReallyAllocedFromPoolLocked(VkDescriptorSet set, VkDescriptorPool pool) {
auto it = info_VkDescriptorSet.find(set);
if (it == info_VkDescriptorSet.end()) return false;
const auto& info = it->second;
if (pool != info.pool) return false;
auto poolIt = info_VkDescriptorPool.find(info.pool);
if (poolIt == info_VkDescriptorPool.end()) return false;
const auto& poolInfo = poolIt->second;
if (poolInfo.allocedSets.find(set) == poolInfo.allocedSets.end()) return false;
return true;
}
static constexpr uint32_t kDefaultApiVersion = VK_MAKE_VERSION(1, 1, 0);
void setInstanceInfo(VkInstance instance,
uint32_t enabledExtensionCount,
const char* const* ppEnabledExtensionNames,
uint32_t apiVersion) {
AutoLock lock(mLock);
auto& info = info_VkInstance[instance];
info.highestApiVersion = apiVersion;
if (!ppEnabledExtensionNames) return;
for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
}
}
void setDeviceInfo(VkDevice device,
VkPhysicalDevice physdev,
VkPhysicalDeviceProperties props,
VkPhysicalDeviceMemoryProperties memProps,
uint32_t enabledExtensionCount,
const char* const* ppEnabledExtensionNames) {
AutoLock lock(mLock);
auto& info = info_VkDevice[device];
info.physdev = physdev;
info.props = props;
info.memProps = memProps;
initHostVisibleMemoryVirtualizationInfo(
physdev, &memProps,
mFeatureInfo.get(),
&mHostVisibleMemoryVirtInfo);
info.apiVersion = props.apiVersion;
if (!ppEnabledExtensionNames) return;
for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
}
}
void setDeviceMemoryInfo(VkDevice device,
VkDeviceMemory memory,
VkDeviceSize allocationSize,
VkDeviceSize mappedSize,
uint8_t* ptr,
uint32_t memoryTypeIndex,
AHardwareBuffer* ahw = nullptr,
zx_handle_t vmoHandle = ZX_HANDLE_INVALID) {
AutoLock lock(mLock);
auto& deviceInfo = info_VkDevice[device];
auto& info = info_VkDeviceMemory[memory];
info.allocationSize = allocationSize;
info.mappedSize = mappedSize;
info.mappedPtr = ptr;
info.memoryTypeIndex = memoryTypeIndex;
info.ahw = ahw;
info.vmoHandle = vmoHandle;
}
void setImageInfo(VkImage image,
VkDevice device,
const VkImageCreateInfo *pCreateInfo) {
AutoLock lock(mLock);
auto& info = info_VkImage[image];
info.device = device;
info.createInfo = *pCreateInfo;
}
bool isMemoryTypeHostVisible(VkDevice device, uint32_t typeIndex) const {
AutoLock lock(mLock);
const auto it = info_VkDevice.find(device);
if (it == info_VkDevice.end()) return false;
const auto& info = it->second;
return info.memProps.memoryTypes[typeIndex].propertyFlags &
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
}
uint8_t* getMappedPointer(VkDeviceMemory memory) {
AutoLock lock(mLock);
const auto it = info_VkDeviceMemory.find(memory);
if (it == info_VkDeviceMemory.end()) return nullptr;
const auto& info = it->second;
return info.mappedPtr;
}
VkDeviceSize getMappedSize(VkDeviceMemory memory) {
AutoLock lock(mLock);
const auto it = info_VkDeviceMemory.find(memory);
if (it == info_VkDeviceMemory.end()) return 0;
const auto& info = it->second;
return info.mappedSize;
}
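// Rounds basicSize up to a whole number of nonCoherentAtomSize atoms, e.g.
// with nonCoherentAtomSize = 64 a 100-byte range is extended to 128 bytes.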
VkDeviceSize getNonCoherentExtendedSize(VkDevice device, VkDeviceSize basicSize) const {
AutoLock lock(mLock);
const auto it = info_VkDevice.find(device);
if (it == info_VkDevice.end()) return basicSize;
const auto& info = it->second;
VkDeviceSize nonCoherentAtomSize =
info.props.limits.nonCoherentAtomSize;
VkDeviceSize atoms =
(basicSize + nonCoherentAtomSize - 1) / nonCoherentAtomSize;
return atoms * nonCoherentAtomSize;
}
bool isValidMemoryRange(const VkMappedMemoryRange& range) const {
AutoLock lock(mLock);
const auto it = info_VkDeviceMemory.find(range.memory);
if (it == info_VkDeviceMemory.end()) return false;
const auto& info = it->second;
if (!info.mappedPtr) return false;
VkDeviceSize offset = range.offset;
VkDeviceSize size = range.size;
if (size == VK_WHOLE_SIZE) {
return offset <= info.mappedSize;
}
return offset + size <= info.mappedSize;
}
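// One-time feature setup from the emulator: cache EmulatorFeatureInfo, create
// the address space block provider when direct mapping is available, bind the
// Fuchsia control and sysmem channels, translate feature flags into stream
// feature bits, and open a virtio-gpu render node when virtio-gpu-next is
// present.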
void setupFeatures(const EmulatorFeatureInfo* features) {
if (!features || mFeatureInfo) return;
mFeatureInfo.reset(new EmulatorFeatureInfo);
*mFeatureInfo = *features;
if (mFeatureInfo->hasDirectMem) {
mGoldfishAddressSpaceBlockProvider.reset(
new GoldfishAddressSpaceBlockProvider(
GoldfishAddressSpaceSubdeviceType::NoSubdevice));
}
#ifdef VK_USE_PLATFORM_FUCHSIA
if (mFeatureInfo->hasVulkan) {
zx::channel channel(GetConnectToServiceFunction()("/dev/class/goldfish-control/000"));
if (!channel) {
ALOGE("failed to open control device");
abort();
}
mControlDevice.Bind(std::move(channel));
zx::channel sysmem_channel(GetConnectToServiceFunction()("/svc/fuchsia.sysmem.Allocator"));
if (!sysmem_channel) {
ALOGE("failed to open sysmem connection");
}
mSysmemAllocator.Bind(std::move(sysmem_channel));
}
#endif
if (mFeatureInfo->hasVulkanNullOptionalStrings) {
mStreamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
}
if (mFeatureInfo->hasVulkanIgnoredHandles) {
mStreamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
}
if (mFeatureInfo->hasVulkanShaderFloat16Int8) {
mStreamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
}
#if !defined(HOST_BUILD) && defined(VK_USE_PLATFORM_ANDROID_KHR)
if (mFeatureInfo->hasVirtioGpuNext) {
ALOGD("%s: has virtio-gpu-next; create hostmem rendernode\n", __func__);
mRendernodeFd = drmOpenRender(128 /* RENDERNODE_MINOR */);
}
#endif
}
void setThreadingCallbacks(const ResourceTracker::ThreadingCallbacks& callbacks) {
mThreadingCallbacks = callbacks;
}
bool hostSupportsVulkan() const {
if (!mFeatureInfo) return false;
return mFeatureInfo->hasVulkan;
}
bool usingDirectMapping() const {
return mHostVisibleMemoryVirtInfo.virtualizationSupported;
}
uint32_t getStreamFeatures() const {
return mStreamFeatureBits;
}
bool supportsDeferredCommands() const {
if (!mFeatureInfo) return false;
return mFeatureInfo->hasDeferredVulkanCommands;
}
bool supportsCreateResourcesWithRequirements() const {
if (!mFeatureInfo) return false;
return mFeatureInfo->hasVulkanCreateResourcesWithRequirements;
}
int getHostInstanceExtensionIndex(const std::string& extName) const {
int i = 0;
for (const auto& prop : mHostInstanceExtensions) {
if (extName == std::string(prop.extensionName)) {
return i;
}
++i;
}
return -1;
}
int getHostDeviceExtensionIndex(const std::string& extName) const {
int i = 0;
for (const auto& prop : mHostDeviceExtensions) {
if (extName == std::string(prop.extensionName)) {
return i;
}
++i;
}
return -1;
}
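// When host-visible memory virtualization is enabled, guest VkDeviceMemory
// objects are sub-allocations of larger host-owned blocks. These transforms
// rewrite memory handles/offsets/sizes to the backing block and remap memory
// type indices and type bit masks between the guest-facing and host-side
// memory type layouts.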
void deviceMemoryTransform_tohost(
VkDeviceMemory* memory, uint32_t memoryCount,
VkDeviceSize* offset, uint32_t offsetCount,
VkDeviceSize* size, uint32_t sizeCount,
uint32_t* typeIndex, uint32_t typeIndexCount,
uint32_t* typeBits, uint32_t typeBitsCount) {
(void)memoryCount;
(void)offsetCount;
(void)sizeCount;
const auto& hostVirt =
mHostVisibleMemoryVirtInfo;
if (!hostVirt.virtualizationSupported) return;
if (memory) {
AutoLock lock (mLock);
for (uint32_t i = 0; i < memoryCount; ++i) {
VkDeviceMemory mem = memory[i];
auto it = info_VkDeviceMemory.find(mem);
if (it == info_VkDeviceMemory.end()) return;
const auto& info = it->second;
if (!info.directMapped) continue;
memory[i] = info.subAlloc.baseMemory;
if (offset) {
offset[i] = info.subAlloc.baseOffset + offset[i];
}
if (size) {
if (size[i] == VK_WHOLE_SIZE) {
size[i] = info.subAlloc.subMappedSize;
}
}
// TODO
(void)memory;
(void)offset;
(void)size;
}
}
for (uint32_t i = 0; i < typeIndexCount; ++i) {
typeIndex[i] =
hostVirt.memoryTypeIndexMappingToHost[typeIndex[i]];
}
for (uint32_t i = 0; i < typeBitsCount; ++i) {
uint32_t bits = 0;
for (uint32_t j = 0; j < VK_MAX_MEMORY_TYPES; ++j) {
bool guestHas = typeBits[i] & (1 << j);
uint32_t hostIndex =
hostVirt.memoryTypeIndexMappingToHost[j];
bits |= guestHas ? (1 << hostIndex) : 0;
}
typeBits[i] = bits;
}
}
void deviceMemoryTransform_fromhost(
VkDeviceMemory* memory, uint32_t memoryCount,
VkDeviceSize* offset, uint32_t offsetCount,
VkDeviceSize* size, uint32_t sizeCount,
uint32_t* typeIndex, uint32_t typeIndexCount,
uint32_t* typeBits, uint32_t typeBitsCount) {
(void)memoryCount;
(void)offsetCount;
(void)sizeCount;
const auto& hostVirt =
mHostVisibleMemoryVirtInfo;
if (!hostVirt.virtualizationSupported) return;
AutoLock lock (mLock);
for (uint32_t i = 0; i < memoryCount; ++i) {
// TODO
(void)memory;
(void)offset;
(void)size;
}
for (uint32_t i = 0; i < typeIndexCount; ++i) {
typeIndex[i] =
hostVirt.memoryTypeIndexMappingFromHost[typeIndex[i]];
}
for (uint32_t i = 0; i < typeBitsCount; ++i) {
uint32_t bits = 0;
for (uint32_t j = 0; j < VK_MAX_MEMORY_TYPES; ++j) {
bool hostHas = typeBits[i] & (1 << j);
uint32_t guestIndex =
hostVirt.memoryTypeIndexMappingFromHost[j];
bits |= hostHas ? (1 << guestIndex) : 0;
if (hostVirt.memoryTypeBitsShouldAdvertiseBoth[j]) {
bits |= hostHas ? (1 << j) : 0;
}
}
typeBits[i] = bits;
}
}
VkResult on_vkEnumerateInstanceExtensionProperties(
void* context,
VkResult,
const char*,
uint32_t* pPropertyCount,
VkExtensionProperties* pProperties) {
std::vector<const char*> allowedExtensionNames = {
"VK_KHR_get_physical_device_properties2",
"VK_KHR_sampler_ycbcr_conversion",
#ifdef VK_USE_PLATFORM_ANDROID_KHR
"VK_KHR_external_semaphore_capabilities",
"VK_KHR_external_memory_capabilities",
"VK_KHR_external_fence_capabilities",
#endif
// TODO:
// VK_KHR_external_memory_capabilities
};
VkEncoder* enc = (VkEncoder*)context;
// Only advertise a select set of extensions.
if (mHostInstanceExtensions.empty()) {
uint32_t hostPropCount = 0;
enc->vkEnumerateInstanceExtensionProperties(nullptr, &hostPropCount, nullptr);
mHostInstanceExtensions.resize(hostPropCount);
VkResult hostRes =
enc->vkEnumerateInstanceExtensionProperties(
nullptr, &hostPropCount, mHostInstanceExtensions.data());
if (hostRes != VK_SUCCESS) {
return hostRes;
}
}
std::vector<VkExtensionProperties> filteredExts;
for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
auto extIndex = getHostInstanceExtensionIndex(allowedExtensionNames[i]);
if (extIndex != -1) {
filteredExts.push_back(mHostInstanceExtensions[extIndex]);
}
}
VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_FUCHSIA
{ "VK_KHR_external_memory_capabilities", 1},
{ "VK_KHR_external_semaphore_capabilities", 1},
#endif
};
for (auto& anbExtProp: anbExtProps) {
filteredExts.push_back(anbExtProp);
}
// Spec:
//
// https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
//
// If pProperties is NULL, then the number of extension properties
// available is returned in pPropertyCount. Otherwise, pPropertyCount
// must point to a variable set by the user to the number of elements
// in the pProperties array, and on return the variable is overwritten
// with the number of structures actually written to pProperties. If
// pPropertyCount is less than the number of extension properties
// available, at most pPropertyCount structures will be written. If
// pPropertyCount is smaller than the number of extensions available,
// VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
// that not all the available properties were returned.
//
// pPropertyCount must be a valid pointer to a uint32_t value
if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;
if (!pProperties) {
*pPropertyCount = (uint32_t)filteredExts.size();
return VK_SUCCESS;
} else {
auto actualExtensionCount = (uint32_t)filteredExts.size();
if (*pPropertyCount > actualExtensionCount) {
*pPropertyCount = actualExtensionCount;
}
for (uint32_t i = 0; i < *pPropertyCount; ++i) {
pProperties[i] = filteredExts[i];
}
if (actualExtensionCount > *pPropertyCount) {
return VK_INCOMPLETE;
}
return VK_SUCCESS;
}
}
VkResult on_vkEnumerateDeviceExtensionProperties(
void* context,
VkResult,
VkPhysicalDevice physdev,
const char*,
uint32_t* pPropertyCount,
VkExtensionProperties* pProperties) {
std::vector<const char*> allowedExtensionNames = {
"VK_KHR_maintenance1",
"VK_KHR_maintenance2",
"VK_KHR_maintenance3",
"VK_KHR_get_memory_requirements2",
"VK_KHR_dedicated_allocation",
"VK_KHR_bind_memory2",
"VK_KHR_sampler_ycbcr_conversion",
"VK_KHR_shader_float16_int8",
"VK_AMD_gpu_shader_half_float",
"VK_NV_shader_subgroup_partitioned",
#ifdef VK_USE_PLATFORM_ANDROID_KHR
"VK_KHR_external_semaphore",
"VK_KHR_external_semaphore_fd",
// "VK_KHR_external_semaphore_win32", not exposed because it's translated to fd
"VK_KHR_external_memory",
"VK_KHR_external_fence",
"VK_KHR_external_fence_fd",
#endif
// TODO:
// VK_KHR_external_memory_capabilities
};
VkEncoder* enc = (VkEncoder*)context;
if (mHostDeviceExtensions.empty()) {
uint32_t hostPropCount = 0;
enc->vkEnumerateDeviceExtensionProperties(physdev, nullptr, &hostPropCount, nullptr);
mHostDeviceExtensions.resize(hostPropCount);
VkResult hostRes =
enc->vkEnumerateDeviceExtensionProperties(
physdev, nullptr, &hostPropCount, mHostDeviceExtensions.data());
if (hostRes != VK_SUCCESS) {
return hostRes;
}
}
bool hostHasWin32ExternalSemaphore =
getHostDeviceExtensionIndex(
"VK_KHR_external_semaphore_win32") != -1;
bool hostHasPosixExternalSemaphore =
getHostDeviceExtensionIndex(
"VK_KHR_external_semaphore_fd") != -1;
ALOGD("%s: host has ext semaphore? win32 %d posix %d\n", __func__,
hostHasWin32ExternalSemaphore,
hostHasPosixExternalSemaphore);
bool hostSupportsExternalSemaphore =
hostHasWin32ExternalSemaphore ||
hostHasPosixExternalSemaphore;
std::vector<VkExtensionProperties> filteredExts;
for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
auto extIndex = getHostDeviceExtensionIndex(allowedExtensionNames[i]);
if (extIndex != -1) {
filteredExts.push_back(mHostDeviceExtensions[extIndex]);
}
}
VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
{ "VK_ANDROID_native_buffer", 7 },
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
{ "VK_KHR_external_memory", 1 },
{ "VK_KHR_external_semaphore", 1 },
{ "VK_FUCHSIA_external_semaphore", 1 },
#endif
};
for (auto& anbExtProp: anbExtProps) {
filteredExts.push_back(anbExtProp);
}
#ifndef VK_USE_PLATFORM_FUCHSIA
if (hostSupportsExternalSemaphore &&
!hostHasPosixExternalSemaphore) {
filteredExts.push_back(
{ "VK_KHR_external_semaphore_fd", 1});
}
#endif
bool win32ExtMemAvailable =
getHostDeviceExtensionIndex(
"VK_KHR_external_memory_win32") != -1;
bool posixExtMemAvailable =
getHostDeviceExtensionIndex(
"VK_KHR_external_memory_fd") != -1;
bool moltenVkExtAvailable =
getHostInstanceExtensionIndex(
"VK_MVK_moltenvk") != -1;
bool hostHasExternalMemorySupport =
win32ExtMemAvailable || posixExtMemAvailable || moltenVkExtAvailable;
if (hostHasExternalMemorySupport) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
filteredExts.push_back({
"VK_ANDROID_external_memory_android_hardware_buffer", 7
});
filteredExts.push_back({
"VK_EXT_queue_family_foreign", 1
});
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
filteredExts.push_back({
"VK_FUCHSIA_external_memory", 1
});
filteredExts.push_back({
"VK_FUCHSIA_buffer_collection", 1
});
#endif
}
// Spec:
//
// https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateDeviceExtensionProperties.html
//
// pPropertyCount is a pointer to an integer related to the number of
// extension properties available or queried, and is treated in the
// same fashion as the
// vkEnumerateInstanceExtensionProperties::pPropertyCount parameter.
//
// https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
//
// If pProperties is NULL, then the number of extension properties
// available is returned in pPropertyCount. Otherwise, pPropertyCount
// must point to a variable set by the user to the number of elements
// in the pProperties array, and on return the variable is overwritten
// with the number of structures actually written to pProperties. If
// pPropertyCount is less than the number of extension properties
// available, at most pPropertyCount structures will be written. If
// pPropertyCount is smaller than the number of extensions available,
// VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
// that not all the available properties were returned.
//
// pPropertyCount must be a valid pointer to a uint32_t value
if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;
if (!pProperties) {
*pPropertyCount = (uint32_t)filteredExts.size();
return VK_SUCCESS;
} else {
auto actualExtensionCount = (uint32_t)filteredExts.size();
if (*pPropertyCount > actualExtensionCount) {
*pPropertyCount = actualExtensionCount;
}
for (uint32_t i = 0; i < *pPropertyCount; ++i) {
pProperties[i] = filteredExts[i];
}
if (actualExtensionCount > *pPropertyCount) {
return VK_INCOMPLETE;
}
return VK_SUCCESS;
}
}
VkResult on_vkEnumeratePhysicalDevices(
void* context, VkResult,
VkInstance instance, uint32_t* pPhysicalDeviceCount,
VkPhysicalDevice* pPhysicalDevices) {
VkEncoder* enc = (VkEncoder*)context;
if (!instance) return VK_ERROR_INITIALIZATION_FAILED;
if (!pPhysicalDeviceCount) return VK_ERROR_INITIALIZATION_FAILED;
AutoLock lock(mLock);
// When this function is called, we actually need to do two things:
// - Get full information about physical devices from the host,
// even if the guest did not ask for it
// - Serve the guest query according to the spec:
//
// https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html
auto it = info_VkInstance.find(instance);
if (it == info_VkInstance.end()) return VK_ERROR_INITIALIZATION_FAILED;
auto& info = it->second;
// Get the full host information here if it doesn't exist already.
if (info.physicalDevices.empty()) {
uint32_t hostPhysicalDeviceCount = 0;
lock.unlock();
VkResult countRes = enc->vkEnumeratePhysicalDevices(
instance, &hostPhysicalDeviceCount, nullptr);
lock.lock();
if (countRes != VK_SUCCESS) {
ALOGE("%s: failed: could not count host physical devices. "
"Error %d\n", __func__, countRes);
return countRes;
}
info.physicalDevices.resize(hostPhysicalDeviceCount);
lock.unlock();
VkResult enumRes = enc->vkEnumeratePhysicalDevices(
instance, &hostPhysicalDeviceCount, info.physicalDevices.data());
lock.lock();
if (enumRes != VK_SUCCESS) {
ALOGE("%s: failed: could not retrieve host physical devices. "
"Error %d\n", __func__, enumRes);
return enumRes;
}
}
// Serve the guest query according to the spec.
//
// https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html
//
// If pPhysicalDevices is NULL, then the number of physical devices
// available is returned in pPhysicalDeviceCount. Otherwise,
// pPhysicalDeviceCount must point to a variable set by the user to the
// number of elements in the pPhysicalDevices array, and on return the
// variable is overwritten with the number of handles actually written
// to pPhysicalDevices. If pPhysicalDeviceCount is less than the number
// of physical devices available, at most pPhysicalDeviceCount
// structures will be written. If pPhysicalDeviceCount is smaller than
// the number of physical devices available, VK_INCOMPLETE will be
// returned instead of VK_SUCCESS, to indicate that not all the
// available physical devices were returned.
if (!pPhysicalDevices) {
*pPhysicalDeviceCount = (uint32_t)info.physicalDevices.size();
return VK_SUCCESS;
} else {
uint32_t actualDeviceCount = (uint32_t)info.physicalDevices.size();
uint32_t toWrite = actualDeviceCount < *pPhysicalDeviceCount ? actualDeviceCount : *pPhysicalDeviceCount;
for (uint32_t i = 0; i < toWrite; ++i) {
pPhysicalDevices[i] = info.physicalDevices[i];
}
*pPhysicalDeviceCount = toWrite;
if (actualDeviceCount > *pPhysicalDeviceCount) {
return VK_INCOMPLETE;
}
return VK_SUCCESS;
}
}
void on_vkGetPhysicalDeviceProperties(
void*,
VkPhysicalDevice,
VkPhysicalDeviceProperties* pProperties) {
if (pProperties) {
pProperties->deviceType = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
}
}
void on_vkGetPhysicalDeviceProperties2(
void*,
VkPhysicalDevice,
VkPhysicalDeviceProperties2* pProperties) {
if (pProperties) {
pProperties->properties.deviceType =
VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
}
}
void on_vkGetPhysicalDeviceMemoryProperties(
void*,
VkPhysicalDevice physdev,
VkPhysicalDeviceMemoryProperties* out) {
initHostVisibleMemoryVirtualizationInfo(
physdev,
out,
mFeatureInfo.get(),
&mHostVisibleMemoryVirtInfo);
if (mHostVisibleMemoryVirtInfo.virtualizationSupported) {
*out = mHostVisibleMemoryVirtInfo.guestMemoryProperties;
}
}
void on_vkGetPhysicalDeviceMemoryProperties2(
void*,
VkPhysicalDevice physdev,
VkPhysicalDeviceMemoryProperties2* out) {
initHostVisibleMemoryVirtualizationInfo(
physdev,
&out->memoryProperties,
mFeatureInfo.get(),
&mHostVisibleMemoryVirtInfo);
if (mHostVisibleMemoryVirtInfo.virtualizationSupported) {
out->memoryProperties = mHostVisibleMemoryVirtInfo.guestMemoryProperties;
}
}
VkResult on_vkCreateInstance(
void* context,
VkResult input_result,
const VkInstanceCreateInfo* createInfo,
const VkAllocationCallbacks*,
VkInstance* pInstance) {
if (input_result != VK_SUCCESS) return input_result;
VkEncoder* enc = (VkEncoder*)context;
uint32_t apiVersion;
VkResult enumInstanceVersionRes =
enc->vkEnumerateInstanceVersion(&apiVersion);
setInstanceInfo(
*pInstance,
createInfo->enabledExtensionCount,
createInfo->ppEnabledExtensionNames,
apiVersion);
return input_result;
}
VkResult on_vkCreateDevice(
void* context,
VkResult input_result,
VkPhysicalDevice physicalDevice,
const VkDeviceCreateInfo* pCreateInfo,
const VkAllocationCallbacks*,
VkDevice* pDevice) {
if (input_result != VK_SUCCESS) return input_result;
VkEncoder* enc = (VkEncoder*)context;
VkPhysicalDeviceProperties props;
VkPhysicalDeviceMemoryProperties memProps;
enc->vkGetPhysicalDeviceProperties(physicalDevice, &props);
enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps);
setDeviceInfo(
*pDevice, physicalDevice, props, memProps,
pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames);
return input_result;
}
void on_vkDestroyDevice_pre(
void* context,
VkDevice device,
const VkAllocationCallbacks*) {
AutoLock lock(mLock);
auto it = info_VkDevice.find(device);
if (it == info_VkDevice.end()) return;
auto info = it->second;
lock.unlock();
VkEncoder* enc = (VkEncoder*)context;
bool freeMemorySyncSupported =
mFeatureInfo->hasVulkanFreeMemorySync;
for (uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) {
for (auto& block : info.hostMemBlocks[i]) {
destroyHostMemAlloc(
freeMemorySyncSupported,
enc, device, &block);
}
}
}
VkResult on_vkGetAndroidHardwareBufferPropertiesANDROID(
void*, VkResult,
VkDevice device,
const AHardwareBuffer* buffer,
VkAndroidHardwareBufferPropertiesANDROID* pProperties) {
auto grallocHelper =
mThreadingCallbacks.hostConnectionGetFunc()->grallocHelper();
return getAndroidHardwareBufferPropertiesANDROID(
grallocHelper,
&mHostVisibleMemoryVirtInfo,
device, buffer, pProperties);
}
VkResult on_vkGetMemoryAndroidHardwareBufferANDROID(
void*, VkResult,
VkDevice device,
const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
struct AHardwareBuffer** pBuffer) {
if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;
AutoLock lock(mLock);
auto deviceIt = info_VkDevice.find(device);
if (deviceIt == info_VkDevice.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);
if (memoryIt == info_VkDeviceMemory.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto& info = memoryIt->second;
VkResult queryRes =
getMemoryAndroidHardwareBufferANDROID(&info.ahw);
if (queryRes != VK_SUCCESS) return queryRes;
*pBuffer = info.ahw;
return queryRes;
}
#ifdef VK_USE_PLATFORM_FUCHSIA
VkResult on_vkGetMemoryZirconHandleFUCHSIA(
void*, VkResult,
VkDevice device,
const VkMemoryGetZirconHandleInfoFUCHSIA* pInfo,
uint32_t* pHandle) {
if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;
AutoLock lock(mLock);
auto deviceIt = info_VkDevice.find(device);
if (deviceIt == info_VkDevice.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);
if (memoryIt == info_VkDeviceMemory.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto& info = memoryIt->second;
if (info.vmoHandle == ZX_HANDLE_INVALID) {
ALOGE("%s: memory cannot be exported", __func__);
return VK_ERROR_INITIALIZATION_FAILED;
}
*pHandle = ZX_HANDLE_INVALID;
zx_handle_duplicate(info.vmoHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
return VK_SUCCESS;
}
VkResult on_vkGetMemoryZirconHandlePropertiesFUCHSIA(
void*, VkResult,
VkDevice device,
VkExternalMemoryHandleTypeFlagBits handleType,
uint32_t handle,
VkMemoryZirconHandlePropertiesFUCHSIA* pProperties) {
if (handleType != VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA) {
return VK_ERROR_INITIALIZATION_FAILED;
}
AutoLock lock(mLock);
auto deviceIt = info_VkDevice.find(device);
if (deviceIt == info_VkDevice.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto& info = deviceIt->second;
// Device local memory type supported.
pProperties->memoryTypeBits = 0;
for (uint32_t i = 0; i < info.memProps.memoryTypeCount; ++i) {
if (info.memProps.memoryTypes[i].propertyFlags &
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
pProperties->memoryTypeBits |= 1ull << i;
}
}
return VK_SUCCESS;
}
VkResult on_vkImportSemaphoreZirconHandleFUCHSIA(
void*, VkResult,
VkDevice device,
const VkImportSemaphoreZirconHandleInfoFUCHSIA* pInfo) {
if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;
AutoLock lock(mLock);
auto deviceIt = info_VkDevice.find(device);
if (deviceIt == info_VkDevice.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
if (semaphoreIt == info_VkSemaphore.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto& info = semaphoreIt->second;
if (info.eventHandle != ZX_HANDLE_INVALID) {
zx_handle_close(info.eventHandle);
}
info.eventHandle = pInfo->handle;
return VK_SUCCESS;
}
VkResult on_vkGetSemaphoreZirconHandleFUCHSIA(
void*, VkResult,
VkDevice device,
const VkSemaphoreGetZirconHandleInfoFUCHSIA* pInfo,
uint32_t* pHandle) {
if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;
AutoLock lock(mLock);
auto deviceIt = info_VkDevice.find(device);
if (deviceIt == info_VkDevice.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
if (semaphoreIt == info_VkSemaphore.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto& info = semaphoreIt->second;
if (info.eventHandle == ZX_HANDLE_INVALID) {
return VK_ERROR_INITIALIZATION_FAILED;
}
*pHandle = ZX_HANDLE_INVALID;
zx_handle_duplicate(info.eventHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
return VK_SUCCESS;
}
VkResult on_vkCreateBufferCollectionFUCHSIA(
void*, VkResult, VkDevice,
const VkBufferCollectionCreateInfoFUCHSIA* pInfo,
const VkAllocationCallbacks*,
VkBufferCollectionFUCHSIA* pCollection) {
fuchsia::sysmem::BufferCollectionTokenSyncPtr token;
if (pInfo->collectionToken) {
token.Bind(zx::channel(pInfo->collectionToken));
} else {
zx_status_t status = mSysmemAllocator->AllocateSharedCollection(token.NewRequest());
if (status != ZX_OK) {
ALOGE("AllocateSharedCollection failed: %d", status);
return VK_ERROR_INITIALIZATION_FAILED;
}
}
auto sysmem_collection = new fuchsia::sysmem::BufferCollectionSyncPtr;
zx_status_t status = mSysmemAllocator->BindSharedCollection(
std::move(token), sysmem_collection->NewRequest());
if (status != ZX_OK) {
ALOGE("BindSharedCollection failed: %d", status);
return VK_ERROR_INITIALIZATION_FAILED;
}
*pCollection = reinterpret_cast<VkBufferCollectionFUCHSIA>(sysmem_collection);
return VK_SUCCESS;
}
void on_vkDestroyBufferCollectionFUCHSIA(
void*, VkResult, VkDevice,
VkBufferCollectionFUCHSIA collection,
const VkAllocationCallbacks*) {
auto sysmem_collection = reinterpret_cast<fuchsia::sysmem::BufferCollectionSyncPtr*>(collection);
if (sysmem_collection->is_bound()) {
(*sysmem_collection)->Close();
}
delete sysmem_collection;
}
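// Baseline sysmem constraints for goldfish-backed collections: device-local
// only (GOLDFISH_DEVICE_LOCAL heap, inaccessible domain), with caller-supplied
// minimum size and buffer count.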
inline fuchsia::sysmem::BufferCollectionConstraints
defaultBufferCollectionConstraints(size_t min_size_bytes,
size_t buffer_count) {
fuchsia::sysmem::BufferCollectionConstraints constraints = {};
constraints.min_buffer_count = buffer_count;
constraints.has_buffer_memory_constraints = true;
fuchsia::sysmem::BufferMemoryConstraints& buffer_constraints =
constraints.buffer_memory_constraints;
buffer_constraints.min_size_bytes = min_size_bytes;
buffer_constraints.max_size_bytes = 0xffffffff;
buffer_constraints.physically_contiguous_required = false;
buffer_constraints.secure_required = false;
buffer_constraints.ram_domain_supported = false;
buffer_constraints.cpu_domain_supported = false;
buffer_constraints.inaccessible_domain_supported = true;
buffer_constraints.heap_permitted_count = 1;
buffer_constraints.heap_permitted[0] =
fuchsia::sysmem::HeapType::GOLDFISH_DEVICE_LOCAL;
return constraints;
}
uint32_t getBufferCollectionConstraintsVulkanImageUsage(
const VkImageCreateInfo* pImageInfo) {
uint32_t usage = 0u;
VkImageUsageFlags imageUsage = pImageInfo->usage;
#define SetUsageBit(USAGE) \
if (imageUsage & VK_IMAGE_USAGE_##USAGE##_BIT) { \
usage |= fuchsia::sysmem::VULKAN_IMAGE_USAGE_##USAGE; \
}
SetUsageBit(COLOR_ATTACHMENT);
SetUsageBit(TRANSFER_SRC);
SetUsageBit(TRANSFER_DST);
SetUsageBit(SAMPLED);
#undef SetUsageBit
return usage;
}
uint32_t getBufferCollectionConstraintsVulkanBufferUsage(
const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
uint32_t usage = 0u;
VkBufferUsageFlags bufferUsage =
pBufferConstraintsInfo->pBufferCreateInfo->usage;
#define SetUsageBit(USAGE) \
if (bufferUsage & VK_BUFFER_USAGE_##USAGE##_BIT) { \
usage |= fuchsia::sysmem::VULKAN_BUFFER_USAGE_##USAGE; \
}
SetUsageBit(TRANSFER_SRC);
SetUsageBit(TRANSFER_DST);
SetUsageBit(UNIFORM_TEXEL_BUFFER);
SetUsageBit(STORAGE_TEXEL_BUFFER);
SetUsageBit(UNIFORM_BUFFER);
SetUsageBit(STORAGE_BUFFER);
SetUsageBit(INDEX_BUFFER);
SetUsageBit(VERTEX_BUFFER);
SetUsageBit(INDIRECT_BUFFER);
#undef SetUsageBit
return usage;
}
VkResult setBufferCollectionConstraints(
fuchsia::sysmem::BufferCollectionSyncPtr* collection,
const VkImageCreateInfo* pImageInfo) {
if (pImageInfo == nullptr) {
ALOGE("setBufferCollectionConstraints: pImageInfo cannot be null.");
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
// TODO(liyl): Currently the size only works for RGBA8 and BGRA8 images.
// We should set the size based on its actual format.
fuchsia::sysmem::BufferCollectionConstraints constraints =
defaultBufferCollectionConstraints(
/* min_size_bytes */ pImageInfo->extent.width *
pImageInfo->extent.height * 4,
/* buffer_count */ 1u);
constraints.usage.vulkan =
getBufferCollectionConstraintsVulkanImageUsage(pImageInfo);
// Set image format constraints for VkImage allocation.
if (pImageInfo) {
std::vector<VkFormat> formats{pImageInfo->format};
if (pImageInfo->format == VK_FORMAT_UNDEFINED) {
// This is a hack to allow the client to say it supports every
// vulkan format the driver does. TODO(fxb/13247): Modify this
// function to take a list of vulkan formats to use.
formats = std::vector<VkFormat>{
VK_FORMAT_B8G8R8A8_UNORM,
VK_FORMAT_R8G8B8A8_UNORM,
};
}
constraints.image_format_constraints_count = formats.size();
uint32_t format_index = 0;
for (VkFormat format : formats) {
fuchsia::sysmem::ImageFormatConstraints& image_constraints =
constraints.image_format_constraints[format_index++];
switch (format) {
case VK_FORMAT_B8G8R8A8_SINT:
case VK_FORMAT_B8G8R8A8_UNORM:
case VK_FORMAT_B8G8R8A8_SRGB:
case VK_FORMAT_B8G8R8A8_SNORM:
case VK_FORMAT_B8G8R8A8_SSCALED:
case VK_FORMAT_B8G8R8A8_USCALED:
image_constraints.pixel_format.type =
fuchsia::sysmem::PixelFormatType::BGRA32;
break;
case VK_FORMAT_R8G8B8A8_SINT:
case VK_FORMAT_R8G8B8A8_UNORM:
case VK_FORMAT_R8G8B8A8_SRGB:
case VK_FORMAT_R8G8B8A8_SNORM:
case VK_FORMAT_R8G8B8A8_SSCALED:
case VK_FORMAT_R8G8B8A8_USCALED:
image_constraints.pixel_format.type =
fuchsia::sysmem::PixelFormatType::R8G8B8A8;
break;
default:
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
image_constraints.color_spaces_count = 1;
image_constraints.color_space[0].type =
fuchsia::sysmem::ColorSpaceType::SRGB;
image_constraints.min_coded_width = pImageInfo->extent.width;
image_constraints.max_coded_width = 0xfffffff;
image_constraints.min_coded_height = pImageInfo->extent.height;
image_constraints.max_coded_height = 0xffffffff;
image_constraints.min_bytes_per_row =
pImageInfo->extent.width * 4;
image_constraints.max_bytes_per_row = 0xffffffff;
image_constraints.max_coded_width_times_coded_height =
0xffffffff;
image_constraints.layers = 1;
image_constraints.coded_width_divisor = 1;
image_constraints.coded_height_divisor = 1;
image_constraints.bytes_per_row_divisor = 1;
image_constraints.start_offset_divisor = 1;
image_constraints.display_width_divisor = 1;
image_constraints.display_height_divisor = 1;
}
}
(*collection)->SetConstraints(true, constraints);
return VK_SUCCESS;
}
VkResult setBufferCollectionBufferConstraints(
fuchsia::sysmem::BufferCollectionSyncPtr* collection,
const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
if (pBufferConstraintsInfo == nullptr) {
ALOGE(
"setBufferCollectionBufferConstraints: "
"pBufferConstraintsInfo cannot be null.");
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
fuchsia::sysmem::BufferCollectionConstraints constraints =
defaultBufferCollectionConstraints(
/* min_size_bytes */ pBufferConstraintsInfo->pBufferCreateInfo
->size,
/* buffer_count */ pBufferConstraintsInfo->minCount);
constraints.usage.vulkan =
getBufferCollectionConstraintsVulkanBufferUsage(
pBufferConstraintsInfo);
(*collection)->SetConstraints(true, constraints);
return VK_SUCCESS;
}
VkResult on_vkSetBufferCollectionConstraintsFUCHSIA(
void*, VkResult, VkDevice,
VkBufferCollectionFUCHSIA collection,
const VkImageCreateInfo* pImageInfo) {
auto sysmem_collection =
reinterpret_cast<fuchsia::sysmem::BufferCollectionSyncPtr*>(collection);
return setBufferCollectionConstraints(sysmem_collection, pImageInfo);
}
VkResult on_vkSetBufferCollectionBufferConstraintsFUCHSIA(
void*,
VkResult,
VkDevice,
VkBufferCollectionFUCHSIA collection,
const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
auto sysmem_collection =
reinterpret_cast<fuchsia::sysmem::BufferCollectionSyncPtr*>(
collection);
return setBufferCollectionBufferConstraints(sysmem_collection,
pBufferConstraintsInfo);
}
VkResult on_vkGetBufferCollectionPropertiesFUCHSIA(
void*, VkResult,
VkDevice device,
VkBufferCollectionFUCHSIA collection,
VkBufferCollectionPropertiesFUCHSIA* pProperties) {
auto sysmem_collection = reinterpret_cast<fuchsia::sysmem::BufferCollectionSyncPtr*>(collection);
fuchsia::sysmem::BufferCollectionInfo_2 info;
zx_status_t status2;
zx_status_t status = (*sysmem_collection)->WaitForBuffersAllocated(&status2, &info);
if (status != ZX_OK || status2 != ZX_OK) {
ALOGE("Failed wait for allocation: %d %d", status, status2);
return VK_ERROR_INITIALIZATION_FAILED;
}
if (!info.settings.has_image_format_constraints) {
return VK_ERROR_INITIALIZATION_FAILED;
}
pProperties->count = info.buffer_count;
AutoLock lock(mLock);
auto deviceIt = info_VkDevice.find(device);
if (deviceIt == info_VkDevice.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto& deviceInfo = deviceIt->second;
// Device local memory type supported.
pProperties->memoryTypeBits = 0;
for (uint32_t i = 0; i < deviceInfo.memProps.memoryTypeCount; ++i) {
if (deviceInfo.memProps.memoryTypes[i].propertyFlags &
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT) {
pProperties->memoryTypeBits |= 1ull << i;
}
}
return VK_SUCCESS;
}
#endif
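// Host-visible memory is virtualized: guest allocations are sub-allocated out
// of large host-owned blocks. This helper returns an existing block that can
// fit the request, or allocates a new block (16 MB by default, larger if the
// request requires it), direct-maps it into the guest either via
// vkMapMemoryIntoAddressSpaceGOOGLE or via a virtio-gpu hostmem blob mmap'ed
// from the render node, and returns INVALID_HOST_MEM_BLOCK on failure.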
HostMemBlockIndex getOrAllocateHostMemBlockLocked(
HostMemBlocks& blocks,
const VkMemoryAllocateInfo* pAllocateInfo,
VkEncoder* enc,
VkDevice device,
const VkDevice_Info& deviceInfo) {
HostMemBlockIndex res = 0;
bool found = false;
while (!found) {
for (HostMemBlockIndex i = 0; i < blocks.size(); ++i) {
if (blocks[i].initialized &&
blocks[i].initResult == VK_SUCCESS &&
canSubAlloc(
blocks[i].subAlloc,
pAllocateInfo->allocationSize)) {
res = i;
found = true;
return res;
}
}
blocks.push_back({});
auto& hostMemAlloc = blocks.back();
// Uninitialized block; allocate on host.
static constexpr VkDeviceSize oneMb = 1048576;
static constexpr VkDeviceSize kDefaultHostMemBlockSize =
16 * oneMb; // 16 mb
VkDeviceSize roundedUpAllocSize =
oneMb * ((pAllocateInfo->allocationSize + oneMb - 1) / oneMb);
VkDeviceSize virtualHeapSize = VIRTUAL_HOST_VISIBLE_HEAP_SIZE;
VkDeviceSize blockSizeNeeded =
std::max(roundedUpAllocSize,
std::min(virtualHeapSize,
kDefaultHostMemBlockSize));
VkMemoryAllocateInfo allocInfoForHost = *pAllocateInfo;
allocInfoForHost.allocationSize = blockSizeNeeded;
// TODO: Support dedicated/external host visible allocation
allocInfoForHost.pNext = nullptr;
mLock.unlock();
VkResult host_res =
enc->vkAllocateMemory(
device,
&allocInfoForHost,
nullptr,
&hostMemAlloc.memory);
mLock.lock();
if (host_res != VK_SUCCESS) {
ALOGE("Could not allocate backing for virtual host visible memory: %d",
host_res);
hostMemAlloc.initialized = true;
hostMemAlloc.initResult = host_res;
return INVALID_HOST_MEM_BLOCK;
}
auto& hostMemInfo = info_VkDeviceMemory[hostMemAlloc.memory];
hostMemInfo.allocationSize = allocInfoForHost.allocationSize;
VkDeviceSize nonCoherentAtomSize =
deviceInfo.props.limits.nonCoherentAtomSize;
hostMemInfo.mappedSize = hostMemInfo.allocationSize;
hostMemInfo.memoryTypeIndex =
pAllocateInfo->memoryTypeIndex;
hostMemAlloc.nonCoherentAtomSize = nonCoherentAtomSize;
uint64_t directMappedAddr = 0;
VkResult directMapResult = VK_SUCCESS;
if (mFeatureInfo->hasDirectMem) {
mLock.unlock();
directMapResult =
enc->vkMapMemoryIntoAddressSpaceGOOGLE(
device, hostMemAlloc.memory, &directMappedAddr);
mLock.lock();
} else if (mFeatureInfo->hasVirtioGpuNext) {
#if !defined(HOST_BUILD) && defined(VK_USE_PLATFORM_ANDROID_KHR)
uint64_t hvaSizeId[3];
mLock.unlock();
enc->vkGetMemoryHostAddressInfoGOOGLE(
device, hostMemAlloc.memory,
&hvaSizeId[0], &hvaSizeId[1], &hvaSizeId[2]);
ALOGD("%s: hvaOff, size: 0x%llx 0x%llx id: 0x%llx\n", __func__,
(unsigned long long)hvaSizeId[0],
(unsigned long long)hvaSizeId[1],
(unsigned long long)hvaSizeId[2]);
mLock.lock();
struct drm_virtgpu_resource_create_blob drm_rc_blob = { 0 };
drm_rc_blob.blob_mem = VIRTGPU_BLOB_MEM_HOST;
drm_rc_blob.blob_flags = VIRTGPU_BLOB_FLAG_MAPPABLE;
drm_rc_blob.blob_id = hvaSizeId[2];
drm_rc_blob.size = hvaSizeId[1];
int res = drmIoctl(
mRendernodeFd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &drm_rc_blob);
if (res) {
ALOGE("%s: Failed to resource create v2: sterror: %s errno: %d\n", __func__,
strerror(errno), errno);
abort();
}
struct drm_virtgpu_map map_info = {
.handle = drm_rc_blob.bo_handle,
};
res = drmIoctl(mRendernodeFd, DRM_IOCTL_VIRTGPU_MAP, &map_info);
if (res) {
ALOGE("%s: Failed to virtgpu map: sterror: %s errno: %d\n", __func__,
strerror(errno), errno);
abort();
}
directMappedAddr = (uint64_t)(uintptr_t)
mmap64(0, hvaSizeId[1], PROT_WRITE, MAP_SHARED, mRendernodeFd, map_info.offset);
if (directMappedAddr == (uint64_t)(uintptr_t)MAP_FAILED) {
ALOGE("%s: mmap of virtio gpu resource failed\n", __func__);
abort();
}
// add the host's page offset
directMappedAddr += (uint64_t)(uintptr_t)(hvaSizeId[0]) & (PAGE_SIZE - 1);
directMapResult = VK_SUCCESS;
#endif // VK_USE_PLATFORM_ANDROID_KHR
}
if (directMapResult != VK_SUCCESS) {
hostMemAlloc.initialized = true;
hostMemAlloc.initResult = directMapResult;
mLock.unlock();
enc->vkFreeMemory(device, hostMemAlloc.memory, nullptr);
mLock.lock();
return INVALID_HOST_MEM_BLOCK;
}
hostMemInfo.mappedPtr =
(uint8_t*)(uintptr_t)directMappedAddr;
hostMemInfo.virtualHostVisibleBacking = true;
VkResult hostMemAllocRes =
finishHostMemAllocInit(
enc,
device,
pAllocateInfo->memoryTypeIndex,
nonCoherentAtomSize,
hostMemInfo.allocationSize,
hostMemInfo.mappedSize,
hostMemInfo.mappedPtr,
&hostMemAlloc);
if (hostMemAllocRes != VK_SUCCESS) {
return INVALID_HOST_MEM_BLOCK;
}
}
// unreachable, but we need to make -Werror happy
return INVALID_HOST_MEM_BLOCK;
}
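// vkAllocateMemory is where external memory export/import is resolved: the
// pNext chain is scanned for export-allocate, AHardwareBuffer import, Fuchsia
// buffer collection import and VMO import structs, and the allocation info
// (finalAllocInfo) is rewritten accordingly; dedicated-allocation info is
// passed through only for non-external, non-host-visible-virtualized requests.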
VkResult on_vkAllocateMemory(
void* context,
VkResult input_result,
VkDevice device,
const VkMemoryAllocateInfo* pAllocateInfo,
const VkAllocationCallbacks* pAllocator,
VkDeviceMemory* pMemory) {
if (input_result != VK_SUCCESS) return input_result;
VkEncoder* enc = (VkEncoder*)context;
VkMemoryAllocateInfo finalAllocInfo = vk_make_orphan_copy(*pAllocateInfo);
vk_struct_chain_iterator structChainIter = vk_make_chain_iterator(&finalAllocInfo);
VkMemoryDedicatedAllocateInfo dedicatedAllocInfo;
VkImportColorBufferGOOGLE importCbInfo = {
VK_STRUCTURE_TYPE_IMPORT_COLOR_BUFFER_GOOGLE, 0,
};
VkImportBufferGOOGLE importBufferInfo = {
VK_STRUCTURE_TYPE_IMPORT_BUFFER_GOOGLE,
0,
};
// VkImportPhysicalAddressGOOGLE importPhysAddrInfo = {
// VK_STRUCTURE_TYPE_IMPORT_PHYSICAL_ADDRESS_GOOGLE, 0,
// };
const VkExportMemoryAllocateInfo* exportAllocateInfoPtr =
vk_find_struct<VkExportMemoryAllocateInfo>(pAllocateInfo);
const VkImportAndroidHardwareBufferInfoANDROID* importAhbInfoPtr =
vk_find_struct<VkImportAndroidHardwareBufferInfoANDROID>(pAllocateInfo);
const VkImportMemoryBufferCollectionFUCHSIA* importBufferCollectionInfoPtr =
vk_find_struct<VkImportMemoryBufferCollectionFUCHSIA>(pAllocateInfo);
const VkImportMemoryZirconHandleInfoFUCHSIA* importVmoInfoPtr =
vk_find_struct<VkImportMemoryZirconHandleInfoFUCHSIA>(pAllocateInfo);
const VkMemoryDedicatedAllocateInfo* dedicatedAllocInfoPtr =
vk_find_struct<VkMemoryDedicatedAllocateInfo>(pAllocateInfo);
bool shouldPassThroughDedicatedAllocInfo =
!exportAllocateInfoPtr &&
!importAhbInfoPtr &&
!importBufferCollectionInfoPtr &&
!importVmoInfoPtr &&
!isHostVisibleMemoryTypeIndexForGuest(
&mHostVisibleMemoryVirtInfo,
pAllocateInfo->memoryTypeIndex);
if (!exportAllocateInfoPtr &&
(importAhbInfoPtr || importBufferCollectionInfoPtr || importVmoInfoPtr) &&
dedicatedAllocInfoPtr &&
isHostVisibleMemoryTypeIndexForGuest(
&mHostVisibleMemoryVirtInfo,
pAllocateInfo->memoryTypeIndex)) {
ALOGE("FATAL: It is not yet supported to import-allocate "
"external memory that is both host visible and dedicated.");
abort();
}
if (shouldPassThroughDedicatedAllocInfo &&
dedicatedAllocInfoPtr) {
dedicatedAllocInfo = vk_make_orphan_copy(*dedicatedAllocInfoPtr);
vk_append_struct(&structChainIter, &dedicatedAllocInfo);
}
// State needed for import/export.
bool exportAhb = false;
bool exportVmo = false;
bool importAhb = false;
bool importBufferCollection = false;
bool importVmo = false;
(void)exportVmo;
// Even if we export allocate, the underlying operation for the host is
// always going to be an import operation. This is also how Intel's
// implementation works, and it is generally simpler: even for an export
// allocation we perform the AHardwareBuffer allocation on the guest side,
// at this layer, and then attach a new VkDeviceMemory to that
// AHardwareBuffer on the host via an "import" operation.
AHardwareBuffer* ahw = nullptr;
if (exportAllocateInfoPtr) {
exportAhb =
exportAllocateInfoPtr->handleTypes &
VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
exportVmo =
exportAllocateInfoPtr->handleTypes &
VK_EXTERNAL_MEMORY_HANDLE_TYPE_TEMP_ZIRCON_VMO_BIT_FUCHSIA;
} else if (importAhbInfoPtr) {
importAhb = true;
} else if (importBufferCollectionInfoPtr) {
importBufferCollection = true;
} else if (importVmoInfoPtr) {
importVmo = true;
}
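// For AHB export allocations, gather the dedicated image/buffer parameters
// (if any) and create the AHardwareBuffer on the guest side; it is attached
// to host memory below via the color buffer import struct.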
if (exportAhb) {
bool hasDedicatedImage = dedicatedAllocInfoPtr &&
(dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
bool hasDedicatedBuffer = dedicatedAllocInfoPtr &&
(dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
VkExtent3D imageExtent = { 0, 0, 0 };
uint32_t imageLayers = 0;
VkFormat imageFormat = VK_FORMAT_UNDEFINED;
VkImageUsageFlags imageUsage = 0;
VkImageCreateFlags imageCreateFlags = 0;
VkDeviceSize bufferSize = 0;
VkDeviceSize allocationInfoAllocSize =
finalAllocInfo.allocationSize;
if (hasDedicatedImage) {
AutoLock lock(mLock);
auto it = info_VkImage.find(
dedicatedAllocInfoPtr->image);
if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
const auto& info = it->second;
const auto& imgCi = info.createInfo;
imageExtent = imgCi.extent;
imageLayers = imgCi.arrayLayers;
imageFormat = imgCi.format;
imageUsage = imgCi.usage;
imageCreateFlags = imgCi.flags;
}
if (hasDedicatedBuffer) {
AutoLock lock(mLock);
auto it = info_VkBuffer.find(
dedicatedAllocInfoPtr->buffer);
if (it == info_VkBuffer.end()) return VK_ERROR_INITIALIZATION_FAILED;
const auto& info = it->second;
const auto& bufCi = info.createInfo;
bufferSize = bufCi.size;
}
VkResult ahbCreateRes =
createAndroidHardwareBuffer(
hasDedicatedImage,
hasDedicatedBuffer,
imageExtent,
imageLayers,
imageFormat,
imageUsage,
imageCreateFlags,
bufferSize,
allocationInfoAllocSize,
&ahw);
if (ahbCreateRes != VK_SUCCESS) {
return ahbCreateRes;
}
}
if (importAhb) {
ahw = importAhbInfoPtr->buffer;
// We still need to acquire the AHardwareBuffer.
importAndroidHardwareBuffer(
mThreadingCallbacks.hostConnectionGetFunc()->grallocHelper(),
importAhbInfoPtr, nullptr);
}
if (ahw) {
ALOGD("%s: Import AHardwareBuffer", __func__);
importCbInfo.colorBuffer =
mThreadingCallbacks.hostConnectionGetFunc()->grallocHelper()->
getHostHandle(AHardwareBuffer_getNativeHandle(ahw));
vk_append_struct(&structChainIter, &importCbInfo);
}
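// Fuchsia import paths: obtain the VMO either from an allocated sysmem
// buffer collection or directly from the Zircon handle info struct.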
zx_handle_t vmo_handle = ZX_HANDLE_INVALID;
if (importBufferCollection) {
#ifdef VK_USE_PLATFORM_FUCHSIA
auto collection = reinterpret_cast<fuchsia::sysmem::BufferCollectionSyncPtr*>(
importBufferCollectionInfoPtr->collection);
fuchsia::sysmem::BufferCollectionInfo_2 info;
zx_status_t status2;
zx_status_t status = (*collection)->WaitForBuffersAllocated(&status2, &info);
if (status != ZX_OK || status2 != ZX_OK) {
ALOGE("WaitForBuffersAllocated failed: %d %d", status);
return VK_ERROR_INITIALIZATION_FAILED;
}
uint32_t index = importBufferCollectionInfoPtr->index;
if (index >= info.buffer_count) {
ALOGE("Invalid buffer index: %u (buffer count %u)", index, info.buffer_count);
return VK_ERROR_INITIALIZATION_FAILED;
}
vmo_handle = info.buffers[index].vmo.release();
#endif
}
if (importVmo) {
vmo_handle = importVmoInfoPtr->handle;
}
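// Fuchsia export path: allocate a VMO through sysmem using constraints
// derived from the dedicated image or buffer, then register it with the
// goldfish control device as a color buffer or data buffer.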
#ifdef VK_USE_PLATFORM_FUCHSIA
if (exportVmo) {
bool hasDedicatedImage = dedicatedAllocInfoPtr &&
(dedicatedAllocInfoPtr->image != VK_NULL_HANDLE);
bool hasDedicatedBuffer =
dedicatedAllocInfoPtr &&
(dedicatedAllocInfoPtr->buffer != VK_NULL_HANDLE);
if (hasDedicatedImage && hasDedicatedBuffer) {
ALOGE(
"Invalid VkMemoryDedicatedAllocateInfo: at least one "
"of image and buffer must be VK_NULL_HANDLE.");
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
const VkImageCreateInfo* pImageCreateInfo = nullptr;
VkBufferConstraintsInfoFUCHSIA bufferConstraintsInfo = {
.sType =
VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CREATE_INFO_FUCHSIA,
.pNext = nullptr,
.pBufferCreateInfo = nullptr,
.requiredFormatFeatures = 0,
.minCount = 1,
};
const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo =
nullptr;
if (hasDedicatedImage) {
AutoLock lock(mLock);
auto it = info_VkImage.find(dedicatedAllocInfoPtr->image);
if (it == info_VkImage.end()) return VK_ERROR_INITIALIZATION_FAILED;
const auto& imageInfo = it->second;
pImageCreateInfo = &imageInfo.createInfo;
}
if (hasDedicatedBuffer) {
AutoLock lock(mLock);
auto it = info_VkBuffer.find(dedicatedAllocInfoPtr->buffer);
if (it == info_VkBuffer.end())
return VK_ERROR_INITIALIZATION_FAILED;
const auto& bufferInfo = it->second;
bufferConstraintsInfo.pBufferCreateInfo =
&bufferInfo.createInfo;
pBufferConstraintsInfo = &bufferConstraintsInfo;
}
hasDedicatedImage = hasDedicatedImage &&
getBufferCollectionConstraintsVulkanImageUsage(
pImageCreateInfo);
hasDedicatedBuffer =
hasDedicatedBuffer &&
getBufferCollectionConstraintsVulkanBufferUsage(
pBufferConstraintsInfo);
if (hasDedicatedImage || hasDedicatedBuffer) {
fuchsia::sysmem::BufferCollectionTokenSyncPtr token;
zx_status_t status = mSysmemAllocator->AllocateSharedCollection(
token.NewRequest());
if (status != ZX_OK) {
ALOGE("AllocateSharedCollection failed: %d", status);
abort();
}
fuchsia::sysmem::BufferCollectionSyncPtr collection;
status = mSysmemAllocator->BindSharedCollection(
std::move(token), collection.NewRequest());
if (status != ZX_OK) {
ALOGE("BindSharedCollection failed: %d", status);
abort();
}
if (hasDedicatedImage) {
VkResult res = setBufferCollectionConstraints(
&collection, pImageCreateInfo);
if (res != VK_SUCCESS) {
ALOGE("setBufferCollectionConstraints failed: %d", res);
abort();
}
}
if (hasDedicatedBuffer) {
VkResult res = setBufferCollectionBufferConstraints(
&collection, pBufferConstraintsInfo);
if (res != VK_SUCCESS) {
ALOGE("setBufferCollectionBufferConstraints failed: %d",
res);
abort();
}
}
fuchsia::sysmem::BufferCollectionInfo_2 info;
zx_status_t status2;
status = collection->WaitForBuffersAllocated(&status2, &info);
if (status == ZX_OK && status2 == ZX_OK) {
if (!info.buffer_count) {
ALOGE("WaitForBuffersAllocated returned invalid count: %d", status);
abort();
}
vmo_handle = info.buffers[0].vmo.release();
} else {
ALOGE("WaitForBuffersAllocated failed: %d %d", status, status2);
abort();
}
collection->Close();
zx::vmo vmo_copy;
status = zx_handle_duplicate(vmo_handle,
ZX_RIGHT_SAME_RIGHTS,
vmo_copy.reset_and_get_address());
if (status != ZX_OK) {
ALOGE("Failed to duplicate VMO: %d", status);
abort();
}
if (pImageCreateInfo) {
fuchsia::hardware::goldfish::ColorBufferFormatType format;
switch (pImageCreateInfo->format) {
case VK_FORMAT_B8G8R8A8_SINT:
case VK_FORMAT_B8G8R8A8_UNORM:
case VK_FORMAT_B8G8R8A8_SRGB:
case VK_FORMAT_B8G8R8A8_SNORM:
case VK_FORMAT_B8G8R8A8_SSCALED:
case VK_FORMAT_B8G8R8A8_USCALED:
format = fuchsia::hardware::goldfish::
ColorBufferFormatType::BGRA;
break;
case VK_FORMAT_R8G8B8A8_SINT:
case VK_FORMAT_R8G8B8A8_UNORM:
case VK_FORMAT_R8G8B8A8_SRGB:
case VK_FORMAT_R8G8B8A8_SNORM:
case VK_FORMAT_R8G8B8A8_SSCALED:
case VK_FORMAT_R8G8B8A8_USCALED:
format = fuchsia::hardware::goldfish::
ColorBufferFormatType::RGBA;
break;
default:
ALOGE("Unsupported format: %d",
pImageCreateInfo->format);
abort();
}
status = mControlDevice->CreateColorBuffer(
std::move(vmo_copy), pImageCreateInfo->extent.width,
pImageCreateInfo->extent.height, format, &status2);
if (status != ZX_OK || status2 != ZX_OK) {
ALOGE("CreateColorBuffer failed: %d:%d", status,
status2);
abort();
}
}
if (pBufferConstraintsInfo) {
status = mControlDevice->CreateBuffer(
std::move(vmo_copy),
pBufferConstraintsInfo->pBufferCreateInfo->size,
&status2);
if (status != ZX_OK || status2 != ZX_OK) {
ALOGE("CreateBuffer failed: %d:%d", status, status2);
abort();
}
}
}
}
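// If we ended up holding a VMO (imported or freshly exported), look up the
// host-side handle backing it and chain the corresponding buffer or color
// buffer import struct onto the allocation.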
if (vmo_handle != ZX_HANDLE_INVALID) {
zx::vmo vmo_copy;
zx_status_t status = zx_handle_duplicate(vmo_handle,
ZX_RIGHT_SAME_RIGHTS,
vmo_copy.reset_and_get_address());
if (status != ZX_OK) {
ALOGE("Failed to duplicate VMO: %d", status);
abort();
}
zx_status_t status2 = ZX_OK;
fuchsia::hardware::goldfish::BufferHandleType handle_type;
uint32_t buffer_handle;
status = mControlDevice->GetBufferHandle(std::move(vmo_copy),
&status2, &buffer_handle,
&handle_type);
if (status != ZX_OK || status2 != ZX_OK) {
ALOGE("GetBufferHandle failed: %d:%d", status, status2);
abort();
}
if (handle_type ==
fuchsia::hardware::goldfish::BufferHandleType::BUFFER) {
importBufferInfo.buffer = buffer_handle;
vk_append_struct(&structChainIter, &importBufferInfo);
} else {
importCbInfo.colorBuffer = buffer_handle;
vk_append_struct(&structChainIter, &importCbInfo);
}
}
#endif
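// Device-local memory (not virtualized host visible): pass the allocation
// straight through to the host and record it.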
if (!isHostVisibleMemoryTypeIndexForGuest(
&mHostVisibleMemoryVirtInfo,
finalAllocInfo.memoryTypeIndex)) {
input_result =
enc->vkAllocateMemory(
device, &finalAllocInfo, pAllocator, pMemory);
if (input_result != VK_SUCCESS) return input_result;
setDeviceMemoryInfo(
device, *pMemory,
finalAllocInfo.allocationSize,
0, nullptr,
finalAllocInfo.memoryTypeIndex,
ahw,
vmo_handle);
return VK_SUCCESS;
}
// Device-local memory dealing is over. What follows:
// host-visible memory.
if (ahw) {
ALOGE("%s: Host visible export/import allocation "
"of Android hardware buffers is not supported.",
__func__);
abort();
}
if (vmo_handle != ZX_HANDLE_INVALID) {
ALOGE("%s: Host visible export/import allocation "
"of VMO is not supported yet.",
__func__);
abort();
}
// Host visible memory, non external
bool directMappingSupported = usingDirectMapping();
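// Without direct mapping, allocate on the host and back vkMapMemory with a
// guest-side shadow buffer whose size is extended to a non-coherent-atom
// multiple.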
if (!directMappingSupported) {
input_result =
enc->vkAllocateMemory(
device, &finalAllocInfo, pAllocator, pMemory);
if (input_result != VK_SUCCESS) return input_result;
VkDeviceSize mappedSize =
getNonCoherentExtendedSize(device,
finalAllocInfo.allocationSize);
uint8_t* mappedPtr = (uint8_t*)aligned_buf_alloc(4096, mappedSize);
D("host visible alloc (non-direct): "
"size 0x%llx host ptr %p mapped size 0x%llx",
(unsigned long long)finalAllocInfo.allocationSize, mappedPtr,
(unsigned long long)mappedSize);
setDeviceMemoryInfo(
device, *pMemory,
finalAllocInfo.allocationSize,
mappedSize, mappedPtr,
finalAllocInfo.memoryTypeIndex);
return VK_SUCCESS;
}
// Host visible memory with direct mapping via
// VkImportPhysicalAddressGOOGLE
// if (importPhysAddr) {
// vkAllocateMemory(device, &finalAllocInfo, pAllocator, pMemory);
// host maps the host pointer to the guest physical address
// TODO: the host side page offset of the
// host pointer needs to be returned somehow.
// }
// Host visible memory with direct mapping
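// Suballocate out of a large, directly mapped host-visible block kept per
// (device, memoryTypeIndex); a new block is allocated if none has room.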
AutoLock lock(mLock);
auto it = info_VkDevice.find(device);
if (it == info_VkDevice.end()) return VK_ERROR_DEVICE_LOST;
auto& deviceInfo = it->second;
auto& hostMemBlocksForTypeIndex =
deviceInfo.hostMemBlocks[finalAllocInfo.memoryTypeIndex];
HostMemBlockIndex blockIndex =
getOrAllocateHostMemBlockLocked(
hostMemBlocksForTypeIndex,
&finalAllocInfo,
enc,
device,
deviceInfo);
if (blockIndex == (HostMemBlockIndex) INVALID_HOST_MEM_BLOCK) {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
VkDeviceMemory_Info virtualMemInfo;
subAllocHostMemory(
&hostMemBlocksForTypeIndex[blockIndex],
&finalAllocInfo,
&virtualMemInfo.subAlloc);
virtualMemInfo.allocationSize = virtualMemInfo.subAlloc.subAllocSize;
virtualMemInfo.mappedSize = virtualMemInfo.subAlloc.subMappedSize;
virtualMemInfo.mappedPtr = virtualMemInfo.subAlloc.mappedPtr;
virtualMemInfo.memoryTypeIndex = finalAllocInfo.memoryTypeIndex;
virtualMemInfo.directMapped = true;
D("host visible alloc (direct, suballoc): "
"size 0x%llx ptr %p mapped size 0x%llx",
(unsigned long long)virtualMemInfo.allocationSize, virtualMemInfo.mappedPtr,
(unsigned long long)virtualMemInfo.mappedSize);
info_VkDeviceMemory[
virtualMemInfo.subAlloc.subMemory] = virtualMemInfo;
*pMemory = virtualMemInfo.subAlloc.subMemory;
return VK_SUCCESS;
}
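// Directly mapped suballocations are returned to their host block locally;
// all other memory is freed on the host.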
void on_vkFreeMemory(
void* context,
VkDevice device,
VkDeviceMemory memory,
const VkAllocationCallbacks* pAllocator) {
AutoLock lock(mLock);
auto it = info_VkDeviceMemory.find(memory);
if (it == info_VkDeviceMemory.end()) return;
auto& info = it->second;
if (!info.directMapped) {
lock.unlock();
VkEncoder* enc = (VkEncoder*)context;
enc->vkFreeMemory(device, memory, pAllocator);
return;
}
subFreeHostMemory(&info.subAlloc);
}
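// Mapping never needs a host round trip: the memory is already backed by a
// direct mapping or a guest shadow buffer, so just offset into it after a
// bounds check.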
VkResult on_vkMapMemory(
void*,
VkResult host_result,
VkDevice,
VkDeviceMemory memory,
VkDeviceSize offset,
VkDeviceSize size,
VkMemoryMapFlags,
void** ppData) {
if (host_result != VK_SUCCESS) return host_result;
AutoLock lock(mLock);
auto it = info_VkDeviceMemory.find(memory);
if (it == info_VkDeviceMemory.end()) return VK_ERROR_MEMORY_MAP_FAILED;
auto& info = it->second;
if (!info.mappedPtr) return VK_ERROR_MEMORY_MAP_FAILED;
if (size != VK_WHOLE_SIZE &&
(info.mappedPtr + offset + size > info.mappedPtr + info.allocationSize)) {
return VK_ERROR_MEMORY_MAP_FAILED;
}
*ppData = info.mappedPtr + offset;
return host_result;
}
void on_vkUnmapMemory(
void*,
VkDevice,
VkDeviceMemory) {
// no-op
}
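// Memory type bits for non-external resources pass through to the guest
// unchanged.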
uint32_t transformNonExternalResourceMemoryTypeBitsForGuest(
uint32_t hostBits) {
uint32_t res = 0;
for (uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) {
if (hostBits & (1 << i)) {
res |= (1 << i);
}
}
return res;
}
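// External resources cannot be backed by guest-virtualized host-visible
// memory types, so mask those type bits out.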
uint32_t transformExternalResourceMemoryTypeBitsForGuest(
uint32_t normalBits) {
uint32_t res = 0;
for (uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) {
if (normalBits & (1 << i) &&
!isHostVisibleMemoryTypeIndexForGuest(
&mHostVisibleMemoryVirtInfo, i)) {
res |= (1 << i);
}
}
return res;
}
void transformNonExternalResourceMemoryRequirementsForGuest(
VkMemoryRequirements* reqs) {
reqs->memoryTypeBits =
transformNonExternalResourceMemoryTypeBitsForGuest(
reqs->memoryTypeBits);
}
void transformExternalResourceMemoryRequirementsForGuest(
VkMemoryRequirements* reqs) {
reqs->memoryTypeBits =
transformExternalResourceMemoryTypeBitsForGuest(
reqs->memoryTypeBits);
}
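// External resources are always reported as preferring and requiring
// dedicated allocations.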
void transformExternalResourceMemoryDedicatedRequirementsForGuest(
VkMemoryDedicatedRequirements* dedicatedReqs) {
dedicatedReqs->prefersDedicatedAllocation = VK_TRUE;
dedicatedReqs->requiresDedicatedAllocation = VK_TRUE;
}
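// Rewrites an image's memory requirements for the guest based on its
// tracked create info; caller must hold mLock.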
void transformImageMemoryRequirementsForGuestLocked(
VkImage image,
VkMemoryRequirements* reqs) {
auto it = info_VkImage.find(image);