// Copyright (C) 2018 The Android Open Source Project
// Copyright (C) 2018 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ResourceTracker.h"
#include "Resources.h"
#include "CommandBufferStagingStream.h"
#include "DescriptorSetVirtualization.h"
#include "android/base/Optional.h"
#include "android/base/threads/AndroidWorkPool.h"
#include "android/base/Tracing.h"
#include "goldfish_vk_private_defs.h"
#include "../OpenglSystemCommon/EmulatorFeatureInfo.h"
#include "../OpenglSystemCommon/HostConnection.h"
/// Use installed headers or locally defined Fuchsia-specific bits
#ifdef VK_USE_PLATFORM_FUCHSIA
#include <cutils/native_handle.h>
#include <fidl/fuchsia.hardware.goldfish/cpp/wire.h>
#include <fidl/fuchsia.sysmem/cpp/wire.h>
#include <lib/zx/channel.h>
#include <lib/zx/vmo.h>
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/rights.h>
#include <zircon/syscalls.h>
#include <zircon/syscalls/object.h>
#include "services/service_connector.h"
#ifndef FUCHSIA_NO_TRACE
#include <lib/trace/event.h>
#endif
#define GET_STATUS_SAFE(result, member) \
((result).ok() ? ((result).Unwrap()->member) : ZX_OK)
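// Illustrative use of GET_STATUS_SAFE (hypothetical client/method names):
// it reads |member| from a successful FIDL result and falls back to ZX_OK
// when the call itself failed at the transport layer:
//   auto result = fidlClient->SomeMethod(...);
//   zx_status_t status = GET_STATUS_SAFE(result, res);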
#else
typedef uint32_t zx_handle_t;
typedef uint64_t zx_koid_t;
#define ZX_HANDLE_INVALID ((zx_handle_t)0)
#define ZX_KOID_INVALID ((zx_koid_t)0)
void zx_handle_close(zx_handle_t) { }
void zx_event_create(int, zx_handle_t*) { }
#endif // VK_USE_PLATFORM_FUCHSIA
/// Use installed headers or locally defined Android-specific bits
#ifdef VK_USE_PLATFORM_ANDROID_KHR
/// Goldfish sync is only used for AEMU -- should be replaced in virtio-gpu when possible
#include "../egl/goldfish_sync.h"
#include "AndroidHardwareBuffer.h"
#else
#include <android/hardware_buffer.h>
native_handle_t *AHardwareBuffer_getNativeHandle(AHardwareBuffer*) { return NULL; }
uint64_t getAndroidHardwareBufferUsageFromVkUsage(
const VkImageCreateFlags vk_create,
const VkImageUsageFlags vk_usage) {
return AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE;
}
VkResult importAndroidHardwareBuffer(
Gralloc *grallocHelper,
const VkImportAndroidHardwareBufferInfoANDROID* info,
struct AHardwareBuffer **importOut) {
return VK_SUCCESS;
}
VkResult createAndroidHardwareBuffer(
bool hasDedicatedImage,
bool hasDedicatedBuffer,
const VkExtent3D& imageExtent,
uint32_t imageLayers,
VkFormat imageFormat,
VkImageUsageFlags imageUsage,
VkImageCreateFlags imageCreateFlags,
VkDeviceSize bufferSize,
VkDeviceSize allocationInfoAllocSize,
struct AHardwareBuffer **out) {
return VK_SUCCESS;
}
namespace goldfish_vk {
struct HostVisibleMemoryVirtualizationInfo;
}
VkResult getAndroidHardwareBufferPropertiesANDROID(
Gralloc *grallocHelper,
const goldfish_vk::HostVisibleMemoryVirtualizationInfo*,
VkDevice,
const AHardwareBuffer*,
VkAndroidHardwareBufferPropertiesANDROID*) { return VK_SUCCESS; }
VkResult getMemoryAndroidHardwareBufferANDROID(struct AHardwareBuffer **) { return VK_SUCCESS; }
#endif // VK_USE_PLATFORM_ANDROID_KHR
#include "HostVisibleMemoryVirtualization.h"
#include "Resources.h"
#include "VkEncoder.h"
#include "android/base/AlignedBuf.h"
#include "android/base/synchronization/AndroidLock.h"
#include "goldfish_address_space.h"
#include "goldfish_vk_private_defs.h"
#include "vk_format_info.h"
#include "vk_struct_id.h"
#include "vk_util.h"
#include <set>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vndk/hardware_buffer.h>
#include <log/log.h>
#include <stdlib.h>
#include <sync/sync.h>
#if defined(__ANDROID__) || defined(__linux__)
#include <sys/mman.h>
#include <unistd.h>
#include <sys/syscall.h>
#ifdef HOST_BUILD
#include "android/utils/tempfile.h"
#else
#include "virtgpu_drm.h"
#include <xf86drm.h>
#endif
static inline int
inline_memfd_create(const char *name, unsigned int flags) {
#ifdef HOST_BUILD
TempFile* tmpFile = tempfile_create();
return open(tempfile_path(tmpFile), O_RDWR);
// TODO: Windows is not supposed to support VkSemaphoreGetFdInfoKHR
#else
return syscall(SYS_memfd_create, name, flags);
#endif
}
#define memfd_create inline_memfd_create
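// Usage sketch for the wrapper above (a minimal sketch; error handling is
// the caller's responsibility and the fd is closed like any other memfd):
//   int fd = memfd_create("vk-export", 0);
//   if (fd < 0) { /* report failure, e.g. VK_ERROR_OUT_OF_HOST_MEMORY */ }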
#endif
#define RESOURCE_TRACKER_DEBUG 0
#if RESOURCE_TRACKER_DEBUG
#undef D
#define D(fmt,...) ALOGD("%s: " fmt, __func__, ##__VA_ARGS__);
#else
#ifndef D
#define D(fmt,...)
#endif
#endif
using android::aligned_buf_alloc;
using android::aligned_buf_free;
using android::base::Optional;
using android::base::guest::AutoLock;
using android::base::guest::RecursiveLock;
using android::base::guest::Lock;
using android::base::guest::WorkPool;
namespace goldfish_vk {
#define MAKE_HANDLE_MAPPING_FOREACH(type_name, map_impl, map_to_u64_impl, map_from_u64_impl) \
void mapHandles_##type_name(type_name* handles, size_t count) override { \
for (size_t i = 0; i < count; ++i) { \
map_impl; \
} \
} \
void mapHandles_##type_name##_u64(const type_name* handles, uint64_t* handle_u64s, size_t count) override { \
for (size_t i = 0; i < count; ++i) { \
map_to_u64_impl; \
} \
} \
void mapHandles_u64_##type_name(const uint64_t* handle_u64s, type_name* handles, size_t count) override { \
for (size_t i = 0; i < count; ++i) { \
map_from_u64_impl; \
} \
}
#define DEFINE_RESOURCE_TRACKING_CLASS(class_name, impl) \
class class_name : public VulkanHandleMapping { \
public: \
virtual ~class_name() { } \
GOLDFISH_VK_LIST_HANDLE_TYPES(impl) \
};
#define CREATE_MAPPING_IMPL_FOR_TYPE(type_name) \
MAKE_HANDLE_MAPPING_FOREACH(type_name, \
handles[i] = new_from_host_##type_name(handles[i]); ResourceTracker::get()->register_##type_name(handles[i]);, \
handle_u64s[i] = (uint64_t)new_from_host_##type_name(handles[i]), \
handles[i] = (type_name)new_from_host_u64_##type_name(handle_u64s[i]); ResourceTracker::get()->register_##type_name(handles[i]);)
#define UNWRAP_MAPPING_IMPL_FOR_TYPE(type_name) \
MAKE_HANDLE_MAPPING_FOREACH(type_name, \
handles[i] = get_host_##type_name(handles[i]), \
handle_u64s[i] = (uint64_t)get_host_u64_##type_name(handles[i]), \
handles[i] = (type_name)get_host_##type_name((type_name)handle_u64s[i]))
#define DESTROY_MAPPING_IMPL_FOR_TYPE(type_name) \
MAKE_HANDLE_MAPPING_FOREACH(type_name, \
ResourceTracker::get()->unregister_##type_name(handles[i]); delete_goldfish_##type_name(handles[i]), \
(void)handle_u64s[i]; delete_goldfish_##type_name(handles[i]), \
(void)handles[i]; delete_goldfish_##type_name((type_name)handle_u64s[i]))
DEFINE_RESOURCE_TRACKING_CLASS(CreateMapping, CREATE_MAPPING_IMPL_FOR_TYPE)
DEFINE_RESOURCE_TRACKING_CLASS(UnwrapMapping, UNWRAP_MAPPING_IMPL_FOR_TYPE)
DEFINE_RESOURCE_TRACKING_CLASS(DestroyMapping, DESTROY_MAPPING_IMPL_FOR_TYPE)
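// For reference, CREATE_MAPPING_IMPL_FOR_TYPE(VkInstance) expands roughly to:
//   void mapHandles_VkInstance(VkInstance* handles, size_t count) override {
//       for (size_t i = 0; i < count; ++i) {
//           handles[i] = new_from_host_VkInstance(handles[i]);
//           ResourceTracker::get()->register_VkInstance(handles[i]);
//       }
//   }
//   // ...plus the mapHandles_VkInstance_u64 / mapHandles_u64_VkInstance
//   // variants used for 64-bit wire encoding.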
static uint32_t* sSeqnoPtr = nullptr;
// static
uint32_t ResourceTracker::streamFeatureBits = 0;
ResourceTracker::ThreadingCallbacks ResourceTracker::threadingCallbacks;
struct StagingInfo {
Lock mLock;
std::vector<CommandBufferStagingStream*> streams;
std::vector<VkEncoder*> encoders;
~StagingInfo() {
for (auto stream : streams) {
delete stream;
}
for (auto encoder : encoders) {
delete encoder;
}
}
void pushStaging(CommandBufferStagingStream* stream, VkEncoder* encoder) {
AutoLock<Lock> lock(mLock);
stream->reset();
streams.push_back(stream);
encoders.push_back(encoder);
}
void popStaging(CommandBufferStagingStream** streamOut, VkEncoder** encoderOut) {
AutoLock<Lock> lock(mLock);
CommandBufferStagingStream* stream;
VkEncoder* encoder;
if (streams.empty()) {
stream = new CommandBufferStagingStream;
encoder = new VkEncoder(stream);
} else {
stream = streams.back();
encoder = encoders.back();
streams.pop_back();
encoders.pop_back();
}
*streamOut = stream;
*encoderOut = encoder;
}
};
static StagingInfo sStaging;
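// Usage sketch for the staging pool above (actual call sites are in the
// command buffer paths below): borrow a stream/encoder pair, encode, then
// return the pair so the stream is reset and recycled:
//   CommandBufferStagingStream* stream = nullptr;
//   VkEncoder* encoder = nullptr;
//   sStaging.popStaging(&stream, &encoder);
//   // ... encode commands through |encoder| ...
//   sStaging.pushStaging(stream, encoder);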
class ResourceTracker::Impl {
public:
Impl() = default;
CreateMapping createMapping;
UnwrapMapping unwrapMapping;
DestroyMapping destroyMapping;
DefaultHandleMapping defaultMapping;
#define HANDLE_DEFINE_TRIVIAL_INFO_STRUCT(type) \
struct type##_Info { \
uint32_t unused; \
};
GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(HANDLE_DEFINE_TRIVIAL_INFO_STRUCT)
struct VkInstance_Info {
uint32_t highestApiVersion;
std::set<std::string> enabledExtensions;
// Fodder for vkEnumeratePhysicalDevices.
std::vector<VkPhysicalDevice> physicalDevices;
};
using HostMemBlocks = std::vector<HostMemAlloc>;
using HostMemBlockIndex = size_t;
#define INVALID_HOST_MEM_BLOCK (-1)
struct VkDevice_Info {
VkPhysicalDevice physdev;
VkPhysicalDeviceProperties props;
VkPhysicalDeviceMemoryProperties memProps;
std::vector<HostMemBlocks> hostMemBlocks { VK_MAX_MEMORY_TYPES };
uint32_t apiVersion;
std::set<std::string> enabledExtensions;
std::vector<std::pair<PFN_vkDeviceMemoryReportCallbackEXT, void *>> deviceMemoryReportCallbacks;
};
struct VirtioGpuHostmemResourceInfo {
uint32_t resourceId = 0;
int primeFd = -1;
};
struct VkDeviceMemory_Info {
VkDeviceSize allocationSize = 0;
VkDeviceSize mappedSize = 0;
uint8_t* mappedPtr = nullptr;
uint32_t memoryTypeIndex = 0;
bool virtualHostVisibleBacking = false;
bool directMapped = false;
GoldfishAddressSpaceBlock*
goldfishAddressSpaceBlock = nullptr;
VirtioGpuHostmemResourceInfo resInfo;
SubAlloc subAlloc;
AHardwareBuffer* ahw = nullptr;
bool imported = false;
zx_handle_t vmoHandle = ZX_HANDLE_INVALID;
};
struct VkCommandBuffer_Info {
uint32_t placeholder;
};
struct VkQueue_Info {
VkDevice device;
};
// custom guest-side structs for images/buffers because of AHardwareBuffer :((
struct VkImage_Info {
VkDevice device;
VkImageCreateInfo createInfo;
bool external = false;
VkExternalMemoryImageCreateInfo externalCreateInfo;
VkDeviceMemory currentBacking = VK_NULL_HANDLE;
VkDeviceSize currentBackingOffset = 0;
VkDeviceSize currentBackingSize = 0;
bool baseRequirementsKnown = false;
VkMemoryRequirements baseRequirements;
#ifdef VK_USE_PLATFORM_FUCHSIA
bool isSysmemBackedMemory = false;
#endif
};
struct VkBuffer_Info {
VkDevice device;
VkBufferCreateInfo createInfo;
bool external = false;
VkExternalMemoryBufferCreateInfo externalCreateInfo;
VkDeviceMemory currentBacking = VK_NULL_HANDLE;
VkDeviceSize currentBackingOffset = 0;
VkDeviceSize currentBackingSize = 0;
bool baseRequirementsKnown = false;
VkMemoryRequirements baseRequirements;
#ifdef VK_USE_PLATFORM_FUCHSIA
bool isSysmemBackedMemory = false;
#endif
};
struct VkSemaphore_Info {
VkDevice device;
zx_handle_t eventHandle = ZX_HANDLE_INVALID;
zx_koid_t eventKoid = ZX_KOID_INVALID;
int syncFd = -1;
};
struct VkDescriptorUpdateTemplate_Info {
uint32_t templateEntryCount = 0;
VkDescriptorUpdateTemplateEntry* templateEntries;
uint32_t imageInfoCount = 0;
uint32_t bufferInfoCount = 0;
uint32_t bufferViewCount = 0;
uint32_t* imageInfoIndices;
uint32_t* bufferInfoIndices;
uint32_t* bufferViewIndices;
VkDescriptorImageInfo* imageInfos;
VkDescriptorBufferInfo* bufferInfos;
VkBufferView* bufferViews;
};
struct VkFence_Info {
VkDevice device;
bool external = false;
VkExportFenceCreateInfo exportFenceCreateInfo;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
int syncFd = -1;
#endif
};
struct VkDescriptorPool_Info {
uint32_t unused;
};
struct VkDescriptorSet_Info {
uint32_t unused;
};
struct VkDescriptorSetLayout_Info {
uint32_t unused;
};
struct VkCommandPool_Info {
uint32_t unused;
};
struct VkSampler_Info {
uint32_t unused;
};
struct VkBufferCollectionFUCHSIA_Info {
#ifdef VK_USE_PLATFORM_FUCHSIA
android::base::Optional<
fuchsia_sysmem::wire::BufferCollectionConstraints>
constraints;
android::base::Optional<VkBufferCollectionPropertiesFUCHSIA> properties;
// The index of the corresponding createInfo for each image-format
// constraint in |constraints|.
std::vector<uint32_t> createInfoIndex;
#endif // VK_USE_PLATFORM_FUCHSIA
};
struct VkBufferCollectionFUCHSIAX_Info {
#ifdef VK_USE_PLATFORM_FUCHSIA
android::base::Optional<
fuchsia_sysmem::wire::BufferCollectionConstraints>
constraints;
android::base::Optional<VkBufferCollectionProperties2FUCHSIAX>
properties;
// The index of the corresponding createInfo for each image-format
// constraint in |constraints|.
std::vector<uint32_t> createInfoIndex;
#endif // VK_USE_PLATFORM_FUCHSIA
};
#define HANDLE_REGISTER_IMPL_IMPL(type) \
std::unordered_map<type, type##_Info> info_##type; \
void register_##type(type obj) { \
AutoLock<RecursiveLock> lock(mLock); \
info_##type[obj] = type##_Info(); \
}
#define HANDLE_UNREGISTER_IMPL_IMPL(type) \
void unregister_##type(type obj) { \
AutoLock<RecursiveLock> lock(mLock); \
info_##type.erase(obj); \
}
GOLDFISH_VK_LIST_HANDLE_TYPES(HANDLE_REGISTER_IMPL_IMPL)
GOLDFISH_VK_LIST_TRIVIAL_HANDLE_TYPES(HANDLE_UNREGISTER_IMPL_IMPL)
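// E.g., HANDLE_REGISTER_IMPL_IMPL(VkFence) yields the info_VkFence map plus:
//   void register_VkFence(VkFence obj) {
//       AutoLock<RecursiveLock> lock(mLock);
//       info_VkFence[obj] = VkFence_Info();
//   }
// Non-trivial handle types get hand-written unregister_* methods below.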
void unregister_VkInstance(VkInstance instance) {
AutoLock<RecursiveLock> lock(mLock);
auto it = info_VkInstance.find(instance);
if (it == info_VkInstance.end()) return;
auto info = it->second;
info_VkInstance.erase(instance);
lock.unlock();
}
void unregister_VkDevice(VkDevice device) {
AutoLock<RecursiveLock> lock(mLock);
auto it = info_VkDevice.find(device);
if (it == info_VkDevice.end()) return;
auto info = it->second;
info_VkDevice.erase(device);
lock.unlock();
}
void unregister_VkCommandPool(VkCommandPool pool) {
if (!pool) return;
clearCommandPool(pool);
AutoLock<RecursiveLock> lock(mLock);
info_VkCommandPool.erase(pool);
}
void unregister_VkSampler(VkSampler sampler) {
if (!sampler) return;
AutoLock<RecursiveLock> lock(mLock);
info_VkSampler.erase(sampler);
}
void unregister_VkCommandBuffer(VkCommandBuffer commandBuffer) {
resetCommandBufferStagingInfo(commandBuffer, true /* also reset primaries */, true /* also clear pending descriptor sets */);
struct goldfish_VkCommandBuffer* cb = as_goldfish_VkCommandBuffer(commandBuffer);
if (!cb) return;
if (cb->lastUsedEncoder) { cb->lastUsedEncoder->decRef(); }
eraseObjects(&cb->subObjects);
forAllObjects(cb->poolObjects, [cb](void* commandPool) {
struct goldfish_VkCommandPool* p = as_goldfish_VkCommandPool((VkCommandPool)commandPool);
eraseObject(&p->subObjects, (void*)cb);
});
eraseObjects(&cb->poolObjects);
if (cb->userPtr) {
CommandBufferPendingDescriptorSets* pendingSets = (CommandBufferPendingDescriptorSets*)cb->userPtr;
delete pendingSets;
}
AutoLock<RecursiveLock> lock(mLock);
info_VkCommandBuffer.erase(commandBuffer);
}
void unregister_VkQueue(VkQueue queue) {
struct goldfish_VkQueue* q = as_goldfish_VkQueue(queue);
if (!q) return;
if (q->lastUsedEncoder) { q->lastUsedEncoder->decRef(); }
AutoLock<RecursiveLock> lock(mLock);
info_VkQueue.erase(queue);
}
void unregister_VkDeviceMemory(VkDeviceMemory mem) {
AutoLock<RecursiveLock> lock(mLock);
auto it = info_VkDeviceMemory.find(mem);
if (it == info_VkDeviceMemory.end()) return;
auto& memInfo = it->second;
if (memInfo.ahw) {
AHardwareBuffer_release(memInfo.ahw);
}
if (memInfo.vmoHandle != ZX_HANDLE_INVALID) {
zx_handle_close(memInfo.vmoHandle);
}
if (memInfo.mappedPtr &&
!memInfo.virtualHostVisibleBacking &&
!memInfo.directMapped) {
aligned_buf_free(memInfo.mappedPtr);
}
if (memInfo.directMapped) {
ALOGE("%s: warning: direct mapped memory never goes to unregister!\n", __func__);
subFreeHostMemory(&memInfo.subAlloc);
}
delete memInfo.goldfishAddressSpaceBlock;
info_VkDeviceMemory.erase(mem);
}
void unregister_VkImage(VkImage img) {
AutoLock<RecursiveLock> lock(mLock);
auto it = info_VkImage.find(img);
if (it == info_VkImage.end()) return;
auto& imageInfo = it->second;
info_VkImage.erase(img);
}
void unregister_VkBuffer(VkBuffer buf) {
AutoLock<RecursiveLock> lock(mLock);
auto it = info_VkBuffer.find(buf);
if (it == info_VkBuffer.end()) return;
info_VkBuffer.erase(buf);
}
void unregister_VkSemaphore(VkSemaphore sem) {
AutoLock<RecursiveLock> lock(mLock);
auto it = info_VkSemaphore.find(sem);
if (it == info_VkSemaphore.end()) return;
auto& semInfo = it->second;
if (semInfo.eventHandle != ZX_HANDLE_INVALID) {
zx_handle_close(semInfo.eventHandle);
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
if (semInfo.syncFd >= 0) {
close(semInfo.syncFd);
}
#endif
info_VkSemaphore.erase(sem);
}
void unregister_VkDescriptorUpdateTemplate(VkDescriptorUpdateTemplate templ) {
AutoLock<RecursiveLock> lock(mLock);
auto it = info_VkDescriptorUpdateTemplate.find(templ);
if (it == info_VkDescriptorUpdateTemplate.end())
return;
auto& info = it->second;
if (info.templateEntryCount) delete [] info.templateEntries;
if (info.imageInfoCount) {
delete [] info.imageInfoIndices;
delete [] info.imageInfos;
}
if (info.bufferInfoCount) {
delete [] info.bufferInfoIndices;
delete [] info.bufferInfos;
}
if (info.bufferViewCount) {
delete [] info.bufferViewIndices;
delete [] info.bufferViews;
}
info_VkDescriptorUpdateTemplate.erase(it);
}
void unregister_VkFence(VkFence fence) {
AutoLock<RecursiveLock> lock(mLock);
auto it = info_VkFence.find(fence);
if (it == info_VkFence.end()) return;
auto& fenceInfo = it->second;
(void)fenceInfo;
#ifdef VK_USE_PLATFORM_ANDROID_KHR
if (fenceInfo.syncFd >= 0) {
close(fenceInfo.syncFd);
}
#endif
info_VkFence.erase(fence);
}
#ifdef VK_USE_PLATFORM_FUCHSIA
void unregister_VkBufferCollectionFUCHSIA(
VkBufferCollectionFUCHSIA collection) {
AutoLock<RecursiveLock> lock(mLock);
info_VkBufferCollectionFUCHSIA.erase(collection);
}
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
void unregister_VkBufferCollectionFUCHSIAX(
VkBufferCollectionFUCHSIAX collection) {
AutoLock<RecursiveLock> lock(mLock);
info_VkBufferCollectionFUCHSIAX.erase(collection);
}
#endif
void unregister_VkDescriptorSet_locked(VkDescriptorSet set) {
struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(set);
delete ds->reified;
info_VkDescriptorSet.erase(set);
}
void unregister_VkDescriptorSet(VkDescriptorSet set) {
if (!set) return;
AutoLock<RecursiveLock> lock(mLock);
unregister_VkDescriptorSet_locked(set);
}
void unregister_VkDescriptorSetLayout(VkDescriptorSetLayout setLayout) {
if (!setLayout) return;
AutoLock<RecursiveLock> lock(mLock);
delete as_goldfish_VkDescriptorSetLayout(setLayout)->layoutInfo;
info_VkDescriptorSetLayout.erase(setLayout);
}
VkResult allocAndInitializeDescriptorSets(
void* context,
VkDevice device,
const VkDescriptorSetAllocateInfo* ci,
VkDescriptorSet* sets) {
if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
// Using the pool ID's we collected earlier from the host
VkResult poolAllocResult = validateAndApplyVirtualDescriptorSetAllocation(ci, sets);
if (poolAllocResult != VK_SUCCESS) return poolAllocResult;
for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
register_VkDescriptorSet(sets[i]);
VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(sets[i])->reified->setLayout;
// Need to add ref to the set layout in the virtual case
// because the set itself might not be realized on host at the
// same time
struct goldfish_VkDescriptorSetLayout* dsl = as_goldfish_VkDescriptorSetLayout(setLayout);
++dsl->layoutInfo->refcount;
}
} else {
// Pass through and use host allocation
VkEncoder* enc = (VkEncoder*)context;
VkResult allocRes = enc->vkAllocateDescriptorSets(device, ci, sets, true /* do lock */);
if (allocRes != VK_SUCCESS) return allocRes;
for (uint32_t i = 0; i < ci->descriptorSetCount; ++i) {
applyDescriptorSetAllocation(ci->descriptorPool, ci->pSetLayouts[i]);
fillDescriptorSetInfoForPool(ci->descriptorPool, ci->pSetLayouts[i], sets[i]);
}
}
return VK_SUCCESS;
}
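// Per the Vulkan spec, the |sampler| member of VkDescriptorImageInfo is
// ignored for bindings created with immutable samplers, so it is scrubbed
// here instead of forwarding a possibly stale guest handle to the host.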
VkDescriptorImageInfo createImmutableSamplersFilteredImageInfo(
VkDescriptorType descType,
VkDescriptorSet descSet,
uint32_t binding,
const VkDescriptorImageInfo* pImageInfo) {
VkDescriptorImageInfo res = *pImageInfo;
if (descType != VK_DESCRIPTOR_TYPE_SAMPLER &&
descType != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER) return res;
bool immutableSampler = as_goldfish_VkDescriptorSet(descSet)->reified->bindingIsImmutableSampler[binding];
if (!immutableSampler) return res;
res.sampler = 0;
return res;
}
bool descriptorBindingIsImmutableSampler(
VkDescriptorSet dstSet,
uint32_t dstBinding) {
return as_goldfish_VkDescriptorSet(dstSet)->reified->bindingIsImmutableSampler[dstBinding];
}
VkDescriptorImageInfo
filterNonexistentSampler(
const VkDescriptorImageInfo& inputInfo) {
VkSampler sampler =
inputInfo.sampler;
VkDescriptorImageInfo res = inputInfo;
if (sampler) {
auto it = info_VkSampler.find(sampler);
bool samplerExists = it != info_VkSampler.end();
if (!samplerExists) res.sampler = 0;
}
return res;
}
void freeDescriptorSetsIfHostAllocated(VkEncoder* enc, VkDevice device, uint32_t descriptorSetCount, const VkDescriptorSet* sets) {
for (uint32_t i = 0; i < descriptorSetCount; ++i) {
struct goldfish_VkDescriptorSet* ds = as_goldfish_VkDescriptorSet(sets[i]);
if (ds->reified->allocationPending) {
unregister_VkDescriptorSet(sets[i]);
delete_goldfish_VkDescriptorSet(sets[i]);
} else {
enc->vkFreeDescriptorSets(device, ds->reified->pool, 1, &sets[i], false /* no lock */);
}
}
}
void clearDescriptorPoolAndUnregisterDescriptorSets(void* context, VkDevice device, VkDescriptorPool pool) {
std::vector<VkDescriptorSet> toClear =
clearDescriptorPool(pool, mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate);
for (auto set : toClear) {
if (mFeatureInfo->hasVulkanBatchedDescriptorSetUpdate) {
VkDescriptorSetLayout setLayout = as_goldfish_VkDescriptorSet(set)->reified->setLayout;
decDescriptorSetLayoutRef(context, device, setLayout, nullptr);
}
unregister_VkDescriptorSet(set);
delete_goldfish_VkDescriptorSet(set);
}
}
void unregister_VkDescriptorPool(VkDescriptorPool pool) {
if (!pool) return;
AutoLock<RecursiveLock> lock(mLock);
struct goldfish_VkDescriptorPool* dp = as_goldfish_VkDescriptorPool(pool);
delete dp->allocInfo;
info_VkDescriptorPool.erase(pool);
}
bool descriptorPoolSupportsIndividualFreeLocked(VkDescriptorPool pool) {
return as_goldfish_VkDescriptorPool(pool)->allocInfo->createFlags &
VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
}
static constexpr uint32_t kDefaultApiVersion = VK_MAKE_VERSION(1, 1, 0);
void setInstanceInfo(VkInstance instance,
uint32_t enabledExtensionCount,
const char* const* ppEnabledExtensionNames,
uint32_t apiVersion) {
AutoLock<RecursiveLock> lock(mLock);
auto& info = info_VkInstance[instance];
info.highestApiVersion = apiVersion;
if (!ppEnabledExtensionNames) return;
for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
}
}
void setDeviceInfo(VkDevice device,
VkPhysicalDevice physdev,
VkPhysicalDeviceProperties props,
VkPhysicalDeviceMemoryProperties memProps,
uint32_t enabledExtensionCount,
const char* const* ppEnabledExtensionNames,
const void* pNext) {
AutoLock<RecursiveLock> lock(mLock);
auto& info = info_VkDevice[device];
info.physdev = physdev;
info.props = props;
info.memProps = memProps;
initHostVisibleMemoryVirtualizationInfo(
physdev, &memProps,
mFeatureInfo.get(),
&mHostVisibleMemoryVirtInfo);
info.apiVersion = props.apiVersion;
const VkBaseInStructure *extensionCreateInfo =
reinterpret_cast<const VkBaseInStructure *>(pNext);
while(extensionCreateInfo) {
if(extensionCreateInfo->sType
== VK_STRUCTURE_TYPE_DEVICE_DEVICE_MEMORY_REPORT_CREATE_INFO_EXT) {
auto deviceMemoryReportCreateInfo =
reinterpret_cast<const VkDeviceDeviceMemoryReportCreateInfoEXT *>(
extensionCreateInfo);
if(deviceMemoryReportCreateInfo->pfnUserCallback != nullptr) {
info.deviceMemoryReportCallbacks.emplace_back(
deviceMemoryReportCreateInfo->pfnUserCallback,
deviceMemoryReportCreateInfo->pUserData);
}
}
extensionCreateInfo = extensionCreateInfo->pNext;
}
if (!ppEnabledExtensionNames) return;
for (uint32_t i = 0; i < enabledExtensionCount; ++i) {
info.enabledExtensions.insert(ppEnabledExtensionNames[i]);
}
}
void emitDeviceMemoryReport(VkDevice_Info info,
VkDeviceMemoryReportEventTypeEXT type,
uint64_t memoryObjectId,
VkDeviceSize size,
VkObjectType objectType,
uint64_t objectHandle,
uint32_t heapIndex = 0) {
if(info.deviceMemoryReportCallbacks.empty()) return;
const VkDeviceMemoryReportCallbackDataEXT callbackData = {
VK_STRUCTURE_TYPE_DEVICE_MEMORY_REPORT_CALLBACK_DATA_EXT, // sType
nullptr, // pNext
0, // flags
type, // type
memoryObjectId, // memoryObjectId
size, // size
objectType, // objectType
objectHandle, // objectHandle
heapIndex, // heapIndex
};
for(const auto &callback : info.deviceMemoryReportCallbacks) {
callback.first(&callbackData, callback.second);
}
}
void setDeviceMemoryInfo(VkDevice device,
VkDeviceMemory memory,
VkDeviceSize allocationSize,
VkDeviceSize mappedSize,
uint8_t* ptr,
uint32_t memoryTypeIndex,
AHardwareBuffer* ahw = nullptr,
bool imported = false,
zx_handle_t vmoHandle = ZX_HANDLE_INVALID) {
AutoLock<RecursiveLock> lock(mLock);
auto& deviceInfo = info_VkDevice[device];
auto& info = info_VkDeviceMemory[memory];
info.allocationSize = allocationSize;
info.mappedSize = mappedSize;
info.mappedPtr = ptr;
info.memoryTypeIndex = memoryTypeIndex;
info.ahw = ahw;
info.imported = imported;
info.vmoHandle = vmoHandle;
}
void setImageInfo(VkImage image,
VkDevice device,
const VkImageCreateInfo *pCreateInfo) {
AutoLock<RecursiveLock> lock(mLock);
auto& info = info_VkImage[image];
info.device = device;
info.createInfo = *pCreateInfo;
}
bool isMemoryTypeHostVisible(VkDevice device, uint32_t typeIndex) const {
AutoLock<RecursiveLock> lock(mLock);
const auto it = info_VkDevice.find(device);
if (it == info_VkDevice.end()) return false;
const auto& info = it->second;
return info.memProps.memoryTypes[typeIndex].propertyFlags &
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT;
}
uint8_t* getMappedPointer(VkDeviceMemory memory) {
AutoLock<RecursiveLock> lock(mLock);
const auto it = info_VkDeviceMemory.find(memory);
if (it == info_VkDeviceMemory.end()) return nullptr;
const auto& info = it->second;
return info.mappedPtr;
}
VkDeviceSize getMappedSize(VkDeviceMemory memory) {
AutoLock<RecursiveLock> lock(mLock);
const auto it = info_VkDeviceMemory.find(memory);
if (it == info_VkDeviceMemory.end()) return 0;
const auto& info = it->second;
return info.mappedSize;
}
VkDeviceSize getNonCoherentExtendedSize(VkDevice device, VkDeviceSize basicSize) const {
AutoLock<RecursiveLock> lock(mLock);
const auto it = info_VkDevice.find(device);
if (it == info_VkDevice.end()) return basicSize;
const auto& info = it->second;
VkDeviceSize nonCoherentAtomSize =
info.props.limits.nonCoherentAtomSize;
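// e.g. with nonCoherentAtomSize = 64, basicSize = 100 rounds up to
// ((100 + 63) / 64) * 64 = 128, so the range covers whole atoms.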
VkDeviceSize atoms =
(basicSize + nonCoherentAtomSize - 1) / nonCoherentAtomSize;
return atoms * nonCoherentAtomSize;
}
bool isValidMemoryRange(const VkMappedMemoryRange& range) const {
AutoLock<RecursiveLock> lock(mLock);
const auto it = info_VkDeviceMemory.find(range.memory);
if (it == info_VkDeviceMemory.end()) return false;
const auto& info = it->second;
if (!info.mappedPtr) return false;
VkDeviceSize offset = range.offset;
VkDeviceSize size = range.size;
if (size == VK_WHOLE_SIZE) {
return offset <= info.mappedSize;
}
return offset + size <= info.mappedSize;
}
void setupFeatures(const EmulatorFeatureInfo* features) {
if (!features || mFeatureInfo) return;
mFeatureInfo.reset(new EmulatorFeatureInfo);
*mFeatureInfo = *features;
if (mFeatureInfo->hasDirectMem) {
mGoldfishAddressSpaceBlockProvider.reset(
new GoldfishAddressSpaceBlockProvider(
GoldfishAddressSpaceSubdeviceType::NoSubdevice));
}
#ifdef VK_USE_PLATFORM_FUCHSIA
if (mFeatureInfo->hasVulkan) {
fidl::ClientEnd<fuchsia_hardware_goldfish::ControlDevice> channel{
zx::channel(GetConnectToServiceFunction()("/loader-gpu-devices/class/goldfish-control/000"))};
if (!channel) {
ALOGE("failed to open control device");
abort();
}
mControlDevice =
fidl::WireSyncClient<fuchsia_hardware_goldfish::ControlDevice>(
std::move(channel));
fidl::ClientEnd<fuchsia_sysmem::Allocator> sysmem_channel{
zx::channel(GetConnectToServiceFunction()("/svc/fuchsia.sysmem.Allocator"))};
if (!sysmem_channel) {
ALOGE("failed to open sysmem connection");
}
mSysmemAllocator =
fidl::WireSyncClient<fuchsia_sysmem::Allocator>(
std::move(sysmem_channel));
char name[ZX_MAX_NAME_LEN] = {};
zx_object_get_property(zx_process_self(), ZX_PROP_NAME, name, sizeof(name));
std::string client_name(name);
client_name += "-goldfish";
zx_info_handle_basic_t info;
zx_object_get_info(zx_process_self(), ZX_INFO_HANDLE_BASIC, &info, sizeof(info),
nullptr, nullptr);
mSysmemAllocator->SetDebugClientInfo(fidl::StringView::FromExternal(client_name),
info.koid);
}
#endif
if (mFeatureInfo->hasVulkanNullOptionalStrings) {
ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_NULL_OPTIONAL_STRINGS_BIT;
}
if (mFeatureInfo->hasVulkanIgnoredHandles) {
ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_IGNORED_HANDLES_BIT;
}
if (mFeatureInfo->hasVulkanShaderFloat16Int8) {
ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_SHADER_FLOAT16_INT8_BIT;
}
if (mFeatureInfo->hasVulkanQueueSubmitWithCommands) {
ResourceTracker::streamFeatureBits |= VULKAN_STREAM_FEATURE_QUEUE_SUBMIT_WITH_COMMANDS_BIT;
}
#if !defined(HOST_BUILD) && defined(VIRTIO_GPU)
if (mFeatureInfo->hasVirtioGpuNext) {
ALOGD("%s: has virtio-gpu-next; create auxiliary rendernode\n", __func__);
mRendernodeFd = drmOpenRender(128 /* RENDERNODE_MINOR */);
if (mRendernodeFd < 0) {
ALOGE("%s: error: could not init auxiliary rendernode\n", __func__);
} else {
ALOGD("%s: has virtio-gpu-next; aux context init\n", __func__);
struct drm_virtgpu_context_set_param drm_setparams[] = {
{
VIRTGPU_CONTEXT_PARAM_CAPSET_ID,
3, /* CAPSET_GFXSTREAM */
},
{
VIRTGPU_CONTEXT_PARAM_NUM_RINGS,
2,
},
};
struct drm_virtgpu_context_init drm_ctx_init = {
2,
0,
(uint64_t)(uintptr_t)drm_setparams,
};
int ctxInitret = drmIoctl(mRendernodeFd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &drm_ctx_init);
if (ctxInitret < 0) {
ALOGE("%s: error: could not ctx init. ret %d errno %d\n", __func__, ctxInitret, errno);
}
}
}
#endif
}
void setThreadingCallbacks(const ResourceTracker::ThreadingCallbacks& callbacks) {
ResourceTracker::threadingCallbacks = callbacks;
}
bool hostSupportsVulkan() const {
if (!mFeatureInfo) return false;
return mFeatureInfo->hasVulkan;
}
bool usingDirectMapping() const {
return mHostVisibleMemoryVirtInfo.virtualizationSupported;
}
uint32_t getStreamFeatures() const {
return ResourceTracker::streamFeatureBits;
}
bool supportsDeferredCommands() const {
if (!mFeatureInfo) return false;
return mFeatureInfo->hasDeferredVulkanCommands;
}
bool supportsAsyncQueueSubmit() const {
if (!mFeatureInfo) return false;
return mFeatureInfo->hasVulkanAsyncQueueSubmit;
}
bool supportsCreateResourcesWithRequirements() const {
if (!mFeatureInfo) return false;
return mFeatureInfo->hasVulkanCreateResourcesWithRequirements;
}
int getHostInstanceExtensionIndex(const std::string& extName) const {
int i = 0;
for (const auto& prop : mHostInstanceExtensions) {
if (extName == std::string(prop.extensionName)) {
return i;
}
++i;
}
return -1;
}
int getHostDeviceExtensionIndex(const std::string& extName) const {
int i = 0;
for (const auto& prop : mHostDeviceExtensions) {
if (extName == std::string(prop.extensionName)) {
return i;
}
++i;
}
return -1;
}
void deviceMemoryTransform_tohost(
VkDeviceMemory* memory, uint32_t memoryCount,
VkDeviceSize* offset, uint32_t offsetCount,
VkDeviceSize* size, uint32_t sizeCount,
uint32_t* typeIndex, uint32_t typeIndexCount,
uint32_t* typeBits, uint32_t typeBitsCount) {
(void)memoryCount;
(void)offsetCount;
(void)sizeCount;
const auto& hostVirt =
mHostVisibleMemoryVirtInfo;
if (!hostVirt.virtualizationSupported) return;
if (memory) {
AutoLock<RecursiveLock> lock (mLock);
for (uint32_t i = 0; i < memoryCount; ++i) {
VkDeviceMemory mem = memory[i];
auto it = info_VkDeviceMemory.find(mem);
if (it == info_VkDeviceMemory.end()) return;
const auto& info = it->second;
if (!info.directMapped) continue;
memory[i] = info.subAlloc.baseMemory;
if (offset) {
offset[i] = info.subAlloc.baseOffset + offset[i];
}
if (size) {
if (size[i] == VK_WHOLE_SIZE) {
size[i] = info.subAlloc.subMappedSize;
}
}
// TODO
(void)memory;
(void)offset;
(void)size;
}
}
for (uint32_t i = 0; i < typeIndexCount; ++i) {
typeIndex[i] =
hostVirt.memoryTypeIndexMappingToHost[typeIndex[i]];
}
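// Example: if guest memory type 0 maps to host type 2, a guest typeBits
// mask of 0b0001 becomes 0b0100 before being sent to the host.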
for (uint32_t i = 0; i < typeBitsCount; ++i) {
uint32_t bits = 0;
for (uint32_t j = 0; j < VK_MAX_MEMORY_TYPES; ++j) {
bool guestHas = typeBits[i] & (1 << j);
uint32_t hostIndex =
hostVirt.memoryTypeIndexMappingToHost[j];
bits |= guestHas ? (1 << hostIndex) : 0;
}
typeBits[i] = bits;
}
}
void deviceMemoryTransform_fromhost(
VkDeviceMemory* memory, uint32_t memoryCount,
VkDeviceSize* offset, uint32_t offsetCount,
VkDeviceSize* size, uint32_t sizeCount,
uint32_t* typeIndex, uint32_t typeIndexCount,
uint32_t* typeBits, uint32_t typeBitsCount) {
(void)memoryCount;
(void)offsetCount;
(void)sizeCount;
const auto& hostVirt =
mHostVisibleMemoryVirtInfo;
if (!hostVirt.virtualizationSupported) return;
AutoLock<RecursiveLock> lock (mLock);
for (uint32_t i = 0; i < memoryCount; ++i) {
// TODO
(void)memory;
(void)offset;
(void)size;
}
for (uint32_t i = 0; i < typeIndexCount; ++i) {
typeIndex[i] =
hostVirt.memoryTypeIndexMappingFromHost[typeIndex[i]];
}
for (uint32_t i = 0; i < typeBitsCount; ++i) {
uint32_t bits = 0;
for (uint32_t j = 0; j < VK_MAX_MEMORY_TYPES; ++j) {
bool hostHas = typeBits[i] & (1 << j);
uint32_t guestIndex =
hostVirt.memoryTypeIndexMappingFromHost[j];
bits |= hostHas ? (1 << guestIndex) : 0;
if (hostVirt.memoryTypeBitsShouldAdvertiseBoth[j]) {
bits |= hostHas ? (1 << j) : 0;
}
}
typeBits[i] = bits;
}
}
void transformImpl_VkExternalMemoryProperties_fromhost(
VkExternalMemoryProperties* pProperties,
uint32_t) {
VkExternalMemoryHandleTypeFlags supportedHandleType = 0u;
#ifdef VK_USE_PLATFORM_FUCHSIA
supportedHandleType |=
VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA;
#endif // VK_USE_PLATFORM_FUCHSIA
#ifdef VK_USE_PLATFORM_ANDROID_KHR
supportedHandleType |=
VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT |
VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID;
#endif // VK_USE_PLATFORM_ANDROID_KHR
if (supportedHandleType) {
pProperties->compatibleHandleTypes &= supportedHandleType;
pProperties->exportFromImportedHandleTypes &= supportedHandleType;
}
}
VkResult on_vkEnumerateInstanceExtensionProperties(
void* context,
VkResult,
const char*,
uint32_t* pPropertyCount,
VkExtensionProperties* pProperties) {
std::vector<const char*> allowedExtensionNames = {
"VK_KHR_get_physical_device_properties2",
"VK_KHR_sampler_ycbcr_conversion",
#ifdef VK_USE_PLATFORM_ANDROID_KHR
"VK_KHR_external_semaphore_capabilities",
"VK_KHR_external_memory_capabilities",
"VK_KHR_external_fence_capabilities",
#endif
};
VkEncoder* enc = (VkEncoder*)context;
// Only advertise a select set of extensions.
if (mHostInstanceExtensions.empty()) {
uint32_t hostPropCount = 0;
enc->vkEnumerateInstanceExtensionProperties(nullptr, &hostPropCount, nullptr, true /* do lock */);
mHostInstanceExtensions.resize(hostPropCount);
VkResult hostRes =
enc->vkEnumerateInstanceExtensionProperties(
nullptr, &hostPropCount, mHostInstanceExtensions.data(), true /* do lock */);
if (hostRes != VK_SUCCESS) {
return hostRes;
}
}
std::vector<VkExtensionProperties> filteredExts;
for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
auto extIndex = getHostInstanceExtensionIndex(allowedExtensionNames[i]);
if (extIndex != -1) {
filteredExts.push_back(mHostInstanceExtensions[extIndex]);
}
}
VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_FUCHSIA
{ "VK_KHR_external_memory_capabilities", 1},
{ "VK_KHR_external_semaphore_capabilities", 1},
#endif
};
for (auto& anbExtProp: anbExtProps) {
filteredExts.push_back(anbExtProp);
}
// Spec:
//
// https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
//
// If pProperties is NULL, then the number of extensions properties
// available is returned in pPropertyCount. Otherwise, pPropertyCount
// must point to a variable set by the user to the number of elements
// in the pProperties array, and on return the variable is overwritten
// with the number of structures actually written to pProperties. If
// pPropertyCount is less than the number of extension properties
// available, at most pPropertyCount structures will be written. If
// pPropertyCount is smaller than the number of extensions available,
// VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
// that not all the available properties were returned.
//
// pPropertyCount must be a valid pointer to a uint32_t value
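// The standard two-call idiom this implements, from the app's side:
//   uint32_t count = 0;
//   vkEnumerateInstanceExtensionProperties(nullptr, &count, nullptr);
//   std::vector<VkExtensionProperties> props(count);
//   vkEnumerateInstanceExtensionProperties(nullptr, &count, props.data());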
if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;
if (!pProperties) {
*pPropertyCount = (uint32_t)filteredExts.size();
return VK_SUCCESS;
} else {
auto actualExtensionCount = (uint32_t)filteredExts.size();
if (*pPropertyCount > actualExtensionCount) {
*pPropertyCount = actualExtensionCount;
}
for (uint32_t i = 0; i < *pPropertyCount; ++i) {
pProperties[i] = filteredExts[i];
}
if (actualExtensionCount > *pPropertyCount) {
return VK_INCOMPLETE;
}
return VK_SUCCESS;
}
}
VkResult on_vkEnumerateDeviceExtensionProperties(
void* context,
VkResult,
VkPhysicalDevice physdev,
const char*,
uint32_t* pPropertyCount,
VkExtensionProperties* pProperties) {
std::vector<const char*> allowedExtensionNames = {
"VK_KHR_vulkan_memory_model",
"VK_KHR_buffer_device_address",
"VK_KHR_maintenance1",
"VK_KHR_maintenance2",
"VK_KHR_maintenance3",
"VK_KHR_bind_memory2",
"VK_KHR_dedicated_allocation",
"VK_KHR_get_memory_requirements2",
"VK_KHR_image_format_list",
"VK_KHR_sampler_ycbcr_conversion",
"VK_KHR_shader_float16_int8",
// Timeline semaphores buggy in newer NVIDIA drivers
// (vkWaitSemaphoresKHR causes further vkCommandBuffer dispatches to deadlock)
#ifndef VK_USE_PLATFORM_ANDROID_KHR
"VK_KHR_timeline_semaphore",
#endif
"VK_AMD_gpu_shader_half_float",
"VK_NV_shader_subgroup_partitioned",
"VK_KHR_shader_subgroup_extended_types",
"VK_EXT_subgroup_size_control",
"VK_EXT_provoking_vertex",
"VK_EXT_line_rasterization",
"VK_KHR_shader_terminate_invocation",
"VK_EXT_transform_feedback",
"VK_EXT_primitive_topology_list_restart",
"VK_EXT_index_type_uint8",
"VK_EXT_load_store_op_none",
"VK_EXT_swapchain_colorspace",
"VK_EXT_image_robustness",
"VK_EXT_custom_border_color",
"VK_EXT_shader_stencil_export",
"VK_KHR_image_format_list",
"VK_KHR_incremental_present",
"VK_KHR_pipeline_executable_properties",
#ifdef VK_USE_PLATFORM_ANDROID_KHR
"VK_KHR_external_semaphore",
"VK_KHR_external_semaphore_fd",
// "VK_KHR_external_semaphore_win32", not exposed because it's translated to fd
"VK_KHR_external_memory",
"VK_KHR_external_fence",
"VK_KHR_external_fence_fd",
"VK_EXT_device_memory_report",
#endif
};
VkEncoder* enc = (VkEncoder*)context;
if (mHostDeviceExtensions.empty()) {
uint32_t hostPropCount = 0;
enc->vkEnumerateDeviceExtensionProperties(physdev, nullptr, &hostPropCount, nullptr, true /* do lock */);
mHostDeviceExtensions.resize(hostPropCount);
VkResult hostRes =
enc->vkEnumerateDeviceExtensionProperties(
physdev, nullptr, &hostPropCount, mHostDeviceExtensions.data(), true /* do lock */);
if (hostRes != VK_SUCCESS) {
return hostRes;
}
}
bool hostHasWin32ExternalSemaphore =
getHostDeviceExtensionIndex(
"VK_KHR_external_semaphore_win32") != -1;
bool hostHasPosixExternalSemaphore =
getHostDeviceExtensionIndex(
"VK_KHR_external_semaphore_fd") != -1;
ALOGD("%s: host has ext semaphore? win32 %d posix %d\n", __func__,
hostHasWin32ExternalSemaphore,
hostHasPosixExternalSemaphore);
bool hostSupportsExternalSemaphore =
hostHasWin32ExternalSemaphore ||
hostHasPosixExternalSemaphore;
std::vector<VkExtensionProperties> filteredExts;
for (size_t i = 0; i < allowedExtensionNames.size(); ++i) {
auto extIndex = getHostDeviceExtensionIndex(allowedExtensionNames[i]);
if (extIndex != -1) {
filteredExts.push_back(mHostDeviceExtensions[extIndex]);
}
}
VkExtensionProperties anbExtProps[] = {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
{ "VK_ANDROID_native_buffer", 7 },
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
{ "VK_KHR_external_memory", 1 },
{ "VK_KHR_external_semaphore", 1 },
{ "VK_FUCHSIA_external_semaphore", 1 },
#endif
};
for (auto& anbExtProp: anbExtProps) {
filteredExts.push_back(anbExtProp);
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
bool hostSupportsExternalFenceFd =
getHostDeviceExtensionIndex(
"VK_KHR_external_fence_fd") != -1;
if (!hostSupportsExternalFenceFd) {
filteredExts.push_back(
VkExtensionProperties { "VK_KHR_external_fence_fd", 1});
}
#endif
#ifndef VK_USE_PLATFORM_FUCHSIA
if (hostSupportsExternalSemaphore &&
!hostHasPosixExternalSemaphore) {
filteredExts.push_back(
VkExtensionProperties { "VK_KHR_external_semaphore_fd", 1});
}
#endif
bool win32ExtMemAvailable =
getHostDeviceExtensionIndex(
"VK_KHR_external_memory_win32") != -1;
bool posixExtMemAvailable =
getHostDeviceExtensionIndex(
"VK_KHR_external_memory_fd") != -1;
bool moltenVkExtAvailable =
getHostDeviceExtensionIndex(
"VK_MVK_moltenvk") != -1;
bool hostHasExternalMemorySupport =
win32ExtMemAvailable || posixExtMemAvailable || moltenVkExtAvailable;
if (hostHasExternalMemorySupport) {
#ifdef VK_USE_PLATFORM_ANDROID_KHR
filteredExts.push_back(
VkExtensionProperties {
"VK_ANDROID_external_memory_android_hardware_buffer", 7
});
filteredExts.push_back(
VkExtensionProperties { "VK_EXT_queue_family_foreign", 1 });
#endif
#ifdef VK_USE_PLATFORM_FUCHSIA
filteredExts.push_back(
VkExtensionProperties { "VK_FUCHSIA_external_memory", 1});
filteredExts.push_back(
VkExtensionProperties { "VK_FUCHSIA_buffer_collection", 1 });
filteredExts.push_back(
VkExtensionProperties { "VK_FUCHSIA_buffer_collection_x", 1});
#endif
}
// Spec:
//
// https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateDeviceExtensionProperties.html
//
// pPropertyCount is a pointer to an integer related to the number of
// extension properties available or queried, and is treated in the
// same fashion as the
// vkEnumerateInstanceExtensionProperties::pPropertyCount parameter.
//
// https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumerateInstanceExtensionProperties.html
//
// If pProperties is NULL, then the number of extensions properties
// available is returned in pPropertyCount. Otherwise, pPropertyCount
// must point to a variable set by the user to the number of elements
// in the pProperties array, and on return the variable is overwritten
// with the number of structures actually written to pProperties. If
// pPropertyCount is less than the number of extension properties
// available, at most pPropertyCount structures will be written. If
// pPropertyCount is smaller than the number of extensions available,
// VK_INCOMPLETE will be returned instead of VK_SUCCESS, to indicate
// that not all the available properties were returned.
//
// pPropertyCount must be a valid pointer to a uint32_t value
if (!pPropertyCount) return VK_ERROR_INITIALIZATION_FAILED;
if (!pProperties) {
*pPropertyCount = (uint32_t)filteredExts.size();
return VK_SUCCESS;
} else {
auto actualExtensionCount = (uint32_t)filteredExts.size();
if (*pPropertyCount > actualExtensionCount) {
*pPropertyCount = actualExtensionCount;
}
for (uint32_t i = 0; i < *pPropertyCount; ++i) {
pProperties[i] = filteredExts[i];
}
if (actualExtensionCount > *pPropertyCount) {
return VK_INCOMPLETE;
}
return VK_SUCCESS;
}
}
VkResult on_vkEnumeratePhysicalDevices(
void* context, VkResult,
VkInstance instance, uint32_t* pPhysicalDeviceCount,
VkPhysicalDevice* pPhysicalDevices) {
VkEncoder* enc = (VkEncoder*)context;
if (!instance) return VK_ERROR_INITIALIZATION_FAILED;
if (!pPhysicalDeviceCount) return VK_ERROR_INITIALIZATION_FAILED;
AutoLock<RecursiveLock> lock(mLock);
// When this function is called, we actually need to do two things:
// - Get full information about physical devices from the host,
// even if the guest did not ask for it
// - Serve the guest query according to the spec:
//
// https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html
auto it = info_VkInstance.find(instance);
if (it == info_VkInstance.end()) return VK_ERROR_INITIALIZATION_FAILED;
auto& info = it->second;
// Get the full host information here if it doesn't exist already.
if (info.physicalDevices.empty()) {
uint32_t hostPhysicalDeviceCount = 0;
lock.unlock();
VkResult countRes = enc->vkEnumeratePhysicalDevices(
instance, &hostPhysicalDeviceCount, nullptr, false /* no lock */);
lock.lock();
if (countRes != VK_SUCCESS) {
ALOGE("%s: failed: could not count host physical devices. "
"Error %d\n", __func__, countRes);
return countRes;
}
info.physicalDevices.resize(hostPhysicalDeviceCount);
lock.unlock();
VkResult enumRes = enc->vkEnumeratePhysicalDevices(
instance, &hostPhysicalDeviceCount, info.physicalDevices.data(), false /* no lock */);
lock.lock();
if (enumRes != VK_SUCCESS) {
ALOGE("%s: failed: could not retrieve host physical devices. "
"Error %d\n", __func__, enumRes);
return enumRes;
}
}
// Serve the guest query according to the spec.
//
// https://www.khronos.org/registry/vulkan/specs/1.1-extensions/man/html/vkEnumeratePhysicalDevices.html
//
// If pPhysicalDevices is NULL, then the number of physical devices
// available is returned in pPhysicalDeviceCount. Otherwise,
// pPhysicalDeviceCount must point to a variable set by the user to the
// number of elements in the pPhysicalDevices array, and on return the
// variable is overwritten with the number of handles actually written
// to pPhysicalDevices. If pPhysicalDeviceCount is less than the number
// of physical devices available, at most pPhysicalDeviceCount
// structures will be written. If pPhysicalDeviceCount is smaller than
// the number of physical devices available, VK_INCOMPLETE will be
// returned instead of VK_SUCCESS, to indicate that not all the
// available physical devices were returned.
if (!pPhysicalDevices) {
*pPhysicalDeviceCount = (uint32_t)info.physicalDevices.size();
return VK_SUCCESS;
} else {
uint32_t actualDeviceCount = (uint32_t)info.physicalDevices.size();
uint32_t toWrite = actualDeviceCount < *pPhysicalDeviceCount ? actualDeviceCount : *pPhysicalDeviceCount;
for (uint32_t i = 0; i < toWrite; ++i) {
pPhysicalDevices[i] = info.physicalDevices[i];
}
*pPhysicalDeviceCount = toWrite;
if (actualDeviceCount > *pPhysicalDeviceCount) {
return VK_INCOMPLETE;
}
return VK_SUCCESS;
}
}
void on_vkGetPhysicalDeviceProperties(
void*,
VkPhysicalDevice,
VkPhysicalDeviceProperties* pProperties) {
if (pProperties) {
pProperties->deviceType = VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
}
}
void on_vkGetPhysicalDeviceProperties2(
void*,
VkPhysicalDevice,
VkPhysicalDeviceProperties2* pProperties) {
if (pProperties) {
pProperties->properties.deviceType =
VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU;
VkPhysicalDeviceDeviceMemoryReportFeaturesEXT* memoryReportFeaturesEXT =
vk_find_struct<VkPhysicalDeviceDeviceMemoryReportFeaturesEXT>(pProperties);
if (memoryReportFeaturesEXT) {
memoryReportFeaturesEXT->deviceMemoryReport = VK_TRUE;
}
}
}
void on_vkGetPhysicalDeviceMemoryProperties(
void*,
VkPhysicalDevice physdev,
VkPhysicalDeviceMemoryProperties* out) {
initHostVisibleMemoryVirtualizationInfo(
physdev,
out,
mFeatureInfo.get(),
&mHostVisibleMemoryVirtInfo);
if (mHostVisibleMemoryVirtInfo.virtualizationSupported) {
*out = mHostVisibleMemoryVirtInfo.guestMemoryProperties;
}
}
void on_vkGetPhysicalDeviceMemoryProperties2(
void*,
VkPhysicalDevice physdev,
VkPhysicalDeviceMemoryProperties2* out) {
initHostVisibleMemoryVirtualizationInfo(
physdev,
&out->memoryProperties,
mFeatureInfo.get(),
&mHostVisibleMemoryVirtInfo);
if (mHostVisibleMemoryVirtInfo.virtualizationSupported) {
out->memoryProperties = mHostVisibleMemoryVirtInfo.guestMemoryProperties;
}
}
void on_vkGetDeviceQueue(void*,
VkDevice device,
uint32_t,
uint32_t,
VkQueue* pQueue) {
AutoLock<RecursiveLock> lock(mLock);
info_VkQueue[*pQueue].device = device;
}
void on_vkGetDeviceQueue2(void*,
VkDevice device,
const VkDeviceQueueInfo2*,
VkQueue* pQueue) {
AutoLock<RecursiveLock> lock(mLock);
info_VkQueue[*pQueue].device = device;
}
VkResult on_vkCreateInstance(
void* context,
VkResult input_result,
const VkInstanceCreateInfo* createInfo,
const VkAllocationCallbacks*,
VkInstance* pInstance) {
if (input_result != VK_SUCCESS) return input_result;
VkEncoder* enc = (VkEncoder*)context;
// Default the version in case the query below fails, so apiVersion is
// never read uninitialized.
uint32_t apiVersion = kDefaultApiVersion;
VkResult enumInstanceVersionRes =
enc->vkEnumerateInstanceVersion(&apiVersion, false /* no lock */);
(void)enumInstanceVersionRes;
setInstanceInfo(
*pInstance,
createInfo->enabledExtensionCount,
createInfo->ppEnabledExtensionNames,
apiVersion);
return input_result;
}
VkResult on_vkCreateDevice(
void* context,
VkResult input_result,
VkPhysicalDevice physicalDevice,
const VkDeviceCreateInfo* pCreateInfo,
const VkAllocationCallbacks*,
VkDevice* pDevice) {
if (input_result != VK_SUCCESS) return input_result;
VkEncoder* enc = (VkEncoder*)context;
VkPhysicalDeviceProperties props;
VkPhysicalDeviceMemoryProperties memProps;
enc->vkGetPhysicalDeviceProperties(physicalDevice, &props, false /* no lock */);
enc->vkGetPhysicalDeviceMemoryProperties(physicalDevice, &memProps, false /* no lock */);
setDeviceInfo(
*pDevice, physicalDevice, props, memProps,
pCreateInfo->enabledExtensionCount, pCreateInfo->ppEnabledExtensionNames,
pCreateInfo->pNext);
return input_result;
}
void on_vkDestroyDevice_pre(
void* context,
VkDevice device,
const VkAllocationCallbacks*) {
AutoLock<RecursiveLock> lock(mLock);
auto it = info_VkDevice.find(device);
if (it == info_VkDevice.end()) return;
auto info = it->second;
lock.unlock();
VkEncoder* enc = (VkEncoder*)context;
bool freeMemorySyncSupported =
mFeatureInfo->hasVulkanFreeMemorySync;
for (uint32_t i = 0; i < VK_MAX_MEMORY_TYPES; ++i) {
for (auto& block : info.hostMemBlocks[i]) {
destroyHostMemAlloc(
freeMemorySyncSupported,
enc, device, &block);
}
}
}
VkResult on_vkGetAndroidHardwareBufferPropertiesANDROID(
void*, VkResult,
VkDevice device,
const AHardwareBuffer* buffer,
VkAndroidHardwareBufferPropertiesANDROID* pProperties) {
auto grallocHelper =
ResourceTracker::threadingCallbacks.hostConnectionGetFunc()->grallocHelper();
return getAndroidHardwareBufferPropertiesANDROID(
grallocHelper,
&mHostVisibleMemoryVirtInfo,
device, buffer, pProperties);
}
VkResult on_vkGetMemoryAndroidHardwareBufferANDROID(
void*, VkResult,
VkDevice device,
const VkMemoryGetAndroidHardwareBufferInfoANDROID *pInfo,
struct AHardwareBuffer** pBuffer) {
if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;
AutoLock<RecursiveLock> lock(mLock);
auto deviceIt = info_VkDevice.find(device);
if (deviceIt == info_VkDevice.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);
if (memoryIt == info_VkDeviceMemory.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto& info = memoryIt->second;
VkResult queryRes =
getMemoryAndroidHardwareBufferANDROID(&info.ahw);
if (queryRes != VK_SUCCESS) return queryRes;
*pBuffer = info.ahw;
return queryRes;
}
#ifdef VK_USE_PLATFORM_FUCHSIA
VkResult on_vkGetMemoryZirconHandleFUCHSIA(
void*, VkResult,
VkDevice device,
const VkMemoryGetZirconHandleInfoFUCHSIA* pInfo,
uint32_t* pHandle) {
if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
if (!pInfo->memory) return VK_ERROR_INITIALIZATION_FAILED;
AutoLock<RecursiveLock> lock(mLock);
auto deviceIt = info_VkDevice.find(device);
if (deviceIt == info_VkDevice.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto memoryIt = info_VkDeviceMemory.find(pInfo->memory);
if (memoryIt == info_VkDeviceMemory.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto& info = memoryIt->second;
if (info.vmoHandle == ZX_HANDLE_INVALID) {
ALOGE("%s: memory cannot be exported", __func__);
return VK_ERROR_INITIALIZATION_FAILED;
}
*pHandle = ZX_HANDLE_INVALID;
zx_handle_duplicate(info.vmoHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
return VK_SUCCESS;
}
VkResult on_vkGetMemoryZirconHandlePropertiesFUCHSIA(
void*, VkResult,
VkDevice device,
VkExternalMemoryHandleTypeFlagBits handleType,
uint32_t handle,
VkMemoryZirconHandlePropertiesFUCHSIA* pProperties) {
using fuchsia_hardware_goldfish::wire::kMemoryPropertyDeviceLocal;
using fuchsia_hardware_goldfish::wire::kMemoryPropertyHostVisible;
if (handleType !=
VK_EXTERNAL_MEMORY_HANDLE_TYPE_ZIRCON_VMO_BIT_FUCHSIA) {
return VK_ERROR_INITIALIZATION_FAILED;
}
zx_info_handle_basic_t handleInfo;
zx_status_t status = zx::unowned_vmo(handle)->get_info(
ZX_INFO_HANDLE_BASIC, &handleInfo, sizeof(handleInfo), nullptr,
nullptr);
if (status != ZX_OK || handleInfo.type != ZX_OBJ_TYPE_VMO) {
return VK_ERROR_INVALID_EXTERNAL_HANDLE;
}
AutoLock<RecursiveLock> lock(mLock);
auto deviceIt = info_VkDevice.find(device);
if (deviceIt == info_VkDevice.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto& info = deviceIt->second;
zx::vmo vmo_dup;
status =
zx::unowned_vmo(handle)->duplicate(ZX_RIGHT_SAME_RIGHTS, &vmo_dup);
if (status != ZX_OK) {
ALOGE("zx_handle_duplicate() error: %d", status);
return VK_ERROR_INITIALIZATION_FAILED;
}
uint32_t memoryProperty = 0u;
auto result = mControlDevice->GetBufferHandleInfo(std::move(vmo_dup));
if (!result.ok()) {
ALOGE(
"mControlDevice->GetBufferHandleInfo fatal error: epitaph: %d",
result.status());
return VK_ERROR_INITIALIZATION_FAILED;
}
if (result->result.is_response()) {
memoryProperty = result->result.response().info.memory_property();
} else if (result->result.err() == ZX_ERR_NOT_FOUND) {
// If a VMO is allocated while ColorBuffer/Buffer is not created,
// it must be a device-local buffer, since for host-visible buffers,
// ColorBuffer/Buffer is created at sysmem allocation time.
memoryProperty = kMemoryPropertyDeviceLocal;
} else {
// Importing read-only host memory into the Vulkan driver should not
// work, but it is not an error to try to do so. Returning a
// VkMemoryZirconHandlePropertiesFUCHSIA with no available
// memoryType bits should be enough for clients. See fxbug.dev/24225
// for other issues with this flow.
ALOGW("GetBufferHandleInfo failed: %d", result->result.err());
pProperties->memoryTypeBits = 0;
return VK_SUCCESS;
}
pProperties->memoryTypeBits = 0;
for (uint32_t i = 0; i < info.memProps.memoryTypeCount; ++i) {
if (((memoryProperty & kMemoryPropertyDeviceLocal) &&
(info.memProps.memoryTypes[i].propertyFlags &
VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT)) ||
((memoryProperty & kMemoryPropertyHostVisible) &&
(info.memProps.memoryTypes[i].propertyFlags &
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT))) {
pProperties->memoryTypeBits |= 1ull << i;
}
}
return VK_SUCCESS;
}
zx_koid_t getEventKoid(zx_handle_t eventHandle) {
if (eventHandle == ZX_HANDLE_INVALID) {
return ZX_KOID_INVALID;
}
zx_info_handle_basic_t info;
zx_status_t status =
zx_object_get_info(eventHandle, ZX_INFO_HANDLE_BASIC, &info,
sizeof(info), nullptr, nullptr);
if (status != ZX_OK) {
ALOGE("Cannot get object info of handle %u: %d", eventHandle,
status);
return ZX_KOID_INVALID;
}
return info.koid;
}
VkResult on_vkImportSemaphoreZirconHandleFUCHSIA(
void*, VkResult,
VkDevice device,
const VkImportSemaphoreZirconHandleInfoFUCHSIA* pInfo) {
if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;
AutoLock<RecursiveLock> lock(mLock);
auto deviceIt = info_VkDevice.find(device);
if (deviceIt == info_VkDevice.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
if (semaphoreIt == info_VkSemaphore.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto& info = semaphoreIt->second;
if (info.eventHandle != ZX_HANDLE_INVALID) {
zx_handle_close(info.eventHandle);
}
#if VK_HEADER_VERSION < 174
info.eventHandle = pInfo->handle;
#else // VK_HEADER_VERSION >= 174
info.eventHandle = pInfo->zirconHandle;
#endif // VK_HEADER_VERSION < 174
if (info.eventHandle != ZX_HANDLE_INVALID) {
info.eventKoid = getEventKoid(info.eventHandle);
}
return VK_SUCCESS;
}
VkResult on_vkGetSemaphoreZirconHandleFUCHSIA(
void*, VkResult,
VkDevice device,
const VkSemaphoreGetZirconHandleInfoFUCHSIA* pInfo,
uint32_t* pHandle) {
if (!pInfo) return VK_ERROR_INITIALIZATION_FAILED;
if (!pInfo->semaphore) return VK_ERROR_INITIALIZATION_FAILED;
AutoLock<RecursiveLock> lock(mLock);
auto deviceIt = info_VkDevice.find(device);
if (deviceIt == info_VkDevice.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto semaphoreIt = info_VkSemaphore.find(pInfo->semaphore);
if (semaphoreIt == info_VkSemaphore.end()) {
return VK_ERROR_INITIALIZATION_FAILED;
}
auto& info = semaphoreIt->second;
if (info.eventHandle == ZX_HANDLE_INVALID) {
return VK_ERROR_INITIALIZATION_FAILED;
}
*pHandle = ZX_HANDLE_INVALID;
zx_handle_duplicate(info.eventHandle, ZX_RIGHT_SAME_RIGHTS, pHandle);
return VK_SUCCESS;
}
VkResult on_vkCreateBufferCollectionFUCHSIA(
void*,
VkResult,
VkDevice,
const VkBufferCollectionCreateInfoFUCHSIA* pInfo,
const VkAllocationCallbacks*,
VkBufferCollectionFUCHSIA* pCollection) {
fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken> token_client;
if (pInfo->collectionToken) {
token_client =
fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken>(
zx::channel(pInfo->collectionToken));
} else {
auto endpoints = fidl::CreateEndpoints<
::fuchsia_sysmem::BufferCollectionToken>();
if (!endpoints.is_ok()) {
ALOGE("zx_channel_create failed: %d", endpoints.status_value());
return VK_ERROR_INITIALIZATION_FAILED;
}
auto result = mSysmemAllocator->AllocateSharedCollection(
std::move(endpoints->server));
if (!result.ok()) {
ALOGE("AllocateSharedCollection failed: %d", result.status());
return VK_ERROR_INITIALIZATION_FAILED;
}
token_client = std::move(endpoints->client);
}
auto endpoints =
fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
if (!endpoints.is_ok()) {
ALOGE("zx_channel_create failed: %d", endpoints.status_value());
return VK_ERROR_INITIALIZATION_FAILED;
}
auto [collection_client, collection_server] =
std::move(endpoints.value());
auto result = mSysmemAllocator->BindSharedCollection(
std::move(token_client), std::move(collection_server));
if (!result.ok()) {
ALOGE("BindSharedCollection failed: %d", result.status());
return VK_ERROR_INITIALIZATION_FAILED;
}
auto* sysmem_collection =
new fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>(
std::move(collection_client));
*pCollection =
reinterpret_cast<VkBufferCollectionFUCHSIA>(sysmem_collection);
register_VkBufferCollectionFUCHSIA(*pCollection);
return VK_SUCCESS;
}
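// Client-side usage sketch (illustrative only; assumes |token| is a
// zx::channel carrying a fuchsia.sysmem BufferCollectionToken obtained
// from the sysmem Allocator):
//
//   VkBufferCollectionCreateInfoFUCHSIA createInfo = {
//       .sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CREATE_INFO_FUCHSIA,
//       .pNext = nullptr,
//       .collectionToken = token.release(),
//   };
//   VkBufferCollectionFUCHSIA collection;
//   VkResult res = vkCreateBufferCollectionFUCHSIA(
//       device, &createInfo, /*pAllocator=*/nullptr, &collection);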
VkResult on_vkCreateBufferCollectionFUCHSIAX(
void*,
VkResult,
VkDevice,
const VkBufferCollectionCreateInfoFUCHSIAX* pInfo,
const VkAllocationCallbacks*,
VkBufferCollectionFUCHSIAX* pCollection) {
fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken> token_client;
if (pInfo->collectionToken) {
token_client = fidl::ClientEnd<::fuchsia_sysmem::BufferCollectionToken>(
zx::channel(pInfo->collectionToken));
} else {
auto endpoints =
fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollectionToken>();
if (!endpoints.is_ok()) {
ALOGE("zx_channel_create failed: %d", endpoints.status_value());
return VK_ERROR_INITIALIZATION_FAILED;
}
auto result = mSysmemAllocator->AllocateSharedCollection(
std::move(endpoints->server));
if (!result.ok()) {
ALOGE("AllocateSharedCollection failed: %d", result.status());
return VK_ERROR_INITIALIZATION_FAILED;
}
token_client = std::move(endpoints->client);
}
auto endpoints = fidl::CreateEndpoints<::fuchsia_sysmem::BufferCollection>();
if (!endpoints.is_ok()) {
ALOGE("zx_channel_create failed: %d", endpoints.status_value());
return VK_ERROR_INITIALIZATION_FAILED;
}
auto [collection_client, collection_server] = std::move(endpoints.value());
auto result = mSysmemAllocator->BindSharedCollection(
std::move(token_client), std::move(collection_server));
if (!result.ok()) {
ALOGE("BindSharedCollection failed: %d", result.status());
return VK_ERROR_INITIALIZATION_FAILED;
}
auto* sysmem_collection =
new fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>(
std::move(collection_client));
*pCollection =
reinterpret_cast<VkBufferCollectionFUCHSIAX>(sysmem_collection);
register_VkBufferCollectionFUCHSIAX(*pCollection);
return VK_SUCCESS;
}
void on_vkDestroyBufferCollectionFUCHSIA(
void*,
VkResult,
VkDevice,
VkBufferCollectionFUCHSIA collection,
const VkAllocationCallbacks*) {
auto sysmem_collection = reinterpret_cast<
fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(
collection);
if (sysmem_collection) {
(*sysmem_collection)->Close();
}
delete sysmem_collection;
unregister_VkBufferCollectionFUCHSIA(collection);
}
void on_vkDestroyBufferCollectionFUCHSIAX(
void*,
VkResult,
VkDevice,
VkBufferCollectionFUCHSIAX collection,
const VkAllocationCallbacks*) {
auto sysmem_collection = reinterpret_cast<
fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>*>(collection);
if (sysmem_collection) {
(*sysmem_collection)->Close();
}
delete sysmem_collection;
unregister_VkBufferCollectionFUCHSIAX(collection);
}
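// Builds a BufferCollectionConstraints with permissive defaults: any
// coherency domain, both Goldfish heaps, and no contiguity or secure
// memory requirements. Count arguments equal to zero are left unset.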
inline fuchsia_sysmem::wire::BufferCollectionConstraints
defaultBufferCollectionConstraints(
size_t minSizeBytes,
size_t minBufferCount,
size_t maxBufferCount = 0u,
size_t minBufferCountForCamping = 0u,
size_t minBufferCountForDedicatedSlack = 0u,
size_t minBufferCountForSharedSlack = 0u) {
fuchsia_sysmem::wire::BufferCollectionConstraints constraints = {};
constraints.min_buffer_count = minBufferCount;
if (maxBufferCount > 0) {
constraints.max_buffer_count = maxBufferCount;
}
if (minBufferCountForCamping) {
    constraints.min_buffer_count_for_camping = minBufferCountForCamping;
}
if (minBufferCountForDedicatedSlack) {
    constraints.min_buffer_count_for_dedicated_slack =
        minBufferCountForDedicatedSlack;
}
if (minBufferCountForSharedSlack) {
    constraints.min_buffer_count_for_shared_slack =
        minBufferCountForSharedSlack;
}
constraints.has_buffer_memory_constraints = true;
fuchsia_sysmem::wire::BufferMemoryConstraints& buffer_constraints =
constraints.buffer_memory_constraints;
buffer_constraints.min_size_bytes = minSizeBytes;
buffer_constraints.max_size_bytes = 0xffffffff;
buffer_constraints.physically_contiguous_required = false;
buffer_constraints.secure_required = false;
// No restrictions on coherency domain or Heaps.
buffer_constraints.ram_domain_supported = true;
buffer_constraints.cpu_domain_supported = true;
buffer_constraints.inaccessible_domain_supported = true;
buffer_constraints.heap_permitted_count = 2;
buffer_constraints.heap_permitted[0] =
fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
buffer_constraints.heap_permitted[1] =
fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
return constraints;
}
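// Maps VkImageUsageFlags / VkBufferUsageFlags bits onto the
// corresponding fuchsia.sysmem Vulkan usage bits, one bit at a time.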
uint32_t getBufferCollectionConstraintsVulkanImageUsage(
const VkImageCreateInfo* pImageInfo) {
uint32_t usage = 0u;
VkImageUsageFlags imageUsage = pImageInfo->usage;
#define SetUsageBit(BIT, VALUE) \
if (imageUsage & VK_IMAGE_USAGE_##BIT##_BIT) { \
usage |= fuchsia_sysmem::wire::kVulkanImageUsage##VALUE; \
}
SetUsageBit(COLOR_ATTACHMENT, ColorAttachment);
SetUsageBit(TRANSFER_SRC, TransferSrc);
SetUsageBit(TRANSFER_DST, TransferDst);
SetUsageBit(SAMPLED, Sampled);
#undef SetUsageBit
return usage;
}
uint32_t getBufferCollectionConstraintsVulkanBufferUsage(
VkBufferUsageFlags bufferUsage) {
uint32_t usage = 0u;
#define SetUsageBit(BIT, VALUE) \
if (bufferUsage & VK_BUFFER_USAGE_##BIT##_BIT) { \
usage |= fuchsia_sysmem::wire::kVulkanBufferUsage##VALUE; \
}
SetUsageBit(TRANSFER_SRC, TransferSrc);
SetUsageBit(TRANSFER_DST, TransferDst);
SetUsageBit(UNIFORM_TEXEL_BUFFER, UniformTexelBuffer);
SetUsageBit(STORAGE_TEXEL_BUFFER, StorageTexelBuffer);
SetUsageBit(UNIFORM_BUFFER, UniformBuffer);
SetUsageBit(STORAGE_BUFFER, StorageBuffer);
SetUsageBit(INDEX_BUFFER, IndexBuffer);
SetUsageBit(VERTEX_BUFFER, VertexBuffer);
SetUsageBit(INDIRECT_BUFFER, IndirectBuffer);
#undef SetUsageBit
return usage;
}
uint32_t getBufferCollectionConstraintsVulkanBufferUsage(
const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
VkBufferUsageFlags bufferUsage =
pBufferConstraintsInfo->createInfo.usage;
return getBufferCollectionConstraintsVulkanBufferUsage(bufferUsage);
}
uint32_t getBufferCollectionConstraintsVulkanBufferUsage(
const VkBufferConstraintsInfoFUCHSIAX* pBufferConstraintsInfo) {
VkBufferUsageFlags bufferUsage =
pBufferConstraintsInfo->pBufferCreateInfo->usage;
return getBufferCollectionConstraintsVulkanBufferUsage(bufferUsage);
}
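// Sysmem pixel formats encode only the channel layout, not the numeric
// format, so every 8-bit-per-channel variant of a Vulkan format
// (UNORM/SNORM/SRGB/...) maps to the same sysmem type.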
static fuchsia_sysmem::wire::PixelFormatType vkFormatTypeToSysmem(
VkFormat format) {
switch (format) {
case VK_FORMAT_B8G8R8A8_SINT:
case VK_FORMAT_B8G8R8A8_UNORM:
case VK_FORMAT_B8G8R8A8_SRGB:
case VK_FORMAT_B8G8R8A8_SNORM:
case VK_FORMAT_B8G8R8A8_SSCALED:
case VK_FORMAT_B8G8R8A8_USCALED:
return fuchsia_sysmem::wire::PixelFormatType::kBgra32;
case VK_FORMAT_R8G8B8A8_SINT:
case VK_FORMAT_R8G8B8A8_UNORM:
case VK_FORMAT_R8G8B8A8_SRGB:
case VK_FORMAT_R8G8B8A8_SNORM:
case VK_FORMAT_R8G8B8A8_SSCALED:
case VK_FORMAT_R8G8B8A8_USCALED:
return fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
case VK_FORMAT_R8_UNORM:
case VK_FORMAT_R8_UINT:
case VK_FORMAT_R8_USCALED:
case VK_FORMAT_R8_SNORM:
case VK_FORMAT_R8_SINT:
case VK_FORMAT_R8_SSCALED:
case VK_FORMAT_R8_SRGB:
return fuchsia_sysmem::wire::PixelFormatType::kR8;
case VK_FORMAT_R8G8_UNORM:
case VK_FORMAT_R8G8_UINT:
case VK_FORMAT_R8G8_USCALED:
case VK_FORMAT_R8G8_SNORM:
case VK_FORMAT_R8G8_SINT:
case VK_FORMAT_R8G8_SSCALED:
case VK_FORMAT_R8G8_SRGB:
return fuchsia_sysmem::wire::PixelFormatType::kR8G8;
default:
return fuchsia_sysmem::wire::PixelFormatType::kInvalid;
}
}
static bool vkFormatMatchesSysmemFormat(
VkFormat vkFormat,
fuchsia_sysmem::wire::PixelFormatType sysmemFormat) {
switch (vkFormat) {
case VK_FORMAT_B8G8R8A8_SINT:
case VK_FORMAT_B8G8R8A8_UNORM:
case VK_FORMAT_B8G8R8A8_SRGB:
case VK_FORMAT_B8G8R8A8_SNORM:
case VK_FORMAT_B8G8R8A8_SSCALED:
case VK_FORMAT_B8G8R8A8_USCALED:
return sysmemFormat ==
fuchsia_sysmem::wire::PixelFormatType::kBgra32;
case VK_FORMAT_R8G8B8A8_SINT:
case VK_FORMAT_R8G8B8A8_UNORM:
case VK_FORMAT_R8G8B8A8_SRGB:
case VK_FORMAT_R8G8B8A8_SNORM:
case VK_FORMAT_R8G8B8A8_SSCALED:
case VK_FORMAT_R8G8B8A8_USCALED:
return sysmemFormat ==
fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8;
case VK_FORMAT_R8_UNORM:
case VK_FORMAT_R8_UINT:
case VK_FORMAT_R8_USCALED:
case VK_FORMAT_R8_SNORM:
case VK_FORMAT_R8_SINT:
case VK_FORMAT_R8_SSCALED:
case VK_FORMAT_R8_SRGB:
return sysmemFormat ==
fuchsia_sysmem::wire::PixelFormatType::kR8 ||
sysmemFormat ==
fuchsia_sysmem::wire::PixelFormatType::kL8;
case VK_FORMAT_R8G8_UNORM:
case VK_FORMAT_R8G8_UINT:
case VK_FORMAT_R8G8_USCALED:
case VK_FORMAT_R8G8_SNORM:
case VK_FORMAT_R8G8_SINT:
case VK_FORMAT_R8G8_SSCALED:
case VK_FORMAT_R8G8_SRGB:
return sysmemFormat ==
fuchsia_sysmem::wire::PixelFormatType::kR8G8;
default:
return false;
}
}
static VkFormat sysmemPixelFormatTypeToVk(
fuchsia_sysmem::wire::PixelFormatType format) {
switch (format) {
case fuchsia_sysmem::wire::PixelFormatType::kBgra32:
return VK_FORMAT_B8G8R8A8_SRGB;
case fuchsia_sysmem::wire::PixelFormatType::kR8G8B8A8:
return VK_FORMAT_R8G8B8A8_SRGB;
case fuchsia_sysmem::wire::PixelFormatType::kL8:
case fuchsia_sysmem::wire::PixelFormatType::kR8:
return VK_FORMAT_R8_UNORM;
case fuchsia_sysmem::wire::PixelFormatType::kR8G8:
return VK_FORMAT_R8G8_UNORM;
default:
return VK_FORMAT_UNDEFINED;
}
}
// TODO(fxbug.dev/90856): This is currently only used for allocating
// memory for dedicated external images. It should be migrated to use
// SetBufferCollectionImageConstraintsFUCHSIA.
VkResult setBufferCollectionConstraintsFUCHSIA(
VkEncoder* enc,
VkDevice device,
fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* collection,
const VkImageCreateInfo* pImageInfo) {
if (pImageInfo == nullptr) {
ALOGE("setBufferCollectionConstraints: pImageInfo cannot be null.");
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
const VkSysmemColorSpaceFUCHSIA kDefaultColorSpace = {
.sType = VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA,
.pNext = nullptr,
.colorSpace = static_cast<uint32_t>(
fuchsia_sysmem::wire::ColorSpaceType::kSrgb),
};
std::vector<VkImageFormatConstraintsInfoFUCHSIA> formatInfos;
if (pImageInfo->format == VK_FORMAT_UNDEFINED) {
const auto kFormats = {
VK_FORMAT_B8G8R8A8_SRGB,
VK_FORMAT_R8G8B8A8_SRGB,
};
for (auto format : kFormats) {
// shallow copy, using pNext from pImageInfo directly.
auto createInfo = *pImageInfo;
createInfo.format = format;
formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
.sType =
VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
.pNext = nullptr,
.imageCreateInfo = createInfo,
.colorSpaceCount = 1,
.pColorSpaces = &kDefaultColorSpace,
});
}
} else {
formatInfos.push_back(VkImageFormatConstraintsInfoFUCHSIA{
.sType =
VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
.pNext = nullptr,
.imageCreateInfo = *pImageInfo,
.colorSpaceCount = 1,
.pColorSpaces = &kDefaultColorSpace,
});
}
VkImageConstraintsInfoFUCHSIA imageConstraints = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA,
.pNext = nullptr,
.formatConstraintsCount = static_cast<uint32_t>(formatInfos.size()),
.pFormatConstraints = formatInfos.data(),
.bufferCollectionConstraints =
VkBufferCollectionConstraintsInfoFUCHSIA{
.sType =
VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
.pNext = nullptr,
.minBufferCount = 1,
.maxBufferCount = 0,
.minBufferCountForCamping = 0,
.minBufferCountForDedicatedSlack = 0,
.minBufferCountForSharedSlack = 0,
},
.flags = 0u,
};
return setBufferCollectionImageConstraintsFUCHSIA(
enc, device, collection, &imageConstraints);
}
VkResult setBufferCollectionConstraintsFUCHSIAX(
VkEncoder* enc,
VkDevice device,
fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* collection,
const VkImageCreateInfo* pImageInfo) {
if (pImageInfo == nullptr) {
ALOGE("setBufferCollectionConstraints: pImageInfo cannot be null.");
return VK_ERROR_OUT_OF_DEVICE_MEMORY;
}
std::vector<VkImageCreateInfo> createInfos;
if (pImageInfo->format == VK_FORMAT_UNDEFINED) {
const auto kFormats = {
VK_FORMAT_B8G8R8A8_SRGB,
VK_FORMAT_R8G8B8A8_SRGB,
};
for (auto format : kFormats) {
// shallow copy, using pNext from pImageInfo directly.
auto createInfo = *pImageInfo;
createInfo.format = format;
createInfos.push_back(createInfo);
}
} else {
createInfos.push_back(*pImageInfo);
}
VkImageConstraintsInfoFUCHSIAX imageConstraints;
imageConstraints.sType =
VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIAX;
imageConstraints.pNext = nullptr;
imageConstraints.createInfoCount = createInfos.size();
imageConstraints.pCreateInfos = createInfos.data();
imageConstraints.pFormatConstraints = nullptr;
imageConstraints.maxBufferCount = 0;
imageConstraints.minBufferCount = 1;
imageConstraints.minBufferCountForCamping = 0;
imageConstraints.minBufferCountForDedicatedSlack = 0;
imageConstraints.minBufferCountForSharedSlack = 0;
imageConstraints.flags = 0u;
return setBufferCollectionImageConstraintsFUCHSIAX(
enc, device, collection, &imageConstraints);
}
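// Translates one VkImageFormatConstraintsInfoFUCHSIA entry into a
// sysmem ImageFormatConstraints and appends it to |constraints|:
// verify the format/tiling/usage combination and the required format
// features on the host, pick the sysmem pixel format, then fill in
// size limits and the row-pitch alignment queried from the host GPU.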
VkResult addImageBufferCollectionConstraintsFUCHSIA(
VkEncoder* enc,
VkDevice device,
VkPhysicalDevice physicalDevice,
const VkImageFormatConstraintsInfoFUCHSIA*
    formatConstraints,  // must not be null
VkImageTiling tiling,
fuchsia_sysmem::wire::BufferCollectionConstraints* constraints) {
// First check if the format, tiling and usage are supported on the host.
VkImageFormatProperties imageFormatProperties;
auto createInfo = &formatConstraints->imageCreateInfo;
auto result = enc->vkGetPhysicalDeviceImageFormatProperties(
physicalDevice, createInfo->format, createInfo->imageType, tiling,
createInfo->usage, createInfo->flags, &imageFormatProperties,
true /* do lock */);
if (result != VK_SUCCESS) {
ALOGW(
"%s: Image format (%u) type (%u) tiling (%u) "
"usage (%u) flags (%u) not supported by physical "
"device",
__func__, static_cast<uint32_t>(createInfo->format),
static_cast<uint32_t>(createInfo->imageType),
static_cast<uint32_t>(tiling),
static_cast<uint32_t>(createInfo->usage),
static_cast<uint32_t>(createInfo->flags));
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
// Check if format constraints contains unsupported format features.
{
VkFormatProperties formatProperties;
enc->vkGetPhysicalDeviceFormatProperties(
physicalDevice, createInfo->format, &formatProperties,
true /* do lock */);
auto supportedFeatures =
(tiling == VK_IMAGE_TILING_LINEAR)
? formatProperties.linearTilingFeatures
: formatProperties.optimalTilingFeatures;
auto requiredFeatures = formatConstraints->requiredFormatFeatures;
if ((~supportedFeatures) & requiredFeatures) {
ALOGW(
    "%s: Host device supported features for %s tiling: %08x, "
    "required features: %08x, feature bits %08x missing",
    __func__,
    tiling == VK_IMAGE_TILING_LINEAR ? "LINEAR" : "OPTIMAL",
    static_cast<uint32_t>(supportedFeatures),
    static_cast<uint32_t>(requiredFeatures),
    static_cast<uint32_t>((~supportedFeatures) &
                          requiredFeatures));
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
}
fuchsia_sysmem::wire::ImageFormatConstraints imageConstraints;
if (formatConstraints->sysmemPixelFormat != 0) {
auto pixelFormat =
static_cast<fuchsia_sysmem::wire::PixelFormatType>(
formatConstraints->sysmemPixelFormat);
if (createInfo->format != VK_FORMAT_UNDEFINED &&
!vkFormatMatchesSysmemFormat(createInfo->format, pixelFormat)) {
ALOGW("%s: VkFormat %u doesn't match sysmem pixelFormat %lu",
__func__, static_cast<uint32_t>(createInfo->format),
formatConstraints->sysmemPixelFormat);
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
imageConstraints.pixel_format.type = pixelFormat;
} else {
auto pixel_format = vkFormatTypeToSysmem(createInfo->format);
if (pixel_format ==
fuchsia_sysmem::wire::PixelFormatType::kInvalid) {
ALOGW("%s: Unsupported VkFormat %u", __func__,
static_cast<uint32_t>(createInfo->format));
return VK_ERROR_FORMAT_NOT_SUPPORTED;
}
imageConstraints.pixel_format.type = pixel_format;
}
imageConstraints.color_spaces_count =
formatConstraints->colorSpaceCount;
for (size_t i = 0; i < formatConstraints->colorSpaceCount; i++) {
    imageConstraints.color_space[i].type =
        static_cast<fuchsia_sysmem::wire::ColorSpaceType>(
            formatConstraints->pColorSpaces[i].colorSpace);
}
// Get row alignment from host GPU.
VkDeviceSize offset;
VkDeviceSize rowPitchAlignment;
enc->vkGetLinearImageLayoutGOOGLE(device, createInfo->format, &offset,
&rowPitchAlignment,
true /* do lock */);
ALOGD(
"vkGetLinearImageLayoutGOOGLE: format %d offset %lu "
"rowPitchAlignment = %lu",
(int)createInfo->format, offset, rowPitchAlignment);
imageConstraints.min_coded_width = createInfo->extent.width;
imageConstraints.max_coded_width = 0xffffffff;
imageConstraints.min_coded_height = createInfo->extent.height;
imageConstraints.max_coded_height = 0xffffffff;
// The min_bytes_per_row can be calculated by sysmem using
// |min_coded_width|, |bytes_per_row_divisor| and color format.
imageConstraints.min_bytes_per_row = 0;
imageConstraints.max_bytes_per_row = 0xffffffff;
imageConstraints.max_coded_width_times_coded_height = 0xffffffff;
imageConstraints.layers = 1;
imageConstraints.coded_width_divisor = 1;
imageConstraints.coded_height_divisor = 1;
imageConstraints.bytes_per_row_divisor = rowPitchAlignment;
imageConstraints.start_offset_divisor = 1;
imageConstraints.display_width_divisor = 1;
imageConstraints.display_height_divisor = 1;
imageConstraints.pixel_format.has_format_modifier = true;
imageConstraints.pixel_format.format_modifier.value =
(tiling == VK_IMAGE_TILING_LINEAR)
? fuchsia_sysmem::wire::kFormatModifierLinear
: fuchsia_sysmem::wire::kFormatModifierGoogleGoldfishOptimal;
constraints->image_format_constraints
[constraints->image_format_constraints_count++] = imageConstraints;
return VK_SUCCESS;
}
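// |createInfoIndex| maps each sysmem image format constraint that was
// accepted back to the index of the originating entry in
// pFormatConstraints, so callers can recover which create info a
// negotiated format corresponds to.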
struct SetBufferCollectionImageConstraintsResult {
VkResult result;
fuchsia_sysmem::wire::BufferCollectionConstraints constraints;
std::vector<uint32_t> createInfoIndex;
};
SetBufferCollectionImageConstraintsResult
setBufferCollectionImageConstraintsImpl(
VkEncoder* enc,
VkDevice device,
fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
const auto& collection = *pCollection;
if (!pImageConstraintsInfo ||
(pImageConstraintsInfo->sType !=
VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIAX &&
pImageConstraintsInfo->sType !=
VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA)) {
ALOGE("%s: invalid pImageConstraintsInfo", __func__);
return {VK_ERROR_INITIALIZATION_FAILED};
}
if (pImageConstraintsInfo->formatConstraintsCount == 0) {
ALOGE("%s: formatConstraintsCount must be greater than 0",
__func__);
abort();
}
fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
defaultBufferCollectionConstraints(
/* min_size_bytes */ 0,
pImageConstraintsInfo->bufferCollectionConstraints
.minBufferCount,
pImageConstraintsInfo->bufferCollectionConstraints
.maxBufferCount,
pImageConstraintsInfo->bufferCollectionConstraints
.minBufferCountForCamping,
pImageConstraintsInfo->bufferCollectionConstraints
.minBufferCountForDedicatedSlack,
pImageConstraintsInfo->bufferCollectionConstraints
.minBufferCountForSharedSlack);
std::vector<fuchsia_sysmem::wire::ImageFormatConstraints>
format_constraints;
VkPhysicalDevice physicalDevice;
{
AutoLock<RecursiveLock> lock(mLock);
auto deviceIt = info_VkDevice.find(device);
if (deviceIt == info_VkDevice.end()) {
return {VK_ERROR_INITIALIZATION_FAILED};
}
physicalDevice = deviceIt->second.physdev;
}
std::vector<uint32_t> createInfoIndex;
bool hasOptimalTiling = false;
for (uint32_t i = 0; i < pImageConstraintsInfo->formatConstraintsCount;
i++) {
const VkImageCreateInfo* createInfo =
&pImageConstraintsInfo->pFormatConstraints[i].imageCreateInfo;
const VkImageFormatConstraintsInfoFUCHSIA* formatConstraints =
&pImageConstraintsInfo->pFormatConstraints[i];
// Add ImageFormatConstraints for *optimal* tiling
VkResult optimalResult = VK_ERROR_FORMAT_NOT_SUPPORTED;
if (createInfo->tiling == VK_IMAGE_TILING_OPTIMAL) {
optimalResult = addImageBufferCollectionConstraintsFUCHSIA(
enc, device, physicalDevice, formatConstraints,
VK_IMAGE_TILING_OPTIMAL, &constraints);
if (optimalResult == VK_SUCCESS) {
createInfoIndex.push_back(i);
hasOptimalTiling = true;
}
}
// Add ImageFormatConstraints for *linear* tiling
VkResult linearResult = addImageBufferCollectionConstraintsFUCHSIA(
enc, device, physicalDevice, formatConstraints,
VK_IMAGE_TILING_LINEAR, &constraints);
if (linearResult == VK_SUCCESS) {
createInfoIndex.push_back(i);
}
// Update usage and BufferMemoryConstraints
if (linearResult == VK_SUCCESS || optimalResult == VK_SUCCESS) {
constraints.usage.vulkan |=
getBufferCollectionConstraintsVulkanImageUsage(createInfo);
if (formatConstraints && formatConstraints->flags) {
ALOGW(
"%s: Non-zero flags (%08x) in image format "
"constraints; this is currently not supported, see "
"fxbug.dev/68833.",
__func__, formatConstraints->flags);
}
}
}
// Set buffer memory constraints based on optimal/linear tiling support
// and flags.
VkImageConstraintsInfoFlagsFUCHSIA flags = pImageConstraintsInfo->flags;
if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA)
constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageRead;
if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA)
constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageReadOften;
if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA)
constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWrite;
if (flags & VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA)
constraints.usage.cpu |= fuchsia_sysmem::wire::kCpuUsageWriteOften;
constraints.has_buffer_memory_constraints = true;
auto& memory_constraints = constraints.buffer_memory_constraints;
memory_constraints.cpu_domain_supported = true;
memory_constraints.ram_domain_supported = true;
memory_constraints.inaccessible_domain_supported =
hasOptimalTiling &&
!(flags & (VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_RARELY_FUCHSIA |
VK_IMAGE_CONSTRAINTS_INFO_CPU_READ_OFTEN_FUCHSIA |
VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_RARELY_FUCHSIA |
VK_IMAGE_CONSTRAINTS_INFO_CPU_WRITE_OFTEN_FUCHSIA));
if (memory_constraints.inaccessible_domain_supported) {
memory_constraints.heap_permitted_count = 2;
memory_constraints.heap_permitted[0] =
fuchsia_sysmem::wire::HeapType::kGoldfishDeviceLocal;
memory_constraints.heap_permitted[1] =
fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
} else {
memory_constraints.heap_permitted_count = 1;
memory_constraints.heap_permitted[0] =
fuchsia_sysmem::wire::HeapType::kGoldfishHostVisible;
}
if (constraints.image_format_constraints_count == 0) {
ALOGE("%s: none of the specified formats is supported by device",
__func__);
return {VK_ERROR_FORMAT_NOT_SUPPORTED};
}
constexpr uint32_t kVulkanPriority = 5;
const char kName[] = "GoldfishSysmemShared";
collection->SetName(kVulkanPriority, fidl::StringView(kName));
auto result = collection->SetConstraints(true, constraints);
if (!result.ok()) {
ALOGE("setBufferCollectionConstraints: SetConstraints failed: %d",
result.status());
return {VK_ERROR_INITIALIZATION_FAILED};
}
return {VK_SUCCESS, constraints, std::move(createInfoIndex)};
}
VkResult setBufferCollectionImageConstraintsFUCHSIA(
VkEncoder* enc,
VkDevice device,
fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
const VkImageConstraintsInfoFUCHSIA* pImageConstraintsInfo) {
auto setConstraintsResult = setBufferCollectionImageConstraintsImpl(
enc, device, pCollection, pImageConstraintsInfo);
if (setConstraintsResult.result != VK_SUCCESS) {
return setConstraintsResult.result;
}
// copy constraints to info_VkBufferCollectionFUCHSIA if
// |collection| is a valid VkBufferCollectionFUCHSIA handle.
AutoLock<RecursiveLock> lock(mLock);
VkBufferCollectionFUCHSIA buffer_collection =
reinterpret_cast<VkBufferCollectionFUCHSIA>(pCollection);
if (info_VkBufferCollectionFUCHSIA.find(buffer_collection) !=
info_VkBufferCollectionFUCHSIA.end()) {
info_VkBufferCollectionFUCHSIA[buffer_collection].constraints =
android::base::makeOptional(
std::move(setConstraintsResult.constraints));
info_VkBufferCollectionFUCHSIA[buffer_collection].createInfoIndex =
std::move(setConstraintsResult.createInfoIndex);
}
return VK_SUCCESS;
}
VkResult setBufferCollectionImageConstraintsFUCHSIAX(
VkEncoder* enc,
VkDevice device,
fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
const VkImageConstraintsInfoFUCHSIAX* pImageConstraintsInfo) {
const VkSysmemColorSpaceFUCHSIA kDefaultColorSpace = {
.sType = VK_STRUCTURE_TYPE_SYSMEM_COLOR_SPACE_FUCHSIA,
.pNext = nullptr,
.colorSpace = static_cast<uint32_t>(
fuchsia_sysmem::wire::ColorSpaceType::kSrgb),
};
std::vector<VkImageFormatConstraintsInfoFUCHSIA> formatConstraints;
for (size_t i = 0; i < pImageConstraintsInfo->createInfoCount; i++) {
VkImageFormatConstraintsInfoFUCHSIA constraints = {
.sType =
VK_STRUCTURE_TYPE_IMAGE_FORMAT_CONSTRAINTS_INFO_FUCHSIA,
.pNext = nullptr,
.imageCreateInfo = pImageConstraintsInfo->pCreateInfos[i],
.requiredFormatFeatures = {},
.flags = {},
.sysmemPixelFormat = 0u,
.colorSpaceCount = 1u,
.pColorSpaces = &kDefaultColorSpace,
};
if (pImageConstraintsInfo->pFormatConstraints) {
const auto* formatConstraintsFUCHSIAX =
&pImageConstraintsInfo->pFormatConstraints[i];
constraints.pNext = formatConstraintsFUCHSIAX->pNext;
constraints.requiredFormatFeatures =
formatConstraintsFUCHSIAX->requiredFormatFeatures;
constraints.flags =
static_cast<VkImageFormatConstraintsFlagsFUCHSIA>(
    formatConstraintsFUCHSIAX->flags);
constraints.sysmemPixelFormat =
formatConstraintsFUCHSIAX->sysmemFormat;
constraints.colorSpaceCount =
formatConstraintsFUCHSIAX->colorSpaceCount > 0
? formatConstraintsFUCHSIAX->colorSpaceCount
: 1;
// VkSysmemColorSpaceFUCHSIA and VkSysmemColorSpaceFUCHSIAX have
// identical definitions so we can just do a reinterpret_cast.
constraints.pColorSpaces =
formatConstraintsFUCHSIAX->colorSpaceCount > 0
? reinterpret_cast<const VkSysmemColorSpaceFUCHSIA*>(
formatConstraintsFUCHSIAX->pColorSpaces)
: &kDefaultColorSpace;
}
formatConstraints.push_back(constraints);
}
VkImageConstraintsInfoFUCHSIA imageConstraintsInfoFUCHSIA = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CONSTRAINTS_INFO_FUCHSIA,
.pNext = pImageConstraintsInfo->pNext,
.formatConstraintsCount = pImageConstraintsInfo->createInfoCount,
.pFormatConstraints = formatConstraints.data(),
.bufferCollectionConstraints =
VkBufferCollectionConstraintsInfoFUCHSIA{
.sType = VK_STRUCTURE_TYPE_BUFFER_COLLECTION_CONSTRAINTS_INFO_FUCHSIA,
.pNext = nullptr,
.minBufferCount = pImageConstraintsInfo->minBufferCount,
.maxBufferCount = pImageConstraintsInfo->maxBufferCount,
.minBufferCountForCamping =
pImageConstraintsInfo->minBufferCountForCamping,
.minBufferCountForDedicatedSlack =
pImageConstraintsInfo->minBufferCountForDedicatedSlack,
.minBufferCountForSharedSlack =
pImageConstraintsInfo->minBufferCountForSharedSlack,
},
.flags = pImageConstraintsInfo->flags,
};
auto setConstraintsResult = setBufferCollectionImageConstraintsImpl(
enc, device, pCollection, &imageConstraintsInfoFUCHSIA);
if (setConstraintsResult.result != VK_SUCCESS) {
return setConstraintsResult.result;
}
// copy constraints to info_VkBufferCollectionFUCHSIAX if
// |collection| is a valid VkBufferCollectionFUCHSIAX handle.
AutoLock<RecursiveLock> lock(mLock);
VkBufferCollectionFUCHSIAX buffer_collection =
reinterpret_cast<VkBufferCollectionFUCHSIAX>(pCollection);
if (info_VkBufferCollectionFUCHSIAX.find(buffer_collection) !=
info_VkBufferCollectionFUCHSIAX.end()) {
info_VkBufferCollectionFUCHSIAX[buffer_collection].constraints =
android::base::makeOptional(
std::move(setConstraintsResult.constraints));
info_VkBufferCollectionFUCHSIAX[buffer_collection].createInfoIndex =
std::move(setConstraintsResult.createInfoIndex);
}
return VK_SUCCESS;
}
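// Applies VkBuffer-based constraints: minimum size taken from the
// buffer create info plus the Vulkan buffer usage bits, layered on top
// of the default collection constraints.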
struct SetBufferCollectionBufferConstraintsResult {
VkResult result;
fuchsia_sysmem::wire::BufferCollectionConstraints constraints;
};
SetBufferCollectionBufferConstraintsResult
setBufferCollectionBufferConstraintsImpl(
fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
const auto& collection = *pCollection;
if (pBufferConstraintsInfo == nullptr) {
ALOGE(
"setBufferCollectionBufferConstraints: "
"pBufferConstraintsInfo cannot be null.");
return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
}
fuchsia_sysmem::wire::BufferCollectionConstraints constraints =
defaultBufferCollectionConstraints(
/* min_size_bytes */ pBufferConstraintsInfo->createInfo.size,
/* buffer_count */ pBufferConstraintsInfo
->bufferCollectionConstraints.minBufferCount);
constraints.usage.vulkan =
getBufferCollectionConstraintsVulkanBufferUsage(
pBufferConstraintsInfo);
constexpr uint32_t kVulkanPriority = 5;
const char kName[] = "GoldfishBufferSysmemShared";
collection->SetName(kVulkanPriority, fidl::StringView(kName));
auto result = collection->SetConstraints(true, constraints);
if (!result.ok()) {
ALOGE("setBufferCollectionConstraints: SetConstraints failed: %d",
result.status());
return {VK_ERROR_OUT_OF_DEVICE_MEMORY};
}
return {VK_SUCCESS, constraints};
}
VkResult setBufferCollectionBufferConstraintsFUCHSIA(
fidl::WireSyncClient<fuchsia_sysmem::BufferCollection>* pCollection,
const VkBufferConstraintsInfoFUCHSIA* pBufferConstraintsInfo) {
auto setConstraintsResult = setBufferCollectionBufferConstraintsImpl(
pCollection, pBufferConstraintsInfo);
if (setConstraintsResult.result != VK_SUCCESS) {