#include "gpu.h"
#include <string.h>
#define VK_NO_PROTOTYPES
#include <vulkan/vulkan.h>
#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#else
#include <dlfcn.h>
#endif
typedef struct gpu_memory gpu_memory;
// Objects
struct gpu_buffer {
VkBuffer handle;
gpu_memory* memory;
};
size_t gpu_sizeof_buffer() { return sizeof(gpu_buffer); }
// Pools
struct gpu_memory {
VkDeviceMemory handle;
void* pointer;
uint32_t refs;
};
typedef enum {
GPU_MEMORY_BUFFER_GPU,
GPU_MEMORY_COUNT
} gpu_memory_type;
typedef struct {
gpu_memory* block;
uint32_t cursor;
uint16_t memoryType;
uint16_t memoryFlags;
} gpu_allocator;
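// There is one allocator per gpu_memory_type. state.allocatorLookup maps a memory type to the
// allocator that services it; it is never written in this file, so the zero-initialized state
// maps the single GPU_MEMORY_BUFFER_GPU type to allocator 0.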
typedef struct {
void* handle;
VkObjectType type;
uint32_t tick;
} gpu_victim;
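// The morgue is a ring buffer of Vulkan handles waiting to be destroyed. head and tail increase
// monotonically and are wrapped with MORGUE_MASK on access, so the capacity must stay a power of
// two. Each victim records the tick it was condemned on, and is only destroyed once the GPU has
// finished that tick (see condemn and expunge below).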
typedef struct {
uint32_t head;
uint32_t tail;
gpu_victim data[256];
} gpu_morgue;
typedef struct {
VkCommandPool pool;
VkSemaphore semaphores[2];
VkFence fence;
} gpu_tick;
// State
static struct {
void* library;
gpu_config config;
VkInstance instance;
VkPhysicalDevice adapter;
VkDevice device;
VkQueue queue;
uint32_t queueFamilyIndex;
VkDebugUtilsMessengerEXT messenger;
gpu_allocator allocators[GPU_MEMORY_COUNT];
uint8_t allocatorLookup[GPU_MEMORY_COUNT];
gpu_memory memory[256];
gpu_morgue morgue;
gpu_tick ticks[2];
uint32_t tick[2];
} state;
// Helpers
enum { CPU, GPU };
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define COUNTOF(x) (sizeof(x) / sizeof((x)[0]))
#define ALIGN(p, n) (((uintptr_t) (p) + ((n) - 1)) & ~((uintptr_t) (n) - 1))
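// ALIGN rounds up to a power-of-two boundary, e.g. ALIGN(13, 8) == 16 and ALIGN(16, 8) == 16.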
#define VK(f, s) if (!vcheck(f, s))
#define CHECK(c, s) if (!check(c, s))
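// VK and CHECK fall through on success; the statement or block immediately following the macro is
// the error path, e.g. VK(vkCreateBuffer(...), "Could not create buffer") return false;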
#define TICK_MASK (COUNTOF(state.ticks) - 1)
#define MORGUE_MASK (COUNTOF(state.morgue.data) - 1)
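// Both masks assume the backing arrays (state.ticks, state.morgue.data) have power-of-two lengths.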
static gpu_memory* gpu_allocate(gpu_memory_type type, VkMemoryRequirements info, VkDeviceSize* offset);
static void gpu_release(gpu_memory* memory);
static void condemn(void* handle, VkObjectType type);
static void expunge(void);
static VkBool32 relay(VkDebugUtilsMessageSeverityFlagBitsEXT severity, VkDebugUtilsMessageTypeFlagsEXT flags, const VkDebugUtilsMessengerCallbackDataEXT* data, void* userdata);
static void nickname(void* object, VkObjectType type, const char* name);
static bool vcheck(VkResult result, const char* message);
static bool check(bool condition, const char* message);
// Loader
// Functions that don't require an instance
#define GPU_FOREACH_ANONYMOUS(X)\
X(vkCreateInstance)
// Functions that require an instance but don't require a device
#define GPU_FOREACH_INSTANCE(X)\
X(vkDestroyInstance)\
X(vkCreateDebugUtilsMessengerEXT)\
X(vkDestroyDebugUtilsMessengerEXT)\
X(vkDestroySurfaceKHR)\
X(vkEnumeratePhysicalDevices)\
X(vkGetPhysicalDeviceProperties2)\
X(vkGetPhysicalDeviceFeatures2)\
X(vkGetPhysicalDeviceMemoryProperties)\
X(vkGetPhysicalDeviceFormatProperties)\
X(vkGetPhysicalDeviceQueueFamilyProperties)\
X(vkGetPhysicalDeviceSurfaceSupportKHR)\
X(vkGetPhysicalDeviceSurfaceCapabilitiesKHR)\
X(vkGetPhysicalDeviceSurfaceFormatsKHR)\
X(vkCreateDevice)\
X(vkDestroyDevice)\
X(vkGetDeviceQueue)\
X(vkGetDeviceProcAddr)
// Functions that require a device
#define GPU_FOREACH_DEVICE(X)\
X(vkSetDebugUtilsObjectNameEXT)\
X(vkCmdBeginDebugUtilsLabelEXT)\
X(vkCmdEndDebugUtilsLabelEXT)\
X(vkDeviceWaitIdle)\
X(vkQueueSubmit)\
X(vkQueuePresentKHR)\
X(vkCreateSwapchainKHR)\
X(vkDestroySwapchainKHR)\
X(vkGetSwapchainImagesKHR)\
X(vkAcquireNextImageKHR)\
X(vkCreateCommandPool)\
X(vkDestroyCommandPool)\
X(vkResetCommandPool)\
X(vkAllocateCommandBuffers)\
X(vkBeginCommandBuffer)\
X(vkEndCommandBuffer)\
X(vkCreateFence)\
X(vkDestroyFence)\
X(vkResetFences)\
X(vkGetFenceStatus)\
X(vkWaitForFences)\
X(vkCreateSemaphore)\
X(vkDestroySemaphore)\
X(vkCmdPipelineBarrier)\
X(vkCreateQueryPool)\
X(vkDestroyQueryPool)\
X(vkCmdResetQueryPool)\
X(vkCmdWriteTimestamp)\
X(vkCmdCopyQueryPoolResults)\
X(vkCreateBuffer)\
X(vkDestroyBuffer)\
X(vkGetBufferMemoryRequirements)\
X(vkBindBufferMemory)\
X(vkCreateImage)\
X(vkDestroyImage)\
X(vkGetImageMemoryRequirements)\
X(vkBindImageMemory)\
X(vkCmdCopyBuffer)\
X(vkCmdCopyImage)\
X(vkCmdBlitImage)\
X(vkCmdCopyBufferToImage)\
X(vkCmdCopyImageToBuffer)\
X(vkCmdFillBuffer)\
X(vkCmdClearColorImage)\
X(vkAllocateMemory)\
X(vkFreeMemory)\
X(vkMapMemory)\
X(vkCreateSampler)\
X(vkDestroySampler)\
X(vkCreateRenderPass)\
X(vkDestroyRenderPass)\
X(vkCmdBeginRenderPass)\
X(vkCmdEndRenderPass)\
X(vkCreateImageView)\
X(vkDestroyImageView)\
X(vkCreateFramebuffer)\
X(vkDestroyFramebuffer)\
X(vkCreateShaderModule)\
X(vkDestroyShaderModule)\
X(vkCreateDescriptorSetLayout)\
X(vkDestroyDescriptorSetLayout)\
X(vkCreatePipelineLayout)\
X(vkDestroyPipelineLayout)\
X(vkCreateDescriptorPool)\
X(vkDestroyDescriptorPool)\
X(vkAllocateDescriptorSets)\
X(vkResetDescriptorPool)\
X(vkUpdateDescriptorSets)\
X(vkCreatePipelineCache)\
X(vkDestroyPipelineCache)\
X(vkCreateGraphicsPipelines)\
X(vkCreateComputePipelines)\
X(vkDestroyPipeline)\
X(vkCmdSetViewport)\
X(vkCmdSetScissor)\
X(vkCmdPushConstants)\
X(vkCmdBindPipeline)\
X(vkCmdBindDescriptorSets)\
X(vkCmdBindVertexBuffers)\
X(vkCmdBindIndexBuffer)\
X(vkCmdDraw)\
X(vkCmdDrawIndexed)\
X(vkCmdDrawIndirect)\
X(vkCmdDrawIndexedIndirect)\
X(vkCmdDispatch)\
X(vkCmdDispatchIndirect)
// Used to load/declare Vulkan functions without lots of clutter
#define GPU_LOAD_ANONYMOUS(fn) fn = (PFN_##fn) vkGetInstanceProcAddr(NULL, #fn);
#define GPU_LOAD_INSTANCE(fn) fn = (PFN_##fn) vkGetInstanceProcAddr(state.instance, #fn);
#define GPU_LOAD_DEVICE(fn) fn = (PFN_##fn) vkGetDeviceProcAddr(state.device, #fn);
#define GPU_DECLARE(fn) static PFN_##fn fn;
// Declare function pointers
GPU_FOREACH_ANONYMOUS(GPU_DECLARE)
GPU_FOREACH_INSTANCE(GPU_DECLARE)
GPU_FOREACH_DEVICE(GPU_DECLARE)
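// Each GPU_FOREACH_* list above is expanded twice: once here with GPU_DECLARE to declare the
// function pointers, and again during gpu_init with the matching GPU_LOAD_* macro to fetch them,
// which keeps the declaration and loading steps in sync.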
// Buffer
bool gpu_buffer_init(gpu_buffer* buffer, gpu_buffer_info* info) {
if (info->handle) {
buffer->handle = (VkBuffer) info->handle;
buffer->memory = NULL; // Externally owned; nothing to release in gpu_buffer_destroy
return true;
}
VkBufferCreateInfo createInfo = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.size = info->size,
.usage =
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT |
VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
VK_BUFFER_USAGE_TRANSFER_DST_BIT
};
VK(vkCreateBuffer(state.device, &createInfo, NULL, &buffer->handle), "Could not create buffer") return false;
nickname(buffer->handle, VK_OBJECT_TYPE_BUFFER, info->label);
VkDeviceSize offset;
VkMemoryRequirements requirements;
vkGetBufferMemoryRequirements(state.device, buffer->handle, &requirements);
buffer->memory = gpu_allocate(GPU_MEMORY_BUFFER_GPU, requirements, &offset);
VK(vkBindBufferMemory(state.device, buffer->handle, buffer->memory->handle, offset), "Could not bind buffer memory") {
vkDestroyBuffer(state.device, buffer->handle, NULL);
gpu_release(buffer->memory);
return false;
}
if (info->pointer) {
*info->pointer = buffer->memory->pointer ? (char*) buffer->memory->pointer + offset : NULL;
}
return true;
}
void gpu_buffer_destroy(gpu_buffer* buffer) {
condemn(buffer->handle, VK_OBJECT_TYPE_BUFFER);
gpu_release(buffer->memory);
}
// Entry
bool gpu_init(gpu_config* config) {
state.config = *config;
// Load
#ifdef _WIN32
state.library = LoadLibraryA("vulkan-1.dll");
CHECK(state.library, "Failed to load vulkan library") return gpu_destroy(), false;
PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr) GetProcAddress(state.library, "vkGetInstanceProcAddr");
#elif __APPLE__
state.library = dlopen("libvulkan.1.dylib", RTLD_NOW | RTLD_LOCAL);
CHECK(state.library, "Failed to load vulkan library") return gpu_destroy(), false;
PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr) dlsym(state.library, "vkGetInstanceProcAddr");
#else
state.library = dlopen("libvulkan.so", RTLD_NOW | RTLD_LOCAL);
CHECK(state.library, "Failed to load vulkan library") return gpu_destroy(), false;
PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr = (PFN_vkGetInstanceProcAddr) dlsym(state.library, "vkGetInstanceProcAddr");
#endif
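// Bootstrapping order: vkGetInstanceProcAddr is pulled from the shared library, global functions
// are loaded with a NULL instance, instance-level functions are loaded after vkCreateInstance, and
// device-level functions are loaded after vkCreateDevice.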
GPU_FOREACH_ANONYMOUS(GPU_LOAD_ANONYMOUS);
{ // Instance
VkInstanceCreateInfo instanceInfo = {
.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
.pApplicationInfo = &(VkApplicationInfo) {
.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
.pEngineName = config->engineName,
.engineVersion = VK_MAKE_VERSION(config->engineVersion[0], config->engineVersion[1], config->engineVersion[2]),
.apiVersion = VK_MAKE_VERSION(1, 1, 0)
},
.enabledLayerCount = state.config.debug ? 1 : 0,
.ppEnabledLayerNames = (const char*[]) { "VK_LAYER_KHRONOS_validation" },
.enabledExtensionCount = state.config.debug ? 1 : 0,
.ppEnabledExtensionNames = (const char*[]) { "VK_EXT_debug_utils" }
};
VK(vkCreateInstance(&instanceInfo, NULL, &state.instance), "Instance creation failed") return gpu_destroy(), false;
GPU_FOREACH_INSTANCE(GPU_LOAD_INSTANCE);
if (state.config.debug) {
VkDebugUtilsMessengerCreateInfoEXT messengerInfo = {
.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
.messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT,
.messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT,
.pfnUserCallback = relay
};
VK(vkCreateDebugUtilsMessengerEXT(state.instance, &messengerInfo, NULL, &state.messenger), "Debug hook setup failed") return gpu_destroy(), false;
}
}
{ // Device
uint32_t deviceCount = 1;
VK(vkEnumeratePhysicalDevices(state.instance, &deviceCount, &state.adapter), "Physical device enumeration failed") return gpu_destroy(), false;
VkPhysicalDeviceMultiviewProperties multiviewProperties = { .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES };
VkPhysicalDeviceSubgroupProperties subgroupProperties = { .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES, .pNext = &multiviewProperties };
VkPhysicalDeviceProperties2 properties2 = { .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2, .pNext = &subgroupProperties };
vkGetPhysicalDeviceProperties2(state.adapter, &properties2);
if (config->device) {
VkPhysicalDeviceProperties* properties = &properties2.properties;
config->device->deviceId = properties->deviceID;
config->device->vendorId = properties->vendorID;
memcpy(config->device->deviceName, properties->deviceName, MIN(sizeof(config->device->deviceName), sizeof(properties->deviceName)));
config->device->renderer = "Vulkan";
config->device->subgroupSize = subgroupProperties.subgroupSize;
config->device->discrete = properties->deviceType == VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU;
}
if (config->limits) {
VkPhysicalDeviceLimits* limits = &properties2.properties.limits;
config->limits->textureSize2D = MIN(limits->maxImageDimension2D, UINT16_MAX);
config->limits->textureSize3D = MIN(limits->maxImageDimension3D, UINT16_MAX);
config->limits->textureSizeCube = MIN(limits->maxImageDimensionCube, UINT16_MAX);
config->limits->textureLayers = MIN(limits->maxImageArrayLayers, UINT16_MAX);
config->limits->renderSize[0] = limits->maxFramebufferWidth;
config->limits->renderSize[1] = limits->maxFramebufferHeight;
config->limits->renderSize[2] = multiviewProperties.maxMultiviewViewCount;
config->limits->uniformBufferRange = limits->maxUniformBufferRange;
config->limits->storageBufferRange = limits->maxStorageBufferRange;
config->limits->uniformBufferAlign = limits->minUniformBufferOffsetAlignment;
config->limits->storageBufferAlign = limits->minStorageBufferOffsetAlignment;
config->limits->vertexAttributes = limits->maxVertexInputAttributes;
config->limits->vertexBuffers = limits->maxVertexInputBindings;
config->limits->vertexBufferStride = MIN(limits->maxVertexInputBindingStride, UINT16_MAX);
config->limits->vertexShaderOutputs = limits->maxVertexOutputComponents;
config->limits->clipDistances = limits->maxClipDistances;
config->limits->cullDistances = limits->maxCullDistances;
config->limits->clipAndCullDistances = limits->maxCombinedClipAndCullDistances;
config->limits->computeDispatchCount[0] = limits->maxComputeWorkGroupCount[0];
config->limits->computeDispatchCount[1] = limits->maxComputeWorkGroupCount[1];
config->limits->computeDispatchCount[2] = limits->maxComputeWorkGroupCount[2];
config->limits->computeWorkgroupSize[0] = limits->maxComputeWorkGroupSize[0];
config->limits->computeWorkgroupSize[1] = limits->maxComputeWorkGroupSize[1];
config->limits->computeWorkgroupSize[2] = limits->maxComputeWorkGroupSize[2];
config->limits->computeWorkgroupVolume = limits->maxComputeWorkGroupInvocations;
config->limits->computeSharedMemory = limits->maxComputeSharedMemorySize;
config->limits->pushConstantSize = limits->maxPushConstantsSize;
config->limits->indirectDrawCount = limits->maxDrawIndirectCount;
config->limits->instances = multiviewProperties.maxMultiviewInstanceIndex;
config->limits->anisotropy = limits->maxSamplerAnisotropy;
config->limits->pointSize = limits->pointSizeRange[1];
}
VkPhysicalDeviceShaderDrawParameterFeatures shaderDrawParameterFeatures = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES
};
VkPhysicalDeviceMultiviewFeatures multiviewFeatures = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES,
.pNext = &shaderDrawParameterFeatures
};
VkPhysicalDeviceFeatures2 enabledFeatures = {
.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2,
.pNext = &multiviewFeatures
};
if (config->features) {
VkPhysicalDeviceFeatures2 features2 = { .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2 };
VkPhysicalDeviceFeatures* enable = &enabledFeatures.features;
VkPhysicalDeviceFeatures* supports = &features2.features;
vkGetPhysicalDeviceFeatures2(state.adapter, &features2);
// Required features
enable->fullDrawIndexUint32 = true;
multiviewFeatures.multiview = true;
shaderDrawParameterFeatures.shaderDrawParameters = true;
// Internal features (exposed as limits)
enable->samplerAnisotropy = supports->samplerAnisotropy;
enable->multiDrawIndirect = supports->multiDrawIndirect;
enable->shaderClipDistance = supports->shaderClipDistance;
enable->shaderCullDistance = supports->shaderCullDistance;
enable->largePoints = supports->largePoints;
// Optional features (currently always enabled when supported)
config->features->textureBC = enable->textureCompressionBC = supports->textureCompressionBC;
config->features->textureASTC = enable->textureCompressionASTC_LDR = supports->textureCompressionASTC_LDR;
config->features->wireframe = enable->fillModeNonSolid = supports->fillModeNonSolid;
config->features->depthClamp = enable->depthClamp = supports->depthClamp;
config->features->indirectDrawFirstInstance = enable->drawIndirectFirstInstance = supports->drawIndirectFirstInstance;
config->features->float64 = enable->shaderFloat64 = supports->shaderFloat64;
config->features->int64 = enable->shaderInt64 = supports->shaderInt64;
config->features->int16 = enable->shaderInt16 = supports->shaderInt16;
}
state.queueFamilyIndex = ~0u;
VkQueueFamilyProperties queueFamilies[8];
uint32_t queueFamilyCount = COUNTOF(queueFamilies);
vkGetPhysicalDeviceQueueFamilyProperties(state.adapter, &queueFamilyCount, queueFamilies);
for (uint32_t i = 0; i < queueFamilyCount; i++) {
uint32_t mask = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT;
if ((queueFamilies[i].queueFlags & mask) == mask) {
state.queueFamilyIndex = i;
break;
}
}
CHECK(state.queueFamilyIndex != ~0u, "Queue selection failed") return gpu_destroy(), false;
VkDeviceCreateInfo deviceInfo = {
.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
.pNext = config->features ? &enabledFeatures : NULL,
.queueCreateInfoCount = 1,
.pQueueCreateInfos = &(VkDeviceQueueCreateInfo) {
.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
.queueFamilyIndex = state.queueFamilyIndex,
.pQueuePriorities = &(float) { 1.f },
.queueCount = 1
}
};
VK(vkCreateDevice(state.adapter, &deviceInfo, NULL, &state.device), "Device creation failed") return gpu_destroy(), false;
vkGetDeviceQueue(state.device, state.queueFamilyIndex, 0, &state.queue);
GPU_FOREACH_DEVICE(GPU_LOAD_DEVICE);
}
{ // Allocators
VkPhysicalDeviceMemoryProperties memoryProperties;
vkGetPhysicalDeviceMemoryProperties(state.adapter, &memoryProperties);
VkMemoryType* memoryTypes = memoryProperties.memoryTypes;
struct { VkBufferUsageFlags usage; VkMemoryPropertyFlags flags; } bufferFlags[] = {
[GPU_MEMORY_BUFFER_GPU] = {
.usage =
VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
VK_BUFFER_USAGE_INDEX_BUFFER_BIT |
VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
VK_BUFFER_USAGE_INDIRECT_BUFFER_BIT |
VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
VK_BUFFER_USAGE_TRANSFER_DST_BIT,
.flags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT
}
};
// Without VK_KHR_maintenance4, you have to create objects to figure out memory requirements
for (uint32_t i = 0; i < COUNTOF(bufferFlags); i++) {
gpu_allocator* allocator = &state.allocators[i];
VkBufferCreateInfo createInfo = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.usage = bufferFlags[i].usage,
.size = 4
};
VkBuffer buffer;
VkMemoryRequirements requirements;
vkCreateBuffer(state.device, &createInfo, NULL, &buffer);
vkGetBufferMemoryRequirements(state.device, buffer, &requirements);
vkDestroyBuffer(state.device, buffer, NULL);
uint32_t fallback = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
for (uint32_t j = 0; j < memoryProperties.memoryTypeCount; j++) {
// Skip memory types the buffer can't be bound to
if (~requirements.memoryTypeBits & (1 << j)) {
continue;
}
// A memory type with all of the requested flags is ideal; stop searching once one is found
if ((memoryTypes[j].propertyFlags & bufferFlags[i].flags) == bufferFlags[i].flags) {
allocator->memoryFlags = memoryTypes[j].propertyFlags;
allocator->memoryType = j;
break;
}
// Otherwise, remember any device-local type as a fallback and keep looking
if ((memoryTypes[j].propertyFlags & fallback) == fallback) {
allocator->memoryFlags = memoryTypes[j].propertyFlags;
allocator->memoryType = j;
continue;
}
}
}
}
return true;
}
void gpu_destroy(void) {
if (state.device) vkDeviceWaitIdle(state.device);
if (state.device) vkDestroyDevice(state.device, NULL);
if (state.messenger) vkDestroyDebugUtilsMessengerEXT(state.instance, state.messenger, NULL);
if (state.instance) vkDestroyInstance(state.instance, NULL);
#ifdef _WIN32
if (state.library) FreeLibrary(state.library);
#else
if (state.library) dlclose(state.library);
#endif
memset(&state, 0, sizeof(state));
}
// Helpers
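// gpu_allocate is a simple bump allocator: it aligns the cursor within the active memory block,
// advances it, and takes a reference on the block for each sub-allocation. When the active block
// is missing or full, a new VkDeviceMemory block of at least blockSizes[type] bytes (64 MiB for
// GPU buffers) is allocated into a free slot of state.memory.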
static gpu_memory* gpu_allocate(gpu_memory_type type, VkMemoryRequirements info, VkDeviceSize* offset) {
gpu_allocator* allocator = &state.allocators[state.allocatorLookup[type]];
static const uint32_t blockSizes[] = {
[GPU_MEMORY_BUFFER_GPU] = 1 << 26
};
uint32_t blockSize = blockSizes[type];
uint32_t cursor = ALIGN(allocator->cursor, info.alignment);
if (allocator->block && cursor + info.size <= blockSize) {
allocator->cursor = cursor + info.size;
allocator->block->refs++;
*offset = cursor;
return allocator->block;
}
// If there wasn't an active block or it overflowed, find an empty block to allocate
for (uint32_t i = 0; i < COUNTOF(state.memory); i++) {
if (!state.memory[i].handle) {
gpu_memory* memory = &state.memory[i];
VkMemoryAllocateInfo memoryInfo = {
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.allocationSize = MAX(blockSize, info.size),
.memoryTypeIndex = allocator->memoryType
};
VK(vkAllocateMemory(state.device, &memoryInfo, NULL, &memory->handle), "Failed to allocate GPU memory") {
allocator->block = NULL;
return NULL;
}
if (allocator->memoryFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
VK(vkMapMemory(state.device, memory->handle, 0, VK_WHOLE_SIZE, 0, &memory->pointer), "Failed to map memory") {
vkFreeMemory(state.device, memory->handle, NULL);
memory->handle = NULL;
return NULL;
}
}
allocator->block = memory;
allocator->cursor = info.size;
*offset = 0;
return memory;
}
}
check(false, "Out of space for memory blocks");
return NULL;
}
static void gpu_release(gpu_memory* memory) {
if (memory && --memory->refs == 0) { // memory is NULL for buffers wrapping external handles
condemn(memory->handle, VK_OBJECT_TYPE_DEVICE_MEMORY);
memory->handle = NULL; // Free the state.memory slot so gpu_allocate can reuse it
}
}
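// Deferred destruction: a handle can't be destroyed while the GPU may still be using it. condemn
// stamps the handle with the current CPU tick and queues it in the morgue; expunge walks the queue
// and destroys victims whose tick the GPU has completed (state.tick[GPU]).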
static void condemn(void* handle, VkObjectType type) {
if (!handle) return;
gpu_morgue* morgue = &state.morgue;
// If the morgue is full, perform an emergency expunge
if (morgue->head - morgue->tail >= COUNTOF(morgue->data)) {
vkDeviceWaitIdle(state.device);
state.tick[GPU] = state.tick[CPU];
expunge();
}
morgue->data[morgue->head++ & MORGUE_MASK] = (gpu_victim) { handle, type, state.tick[CPU] };
}
static void expunge(void) {
gpu_morgue* morgue = &state.morgue;
while (morgue->tail != morgue->head && state.tick[GPU] >= morgue->data[morgue->tail & MORGUE_MASK].tick) {
gpu_victim* victim = &morgue->data[morgue->tail++ & MORGUE_MASK];
switch (victim->type) {
case VK_OBJECT_TYPE_BUFFER: vkDestroyBuffer(state.device, victim->handle, NULL); break;
case VK_OBJECT_TYPE_DEVICE_MEMORY: vkFreeMemory(state.device, victim->handle, NULL); break;
default: break;
}
}
}
static VkBool32 relay(VkDebugUtilsMessageSeverityFlagBitsEXT severity, VkDebugUtilsMessageTypeFlagsEXT flags, const VkDebugUtilsMessengerCallbackDataEXT* data, void* userdata) {
if (state.config.callback) {
bool severe = severity & VK_DEBUG_UTILS_MESSAGE_SEVERITY_ERROR_BIT_EXT;
state.config.callback(state.config.userdata, data->pMessage, severe);
}
return VK_FALSE;
}
static void nickname(void* handle, VkObjectType type, const char* name) {
if (name && state.config.debug) {
union { uint64_t u64; void* p; } pointer = { .p = handle };
VkDebugUtilsObjectNameInfoEXT info = {
.sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_OBJECT_NAME_INFO_EXT,
.objectType = type,
.objectHandle = pointer.u64,
.pObjectName = name
};
VK(vkSetDebugUtilsObjectNameEXT(state.device, &info), "Nickname failed") {}
}
}
static bool vcheck(VkResult result, const char* message) {
if (result >= 0) return true;
if (!state.config.callback) return false;
#define CASE(x) case x: state.config.callback(state.config.userdata, "Vulkan error: " #x, false); break;
switch (result) {
CASE(VK_ERROR_OUT_OF_HOST_MEMORY);
CASE(VK_ERROR_OUT_OF_DEVICE_MEMORY);
CASE(VK_ERROR_INITIALIZATION_FAILED);
CASE(VK_ERROR_DEVICE_LOST);
CASE(VK_ERROR_MEMORY_MAP_FAILED);
CASE(VK_ERROR_LAYER_NOT_PRESENT);
CASE(VK_ERROR_EXTENSION_NOT_PRESENT);
CASE(VK_ERROR_FEATURE_NOT_PRESENT);
CASE(VK_ERROR_TOO_MANY_OBJECTS);
CASE(VK_ERROR_FORMAT_NOT_SUPPORTED);
CASE(VK_ERROR_FRAGMENTED_POOL);
CASE(VK_ERROR_OUT_OF_POOL_MEMORY);
default: break;
}
#undef CASE
state.config.callback(state.config.userdata, message, true);
return false;
}
static bool check(bool condition, const char* message) {
if (!condition && state.config.callback) {
state.config.callback(state.config.userdata, message, true);
}
return condition;
}