#define VK_USE_PLATFORM_MACOS_MVK
#include "vulkan/vulkan_core.h"
#include "vulkan/vk_enum_string_helper.h"

#define GLFW_INCLUDE_VULKAN
#include <GLFW/glfw3.h>
#define GLFW_EXPOSE_NATIVE_COCOA
#include <GLFW/glfw3native.h>

#define GLM_FORCE_RADIANS
#define GLM_FORCE_DEPTH_ZERO_TO_ONE
#include <cglm/types.h>
#include <cglm/mat4.h>
#include <cglm/vec3.h>
#include <cglm/affine.h>
#include <cglm/quat.h>
#include <cglm/cam.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <ply.h>
#include <map.h>
#include <gpu_mem.h>
typedef struct QueueStruct {
    VkQueue handle;
    uint32_t family;
    uint32_t index;
} Queue;

typedef struct SwapchainDetailsStruct {
    VkSurfaceCapabilitiesKHR capabilities;

    VkSurfaceFormatKHR* formats;
    uint32_t formats_count;

    VkPresentModeKHR* present_modes;
    uint32_t present_modes_count;
} SwapchainDetails;

typedef struct SwapchainImagesStruct {
    VkImage* images;
    uint32_t count;
} SwapchainImages;

typedef struct TextureStruct {
    GPUImage image;
    VkImageView view;
    VkSampler sampler;
} Texture;

typedef struct PositionStruct {
    vec3 position;
    vec3 scale;
    versor rotation;
} Position;

typedef void (*MappingFunc)(void*, void*);

void attribute_mapping_position_to_matrix(void* dest, void* source) {
    Position* position = source;
    glm_translate_make(dest, position->position);
    glm_quat_rotate(dest, position->rotation, dest);
    glm_scale(dest, position->scale);
}

#define MAPPING_POSITION_TO_MATRIX 0

MappingFunc mapping_functions[] = {
    attribute_mapping_position_to_matrix,
};

typedef struct MappingStruct {
    uint32_t mapping_type; // What function to use to map it
    uint32_t index;        // Which index to use in the ATTRIBUTE_ID_DESCRIPTORS array
} Mapping;
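
// Illustrative sketch (assumed helper, not referenced elsewhere in this file): a Mapping
// entry would be applied by dispatching through mapping_functions on mapping_type,
// e.g. writing a mat4 from a Position attribute.
static inline void apply_mapping(Mapping mapping, void* dest, void* source) {
    mapping_functions[mapping.mapping_type](dest, source);
}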
#define ATTRIBUTE_ID_MESH            0x00000001 // Mesh*
#define ATTRIBUTE_ID_PIPELINE        0x00000002 // GraphicsPipeline*
#define ATTRIBUTE_ID_DESCRIPTORS     0x00000003 // void*** (array of arrays of data pointers)
#define ATTRIBUTE_ID_DESCRIPTOR_SETS 0x00000004 // VkDescriptorSet*
#define ATTRIBUTE_ID_POSITION        0x00000005 // Position*
#define ATTRIBUTE_ID_PUSH_CONSTANTS  0x00000006 // void*

typedef struct ObjectStruct {
    Map attributes;
} Object;

typedef struct MeshStruct {
    uint32_t vertex_count;
    GPUBuffer vertex_buffer;

    uint32_t index_count;
    GPUBuffer index_buffer;
} Mesh;

typedef struct GraphicsPipelineInfoStruct {
    VkDescriptorSetLayout scene_layout;
    VkDescriptorSetLayout set_layout;

    VkPipelineVertexInputStateCreateInfo input_info;

    uint32_t shader_stages_count;
    VkPipelineShaderStageCreateInfo* shader_stages;
} GraphicsPipelineInfo;

typedef struct GraphicsPipelineStruct {
    uint32_t max_frames_in_flight;
    uint32_t max_objects;

    VkDescriptorPool descriptor_pool;
    VkDescriptorSet* descriptors;

    VkPipelineLayout layout;
    VkPipeline pipeline;
    VkPipeline offscreen_pipeline;
} GraphicsPipeline;
typedef struct VulkanContextStruct {
    VkInstance instance;
    VkDebugUtilsMessengerEXT debug_messenger;
    VkPhysicalDevice physical_device;

    VkDevice device;

    Queue graphics_queue;
    Queue transfer_queue;
    Queue present_queue;

    // G Buffer
    GPUPage* g_buffer_page;
    VkRenderPass g_renderpass;
    VkFramebuffer g_framebuffer;

    GPUImage g_image_position;
    GPUImage g_image_normal;
    GPUImage g_image_depth;

    VkImageView g_image_view_position;
    VkImageView g_image_view_normal;
    VkImageView g_image_view_depth;

    VkFormat g_image_format_position;
    VkFormat g_image_format_normal;
    VkFormat g_image_format_depth;

    // Present Swapchain
    VkSurfaceKHR surface;

    SwapchainDetails swapchain_details;
    VkSwapchainKHR swapchain;

    VkSurfaceFormatKHR swapchain_format;
    VkPresentModeKHR swapchain_present_mode;
    VkExtent2D swapchain_extent;

    uint32_t swapchain_image_count;
    // Per image objects
    VkImage* swapchain_images;
    VkImageView* swapchain_image_views;
    VkFramebuffer* swapchain_framebuffers;

    VkDeviceMemory depth_image_memory;
    VkImage depth_image;
    VkFormat depth_format;
    VkImageView depth_image_view;

    uint32_t max_frames_in_flight;

    // Per frame objects
    VkCommandBuffer* offscreen_command_buffers;
    VkCommandBuffer* swapchain_command_buffers;
    VkSemaphore* image_available_semaphores;
    VkSemaphore* render_finished_semaphores;
    VkSemaphore* offscreen_complete_semaphores;
    VkFence* in_flight_fences;

    VkRenderPass render_pass;

    VkCommandPool graphics_command_pool;
    VkCommandPool transfer_command_pool;

    uint32_t current_frame;

    VkPhysicalDeviceMemoryProperties memories;
    VkCommandPool extra_graphics_pool;
} VulkanContext;

typedef struct SceneContextStruct {
    VkDescriptorPool pool;
    VkDescriptorSetLayout descriptor_layout;
    VkDescriptorSet* descriptors;
    GPUBuffer* ubos;
    void** ubo_ptrs;
    uint32_t pcr_size;
} SceneContext;

struct TextureVertex {
    vec3 pos;
    vec3 color;
    vec2 tex;
};

struct Vertex {
    vec3 pos;
    vec3 color;
};

struct SceneUBO {
    mat4 test;
};

struct ScenePC {
    mat4 view;
    mat4 proj;
};
const struct Vertex vertices[] = {
    {.pos = {-1.f, -1.f, 0.f}, .color = {1.0f, 0.0f, 0.0f}},
    {.pos = { 1.f, -1.f, 0.f}, .color = {0.0f, 1.0f, 0.0f}},
    {.pos = { 1.f,  1.f, 0.f}, .color = {0.0f, 0.0f, 1.0f}},
    {.pos = {-1.f,  1.f, 0.f}, .color = {1.0f, 1.0f, 1.0f}},
};

const struct TextureVertex texture_vertices[] = {
    {.pos = {-1.f, -1.f, 0.f}, .color = {1.0f, 0.0f, 0.0f}, .tex = {1.0f, 1.0f}},
    {.pos = { 1.f, -1.f, 0.f}, .color = {0.0f, 1.0f, 0.0f}, .tex = {0.0f, 1.0f}},
    {.pos = { 1.f,  1.f, 0.f}, .color = {0.0f, 0.0f, 1.0f}, .tex = {0.0f, 0.0f}},
    {.pos = {-1.f,  1.f, 0.f}, .color = {1.0f, 1.0f, 1.0f}, .tex = {1.0f, 0.0f}},
};

const uint16_t indices[] = {
    2, 1, 0, 0, 3, 2,
};

const char* validation_layers[] = {
    "VK_LAYER_KHRONOS_validation",
    //"VK_LAYER_LUNARG_api_dump",
    "VK_LAYER_KHRONOS_synchronization2",
    "VK_LAYER_KHRONOS_shader_object",
};
uint32_t validation_layer_count = sizeof(validation_layers) / sizeof(const char*);

const char* instance_extensions[] = {
    VK_KHR_PORTABILITY_ENUMERATION_EXTENSION_NAME,
    VK_EXT_DEBUG_UTILS_EXTENSION_NAME,
    "VK_EXT_metal_surface",
    VK_KHR_SURFACE_EXTENSION_NAME,
};
uint32_t instance_extension_count = sizeof(instance_extensions) / sizeof(const char*);

const char* device_extensions[] = {
    VK_KHR_SWAPCHAIN_EXTENSION_NAME,
    VK_EXT_DESCRIPTOR_INDEXING_EXTENSION_NAME,
    "VK_KHR_portability_subset",
};
uint32_t device_extension_count = sizeof(device_extensions) / sizeof(const char*);
void glfw_error(int error, const char* description) {
    fprintf(stderr, "GLFW_ERR: 0x%02x - %s\n", error, description);
}

GLFWwindow* init_window(int width, int height) {
    glfwInit();
    glfwSetErrorCallback(glfw_error);

    glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API);
    glfwWindowHint(GLFW_RESIZABLE, GLFW_TRUE);
    GLFWwindow* window = glfwCreateWindow(width, height, "Vulkan window", 0, 0);

    return window;
}
VkSemaphore* create_semaphores(VkDevice device, VkSemaphoreCreateFlags flags, uint32_t count) {
    VkSemaphore* semaphores = malloc(sizeof(VkSemaphore)*count);
    if(semaphores == 0) {
        return 0;
    }

    VkSemaphoreCreateInfo semaphore_info = {
        .sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
        .flags = flags,
    };

    for(uint32_t i = 0; i < count; i++) {
        VkResult result = vkCreateSemaphore(device, &semaphore_info, 0, &semaphores[i]);
        if(result != VK_SUCCESS) {
            free(semaphores);
            return 0;
        }
    }
    return semaphores;
}
VkFormat find_depth_format(VkPhysicalDevice physical_device, uint32_t num_requested, VkFormat* requested, VkImageTiling tiling, VkFormatFeatureFlags features) {
    for(uint32_t i = 0; i < num_requested; i++) {
        VkFormatProperties properties;
        vkGetPhysicalDeviceFormatProperties(physical_device, requested[i], &properties);

        if(tiling == VK_IMAGE_TILING_LINEAR && (properties.linearTilingFeatures & features) == features) {
            return requested[i];
        } else if (tiling == VK_IMAGE_TILING_OPTIMAL && (properties.optimalTilingFeatures & features) == features) {
            return requested[i];
        }
    }
    return VK_FORMAT_MAX_ENUM;
}
bool check_validation_layers(const char** layers, uint32_t num_layers) {
    uint32_t layer_count;
    VkResult result;

    result = vkEnumerateInstanceLayerProperties(&layer_count, 0);
    if(result != VK_SUCCESS) {
        return false;
    }

    VkLayerProperties* available_layers = malloc(sizeof(VkLayerProperties)*layer_count);

    result = vkEnumerateInstanceLayerProperties(&layer_count, available_layers);

    for(uint32_t i = 0; i < num_layers; i++) {
        bool found = false;
        for(uint32_t j = 0; j < layer_count; j++) {
            if(strcmp(layers[i], available_layers[j].layerName) == 0) {
                found = true;
            }
        }
        if(found == false) {
            free(available_layers);
            return false;
        }
    }

    free(available_layers);
    return true;
}
static VKAPI_ATTR VkBool32 VKAPI_CALL debug_callback(
    VkDebugUtilsMessageSeverityFlagBitsEXT severity,
    VkDebugUtilsMessageTypeFlagsEXT type,
    const VkDebugUtilsMessengerCallbackDataEXT* callback_data,
    void* user_data) {

    (void)severity;
    (void)type;
    (void)user_data;

    fprintf(stderr, "Validation layer: %s\n", callback_data->pMessage);

    return VK_FALSE;
}
VkDescriptorSet* create_descriptor_sets(VkDevice device, VkDescriptorSetLayout layout, VkDescriptorPool pool, uint32_t count) {
    VkDescriptorSetLayout* layouts = malloc(sizeof(VkDescriptorSetLayout)*count);
    if(layouts == 0) {
        return 0;
    }

    VkDescriptorSet* sets = malloc(sizeof(VkDescriptorSet)*count);
    if(sets == 0) {
        free(layouts);
        return 0;
    }

    for(uint32_t i = 0; i < count; i++) {
        layouts[i] = layout;
    }

    VkDescriptorSetAllocateInfo alloc_info = {
        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
        .descriptorPool = pool,
        .descriptorSetCount = count,
        .pSetLayouts = layouts,
    };

    VkResult result = vkAllocateDescriptorSets(device, &alloc_info, sets);
    free(layouts);
    if(result != VK_SUCCESS) {
        free(sets);
        return 0;
    }

    return sets;
}
VkResult get_best_physical_device(VkInstance instance, VkPhysicalDevice* device) {
    uint32_t device_count = 0;
    VkResult result;
    result = vkEnumeratePhysicalDevices(instance, &device_count, 0);
    if(result != VK_SUCCESS) {
        return result;
    }

    VkPhysicalDevice* devices = malloc(sizeof(VkPhysicalDevice)*device_count);
    result = vkEnumeratePhysicalDevices(instance, &device_count, devices);
    if(result != VK_SUCCESS) {
        free(devices);
        return result;
    }

    int top_score = -1;
    for(uint32_t i = 0; i < device_count; i++) {
        int score = 0;

        VkPhysicalDeviceProperties properties;
        vkGetPhysicalDeviceProperties(devices[i], &properties);
        fprintf(stderr, "maxPerStageResources: %u\n", properties.limits.maxPerStageResources);

        VkPhysicalDeviceFeatures features;
        vkGetPhysicalDeviceFeatures(devices[i], &features);

        switch(properties.deviceType) {
            case VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU:
                score += 100;
                break;
            case VK_PHYSICAL_DEVICE_TYPE_VIRTUAL_GPU:
                score += 50;
                break;
            case VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU:
                score += 25;
                break;
            case VK_PHYSICAL_DEVICE_TYPE_CPU:
                score += 0;
                break;
            default:
                continue;
        }

        if(score > top_score) {
            top_score = score;
            *device = devices[i];
        }
    }

    free(devices);

    return VK_SUCCESS;
}
VkResult create_debug_messenger(VkInstance instance, VkDebugUtilsMessengerEXT* debug_messenger) {
    VkDebugUtilsMessengerCreateInfoEXT messenger_info = {
        .sType = VK_STRUCTURE_TYPE_DEBUG_UTILS_MESSENGER_CREATE_INFO_EXT,
        .messageSeverity = VK_DEBUG_UTILS_MESSAGE_SEVERITY_VERBOSE_BIT_EXT,
        .messageType = VK_DEBUG_UTILS_MESSAGE_TYPE_GENERAL_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_VALIDATION_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_PERFORMANCE_BIT_EXT | VK_DEBUG_UTILS_MESSAGE_TYPE_DEVICE_ADDRESS_BINDING_BIT_EXT,
        .pfnUserCallback = debug_callback,
        .pUserData = 0,
    };

    PFN_vkCreateDebugUtilsMessengerEXT func = (PFN_vkCreateDebugUtilsMessengerEXT)vkGetInstanceProcAddr(instance, "vkCreateDebugUtilsMessengerEXT");
    if(func == 0) {
        fprintf(stderr, "vkCreateDebugUtilsMessengerEXT is not available\n");
        return VK_ERROR_EXTENSION_NOT_PRESENT;
    }

    VkResult result;
    result = func(instance, &messenger_info, 0, debug_messenger);
    if(result != VK_SUCCESS) {
        fprintf(stderr, "failed to create debug messenger\n");
        return result;
    }

    return VK_SUCCESS;
}
VkResult create_instance(VkInstance* instance) {
    if(instance == NULL) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    if(check_validation_layers(validation_layers, validation_layer_count) == false) {
        fprintf(stderr, "requested validation layers not supported\n");
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkApplicationInfo app_info = {
        .sType = VK_STRUCTURE_TYPE_APPLICATION_INFO,
        .pApplicationName = "spacegame",
        .applicationVersion = VK_MAKE_VERSION(0, 0, 1),
        .pEngineName = "spacegame",
        .engineVersion = VK_MAKE_VERSION(0, 0, 1),
        .apiVersion = VK_API_VERSION_1_2,
    };

    uint32_t glfwExtensionCount = 0;
    const char** glfwExtensions;

    glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount);

    const char** requested_extensions = malloc(sizeof(char*)*(glfwExtensionCount + instance_extension_count));
    for(uint32_t i = 0; i < glfwExtensionCount; i++) {
        requested_extensions[i] = glfwExtensions[i];
    }

    for(uint32_t i = 0; i < instance_extension_count; i++) {
        requested_extensions[glfwExtensionCount + i] = instance_extensions[i];
    }

    VkInstanceCreateInfo instance_info = {
        .sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
        .pApplicationInfo = &app_info,
        .enabledLayerCount = validation_layer_count,
        .ppEnabledLayerNames = validation_layers,
        .enabledExtensionCount = glfwExtensionCount + instance_extension_count,
        .ppEnabledExtensionNames = requested_extensions,
        .flags = VK_INSTANCE_CREATE_ENUMERATE_PORTABILITY_BIT_KHR,
    };

    VkResult result = vkCreateInstance(&instance_info, 0, instance);
    free(requested_extensions);
    if(result != VK_SUCCESS) {
        fprintf(stderr, "vkCreateInstance: %s\n", string_VkResult(result));
        return result;
    }

    return VK_SUCCESS;
}
VkResult create_logical_device(VkPhysicalDevice physical_device, VkSurfaceKHR surface, Queue* graphics_queue, Queue* present_queue, Queue* transfer_queue, VkDevice* device) {
    if(graphics_queue == NULL || present_queue == NULL || transfer_queue == NULL || device == NULL) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    uint32_t queue_family_count = 0;
    vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &queue_family_count, NULL);

    VkQueueFamilyProperties* queue_families = malloc(sizeof(VkQueueFamilyProperties)*queue_family_count);
    vkGetPhysicalDeviceQueueFamilyProperties(physical_device, &queue_family_count, queue_families);

    graphics_queue->family = 0xFFFFFFFF;
    present_queue->family = 0xFFFFFFFF;
    for(uint32_t idx = 0; idx < queue_family_count; idx++) {
        VkBool32 present_support = VK_FALSE;
        vkGetPhysicalDeviceSurfaceSupportKHR(physical_device, idx, surface, &present_support);
        VkBool32 graphics_support = (queue_families[idx].queueFlags & VK_QUEUE_GRAPHICS_BIT);

        if(graphics_support && present_support) {
            graphics_queue->family = idx;
            graphics_queue->index = 0;

            present_queue->family = idx;
            present_queue->index = 0;
            break;
        } else if (graphics_support && (graphics_queue->family == 0xFFFFFFFF)) {
            graphics_queue->family = idx;
            graphics_queue->index = 0;
        } else if (present_support && (present_queue->family == 0xFFFFFFFF)) {
            present_queue->family = idx;
            present_queue->index = 0;
        }
    }

    transfer_queue->family = 0xFFFFFFFF;
    for(uint32_t idx = 0; idx < queue_family_count; idx++) {
        VkBool32 graphics_support = (queue_families[idx].queueFlags & VK_QUEUE_GRAPHICS_BIT);
        VkBool32 compute_support = (queue_families[idx].queueFlags & VK_QUEUE_COMPUTE_BIT);
        VkBool32 is_graphics_family = (graphics_queue->family == idx);
        VkBool32 is_present_family = (present_queue->family == idx);
        uint32_t queue_count = queue_families[idx].queueCount;

        if(is_graphics_family && (queue_count == 1)) {
            continue;
        } else if (is_present_family && (queue_count == 1)) {
            continue;
        }

        if(graphics_support && compute_support) {
            transfer_queue->family = idx;
            if(is_graphics_family || is_present_family) {
                transfer_queue->index = 1;
            } else {
                transfer_queue->index = 0;
            }
        }
    }

    free(queue_families);

    if(graphics_queue->family == 0xFFFFFFFF || present_queue->family == 0xFFFFFFFF || transfer_queue->family == 0xFFFFFFFF) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    uint32_t family_indices[] = {
        transfer_queue->family,
        graphics_queue->family,
        present_queue->family,
    };

    VkDeviceQueueCreateInfo queue_create_info[3] = {};
    float default_queue_priority = 1.0f;
    for(uint32_t i = 0; i < 3; i++) {
        queue_create_info[i].sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
        queue_create_info[i].queueFamilyIndex = family_indices[i];
        queue_create_info[i].queueCount = 1;
        queue_create_info[i].pQueuePriorities = &default_queue_priority;
    }

    VkPhysicalDeviceVulkan12Features features_12 = {
        .sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VULKAN_1_2_FEATURES,
        .bufferDeviceAddress = VK_TRUE,
        .descriptorIndexing = VK_TRUE,
        .descriptorBindingPartiallyBound = VK_TRUE,
        .descriptorBindingVariableDescriptorCount = VK_TRUE,
        .descriptorBindingUniformBufferUpdateAfterBind = VK_TRUE,
        .descriptorBindingStorageBufferUpdateAfterBind = VK_TRUE,
        .descriptorBindingSampledImageUpdateAfterBind = VK_TRUE,
    };

    VkPhysicalDeviceFeatures device_features = {
        .samplerAnisotropy = VK_TRUE,
    };

    VkDeviceCreateInfo device_create_info = {
        .sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
        .pQueueCreateInfos = queue_create_info,
        .queueCreateInfoCount = 3,
        .pEnabledFeatures = &device_features,
        .enabledExtensionCount = device_extension_count,
        .ppEnabledExtensionNames = device_extensions,
        .enabledLayerCount = validation_layer_count,
        .ppEnabledLayerNames = validation_layers,
        .pNext = &features_12,
    };

    VkResult result = vkCreateDevice(physical_device, &device_create_info, 0, device);
    if(result != VK_SUCCESS) {
        return result;
    }

    vkGetDeviceQueue(*device, graphics_queue->family, graphics_queue->index, &graphics_queue->handle);
    vkGetDeviceQueue(*device, present_queue->family, present_queue->index, &present_queue->handle);
    vkGetDeviceQueue(*device, transfer_queue->family, transfer_queue->index, &transfer_queue->handle);

    return VK_SUCCESS;
}
VkResult get_swapchain_details(VkPhysicalDevice physical_device, VkSurfaceKHR surface, SwapchainDetails* details) {
    details->formats = 0;
    details->present_modes = 0;

    VkResult result;

    result = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physical_device, surface, &details->capabilities);
    if(result != VK_SUCCESS) {
        return result;
    }

    result = vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device, surface, &details->formats_count, 0);
    if(result != VK_SUCCESS) {
        return result;
    }
    details->formats = malloc(sizeof(VkSurfaceFormatKHR)*details->formats_count);
    result = vkGetPhysicalDeviceSurfaceFormatsKHR(physical_device, surface, &details->formats_count, details->formats);
    if(result != VK_SUCCESS) {
        free(details->formats);
        return result;
    }

    result = vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface, &details->present_modes_count, 0);
    if(result != VK_SUCCESS) {
        free(details->formats);
        return result;
    }
    details->present_modes = malloc(sizeof(VkPresentModeKHR)*details->present_modes_count);
    result = vkGetPhysicalDeviceSurfacePresentModesKHR(physical_device, surface, &details->present_modes_count, details->present_modes);
    if(result != VK_SUCCESS) {
        free(details->formats);
        free(details->present_modes);
        return result;
    }

    return VK_SUCCESS;
}
VkSurfaceFormatKHR choose_swapchain_format(SwapchainDetails swapchain_details) {
    for(uint32_t i = 0; i < swapchain_details.formats_count; i++) {
        VkSurfaceFormatKHR format = swapchain_details.formats[i];
        if(format.format == VK_FORMAT_B8G8R8A8_SRGB && format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
            return format;
        }
    }
    return swapchain_details.formats[0];
}

VkPresentModeKHR choose_present_mode(SwapchainDetails swapchain_details) {
    for(uint32_t i = 0; i < swapchain_details.present_modes_count; i++) {
        if(swapchain_details.present_modes[i] == VK_PRESENT_MODE_MAILBOX_KHR) {
            return VK_PRESENT_MODE_MAILBOX_KHR;
        }
    }

    return VK_PRESENT_MODE_FIFO_KHR;
}

VkExtent2D choose_swapchain_extent(SwapchainDetails swapchain_details) {
    return swapchain_details.capabilities.currentExtent;
}
VkSwapchainKHR create_swapchain(VkDevice device, VkSurfaceFormatKHR format, VkPresentModeKHR present_mode, VkExtent2D extent, VkSurfaceKHR surface, VkSurfaceCapabilitiesKHR capabilities, uint32_t graphics_family_index, uint32_t present_family_index, VkSwapchainKHR old_swapchain) {
    uint32_t image_count = capabilities.minImageCount + 1;
    uint32_t max_images = capabilities.maxImageCount;
    if((max_images > 0) && (image_count > max_images)) {
        image_count = max_images;
    }

    VkSwapchainCreateInfoKHR swapchain_info = {
        .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
        .surface = surface,
        .minImageCount = image_count,
        .imageFormat = format.format,
        .imageColorSpace = format.colorSpace,
        .imageExtent = extent,
        .imageArrayLayers = 1,
        .imageUsage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
        .preTransform = capabilities.currentTransform,
        .compositeAlpha = VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR,
        .presentMode = present_mode,
        .clipped = VK_TRUE,
        .oldSwapchain = old_swapchain,
    };

    uint32_t queue_families[2] = {graphics_family_index, present_family_index};
    if(graphics_family_index != present_family_index) {
        swapchain_info.imageSharingMode = VK_SHARING_MODE_CONCURRENT;
        swapchain_info.queueFamilyIndexCount = 2;
        swapchain_info.pQueueFamilyIndices = queue_families;
    } else {
        swapchain_info.imageSharingMode = VK_SHARING_MODE_EXCLUSIVE;
        swapchain_info.queueFamilyIndexCount = 0;
        swapchain_info.pQueueFamilyIndices = 0;
    }

    VkSwapchainKHR swapchain;
    VkResult result;
    result = vkCreateSwapchainKHR(device, &swapchain_info, 0, &swapchain);
    if(result != VK_SUCCESS) {
        return VK_NULL_HANDLE;
    }

    return swapchain;
}
SwapchainImages get_swapchain_images(VkDevice device, VkSwapchainKHR swapchain) {
    SwapchainImages images = {
        .images = NULL,
        .count = 0,
    };

    VkResult result;
    result = vkGetSwapchainImagesKHR(device, swapchain, &images.count, 0);
    if(result != VK_SUCCESS) {
        images.count = 0;
        return images;
    }

    images.images = malloc(sizeof(VkImage)*images.count);
    if(images.images == 0) {
        images.count = 0;
        return images;
    }

    result = vkGetSwapchainImagesKHR(device, swapchain, &images.count, images.images);
    if(result != VK_SUCCESS) {
        images.count = 0;
        return images;
    }

    return images;
}
VkImageView* create_image_views(VkDevice device, uint32_t image_count, VkImage* images, VkSurfaceFormatKHR format) {
    VkImageView* image_views = malloc(sizeof(VkImageView)*image_count);
    if(image_views == 0) {
        return 0;
    }

    for(uint32_t i = 0; i < image_count; i++) {
        VkImageViewCreateInfo view_info = {
            .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
            .image = images[i],
            .viewType = VK_IMAGE_VIEW_TYPE_2D,
            .format = format.format,
            .components = {
                .r = VK_COMPONENT_SWIZZLE_IDENTITY,
                .g = VK_COMPONENT_SWIZZLE_IDENTITY,
                .b = VK_COMPONENT_SWIZZLE_IDENTITY,
                .a = VK_COMPONENT_SWIZZLE_IDENTITY,
            },
            .subresourceRange = {
                .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                .baseMipLevel = 0,
                .levelCount = 1,
                .baseArrayLayer = 0,
                .layerCount = 1,
            },
        };

        VkResult result = vkCreateImageView(device, &view_info, 0, &image_views[i]);
        if(result != VK_SUCCESS) {
            free(image_views);
            return 0;
        }
    }

    return image_views;
}
VkFramebuffer* create_swapchain_framebuffers(VkDevice device, uint32_t image_count, VkImageView* image_views, VkImageView depth_image_view, VkRenderPass render_pass, VkExtent2D extent) {
    VkFramebuffer* framebuffers = malloc(sizeof(VkFramebuffer)*image_count);
    if(framebuffers == 0) {
        return 0;
    }

    for(uint32_t i = 0; i < image_count; i++) {
        VkImageView attachments[] = {
            image_views[i],
            depth_image_view,
        };

        VkFramebufferCreateInfo framebuffer_info = {
            .sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
            .renderPass = render_pass,
            .attachmentCount = 2,
            .pAttachments = attachments,
            .width = extent.width,
            .height = extent.height,
            .layers = 1,
        };

        VkResult result = vkCreateFramebuffer(device, &framebuffer_info, 0, &framebuffers[i]);
        if(result != VK_SUCCESS) {
            free(framebuffers);
            return 0;
        }
    }

    return framebuffers;
}
VkShaderModule create_shader_module(VkDevice device, const char* code, uint32_t code_size) {
    VkShaderModuleCreateInfo shader_info = {
        .sType = VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
        .codeSize = code_size,
        .pCode = (uint32_t*)code,
    };

    VkShaderModule shader;
    VkResult result;
    result = vkCreateShaderModule(device, &shader_info, 0, &shader);
    if(result != VK_SUCCESS) {
        return VK_NULL_HANDLE;
    }

    return shader;
}
VkShaderModule load_shader_file(const char* path, VkDevice device) {
    FILE* file;
    file = fopen(path, "rb");
    if(file == 0) {
        return VK_NULL_HANDLE;
    }

    int result = fseek(file, 0, SEEK_END);
    if(result != 0) {
        fclose(file);
        return VK_NULL_HANDLE;
    }

    long buffer_size = ftell(file);

    result = fseek(file, 0, SEEK_SET);
    if(result != 0) {
        fclose(file);
        return VK_NULL_HANDLE;
    }

    char* buffer = malloc(buffer_size);
    if(buffer == 0) {
        fclose(file);
        return VK_NULL_HANDLE;
    }

    size_t read = fread(buffer, 1, buffer_size, file);
    fclose(file);

    VkShaderModule shader = create_shader_module(device, buffer, read);
    free(buffer);
    return shader;
}
VkRenderPass create_render_pass(VkDevice device, VkSurfaceFormatKHR format, VkFormat depth_format) {
    VkAttachmentDescription attachments[] = {
        {
            .format = format.format,
            .samples = VK_SAMPLE_COUNT_1_BIT,
            .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
            .storeOp = VK_ATTACHMENT_STORE_OP_STORE,
            .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
            .stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE,
            .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
            .finalLayout = VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
        },
        {
            .format = depth_format,
            .samples = VK_SAMPLE_COUNT_1_BIT,
            .loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
            .storeOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
            .stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
            .stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
            .initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
            .finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
        },
    };

    VkAttachmentReference color_attachment_refs[] = {
        {
            .attachment = 0,
            .layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
        },
    };

    VkAttachmentReference depth_attachment_ref = {
        .attachment = 1,
        .layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
    };

    // Create a subpass with the color and depth attachments
    VkSubpassDescription subpasses[] = {
        {
            .pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
            .colorAttachmentCount = sizeof(color_attachment_refs)/sizeof(VkAttachmentReference),
            .pColorAttachments = color_attachment_refs,
            .pDepthStencilAttachment = &depth_attachment_ref,
        },
    };

    // Ensure nothing else is writing to the depth/stencil or color attachments while this subpass runs
    VkSubpassDependency dependencies[] = {
        {
            .srcSubpass = VK_SUBPASS_EXTERNAL,
            .dstSubpass = 0,
            .srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
            .srcAccessMask = 0,
            .dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
            .dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
            .dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT,
        }
    };

    VkRenderPassCreateInfo render_info = {
        .sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
        .attachmentCount = sizeof(attachments)/sizeof(VkAttachmentDescription),
        .pAttachments = attachments,
        .subpassCount = sizeof(subpasses)/sizeof(VkSubpassDescription),
        .pSubpasses = subpasses,
        .dependencyCount = sizeof(dependencies)/sizeof(VkSubpassDependency),
        .pDependencies = dependencies,
    };

    VkRenderPass render_pass;
    VkResult result = vkCreateRenderPass(device, &render_info, 0, &render_pass);
    if(result != VK_SUCCESS) {
        return VK_NULL_HANDLE;
    }

    return render_pass;
}
VkCommandBuffer command_begin_single(VkDevice device, VkCommandPool transfer_pool) {
    VkCommandBufferAllocateInfo command_info = {
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
        .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
        .commandPool = transfer_pool,
        .commandBufferCount = 1,
    };

    VkCommandBuffer command_buffer;
    VkResult result = vkAllocateCommandBuffers(device, &command_info, &command_buffer);
    if(result != VK_SUCCESS) {
        return VK_NULL_HANDLE;
    }

    VkCommandBufferBeginInfo begin_info = {
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
        .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
    };

    result = vkBeginCommandBuffer(command_buffer, &begin_info);
    if(result != VK_SUCCESS) {
        vkFreeCommandBuffers(device, transfer_pool, 1, &command_buffer);
        return VK_NULL_HANDLE;
    }

    return command_buffer;
}
VkResult command_end_single(VkDevice device, VkCommandBuffer command_buffer, VkCommandPool transfer_pool, Queue transfer_queue) {
    VkResult result = vkEndCommandBuffer(command_buffer);
    if(result != VK_SUCCESS) {
        vkFreeCommandBuffers(device, transfer_pool, 1, &command_buffer);
        return result;
    }

    VkSubmitInfo submit_info = {
        .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
        .commandBufferCount = 1,
        .pCommandBuffers = &command_buffer,
    };

    result = vkQueueSubmit(transfer_queue.handle, 1, &submit_info, 0);
    if(result != VK_SUCCESS) {
        vkFreeCommandBuffers(device, transfer_pool, 1, &command_buffer);
        return result;
    }

    result = vkQueueWaitIdle(transfer_queue.handle);
    vkFreeCommandBuffers(device, transfer_pool, 1, &command_buffer);
    return result;
}
VkResult command_transition_image_layout(VkDevice device, VkCommandPool transfer_pool, Queue transfer_queue, VkImageLayout old_layout, VkImageLayout new_layout, VkImage image, VkAccessFlags src_mask, VkAccessFlags dst_mask, VkPipelineStageFlags source, VkPipelineStageFlags dest, uint32_t source_family, uint32_t dest_family, VkImageAspectFlags aspect_flags) {
    VkCommandBuffer command_buffer = command_begin_single(device, transfer_pool);

    VkImageMemoryBarrier barrier = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
        .oldLayout = old_layout,
        .newLayout = new_layout,
        .srcQueueFamilyIndex = source_family,
        .dstQueueFamilyIndex = dest_family,
        .image = image,
        .subresourceRange = {
            .aspectMask = aspect_flags,
            .levelCount = 1,
            .layerCount = 1,
            .baseMipLevel = 0,
            .baseArrayLayer = 0,
        },
        .srcAccessMask = src_mask,
        .dstAccessMask = dst_mask,
    };
    vkCmdPipelineBarrier(command_buffer, source, dest, 0, 0, 0, 0, 0, 1, &barrier);

    return command_end_single(device, command_buffer, transfer_pool, transfer_queue);
}
VkResult command_copy_buffer_to_image(VkDevice device, VkCommandPool transfer_pool, Queue transfer_queue, VkExtent3D image_size, VkBuffer source, VkImage dest) {
    VkCommandBuffer command_buffer = command_begin_single(device, transfer_pool);

    VkBufferImageCopy region = {
        .bufferOffset = 0,
        .bufferRowLength = 0,
        .bufferImageHeight = 0,
        .imageSubresource = {
            .baseArrayLayer = 0,
            .layerCount = 1,
            .mipLevel = 0,
            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
        },
        .imageOffset = {
            .x = 0,
            .y = 0,
            .z = 0,
        },
        .imageExtent = image_size,
    };

    vkCmdCopyBufferToImage(command_buffer, source, dest, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);

    return command_end_single(device, command_buffer, transfer_pool, transfer_queue);
}
Texture load_texture(VkDevice device, GPUPage* page, GPUBuffer staging, VkCommandPool transfer_pool, Queue transfer_queue, VkCommandPool graphics_pool, Queue graphics_queue, VkExtent2D size, VkFormat format, void* image_data) {
    Texture ret = {
        .image.page = NULL,
        .image.memory = NULL,
        .image.handle = VK_NULL_HANDLE,
        .view = VK_NULL_HANDLE,
    };

    VkExtent3D full_extent = {
        .width = size.width,
        .height = size.height,
        .depth = 1,
    };

    VkImageCreateInfo info = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
        .imageType = VK_IMAGE_TYPE_2D,
        .extent = full_extent,
        .mipLevels = 1,
        .arrayLayers = 1,
        .format = format,
        .tiling = VK_IMAGE_TILING_OPTIMAL,
        .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
        .usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
        .samples = VK_SAMPLE_COUNT_1_BIT,
        .flags = 0,
    };

    GPUImage image = {0};
    VkResult result = gpu_image_malloc(device, page, &info, &image);
    if(result != VK_SUCCESS) {
        return ret;
    }

    // Copy the pixel data into the mapped staging buffer
    memcpy(staging.page->ptr + staging.memory->offset, image_data, image.memory->size);

    result = command_transition_image_layout(device, transfer_pool, transfer_queue, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, image.handle, 0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, transfer_queue.family, transfer_queue.family, VK_IMAGE_ASPECT_COLOR_BIT);
    if(result != VK_SUCCESS) {
        gpu_image_free(device, image);
        return ret;
    }

    result = command_copy_buffer_to_image(device, transfer_pool, transfer_queue, full_extent, staging.handle, image.handle);
    if(result != VK_SUCCESS) {
        gpu_image_free(device, image);
        return ret;
    }

    // Queue family ownership transfer: release the image on the transfer queue...
    result = command_transition_image_layout(device, transfer_pool, transfer_queue, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, image.handle, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, transfer_queue.family, graphics_queue.family, VK_IMAGE_ASPECT_COLOR_BIT);
    if(result != VK_SUCCESS) {
        gpu_image_free(device, image);
        return ret;
    }

    // ...and acquire it on the graphics queue with a matching barrier
    result = command_transition_image_layout(device, graphics_pool, graphics_queue, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, image.handle, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, transfer_queue.family, graphics_queue.family, VK_IMAGE_ASPECT_COLOR_BIT);
    if(result != VK_SUCCESS) {
        gpu_image_free(device, image);
        return ret;
    }

    VkImageView view;
    VkImageViewCreateInfo view_info = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
        .image = image.handle,
        .viewType = VK_IMAGE_VIEW_TYPE_2D,
        .components = {
            .a = VK_COMPONENT_SWIZZLE_IDENTITY,
            .b = VK_COMPONENT_SWIZZLE_IDENTITY,
            .g = VK_COMPONENT_SWIZZLE_IDENTITY,
            .r = VK_COMPONENT_SWIZZLE_IDENTITY,
        },
        .format = format,
        .subresourceRange = {
            .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
            .layerCount = 1,
            .levelCount = 1,
            .baseArrayLayer = 0,
            .baseMipLevel = 0,
        },
    };

    result = vkCreateImageView(device, &view_info, 0, &view);
    if(result != VK_SUCCESS) {
        gpu_image_free(device, image);
        return ret;
    }

    VkSampler sampler;
    VkSamplerCreateInfo sampler_info = {
        .sType = VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,
        .magFilter = VK_FILTER_NEAREST,
        .minFilter = VK_FILTER_NEAREST,
        .addressModeU = VK_SAMPLER_ADDRESS_MODE_REPEAT,
        .addressModeV = VK_SAMPLER_ADDRESS_MODE_REPEAT,
        .addressModeW = VK_SAMPLER_ADDRESS_MODE_REPEAT,
        .anisotropyEnable = VK_FALSE,
        .maxAnisotropy = 2.0f,
        .borderColor = VK_BORDER_COLOR_INT_OPAQUE_BLACK,
        .unnormalizedCoordinates = VK_FALSE,
        .compareEnable = VK_FALSE,
        .compareOp = VK_COMPARE_OP_ALWAYS,
        .mipmapMode = VK_SAMPLER_MIPMAP_MODE_LINEAR,
        .mipLodBias = 0.0f,
        .minLod = 0.0f,
        .maxLod = 0.0f,
    };

    result = vkCreateSampler(device, &sampler_info, 0, &sampler);
    if(result != VK_SUCCESS) {
        gpu_image_free(device, image);
        vkDestroyImageView(device, view, 0);
        return ret;
    }

    ret.image = image;
    ret.view = view;
    ret.sampler = sampler;
    return ret;
}
int create_depth_image(VulkanContext* context) {
    VkExtent3D depth_extent = {
        .width = context->swapchain_extent.width,
        .height = context->swapchain_extent.height,
        .depth = 1,
    };

    VkImageCreateInfo depth_image_info = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
        .imageType = VK_IMAGE_TYPE_2D,
        .extent = depth_extent,
        .mipLevels = 1,
        .arrayLayers = 1,
        .format = context->depth_format,
        .tiling = VK_IMAGE_TILING_OPTIMAL,
        .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
        .usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
        .samples = VK_SAMPLE_COUNT_1_BIT,
        .flags = 0,
    };

    VkImage depth_image;
    VkResult result = vkCreateImage(context->device, &depth_image_info, 0, &depth_image);
    if(result != VK_SUCCESS) {
        fprintf(stderr, "failed to create depth image\n");
        return 1;
    } else {
        context->depth_image = depth_image;
    }

    VkMemoryRequirements depth_image_requirements;
    vkGetImageMemoryRequirements(context->device, context->depth_image, &depth_image_requirements);

    VkMemoryAllocateInfo depth_memory_info = {
        .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
        .allocationSize = depth_image_requirements.size,
        .memoryTypeIndex = pick_memory(context->memories, depth_image_requirements.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT | VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT, 0).index,
    };

    VkDeviceMemory depth_image_memory;
    result = vkAllocateMemory(context->device, &depth_memory_info, 0, &depth_image_memory);
    if(result != VK_SUCCESS) {
        fprintf(stderr, "failed to allocate memory for depth image\n");
        return 2;
    } else {
        context->depth_image_memory = depth_image_memory;
    }

    result = vkBindImageMemory(context->device, context->depth_image, context->depth_image_memory, 0);
    if(result != VK_SUCCESS) {
        fprintf(stderr, "failed to bind memory for depth image\n");
        return 3;
    }

    VkImageViewCreateInfo depth_view_info = {
        .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
        .image = context->depth_image,
        .viewType = VK_IMAGE_VIEW_TYPE_2D,
        .format = context->depth_format,
        .components = {
            .r = VK_COMPONENT_SWIZZLE_IDENTITY,
            .g = VK_COMPONENT_SWIZZLE_IDENTITY,
            .b = VK_COMPONENT_SWIZZLE_IDENTITY,
            .a = VK_COMPONENT_SWIZZLE_IDENTITY,
        },
        .subresourceRange = {
            .aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT,
            .baseMipLevel = 0,
            .levelCount = 1,
            .baseArrayLayer = 0,
            .layerCount = 1,
        },
    };

    VkImageView depth_image_view;
    result = vkCreateImageView(context->device, &depth_view_info, 0, &depth_image_view);
    if(result != VK_SUCCESS) {
        fprintf(stderr, "failed to create vulkan depth image view\n");
        return 4;
    } else {
        context->depth_image_view = depth_image_view;
    }

    result = command_transition_image_layout(context->device, context->extra_graphics_pool, context->graphics_queue, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, context->depth_image, 0, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, VK_IMAGE_ASPECT_DEPTH_BIT);
    if(result != VK_SUCCESS) {
        fprintf(stderr, "failed to transition depth image\n");
        return 5;
    }

    return 0;
}
VkResult recreate_swapchain(VulkanContext* context) {
    for(uint32_t i = 0; i < context->swapchain_image_count; i++) {
        vkDestroyFramebuffer(context->device, context->swapchain_framebuffers[i], 0);
        vkDestroyImageView(context->device, context->swapchain_image_views[i], 0);
    }

    for(uint32_t i = 0; i < context->max_frames_in_flight; i++) {
        vkDestroySemaphore(context->device, context->image_available_semaphores[i], 0);
    }
    free(context->image_available_semaphores);

    free(context->swapchain_image_views);
    free(context->swapchain_framebuffers);
    free(context->swapchain_images);
    free(context->swapchain_details.formats);
    free(context->swapchain_details.present_modes);

    vkDestroyImageView(context->device, context->depth_image_view, 0);
    vkDestroyImage(context->device, context->depth_image, 0);
    vkFreeMemory(context->device, context->depth_image_memory, 0);

    VkResult result = get_swapchain_details(context->physical_device, context->surface, &context->swapchain_details);
    if(result != VK_SUCCESS) {
        return result;
    }

    context->swapchain_format = choose_swapchain_format(context->swapchain_details);
    context->swapchain_present_mode = choose_present_mode(context->swapchain_details);
    context->swapchain_extent = choose_swapchain_extent(context->swapchain_details);

    if(create_depth_image(context) != 0) {
        return VK_ERROR_INITIALIZATION_FAILED;
    }

    VkSwapchainKHR swapchain = create_swapchain(context->device, context->swapchain_format, context->swapchain_present_mode, context->swapchain_extent, context->surface, context->swapchain_details.capabilities, context->graphics_queue.family, context->present_queue.family, context->swapchain);
    if(swapchain == VK_NULL_HANDLE) {
        context->swapchain = VK_NULL_HANDLE;
        return VK_ERROR_INITIALIZATION_FAILED;
    } else {
        context->swapchain = swapchain;
    }

    SwapchainImages swapchain_images = get_swapchain_images(context->device, context->swapchain);
    if(swapchain_images.count == 0) {
        return VK_ERROR_INITIALIZATION_FAILED;
    } else {
        context->swapchain_images = swapchain_images.images;
        context->swapchain_image_count = swapchain_images.count;
    }

    VkImageView* image_views = create_image_views(context->device, context->swapchain_image_count, context->swapchain_images, context->swapchain_format);
    if(image_views == 0) {
        return VK_ERROR_INITIALIZATION_FAILED;
    } else {
        context->swapchain_image_views = image_views;
    }

    VkFramebuffer* framebuffers = create_swapchain_framebuffers(context->device, context->swapchain_image_count, context->swapchain_image_views, context->depth_image_view, context->render_pass, context->swapchain_extent);
    if(framebuffers == 0) {
        return VK_ERROR_INITIALIZATION_FAILED;
    } else {
        context->swapchain_framebuffers = framebuffers;
    }

    VkSemaphore* ia_semaphores = create_semaphores(context->device, 0, context->max_frames_in_flight);
    if(ia_semaphores == 0) {
        fprintf(stderr, "failed to create vulkan image available semaphores\n");
        return VK_ERROR_INITIALIZATION_FAILED;
    } else {
        context->image_available_semaphores = ia_semaphores;
    }

    return VK_SUCCESS;
}
void command_draw_object(Object object, VkCommandBuffer command_buffer) {
    MaybeValue maybe_mesh = map_lookup(object.attributes, ATTRIBUTE_ID_MESH);
    if(maybe_mesh.has_value == false) {
        return;
    }

    Mesh* mesh = maybe_mesh.value;

    VkBuffer vertex_buffers[] = {mesh->vertex_buffer.handle};
    VkDeviceSize offsets[] = {0};

    vkCmdBindVertexBuffers(command_buffer, 0, 1, vertex_buffers, offsets);
    vkCmdBindIndexBuffer(command_buffer, mesh->index_buffer.handle, 0, VK_INDEX_TYPE_UINT16);

    vkCmdDrawIndexed(command_buffer, mesh->index_count, 1, 0, 0, 0);
}

void command_draw_pipeline(GraphicsPipeline pipeline, uint32_t object_count, Object* objects, uint32_t frame_num, VkCommandBuffer command_buffer, int offscreen) {
    if(offscreen) {
        vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline.offscreen_pipeline);
    } else {
        vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline.pipeline);
    }

    if(pipeline.descriptors != NULL) {
        vkCmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline.layout, 1, 1, &pipeline.descriptors[frame_num], 0, 0);
    }

    for(uint32_t i = 0; i < object_count; i++) {
        command_draw_object(objects[i], command_buffer);
    }
}
// Set up a scene for vkCmdDrawIndexedIndirect.
// Before the call is made, the following needs to be ready:
//   1) Vertex data for the entire scene in one vertex buffer
//   2) Index data for the entire scene in one index buffer
//   3) Descriptor data in a single descriptor set
//      a) e.g. all textures for the draw call need to be in the bound pipeline's descriptor set
//   4) Object data populated per-instance into a buffer which will be pushed by VkDeviceAddress
//   5) A buffer of draw commands ordered by instance id (first is 0, increasing by 1 every 'stride')
//
// Once that is set up, the call to vkCmdDrawIndexedIndirect happens and then:
//   1) For each draw command read from the command buffer:
//      a) Consume the indices/vertices to run the vertex shaders
//         i)  Vertex shaders access the per-instance object data indexed by gl_InstanceIndex
//         ii) This object data can hold indices for other descriptors
//             e.g. a shader can read object data with an int Z that tells it to use a sampler at binding Z
// (See the sketch below for the shape of the per-object draw commands.)
//
// If I want to start by not worrying about dynamic object load/unload, then I can create the above buffers
// when loading the scene and free them all at once.
//
// If I want to start worrying about loading/unloading, then I need a way to efficiently rebuild
// these buffers on changes.
// Ideas:
//   - Mark objects as 'deleted' to cheaply cull them during a compute cull/vertex shader
//     - Would require only a 1-bit change per deletion
//     - Would require periodic defragmentation (or whenever trying to add without space)
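
// Sketch of the draw-command buffer described above (illustrative only: ObjectData,
// fill_indirect_commands, and the buffer names are assumptions, not code used elsewhere
// in this file). One VkDrawIndexedIndirectCommand is written per object, with
// firstInstance increasing by 1 so gl_InstanceIndex can index the per-instance data
// pushed via VkDeviceAddress.
struct ObjectData {
    mat4 model;             // per-instance transform, read by the vertex shader
    uint32_t texture_index; // e.g. which sampler slot in the bound descriptor set to use
};

static void fill_indirect_commands(uint32_t object_count, const Mesh* meshes, VkDrawIndexedIndirectCommand* commands) {
    uint32_t first_index = 0;
    int32_t vertex_offset = 0;
    for(uint32_t i = 0; i < object_count; i++) {
        commands[i].indexCount = meshes[i].index_count;
        commands[i].instanceCount = 1;
        commands[i].firstIndex = first_index;     // offset into the shared index buffer
        commands[i].vertexOffset = vertex_offset; // offset into the shared vertex buffer
        commands[i].firstInstance = i;            // becomes gl_InstanceIndex in the shader
        first_index += meshes[i].index_count;
        vertex_offset += (int32_t)meshes[i].vertex_count;
    }
}

// The whole scene would then be recorded as a single call, e.g.:
//   vkCmdDrawIndexedIndirect(command_buffer, draw_command_buffer.handle, 0,
//                            object_count, sizeof(VkDrawIndexedIndirectCommand));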
VkResult create_scene() {

    return VK_SUCCESS;
}
VkResult command_draw_scene(uint32_t pipelines_count, GraphicsPipeline* pipelines, uint32_t* object_counts, Object** objects, uint32_t frame_num, VkDescriptorSet* scene_descriptors, struct ScenePC* scene_constants, VkCommandBuffer command_buffer, VkRenderPass render_pass, VkFramebuffer framebuffer, VkExtent2D extent, VkDeviceAddress object_buffer_addr, int offscreen) {
    VkCommandBufferBeginInfo begin_info = {
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
        .flags = 0,
        .pInheritanceInfo = 0,
    };

    VkResult result = vkBeginCommandBuffer(command_buffer, &begin_info);
    if(result != VK_SUCCESS) {
        return result;
    }

    VkClearValue clear_colors[] = {
        {
            .color = {
                {0.0f, 0.0f, 0.0f, 1.0f}
            }
        },
        {
            .depthStencil = {1.0f, 0},
        },
    };

    VkRenderPassBeginInfo render_pass_info = {
        .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
        .renderPass = render_pass,
        .framebuffer = framebuffer,
        .renderArea = {
            .offset = {
                .x = 0,
                .y = 0,
            },
            .extent = extent,
        },
        .clearValueCount = sizeof(clear_colors)/sizeof(VkClearValue),
        .pClearValues = clear_colors,
    };

    vkCmdBeginRenderPass(command_buffer, &render_pass_info, VK_SUBPASS_CONTENTS_INLINE);

    VkViewport viewport = {
        .x = 0.0f,
        .y = 0.0f,
        .width = (float)(extent.width),
        .height = (float)(extent.height),
        .minDepth = 0.0f,
        .maxDepth = 1.0f,
    };
    vkCmdSetViewport(command_buffer, 0, 1, &viewport);

    VkRect2D scissor = {
        .offset = {
            .x = 0,
            .y = 0,
        },
        .extent = extent,
    };
    vkCmdSetScissor(command_buffer, 0, 1, &scissor);

    vkCmdPushConstants(command_buffer, pipelines[0].layout, VK_SHADER_STAGE_VERTEX_BIT, 0, sizeof(struct ScenePC), scene_constants);
    vkCmdPushConstants(command_buffer, pipelines[0].layout, VK_SHADER_STAGE_VERTEX_BIT, sizeof(struct ScenePC), sizeof(VkDeviceAddress), &object_buffer_addr);

    // Bind the scene descriptor
    vkCmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipelines[0].layout, 0, 1, &scene_descriptors[frame_num], 0, 0);

    for(uint32_t i = 0; i < pipelines_count; i++) {
        command_draw_pipeline(pipelines[i], object_counts[i], objects[i], frame_num, command_buffer, offscreen);
    }

    vkCmdEndRenderPass(command_buffer);

    return vkEndCommandBuffer(command_buffer);
}
VkCommandBuffer* create_command_buffers(VkDevice device, VkCommandPool command_pool, uint32_t image_count) {
    VkCommandBufferAllocateInfo alloc_info = {
        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
        .commandPool = command_pool,
        .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
        .commandBufferCount = image_count,
    };

    VkCommandBuffer* command_buffers = malloc(sizeof(VkCommandBuffer)*image_count);
    if(command_buffers == 0) {
        return 0;
    }

    VkResult result = vkAllocateCommandBuffers(device, &alloc_info, command_buffers);
    if(result != VK_SUCCESS) {
        free(command_buffers);
        return 0;
    }

    return command_buffers;
}
VkFence* create_fences(VkDevice device, VkFenceCreateFlags flags, uint32_t count) {
    VkFence* fences = malloc(sizeof(VkFence)*count);
    if(fences == 0) {
        return 0;
    }

    VkFenceCreateInfo fence_info = {
        .sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
        .flags = flags,
    };

    for(uint32_t i = 0; i < count; i++) {
        VkResult result = vkCreateFence(device, &fence_info, 0, &fences[i]);
        if(result != VK_SUCCESS) {
            free(fences);
            return 0;
        }
    }
    return fences;
}
Object create_object() {
    Object ret = {
        .attributes = {
            .buckets = 0,
        },
    };

    Map attributes = map_create(8, 2);
    if(attributes.buckets == 0) {
        return ret;
    }

    ret.attributes = attributes;

    return ret;
}

Object create_renderable(Mesh* mesh, GraphicsPipeline* pipeline) {
    Object zero = {
        .attributes = {
            .buckets = 0,
        },
    };

    Object object = create_object();
    if(object.attributes.buckets == 0) {
        return zero;
    }

    if(mesh == 0 || pipeline == 0) {
        return zero;
    }

    bool result = map_add(&object.attributes, ATTRIBUTE_ID_MESH, mesh);
    if(result == false) {
        map_destroy(object.attributes);
        return zero;
    }

    result = map_add(&object.attributes, ATTRIBUTE_ID_PIPELINE, pipeline);
    if(result == false) {
        map_destroy(object.attributes);
        return zero;
    }

    return object;
}
typedef struct TextureSetStruct {
    uint32_t max_images;
    Texture* textures;

    VkDescriptorSet descriptor;
    VkDescriptorPool pool;
} TextureSet;

// TODO
/*VkResult texture_set_add(VkDevice device, GPUPage* page, GPUBuffer staging, VkCommandPool transfer_pool, VkQueue transfer_queue, VkCommandPool graphics_pool, VkQueue graphics_queue, VkExtent2D size, VkFormat format, void* image_data, uint32_t transfer_family, uint32_t graphics_family) {
    return VK_SUCCESS;
}*/
VkResult create_texture_set(VkDevice device, VkDescriptorSetLayout layout, uint32_t max_images, TextureSet* out) {
    if(out == NULL) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    out->max_images = max_images;

    out->textures = malloc(sizeof(Texture)*max_images);
    if(out->textures == NULL) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    VkDescriptorPoolSize sizes[] = {
        {
            .type = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
            .descriptorCount = max_images,
        },
    };

    VkDescriptorPoolCreateInfo pool_info = {
        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
        .flags = VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT,
        .maxSets = 1,
        .pPoolSizes = sizes,
        .poolSizeCount = sizeof(sizes) / sizeof(VkDescriptorPoolSize),
        .pNext = NULL,
    };

    VkResult result = vkCreateDescriptorPool(device, &pool_info, 0, &out->pool);
    if(result != VK_SUCCESS) {
        free(out->textures);
        return result;
    }

    VkDescriptorSetLayout layouts[] = {
        layout,
    };

    VkDescriptorSetAllocateInfo set_info = {
        .sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
        .pSetLayouts = layouts,
        .descriptorSetCount = 1,
        .descriptorPool = out->pool,
        .pNext = NULL,
    };

    result = vkAllocateDescriptorSets(device, &set_info, &out->descriptor);
    if(result != VK_SUCCESS) {
        free(out->textures);
        vkDestroyDescriptorPool(device, out->pool, 0);
        return result;
    }

    return VK_SUCCESS;
}
VkResult create_graphics_pipeline(
    VkDevice device,
    VkExtent2D extent,
    VkRenderPass draw_render_pass,
    VkRenderPass offscreen_render_pass,
    GraphicsPipelineInfo pipeline_info,
    uint32_t max_frames_in_flight,
    GraphicsPipeline* out
) {

    if(out == NULL) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    out->max_frames_in_flight = max_frames_in_flight;
    out->descriptors = NULL;

    if(pipeline_info.set_layout != VK_NULL_HANDLE) {
        out->descriptors = malloc(sizeof(VkDescriptorSet)*max_frames_in_flight);
        if(out->descriptors == 0) {
            return VK_ERROR_OUT_OF_HOST_MEMORY;
        }
        for(uint32_t i = 0; i < max_frames_in_flight; i++) {
            out->descriptors[i] = VK_NULL_HANDLE;
        }
    }

    VkDescriptorSetLayout all_layouts[2] = {pipeline_info.scene_layout, pipeline_info.set_layout};
    uint32_t num_sets = 1;
    if(pipeline_info.set_layout != VK_NULL_HANDLE) {
        num_sets += 1;
    }

    VkPushConstantRange pcr = {
        .stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
        .offset = 0,
        .size = sizeof(struct ScenePC) + sizeof(VkDeviceAddress),
    };

    VkPipelineLayoutCreateInfo layout_info = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
        .setLayoutCount = num_sets,
        .pSetLayouts = all_layouts,
        .pushConstantRangeCount = 1,
        .pPushConstantRanges = &pcr,
    };

    VkResult result = vkCreatePipelineLayout(device, &layout_info, 0, &out->layout);
    if(result != VK_SUCCESS) {
        return result;
    }

    VkDynamicState dynamic_states[] = {
        VK_DYNAMIC_STATE_VIEWPORT,
        VK_DYNAMIC_STATE_SCISSOR,
    };

    uint32_t dynamic_state_count = sizeof(dynamic_states)/sizeof(VkDynamicState);

    VkPipelineDynamicStateCreateInfo dynamic_info = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_DYNAMIC_STATE_CREATE_INFO,
        .dynamicStateCount = dynamic_state_count,
        .pDynamicStates = dynamic_states,
    };

    VkPipelineInputAssemblyStateCreateInfo input_assembly_info = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
        .topology = VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,
        .primitiveRestartEnable = VK_FALSE,
    };

    VkViewport viewport = {
        .x = 0.0f,
        .y = 0.0f,
        .width = (float)(extent.width),
        .height = (float)(extent.height),
        .minDepth = 0.0f,
        .maxDepth = 1.0f,
    };

    VkRect2D scissor = {
        .offset = {
            .x = 0,
            .y = 0,
        },
        .extent = extent,
    };

    VkPipelineViewportStateCreateInfo viewport_state = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
        .viewportCount = 1,
        .pViewports = &viewport,
        .scissorCount = 1,
        .pScissors = &scissor,
    };

    VkPipelineRasterizationStateCreateInfo raster_info = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
        .depthClampEnable = VK_FALSE,
        .rasterizerDiscardEnable = VK_FALSE,
        .polygonMode = VK_POLYGON_MODE_FILL,
        .lineWidth = 1.0f,
        .cullMode = VK_CULL_MODE_BACK_BIT,
        .frontFace = VK_FRONT_FACE_CLOCKWISE,
        .depthBiasEnable = VK_FALSE,
        .depthBiasConstantFactor = 0.0f,
        .depthBiasClamp = 0.0f,
        .depthBiasSlopeFactor = 0.0f,
    };

    VkPipelineMultisampleStateCreateInfo multisample_info = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
        .sampleShadingEnable = VK_FALSE,
        .rasterizationSamples = VK_SAMPLE_COUNT_1_BIT,
        .minSampleShading = 1.0f,
        .pSampleMask = 0,
        .alphaToCoverageEnable = VK_FALSE,
        .alphaToOneEnable = VK_FALSE,
    };

    VkPipelineDepthStencilStateCreateInfo depth_info = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_DEPTH_STENCIL_STATE_CREATE_INFO,
        .depthTestEnable = VK_TRUE,
        .depthWriteEnable = VK_TRUE,
        .depthCompareOp = VK_COMPARE_OP_LESS,
        .depthBoundsTestEnable = VK_FALSE,
        .maxDepthBounds = 1.0f,
        .minDepthBounds = 0.0f,
        .stencilTestEnable = VK_FALSE,
        .front = {},
        .back = {},
    };

    VkPipelineColorBlendAttachmentState color_blend_attachment = {
        .colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
        .blendEnable = VK_TRUE,
        .srcColorBlendFactor = VK_BLEND_FACTOR_SRC_ALPHA,
        .dstColorBlendFactor = VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
        .colorBlendOp = VK_BLEND_OP_ADD,
        .srcAlphaBlendFactor = VK_BLEND_FACTOR_ONE,
        .dstAlphaBlendFactor = VK_BLEND_FACTOR_ZERO,
        .alphaBlendOp = VK_BLEND_OP_ADD,
    };

    VkPipelineColorBlendStateCreateInfo color_blend_info = {
        .sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
        .logicOpEnable = VK_FALSE,
        .logicOp = VK_LOGIC_OP_COPY,
        .attachmentCount = 1,
        .pAttachments = &color_blend_attachment,
        .blendConstants[0] = 0.0f,
        .blendConstants[1] = 0.0f,
        .blendConstants[2] = 0.0f,
        .blendConstants[3] = 0.0f,
    };

    VkGraphicsPipelineCreateInfo draw_pipeline_info = {
        .sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
        .stageCount = pipeline_info.shader_stages_count,
        .pStages = pipeline_info.shader_stages,
        .pVertexInputState = &pipeline_info.input_info,
        .pInputAssemblyState = &input_assembly_info,
        .pViewportState = &viewport_state,
        .pRasterizationState = &raster_info,
        .pColorBlendState = &color_blend_info,
        .pDynamicState = &dynamic_info,
        .pDepthStencilState = &depth_info,
|
|
.pMultisampleState = &multisample_info,
|
|
.layout = out->layout,
|
|
.renderPass = draw_render_pass,
|
|
.subpass = 0,
|
|
.basePipelineHandle = VK_NULL_HANDLE,
|
|
.basePipelineIndex = -1,
|
|
};
|
|
|
|
VkPipelineColorBlendAttachmentState offscreen_attachment_states[] = {
|
|
{
|
|
.colorWriteMask = VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT,
|
|
.blendEnable = VK_FALSE,
|
|
},
|
|
};
|
|
|
|
VkPipelineColorBlendStateCreateInfo offscreen_blend_info = {
|
|
.sType = VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
|
|
.logicOpEnable = VK_FALSE,
|
|
.logicOp = VK_LOGIC_OP_COPY,
|
|
.attachmentCount = sizeof(offscreen_attachment_states)/sizeof(VkPipelineColorBlendAttachmentState),
|
|
.pAttachments = offscreen_attachment_states,
|
|
.blendConstants[0] = 0.0f,
|
|
.blendConstants[1] = 0.0f,
|
|
.blendConstants[2] = 0.0f,
|
|
.blendConstants[3] = 0.0f,
|
|
};
|
|
|
|
VkGraphicsPipelineCreateInfo offscreen_pipeline_info = {
|
|
.sType = VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
|
|
.stageCount = pipeline_info.shader_stages_count,
|
|
.pStages = pipeline_info.shader_stages, // TODO: offscreen stages
|
|
.pVertexInputState = &pipeline_info.input_info,
|
|
.pInputAssemblyState = &input_assembly_info,
|
|
.pViewportState = &viewport_state,
|
|
.pRasterizationState = &raster_info,
|
|
.pColorBlendState = &offscreen_blend_info,
|
|
.pDynamicState = &dynamic_info,
|
|
.pDepthStencilState = &depth_info,
|
|
.pMultisampleState = &multisample_info,
|
|
.layout = out->layout,
|
|
.renderPass = offscreen_render_pass,
|
|
.subpass = 0,
|
|
.basePipelineHandle = VK_NULL_HANDLE,
|
|
.basePipelineIndex = -1,
|
|
};
|
|
|
|
    result = vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &draw_pipeline_info, 0, &out->pipeline);
    if(result != VK_SUCCESS) {
        vkDestroyPipelineLayout(device, out->layout, 0);
        free(out->descriptors);
        return result;
    }

    result = vkCreateGraphicsPipelines(device, VK_NULL_HANDLE, 1, &offscreen_pipeline_info, 0, &out->offscreen_pipeline);
    if(result != VK_SUCCESS) {
        vkDestroyPipeline(device, out->pipeline, 0);
        vkDestroyPipelineLayout(device, out->layout, 0);
        free(out->descriptors);
        return result;
    }
|
|
|
|
return VK_SUCCESS;
|
|
}
|
|
|
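// Pipeline for vertex-coloured meshes: basic.vert/basic.frag, a single vertex binding with
// position and colour attributes, and no per-pipeline descriptor set.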
|
VkResult create_simple_mesh_pipeline(VkDevice device, VkExtent2D extent, VkRenderPass render_pass, VkRenderPass offscreen_render_pass, VkDescriptorSetLayout scene_layout, uint32_t max_frames_in_flight, GraphicsPipeline* out) {
|
|
if(out == NULL) {
|
|
return VK_ERROR_VALIDATION_FAILED_EXT;
|
|
}
|
|
|
|
VkShaderModule vert_shader = load_shader_file("shader_src/basic.vert.spv", device);
|
|
VkShaderModule frag_shader = load_shader_file("shader_src/basic.frag.spv", device);
|
|
VkPipelineShaderStageCreateInfo shader_stages[2] = {
|
|
{
|
|
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
|
|
.stage = VK_SHADER_STAGE_VERTEX_BIT,
|
|
.module = vert_shader,
|
|
.pName = "main",
|
|
},
|
|
{
|
|
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
|
|
.stage = VK_SHADER_STAGE_FRAGMENT_BIT,
|
|
.module = frag_shader,
|
|
.pName = "main",
|
|
},
|
|
};
|
|
|
|
VkVertexInputBindingDescription bindings[1] = {
|
|
{
|
|
.binding = 0, // Which buffer 'binding' to use
|
|
.stride = sizeof(struct Vertex), // How many bytes to increase the index between instance
|
|
.inputRate = VK_VERTEX_INPUT_RATE_VERTEX, // Whether an instance is a vertex or an index
|
|
},
|
|
};
|
|
|
|
VkVertexInputAttributeDescription attributes[2] = {
|
|
{
|
|
.binding = 0, // Which buffer 'binding' to use
|
|
.location = 0, // Which 'location' to export as to shader
|
|
.format = VK_FORMAT_R32G32B32_SFLOAT, // What format to interpret as for shader
|
|
.offset = offsetof(struct Vertex, pos), // What offset from instance start
|
|
},
|
|
{
|
|
.binding = 0,
|
|
.location = 1,
|
|
.format = VK_FORMAT_R32G32B32_SFLOAT,
|
|
.offset = offsetof(struct Vertex, color),
|
|
},
|
|
};
|
|
|
|
VkPipelineVertexInputStateCreateInfo input_info = {
|
|
.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
|
|
.pVertexBindingDescriptions = bindings,
|
|
.vertexBindingDescriptionCount = sizeof(bindings)/sizeof(VkVertexInputBindingDescription),
|
|
.pVertexAttributeDescriptions = attributes,
|
|
.vertexAttributeDescriptionCount = sizeof(attributes)/sizeof(VkVertexInputAttributeDescription),
|
|
};
|
|
|
|
GraphicsPipelineInfo pipeline_info = {
|
|
.set_layout = VK_NULL_HANDLE,
|
|
.shader_stages_count = sizeof(shader_stages)/sizeof(VkPipelineShaderStageCreateInfo),
|
|
.shader_stages = shader_stages,
|
|
.scene_layout = scene_layout,
|
|
.input_info = input_info,
|
|
};
|
|
|
|
return create_graphics_pipeline(device, extent, render_pass, offscreen_render_pass, pipeline_info, max_frames_in_flight, out);
|
|
}
|
|
|
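// Textured-mesh pipeline: position/colour/UV vertex layout plus a per-pipeline descriptor set
// whose binding 0 is a partially-bound, update-after-bind array of combined image samplers.
// Two 10x10 test textures are generated in code and written to array elements 0 and 1.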
|
VkResult create_texture_mesh_pipeline(VkDevice device, VkPhysicalDeviceMemoryProperties memories, VkExtent2D extent, VkRenderPass render_pass, VkRenderPass offscreen_render_pass, VkDescriptorSetLayout scene_layout, uint32_t max_frames_in_flight, VkCommandPool transfer_pool, Queue transfer_queue, Queue graphics_queue, VkCommandPool graphics_pool, GraphicsPipeline* out) {
|
|
if(out == NULL) {
|
|
return VK_ERROR_VALIDATION_FAILED_EXT;
|
|
}
|
|
|
|
VkShaderModule vert_shader = load_shader_file("shader_src/texture.vert.spv", device);
|
|
VkShaderModule frag_shader = load_shader_file("shader_src/texture.frag.spv", device);
|
|
VkPipelineShaderStageCreateInfo shader_stages[2] = {
|
|
{
|
|
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
|
|
.stage = VK_SHADER_STAGE_VERTEX_BIT,
|
|
.module = vert_shader,
|
|
.pName = "main",
|
|
},
|
|
{
|
|
.sType = VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
|
|
.stage = VK_SHADER_STAGE_FRAGMENT_BIT,
|
|
.module = frag_shader,
|
|
.pName = "main",
|
|
},
|
|
};
|
|
|
|
|
|
VkVertexInputBindingDescription bindings[] = {
|
|
{
|
|
.binding = 0,
|
|
.stride = sizeof(struct TextureVertex),
|
|
.inputRate = VK_VERTEX_INPUT_RATE_VERTEX,
|
|
},
|
|
};
|
|
|
|
VkVertexInputAttributeDescription attributes[] = {
|
|
{
|
|
.binding = 0,
|
|
.location = 0,
|
|
.format = VK_FORMAT_R32G32B32_SFLOAT,
|
|
.offset = offsetof(struct TextureVertex, pos),
|
|
},
|
|
{
|
|
.binding = 0,
|
|
.location = 1,
|
|
.format = VK_FORMAT_R32G32B32_SFLOAT,
|
|
.offset = offsetof(struct TextureVertex, color),
|
|
},
|
|
{
|
|
.binding = 0,
|
|
.location = 2,
|
|
.format = VK_FORMAT_R32G32_SFLOAT,
|
|
.offset = offsetof(struct TextureVertex, tex),
|
|
},
|
|
};
|
|
|
|
// TODO: use bindless descriptors for textures, so each draw command will bind a large buffer that is indexed by object ID to get the address of the texture in GPU memory
|
|
VkDescriptorSetLayoutBinding set_bindings[] = {
|
|
{
|
|
.binding = 0,
|
|
.descriptorCount = 1000,
|
|
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
|
|
.pImmutableSamplers = 0,
|
|
.stageFlags = VK_SHADER_STAGE_FRAGMENT_BIT,
|
|
},
|
|
};
|
|
|
|
VkDescriptorBindingFlags set_binding_flags[] = {
|
|
VK_DESCRIPTOR_BINDING_PARTIALLY_BOUND_BIT | VK_DESCRIPTOR_BINDING_UPDATE_AFTER_BIND_BIT,
|
|
};
|
|
|
|
VkDescriptorSetLayoutBindingFlagsCreateInfo set_flags_info = {
|
|
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_BINDING_FLAGS_CREATE_INFO,
|
|
.bindingCount = sizeof(set_bindings)/sizeof(VkDescriptorSetLayoutBinding),
|
|
.pBindingFlags = set_binding_flags,
|
|
};
|
|
|
|
VkDescriptorSetLayoutCreateInfo set_info = {
|
|
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
|
|
.pBindings = set_bindings,
|
|
.bindingCount = sizeof(set_bindings)/sizeof(VkDescriptorSetLayoutBinding),
|
|
.flags = VK_DESCRIPTOR_SET_LAYOUT_CREATE_UPDATE_AFTER_BIND_POOL_BIT,
|
|
.pNext = &set_flags_info,
|
|
};
|
|
|
|
VkDescriptorSetLayout set_layout = VK_NULL_HANDLE;
|
|
VkResult result = vkCreateDescriptorSetLayout(device, &set_info, 0, &set_layout);
|
|
if(result != VK_SUCCESS) {
|
|
return result;
|
|
}
|
|
|
|
TextureSet texture_set = {0};
|
|
result = create_texture_set(device, set_layout, 1000, &texture_set);
|
|
if(result != VK_SUCCESS) {
|
|
return result;
|
|
}
|
|
|
|
VkPipelineVertexInputStateCreateInfo input_info = {
|
|
.sType = VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
|
|
.pVertexBindingDescriptions = bindings,
|
|
.vertexBindingDescriptionCount = sizeof(bindings)/sizeof(VkVertexInputBindingDescription),
|
|
.pVertexAttributeDescriptions = attributes,
|
|
.vertexAttributeDescriptionCount = sizeof(attributes)/sizeof(VkVertexInputAttributeDescription),
|
|
};
|
|
|
|
GraphicsPipelineInfo pipeline_info = {
|
|
.set_layout = set_layout,
|
|
.shader_stages_count = sizeof(shader_stages)/sizeof(VkPipelineShaderStageCreateInfo),
|
|
.shader_stages = shader_stages,
|
|
.scene_layout = scene_layout,
|
|
.input_info = input_info,
|
|
};
|
|
|
|
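    // NOTE: this host-visible page is not referenced again in this function; texture uploads
    // below go through staging_memory instead.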
GPUPage* memory = NULL;
|
|
result = gpu_page_allocate(device, memories, 100000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, 0, &memory);
|
|
if(result != VK_SUCCESS) {
|
|
return result;
|
|
}
|
|
|
|
result = create_graphics_pipeline(device, extent, render_pass, offscreen_render_pass, pipeline_info, max_frames_in_flight, out);
|
|
if(result != VK_SUCCESS) {
|
|
return result;
|
|
}
|
|
|
|
VkExtent2D texture_size = {
|
|
.width = 10,
|
|
.height = 10,
|
|
};
|
|
|
|
|
|
struct __attribute__((__packed__)) texel {
|
|
uint8_t r;
|
|
uint8_t g;
|
|
uint8_t b;
|
|
uint8_t a;
|
|
};
|
|
|
|
struct texel WHT = {255, 255, 255, 255};
|
|
struct texel BLK = {0, 0, 0, 255};
|
|
struct texel RED = {255, 0, 0, 255};
|
|
struct texel GRN = {0, 255, 0, 255};
|
|
struct texel BLU = {0, 0, 255, 255};
|
|
|
|
struct texel texture_data_0[100] = {
|
|
RED, WHT, GRN, WHT, BLU, WHT, RED, WHT, GRN, BLK,
|
|
RED, WHT, GRN, WHT, BLU, WHT, RED, WHT, GRN, BLK,
|
|
RED, WHT, GRN, WHT, BLU, WHT, RED, WHT, GRN, WHT,
|
|
RED, WHT, GRN, WHT, BLU, WHT, RED, WHT, GRN, WHT,
|
|
RED, WHT, GRN, WHT, BLU, WHT, RED, WHT, GRN, BLK,
|
|
RED, WHT, GRN, WHT, BLU, WHT, RED, WHT, GRN, BLK,
|
|
RED, WHT, GRN, WHT, BLU, WHT, RED, WHT, GRN, WHT,
|
|
RED, WHT, GRN, WHT, BLU, WHT, RED, WHT, GRN, WHT,
|
|
RED, WHT, GRN, WHT, BLU, WHT, RED, WHT, GRN, BLK,
|
|
RED, WHT, GRN, WHT, BLU, WHT, RED, WHT, GRN, BLK,
|
|
};
|
|
|
|
struct texel texture_data_1[100] = {
|
|
RED, RED, RED, RED, RED, RED, RED, RED, RED, RED,
|
|
RED, WHT, WHT, WHT, WHT, WHT, WHT, WHT, WHT, RED,
|
|
RED, WHT, GRN, GRN, GRN, GRN, GRN, GRN, WHT, RED,
|
|
RED, WHT, GRN, BLU, BLU, BLU, BLU, GRN, WHT, RED,
|
|
RED, WHT, GRN, BLU, BLK, BLK, BLU, GRN, WHT, RED,
|
|
RED, WHT, GRN, BLU, BLK, BLK, BLU, GRN, WHT, RED,
|
|
RED, WHT, GRN, BLU, BLU, BLU, BLU, GRN, WHT, RED,
|
|
RED, WHT, GRN, GRN, GRN, GRN, GRN, GRN, WHT, RED,
|
|
RED, WHT, WHT, WHT, WHT, WHT, WHT, WHT, WHT, RED,
|
|
RED, RED, RED, RED, RED, RED, RED, RED, RED, RED,
|
|
};
|
|
|
|
GPUPage* texture_memory = NULL;
|
|
result = gpu_page_allocate(device, memories, 100000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT, 0, &texture_memory);
|
|
if(result != VK_SUCCESS) {
|
|
return result;
|
|
}
|
|
|
|
GPUPage* staging_memory = NULL;
|
|
result = gpu_page_allocate(device, memories, 100000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, 0, &staging_memory);
|
|
if(result != VK_SUCCESS) {
|
|
return result;
|
|
}
|
|
|
|
GPUBuffer staging = {0};
|
|
result = gpu_buffer_malloc(device, staging_memory, 100000, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, &staging);
|
|
if(result != VK_SUCCESS) {
|
|
return result;
|
|
}
|
|
|
|
Texture test_texture_0 = load_texture(device, texture_memory, staging, transfer_pool, transfer_queue, graphics_pool, graphics_queue, texture_size, VK_FORMAT_R8G8B8A8_SRGB, texture_data_0);
|
|
|
|
Texture test_texture_1 = load_texture(device, texture_memory, staging, transfer_pool, transfer_queue, graphics_pool, graphics_queue, texture_size, VK_FORMAT_R8G8B8A8_SRGB, texture_data_1);
|
|
|
|
VkDescriptorImageInfo image_info_0 = {
|
|
.sampler = test_texture_0.sampler,
|
|
.imageView = test_texture_0.view,
|
|
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
|
|
};
|
|
|
|
VkDescriptorImageInfo image_info_1 = {
|
|
.sampler = test_texture_1.sampler,
|
|
.imageView = test_texture_1.view,
|
|
.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
|
|
};
|
|
|
|
VkWriteDescriptorSet descriptor_write = {
|
|
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
|
|
.dstSet = texture_set.descriptor,
|
|
.dstBinding = 0,
|
|
.descriptorType = VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,
|
|
.descriptorCount = 1,
|
|
.pBufferInfo = 0,
|
|
.pTexelBufferView = 0,
|
|
};
|
|
|
|
descriptor_write.pImageInfo = &image_info_0;
|
|
descriptor_write.dstArrayElement = 0;
|
|
vkUpdateDescriptorSets(device, 1, &descriptor_write, 0, 0);
|
|
|
|
descriptor_write.pImageInfo = &image_info_1;
|
|
descriptor_write.dstArrayElement = 1;
|
|
vkUpdateDescriptorSets(device, 1, &descriptor_write, 0, 0);
|
|
|
|
for(uint32_t i = 0; i < out->max_frames_in_flight; i++) {
|
|
out->descriptors[i] = texture_set.descriptor;
|
|
fprintf(stderr, "descriptor[%d]: %p\n", i, out->descriptors[i]);
|
|
}
|
|
|
|
return VK_SUCCESS;
|
|
}
|
|
|
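// Copies `size` bytes of host data into the mapped staging buffer, then records and submits a
// one-shot transfer that copies the staged bytes into `destination` at `offset`.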
|
VkResult command_copy_to_buffer(VkDevice device, GPUBuffer staging, VkBuffer destination, void* data, VkDeviceSize size, VkDeviceSize offset, VkCommandPool pool, Queue queue) {
|
|
memcpy(staging.page->ptr + staging.memory->offset, data, size);
|
|
|
|
VkCommandBuffer command_buffer = command_begin_single(device, pool);
|
|
if(command_buffer == VK_NULL_HANDLE) {
|
|
return VK_ERROR_UNKNOWN;
|
|
}
|
|
|
|
VkBufferCopy region = {
|
|
.srcOffset = 0,
|
|
.dstOffset = offset,
|
|
.size = size,
|
|
};
|
|
|
|
    vkCmdCopyBuffer(command_buffer, staging.handle, destination, 1, &region);
|
|
|
|
return command_end_single(device, command_buffer, pool, queue);
|
|
}
|
|
|
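// Allocates device-local vertex and index buffers from `page`, stages the supplied data through
// `staging`, and returns a heap-allocated Mesh describing both buffers, or NULL on failure.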
|
Mesh* load_mesh_to_buffer(VkDevice device, GPUPage* page, GPUBuffer staging, uint32_t vertex_count, uint32_t vertex_stride, void* vertex_data, uint32_t index_count, uint32_t index_stride, void* index_data, VkCommandPool pool, Queue queue) {
|
|
GPUBuffer vertex_buffer = {0};
|
|
GPUBuffer index_buffer = {0};
|
|
|
|
VkResult result = gpu_buffer_malloc(device, page, vertex_count*vertex_stride, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, &vertex_buffer);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "Failed to create vertex buffer\n");
|
|
return NULL;
|
|
}
|
|
|
|
    result = gpu_buffer_malloc(device, page, index_stride*index_count, VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, &index_buffer);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "Failed to create index buffer\n");
|
|
return NULL;
|
|
}
|
|
|
|
result = command_copy_to_buffer(device, staging, vertex_buffer.handle, vertex_data, vertex_count*vertex_stride, 0, pool, queue);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "Failed to copy to vertex buffer\n");
|
|
return NULL;
|
|
}
|
|
|
|
result = command_copy_to_buffer(device, staging, index_buffer.handle, index_data, index_stride*index_count, 0, pool, queue);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "Failed to copy to index buffer\n");
|
|
return NULL;
|
|
}
|
|
|
|
Mesh* mesh = malloc(sizeof(Mesh));
|
|
if(mesh == NULL) {
|
|
return NULL;
|
|
}
|
|
|
|
mesh->vertex_buffer = vertex_buffer;
|
|
mesh->vertex_count = vertex_count;
|
|
mesh->index_buffer = index_buffer;
|
|
mesh->index_count = index_count;
|
|
|
|
return mesh;
|
|
}
|
|
|
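// Brings up the full Vulkan context: instance, debug messenger, physical/logical device and
// queues, swapchain with image views and framebuffers, command pools and per-frame command
// buffers, synchronization objects, and the offscreen G-buffer (position and depth images,
// render pass, framebuffer). Returns NULL on any failure; partially created objects are not
// rolled back here.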
|
VulkanContext* init_vulkan(GLFWwindow* window, uint32_t max_frames_in_flight) {
|
|
    VulkanContext* context = (VulkanContext*)malloc(sizeof(VulkanContext));
    if(context == NULL) {
        fprintf(stderr, "failed to allocate vulkan context\n");
        return 0;
    }

    VkResult result = create_instance(&context->instance);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "failed to initialize vulkan instance\n");
|
|
return 0;
|
|
}
|
|
|
|
result = create_debug_messenger(context->instance, &context->debug_messenger);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "failed to initialize vulkan debug messenger\n");
|
|
return 0;
|
|
}
|
|
|
|
result = get_best_physical_device(context->instance, &context->physical_device);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "failed to pick vulkan physical device\n");
|
|
return 0;
|
|
}
|
|
|
|
vkGetPhysicalDeviceMemoryProperties(context->physical_device, &context->memories);
|
|
|
|
result = glfwCreateWindowSurface(context->instance, window, 0, &context->surface);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "failed to create vulkan surface\n");
|
|
return 0;
|
|
}
|
|
|
|
result = create_logical_device(context->physical_device, context->surface, &context->graphics_queue, &context->present_queue, &context->transfer_queue, &context->device);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "failed to create vulkan logical device\n");
|
|
return 0;
|
|
}
|
|
|
|
result = get_swapchain_details(context->physical_device, context->surface, &context->swapchain_details);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "failed to create vulkan logical device\n");
|
|
return 0;
|
|
}
|
|
|
|
context->swapchain_format = choose_swapchain_format(context->swapchain_details);
|
|
context->swapchain_present_mode = choose_present_mode(context->swapchain_details);
|
|
context->swapchain_extent = choose_swapchain_extent(context->swapchain_details);
|
|
|
|
VkSwapchainKHR swapchain = create_swapchain(context->device, context->swapchain_format, context->swapchain_present_mode, context->swapchain_extent, context->surface, context->swapchain_details.capabilities, context->graphics_queue.family, context->present_queue.family, VK_NULL_HANDLE);
|
|
if(swapchain == VK_NULL_HANDLE) {
|
|
fprintf(stderr, "failed to create vulkan swapchain\n");
|
|
return 0;
|
|
} else {
|
|
context->swapchain = swapchain;
|
|
}
|
|
|
|
SwapchainImages swapchain_images = get_swapchain_images(context->device, context->swapchain);
|
|
if(swapchain_images.count == 0) {
|
|
fprintf(stderr, "failed to get vulkan swapchain images\n");
|
|
return 0;
|
|
} else {
|
|
context->swapchain_image_count = swapchain_images.count;
|
|
context->swapchain_images = swapchain_images.images;
|
|
}
|
|
|
|
VkImageView* image_views = create_image_views(context->device, context->swapchain_image_count, context->swapchain_images, context->swapchain_format);
|
|
if(image_views == 0) {
|
|
fprintf(stderr, "failed to create vulkan image views\n");
|
|
return 0;
|
|
} else {
|
|
context->swapchain_image_views = image_views;
|
|
}
|
|
|
|
VkFormat requested[] = {
|
|
VK_FORMAT_D32_SFLOAT,
|
|
VK_FORMAT_D32_SFLOAT_S8_UINT,
|
|
VK_FORMAT_D24_UNORM_S8_UINT
|
|
};
|
|
VkFormat depth_format = find_depth_format(context->physical_device, 3, requested, VK_IMAGE_TILING_OPTIMAL, VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT);
|
|
if(depth_format == VK_FORMAT_MAX_ENUM) {
|
|
fprintf(stderr, "failed to find a suitable depth image format\n");
|
|
return 0;
|
|
} else {
|
|
context->depth_format = depth_format;
|
|
}
|
|
|
|
VkCommandPoolCreateInfo extra_pool_info = {
|
|
.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
|
|
.queueFamilyIndex = context->graphics_queue.family,
|
|
.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,
|
|
};
|
|
|
|
result = vkCreateCommandPool(context->device, &extra_pool_info, 0, &context->extra_graphics_pool);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "failed to create extra graphics command pool\n");
|
|
return 0;
|
|
}
|
|
|
|
if(create_depth_image(context) != 0) {
|
|
fprintf(stderr, "failed to create depth image\n");
|
|
return 0;
|
|
}
|
|
|
|
VkRenderPass render_pass = create_render_pass(context->device, context->swapchain_format, context->depth_format);
|
|
if(render_pass == VK_NULL_HANDLE) {
|
|
fprintf(stderr, "failed to create vulkan render pass\n");
|
|
return 0;
|
|
} else {
|
|
context->render_pass = render_pass;
|
|
}
|
|
|
|
VkFramebuffer* framebuffers = create_swapchain_framebuffers(context->device, context->swapchain_image_count, context->swapchain_image_views, context->depth_image_view, context->render_pass, context->swapchain_extent);
|
|
if(framebuffers == 0) {
|
|
fprintf(stderr, "failed to create vulkan framebuffers\n");
|
|
return 0;
|
|
} else {
|
|
context->swapchain_framebuffers = framebuffers;
|
|
}
|
|
|
|
VkCommandPoolCreateInfo graphics_pool_info = {
|
|
.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
|
|
.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,
|
|
.queueFamilyIndex = context->graphics_queue.family,
|
|
};
|
|
VkCommandPool graphics_command_pool;
|
|
result = vkCreateCommandPool(context->device, &graphics_pool_info, 0, &graphics_command_pool);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "failed to create vulkan graphics command pool");
|
|
return 0;
|
|
} else {
|
|
context->graphics_command_pool = graphics_command_pool;
|
|
}
|
|
|
|
VkCommandPoolCreateInfo transfer_pool_info = {
|
|
.sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
|
|
.flags = VK_COMMAND_POOL_CREATE_TRANSIENT_BIT,
|
|
.queueFamilyIndex = context->transfer_queue.family,
|
|
};
|
|
VkCommandPool transfer_command_pool;
|
|
result = vkCreateCommandPool(context->device, &transfer_pool_info, 0, &transfer_command_pool);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "failed to create vulkan transfer command pool");
|
|
return 0;
|
|
} else {
|
|
context->transfer_command_pool = transfer_command_pool;
|
|
}
|
|
|
|
context->max_frames_in_flight = max_frames_in_flight;
|
|
|
|
VkCommandBuffer* swapchain_command_buffers = create_command_buffers(context->device, context->graphics_command_pool, max_frames_in_flight);
|
|
    if(swapchain_command_buffers == NULL) {
|
|
fprintf(stderr, "failed to create vulkan swapchain command buffer\n");
|
|
return 0;
|
|
} else {
|
|
context->swapchain_command_buffers = swapchain_command_buffers;
|
|
}
|
|
|
|
    context->offscreen_command_buffers = create_command_buffers(context->device, context->graphics_command_pool, context->max_frames_in_flight);
    if(context->offscreen_command_buffers == NULL) {
        fprintf(stderr, "failed to create vulkan offscreen command buffers\n");
        return 0;
    }
|
|
|
|
VkSemaphore* ia_semaphores = create_semaphores(context->device, 0, max_frames_in_flight);
|
|
if(ia_semaphores == 0) {
|
|
fprintf(stderr, "failed to create vulkan image available semaphores\n");
|
|
return 0;
|
|
} else {
|
|
context->image_available_semaphores = ia_semaphores;
|
|
}
|
|
|
|
VkSemaphore* rf_semaphores = create_semaphores(context->device, 0, max_frames_in_flight);
|
|
if(rf_semaphores == 0) {
|
|
fprintf(stderr, "failed to create vulkan render finished semaphores\n");
|
|
return 0;
|
|
} else {
|
|
context->render_finished_semaphores = rf_semaphores;
|
|
}
|
|
|
|
context->offscreen_complete_semaphores = create_semaphores(context->device, 0, 1);
|
|
if(context->offscreen_complete_semaphores == NULL) {
|
|
return 0;
|
|
}
|
|
|
|
VkFence* if_fences = create_fences(context->device, VK_FENCE_CREATE_SIGNALED_BIT, max_frames_in_flight);
|
|
if(if_fences == 0) {
|
|
fprintf(stderr, "failed to create vulkan in flight fence\n");
|
|
return 0;
|
|
} else {
|
|
context->in_flight_fences = if_fences;
|
|
}
|
|
|
|
result = gpu_page_allocate(context->device, context->memories, 15360000*3, 0xFFFFFFFF, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT, 0, &context->g_buffer_page);
|
|
if(result != VK_SUCCESS) {
|
|
return 0;
|
|
}
|
|
|
|
VkImageCreateInfo g_pos_info = {
|
|
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
|
|
.usage = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT,
|
|
.imageType = VK_IMAGE_TYPE_2D,
|
|
.extent = {
|
|
.width = context->swapchain_extent.width,
|
|
.height = context->swapchain_extent.height,
|
|
.depth = 1,
|
|
},
|
|
.mipLevels = 1,
|
|
.arrayLayers = 1,
|
|
.samples = VK_SAMPLE_COUNT_1_BIT,
|
|
.tiling = VK_IMAGE_TILING_OPTIMAL,
|
|
.format = VK_FORMAT_R16G16B16A16_SFLOAT,
|
|
};
|
|
|
|
result = gpu_image_malloc(context->device, context->g_buffer_page, &g_pos_info, &context->g_image_position);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "failed to allocate image\n");
|
|
return 0;
|
|
}
|
|
|
|
VkImageViewCreateInfo g_pos_view_info = {
|
|
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
|
|
.image = context->g_image_position.handle,
|
|
.viewType = VK_IMAGE_VIEW_TYPE_2D,
|
|
.format = VK_FORMAT_R16G16B16A16_SFLOAT,
|
|
.subresourceRange = {
|
|
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
|
|
.baseMipLevel = 0,
|
|
.levelCount = 1,
|
|
.baseArrayLayer = 0,
|
|
.layerCount = 1,
|
|
},
|
|
};
|
|
|
|
result = vkCreateImageView(context->device, &g_pos_view_info, 0, &context->g_image_view_position);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "failed to create image view\n");
|
|
return 0;
|
|
}
|
|
|
|
VkExtent3D offscreen_extent = {
|
|
.width = context->swapchain_extent.width,
|
|
.height = context->swapchain_extent.height,
|
|
.depth = 1,
|
|
};
|
|
|
|
VkImageCreateInfo offscreen_depth_info = {
|
|
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
|
|
.imageType = VK_IMAGE_TYPE_2D,
|
|
.pNext = NULL,
|
|
.extent = offscreen_extent,
|
|
.mipLevels = 1,
|
|
.arrayLayers = 1,
|
|
.format = context->depth_format,
|
|
.tiling = VK_IMAGE_TILING_OPTIMAL,
|
|
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
|
|
.usage = VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
|
|
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
|
|
.samples = VK_SAMPLE_COUNT_1_BIT,
|
|
.flags = 0,
|
|
};
|
|
|
|
result = gpu_image_malloc(context->device, context->g_buffer_page, &offscreen_depth_info, &context->g_image_depth);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "failed to allocate g_image_depth\n");
|
|
return 0;
|
|
}
|
|
|
|
VkImageViewCreateInfo g_depth_view_info = {
|
|
.sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
|
|
.image = context->g_image_depth.handle,
|
|
.viewType = VK_IMAGE_VIEW_TYPE_2D,
|
|
.format = context->depth_format,
|
|
.components = {
|
|
.r = VK_COMPONENT_SWIZZLE_IDENTITY,
|
|
.g = VK_COMPONENT_SWIZZLE_IDENTITY,
|
|
.b = VK_COMPONENT_SWIZZLE_IDENTITY,
|
|
.a = VK_COMPONENT_SWIZZLE_IDENTITY,
|
|
},
|
|
.subresourceRange = {
|
|
.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT,
|
|
.baseMipLevel = 0,
|
|
.levelCount = 1,
|
|
.baseArrayLayer = 0,
|
|
.layerCount = 1,
|
|
},
|
|
};
|
|
|
|
result = vkCreateImageView(context->device, &g_depth_view_info, 0, &context->g_image_view_depth);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "Failed to allocate g_image_view_depth\n");
|
|
return 0;
|
|
}
|
|
|
|
result = command_transition_image_layout(context->device, context->extra_graphics_pool, context->graphics_queue, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL, context->g_image_depth.handle, 0, VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT, VK_QUEUE_FAMILY_IGNORED, VK_QUEUE_FAMILY_IGNORED, VK_IMAGE_ASPECT_DEPTH_BIT);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "Failed to transition g_image_depth to VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL\n");
|
|
return 0;
|
|
}
|
|
|
|
VkAttachmentDescription offscreen_attachments[] = {
|
|
{
|
|
.format = VK_FORMAT_R16G16B16A16_SFLOAT,
|
|
.samples = VK_SAMPLE_COUNT_1_BIT,
|
|
.loadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
|
|
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
|
|
.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
|
|
.stencilStoreOp = VK_ATTACHMENT_STORE_OP_DONT_CARE,
|
|
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
|
|
.finalLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
|
|
},
|
|
{
|
|
.format = context->depth_format,
|
|
.samples = VK_SAMPLE_COUNT_1_BIT,
|
|
.loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR,
|
|
.storeOp = VK_ATTACHMENT_STORE_OP_STORE,
|
|
.stencilLoadOp = VK_ATTACHMENT_LOAD_OP_DONT_CARE,
|
|
.stencilStoreOp = VK_ATTACHMENT_STORE_OP_STORE,
|
|
.initialLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
|
|
.finalLayout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
|
|
},
|
|
};
|
|
|
|
VkAttachmentReference offscreen_attachment_refs[] = {
|
|
{
|
|
.attachment = 0,
|
|
.layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
|
|
}
|
|
};
|
|
|
|
VkAttachmentReference offscreen_depth_reference = {
|
|
.attachment = 1,
|
|
.layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
|
|
};
|
|
|
|
VkSubpassDescription offscreen_pass_descs[] = {
|
|
{
|
|
.pipelineBindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS,
|
|
.colorAttachmentCount = sizeof(offscreen_attachment_refs)/sizeof(VkAttachmentReference),
|
|
.pColorAttachments = offscreen_attachment_refs,
|
|
.pDepthStencilAttachment = &offscreen_depth_reference,
|
|
},
|
|
};
|
|
|
|
VkSubpassDependency offscreen_pass_dependencies[] = {
|
|
{
|
|
.srcSubpass = VK_SUBPASS_EXTERNAL,
|
|
.dstSubpass = 0,
|
|
.srcStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
|
|
.dstStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
|
|
.srcAccessMask = VK_ACCESS_MEMORY_READ_BIT,
|
|
.dstAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
|
|
.dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT,
|
|
},
|
|
{
|
|
.srcSubpass = 0,
|
|
.dstSubpass = VK_SUBPASS_EXTERNAL,
|
|
.srcStageMask = VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT,
|
|
.dstStageMask = VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
|
|
.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_READ_BIT | VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT | VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT,
|
|
.dstAccessMask = VK_ACCESS_MEMORY_READ_BIT,
|
|
.dependencyFlags = VK_DEPENDENCY_BY_REGION_BIT,
|
|
},
|
|
};
|
|
|
|
VkRenderPassCreateInfo offscreen_pass_info = {
|
|
.sType = VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
|
|
.attachmentCount = sizeof(offscreen_attachments)/sizeof(VkAttachmentDescription),
|
|
.pAttachments = offscreen_attachments,
|
|
.subpassCount = sizeof(offscreen_pass_descs)/sizeof(VkSubpassDescription),
|
|
.pSubpasses = offscreen_pass_descs,
|
|
.dependencyCount = sizeof(offscreen_pass_dependencies)/sizeof(VkSubpassDependency),
|
|
.pDependencies = offscreen_pass_dependencies,
|
|
};
|
|
|
|
result = vkCreateRenderPass(context->device, &offscreen_pass_info, 0, &context->g_renderpass);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "Failed to create offscreen renderpass\n");
|
|
return 0;
|
|
}
|
|
|
|
VkImageView offscreen_fb_attachments[2] = {
|
|
context->g_image_view_position,
|
|
context->g_image_view_depth,
|
|
};
|
|
|
|
VkFramebufferCreateInfo offscreen_framebuffer_info = {
|
|
.sType = VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
|
|
.renderPass = context->g_renderpass,
|
|
.attachmentCount = sizeof(offscreen_fb_attachments)/sizeof(VkImageView),
|
|
.pAttachments = offscreen_fb_attachments,
|
|
.width = context->swapchain_extent.width,
|
|
.height = context->swapchain_extent.height,
|
|
.layers = 1,
|
|
};
|
|
|
|
result = vkCreateFramebuffer(context->device, &offscreen_framebuffer_info, 0, &context->g_framebuffer);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "failed to allocate offscreen framebuffer\n");
|
|
return 0;
|
|
}
|
|
|
|
return context;
|
|
}
|
|
|
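// Creates the per-scene descriptor resources: one SceneUBO uniform buffer and one descriptor set
// per frame in flight, backed by a single host-visible page, with each buffer written into its
// set at binding 0. On failure the returned SceneContext has pool == VK_NULL_HANDLE.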
|
SceneContext create_scene_context(VkDevice device, VkPhysicalDeviceMemoryProperties memories, uint32_t max_frames_in_flight) {
|
|
SceneContext ret = {
|
|
.pool = VK_NULL_HANDLE,
|
|
.descriptor_layout = VK_NULL_HANDLE,
|
|
.descriptors = 0,
|
|
.ubos = 0,
|
|
.ubo_ptrs = 0,
|
|
};
|
|
|
|
VkDescriptorPoolSize pool_sizes[] = {
|
|
{
|
|
.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
|
|
.descriptorCount = max_frames_in_flight,
|
|
}
|
|
};
|
|
|
|
VkDescriptorPoolCreateInfo pool_info = {
|
|
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
|
|
.poolSizeCount = 1,
|
|
.pPoolSizes = pool_sizes,
|
|
.maxSets = max_frames_in_flight,
|
|
};
|
|
|
|
VkDescriptorPool pool;
|
|
VkResult result = vkCreateDescriptorPool(device, &pool_info, 0, &pool);
|
|
if(result != VK_SUCCESS) {
|
|
return ret;
|
|
}
|
|
|
|
VkDescriptorSetLayoutBinding layout_bindings[] = {
|
|
{
|
|
.binding = 0,
|
|
.stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
|
|
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
|
|
.descriptorCount = 1,
|
|
.pImmutableSamplers = 0,
|
|
}
|
|
};
|
|
|
|
VkDescriptorSetLayoutCreateInfo layout_info = {
|
|
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
|
|
.bindingCount = 1,
|
|
.pBindings = layout_bindings,
|
|
};
|
|
|
|
VkDescriptorSetLayout layout;
|
|
result = vkCreateDescriptorSetLayout(device, &layout_info, 0, &layout);
|
|
if(result != VK_SUCCESS) {
|
|
vkDestroyDescriptorPool(device, pool, 0);
|
|
return ret;
|
|
}
|
|
|
|
VkDescriptorSetLayout* layouts = malloc(sizeof(VkDescriptorSetLayout)*max_frames_in_flight);
|
|
if(layouts == 0) {
|
|
vkDestroyDescriptorPool(device, pool, 0);
|
|
return ret;
|
|
}
|
|
|
|
for(uint32_t i = 0; i < max_frames_in_flight; i++) {
|
|
layouts[i] = layout;
|
|
}
|
|
|
|
VkDescriptorSet* sets = malloc(sizeof(VkDescriptorSet)*max_frames_in_flight);
|
|
if(sets == 0) {
|
|
free(layouts);
|
|
vkDestroyDescriptorPool(device, pool, 0);
|
|
return ret;
|
|
}
|
|
|
|
VkDescriptorSetAllocateInfo set_alloc_info = {
|
|
.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
|
|
.descriptorPool = pool,
|
|
.descriptorSetCount = max_frames_in_flight,
|
|
.pSetLayouts = layouts,
|
|
};
|
|
|
|
result = vkAllocateDescriptorSets(device, &set_alloc_info, sets);
|
|
free(layouts);
|
|
if(result != VK_SUCCESS) {
|
|
free(sets);
|
|
vkDestroyDescriptorPool(device, pool, 0);
|
|
return ret;
|
|
}
|
|
|
|
GPUPage* scene_ubo_memory = NULL;
|
|
result = gpu_page_allocate(device, memories, 1000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, 0, &scene_ubo_memory);
|
|
if(result != VK_SUCCESS) {
|
|
return ret;
|
|
}
|
|
|
|
GPUBuffer* ubos = malloc(sizeof(GPUBuffer)*max_frames_in_flight);
|
|
if(ubos == NULL) {
|
|
return ret;
|
|
}
|
|
|
|
for(uint32_t i = 0; i < max_frames_in_flight; i++) {
|
|
result = gpu_buffer_malloc(device, scene_ubo_memory, sizeof(struct SceneUBO), VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, &ubos[i]);
|
|
if(result != VK_SUCCESS) {
|
|
free(sets);
|
|
free(ubos);
|
|
            // The pool was created without VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
            // so sets cannot be freed individually; destroying the pool releases them.
            vkDestroyDescriptorPool(device, pool, 0);
|
|
return ret;
|
|
}
|
|
}
|
|
|
|
void** ubo_ptrs = malloc(sizeof(void*)*max_frames_in_flight);
|
|
if(ubo_ptrs == 0) {
|
|
free(sets);
|
|
        vkDestroyDescriptorPool(device, pool, 0);
|
|
free(ubos);
|
|
return ret;
|
|
}
|
|
|
|
for(uint32_t i = 0; i < max_frames_in_flight; i++) {
|
|
ubo_ptrs[i] = ubos[i].page->ptr + ubos[i].memory->offset;
|
|
|
|
VkDescriptorBufferInfo buffer_info = {
|
|
.buffer = ubos[i].handle,
|
|
.offset = 0,
|
|
.range = sizeof(struct SceneUBO),
|
|
};
|
|
VkWriteDescriptorSet write = {
|
|
.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
|
|
.dstSet = sets[i],
|
|
.dstBinding = 0,
|
|
.dstArrayElement = 0,
|
|
.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
|
|
.descriptorCount = 1,
|
|
.pBufferInfo = &buffer_info,
|
|
};
|
|
|
|
vkUpdateDescriptorSets(device, 1, &write, 0, 0);
|
|
}
|
|
|
|
SceneContext scene = {
|
|
.pool = pool,
|
|
.descriptor_layout = layout,
|
|
.descriptors = sets,
|
|
.ubos = ubos,
|
|
.ubo_ptrs = ubo_ptrs,
|
|
.pcr_size = sizeof(struct ScenePC),
|
|
};
|
|
|
|
return scene;
|
|
}
|
|
|
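// Keyboard state sampled each frame by get_scene_constants; the flags are set and cleared by
// key_callback in response to GLFW press/release events.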
|
struct {
|
|
bool forward;
|
|
bool backward;
|
|
bool left;
|
|
bool right;
|
|
bool up;
|
|
bool down;
|
|
bool turn_left;
|
|
bool turn_right;
|
|
bool turn_up;
|
|
bool turn_down;
|
|
bool roll_left;
|
|
bool roll_right;
|
|
} key_flags = {
|
|
.forward = false,
|
|
.backward = false,
|
|
.left = false,
|
|
.right = false,
|
|
|
|
.turn_left = false,
|
|
.turn_right = false,
|
|
.turn_up = false,
|
|
.turn_down = false,
|
|
.roll_left = false,
|
|
.roll_right = false,
|
|
};
|
|
|
|
void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods) {
|
|
(void)scancode;
|
|
(void)window;
|
|
(void)mods;
|
|
|
|
switch(key) {
|
|
case GLFW_KEY_W:
|
|
if(action == GLFW_PRESS) {
|
|
key_flags.forward = true;
|
|
} else if(action == GLFW_RELEASE) {
|
|
key_flags.forward = false;
|
|
}
|
|
break;
|
|
|
|
case GLFW_KEY_A:
|
|
if(action == GLFW_PRESS) {
|
|
key_flags.left = true;
|
|
} else if(action == GLFW_RELEASE) {
|
|
key_flags.left = false;
|
|
}
|
|
break;
|
|
|
|
case GLFW_KEY_S:
|
|
if(action == GLFW_PRESS) {
|
|
key_flags.backward = true;
|
|
} else if(action == GLFW_RELEASE) {
|
|
key_flags.backward = false;
|
|
}
|
|
break;
|
|
|
|
case GLFW_KEY_D:
|
|
if(action == GLFW_PRESS) {
|
|
key_flags.right = true;
|
|
} else if(action == GLFW_RELEASE) {
|
|
key_flags.right = false;
|
|
}
|
|
break;
|
|
|
|
case GLFW_KEY_SPACE:
|
|
if(action == GLFW_PRESS) {
|
|
key_flags.up = true;
|
|
} else if(action == GLFW_RELEASE) {
|
|
key_flags.up = false;
|
|
}
|
|
break;
|
|
|
|
case GLFW_KEY_LEFT_SHIFT:
|
|
if(action == GLFW_PRESS) {
|
|
key_flags.down = true;
|
|
} else if(action == GLFW_RELEASE) {
|
|
key_flags.down = false;
|
|
}
|
|
break;
|
|
|
|
case GLFW_KEY_RIGHT:
|
|
if(action == GLFW_PRESS) {
|
|
key_flags.turn_right = true;
|
|
} else if(action == GLFW_RELEASE) {
|
|
key_flags.turn_right = false;
|
|
}
|
|
break;
|
|
|
|
case GLFW_KEY_LEFT:
|
|
if(action == GLFW_PRESS) {
|
|
key_flags.turn_left = true;
|
|
} else if(action == GLFW_RELEASE) {
|
|
key_flags.turn_left = false;
|
|
}
|
|
break;
|
|
|
|
case GLFW_KEY_UP:
|
|
if(action == GLFW_PRESS) {
|
|
key_flags.turn_up = true;
|
|
} else if(action == GLFW_RELEASE) {
|
|
key_flags.turn_up = false;
|
|
}
|
|
break;
|
|
|
|
case GLFW_KEY_DOWN:
|
|
if(action == GLFW_PRESS) {
|
|
key_flags.turn_down = true;
|
|
} else if(action == GLFW_RELEASE) {
|
|
key_flags.turn_down = false;
|
|
}
|
|
break;
|
|
|
|
case GLFW_KEY_Q:
|
|
if(action == GLFW_PRESS) {
|
|
key_flags.roll_left = true;
|
|
} else if(action == GLFW_RELEASE) {
|
|
key_flags.roll_left = false;
|
|
}
|
|
break;
|
|
|
|
case GLFW_KEY_E:
|
|
if(action == GLFW_PRESS) {
|
|
key_flags.roll_right = true;
|
|
} else if(action == GLFW_RELEASE) {
|
|
key_flags.roll_right = false;
|
|
}
|
|
break;
|
|
}
|
|
}
|
|
|
|
vec3 world_position = {0.0f, 0.0f, 0.0f};
|
|
versor world_rotation = {-1.0f, 0.0f, 0.0f, 0.0f};
|
|
|
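// Integrates the current key state into the camera position and orientation and returns the
// projection and view matrices packed as push constants. The vec3/versor parameters are arrays,
// so they decay to pointers and the global camera state is updated in place.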
|
struct ScenePC get_scene_constants(vec3 world_position, versor world_rotation, float aspect_ratio, float time_delta) {
|
|
vec3 movement_sum = {0.0f, 0.0f, 0.0f};
|
|
|
|
if(key_flags.forward) {
|
|
movement_sum[2] -= 1 * time_delta;
|
|
}
|
|
|
|
if(key_flags.backward) {
|
|
movement_sum[2] += 1 * time_delta;
|
|
}
|
|
|
|
if(key_flags.right) {
|
|
movement_sum[0] += 1 * time_delta;
|
|
}
|
|
|
|
if(key_flags.left) {
|
|
movement_sum[0] -= 1 * time_delta;
|
|
}
|
|
|
|
if(key_flags.up) {
|
|
movement_sum[1] -= 1 * time_delta;
|
|
}
|
|
|
|
if(key_flags.down) {
|
|
movement_sum[1] += 1 * time_delta;
|
|
}
|
|
|
|
|
|
    vec3 euler_rotation = {0.0f, 0.0f, 0.0f};
    if(key_flags.turn_right) {
        euler_rotation[0] -= 1 * time_delta;
    }

    if(key_flags.turn_left) {
        euler_rotation[0] += 1 * time_delta;
    }

    if(key_flags.turn_up) {
        euler_rotation[1] -= 1 * time_delta;
    }

    if(key_flags.turn_down) {
        euler_rotation[1] += 1 * time_delta;
    }

    if(key_flags.roll_right) {
        euler_rotation[2] += 1 * time_delta;
    }

    if(key_flags.roll_left) {
        euler_rotation[2] -= 1 * time_delta;
    }

    // Build the camera's local axes by rotating the world basis vectors by the current orientation.
    vec3 right = {1.0f, 0.0f, 0.0f};
    vec3 up = {0.0f, 1.0f, 0.0f};
    vec3 forward = {0.0f, 0.0f, 1.0f};
    glm_quat_rotatev(world_rotation, right, right);
    glm_quat_rotatev(world_rotation, up, up);
    glm_quat_rotatev(world_rotation, forward, forward);

    versor relative_rotation_y;
    glm_quatv(relative_rotation_y, euler_rotation[1], right);

    versor relative_rotation_x;
    glm_quatv(relative_rotation_x, euler_rotation[0], up);

    versor relative_rotation_z;
    glm_quatv(relative_rotation_z, euler_rotation[2], forward);
|
|
|
|
glm_quat_mul(relative_rotation_x, world_rotation, world_rotation);
|
|
glm_quat_mul(relative_rotation_y, world_rotation, world_rotation);
|
|
glm_quat_mul(relative_rotation_z, world_rotation, world_rotation);
|
|
|
|
vec3 movement_rot;
|
|
glm_quat_rotatev(world_rotation, movement_sum, movement_rot);
|
|
glm_vec3_add(movement_rot, world_position, world_position);
|
|
|
|
struct ScenePC constants = {};
|
|
|
|
glm_perspective(1.5708f, aspect_ratio, 0.01, 1000, constants.proj);
|
|
glm_quat_look(world_position, world_rotation, constants.view);
|
|
|
|
return constants;
|
|
}
|
|
|
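// Renders one frame: waits on the frame's fence, acquires a swapchain image, re-records the
// swapchain and offscreen (G-buffer) command buffers, submits the offscreen pass first and chains
// the main pass to it with a semaphore, then presents the acquired image.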
|
VkResult draw_frame(VulkanContext* context, SceneContext* scene, uint32_t pipelines_count, GraphicsPipeline* pipelines, uint32_t* objects_counts, Object** objects, VkDeviceAddress object_buffer_addr) {
|
|
struct ScenePC scene_constants = get_scene_constants(world_position, world_rotation, (float)context->swapchain_extent.width/(float)context->swapchain_extent.height, 0.01);
|
|
|
|
VkResult result;
|
|
result = vkWaitForFences(context->device, 1, &context->in_flight_fences[context->current_frame], VK_TRUE, UINT64_MAX);
|
|
if(result != VK_SUCCESS) {
|
|
return result;
|
|
}
|
|
|
|
uint32_t image_index;
|
|
result = vkAcquireNextImageKHR(context->device, context->swapchain, UINT64_MAX, context->image_available_semaphores[context->current_frame], VK_NULL_HANDLE, &image_index);
|
|
if(result != VK_SUCCESS) {
|
|
return result;
|
|
}
|
|
|
|
result = vkResetFences(context->device, 1, &context->in_flight_fences[context->current_frame]);
|
|
if(result != VK_SUCCESS) {
|
|
return result;
|
|
}
|
|
|
|
result = vkResetCommandBuffer(context->swapchain_command_buffers[context->current_frame], 0);
|
|
if(result != VK_SUCCESS) {
|
|
return result;
|
|
}
|
|
|
|
result = command_draw_scene(pipelines_count, pipelines, objects_counts, objects, context->current_frame, scene->descriptors, &scene_constants, context->swapchain_command_buffers[context->current_frame], context->render_pass, context->swapchain_framebuffers[image_index], context->swapchain_extent, object_buffer_addr, 0);
|
|
if(result != VK_SUCCESS) {
|
|
return result;
|
|
}
|
|
|
|
result = vkResetCommandBuffer(context->offscreen_command_buffers[context->current_frame], 0);
|
|
if(result != VK_SUCCESS) {
|
|
return result;
|
|
}
|
|
|
|
result = command_draw_scene(pipelines_count, pipelines, objects_counts, objects, context->current_frame, scene->descriptors, &scene_constants, context->offscreen_command_buffers[context->current_frame], context->g_renderpass, context->g_framebuffer, context->swapchain_extent, object_buffer_addr, 1);
|
|
if(result != VK_SUCCESS) {
|
|
return result;
|
|
}
|
|
|
|
VkPipelineStageFlags wait_stages[] = {VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT};
|
|
VkSubmitInfo offscreen_submit_info = {
|
|
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
|
|
.waitSemaphoreCount = 1,
|
|
.pWaitSemaphores = &context->image_available_semaphores[context->current_frame],
|
|
.pWaitDstStageMask = wait_stages,
|
|
.commandBufferCount = 1,
|
|
.pCommandBuffers = &context->offscreen_command_buffers[context->current_frame],
|
|
.signalSemaphoreCount = 1,
|
|
.pSignalSemaphores = &context->offscreen_complete_semaphores[0],
|
|
};
|
|
|
|
VkSubmitInfo submit_info = {
|
|
.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
|
|
.waitSemaphoreCount = 1,
|
|
.pWaitSemaphores = &context->offscreen_complete_semaphores[0],
|
|
.pWaitDstStageMask = wait_stages,
|
|
.commandBufferCount = 1,
|
|
.pCommandBuffers = &context->swapchain_command_buffers[context->current_frame],
|
|
.signalSemaphoreCount = 1,
|
|
.pSignalSemaphores = &context->render_finished_semaphores[context->current_frame],
|
|
};
|
|
|
|
result = vkQueueSubmit(context->graphics_queue.handle, 1, &offscreen_submit_info, 0);
|
|
if(result != VK_SUCCESS) {
|
|
return result;
|
|
}
|
|
|
|
result = vkQueueSubmit(context->graphics_queue.handle, 1, &submit_info, context->in_flight_fences[context->current_frame]);
|
|
if(result != VK_SUCCESS) {
|
|
return result;
|
|
}
|
|
|
|
VkPresentInfoKHR present_info = {
|
|
.sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
|
|
.waitSemaphoreCount = 1,
|
|
.pWaitSemaphores = &context->render_finished_semaphores[context->current_frame],
|
|
.swapchainCount = 1,
|
|
.pSwapchains = &context->swapchain,
|
|
.pImageIndices = &image_index,
|
|
.pResults = 0,
|
|
};
|
|
|
|
return vkQueuePresentKHR(context->present_queue.handle, &present_info);
|
|
}
|
|
|
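// Converts a loaded PLY mesh into the renderer's Vertex layout, uploads it to device-local
// memory, and wraps it in an Object carrying a default Position attribute.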
|
Object create_simple_mesh_object(PlyMesh ply_mesh, GraphicsPipeline* simple_mesh_pipeline, VkPhysicalDeviceMemoryProperties memories, VkDevice device, VkCommandPool transfer_pool, Queue transfer_queue) {
|
|
Object zero = {};
|
|
|
|
GPUPage* mesh_memory = NULL;
|
|
VkResult result = gpu_page_allocate(device, memories, 100000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT, 0, &mesh_memory);
|
|
if(result != VK_SUCCESS) {
|
|
return zero;
|
|
}
|
|
|
|
GPUPage* transfer_memory = NULL;
|
|
result = gpu_page_allocate(device, memories, 200000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, 0, &transfer_memory);
|
|
if(result != VK_SUCCESS) {
|
|
return zero;
|
|
}
|
|
|
|
GPUBuffer transfer_buffer = {0};
|
|
result = gpu_buffer_malloc(device, transfer_memory, 100000, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, &transfer_buffer);
|
|
if(result != VK_SUCCESS) {
|
|
return zero;
|
|
}
|
|
|
|
    struct Vertex* tmp = malloc(sizeof(struct Vertex)*ply_mesh.vertex_count);
    if(tmp == NULL) {
        return zero;
    }
|
|
for(uint32_t i = 0; i < ply_mesh.vertex_count; i++) {
|
|
tmp[i].pos[0] = ply_mesh.position[i][0];
|
|
tmp[i].pos[1] = ply_mesh.position[i][1];
|
|
tmp[i].pos[2] = ply_mesh.position[i][2];
|
|
tmp[i].color[0] = ply_mesh.colour[i][0];
|
|
tmp[i].color[1] = ply_mesh.colour[i][1];
|
|
tmp[i].color[2] = ply_mesh.colour[i][2];
|
|
}
|
|
|
|
Mesh* mesh = load_mesh_to_buffer(device, mesh_memory, transfer_buffer, ply_mesh.vertex_count, sizeof(struct Vertex), (void*)tmp, ply_mesh.index_count, sizeof(uint16_t), (void*)ply_mesh.index, transfer_pool, transfer_queue);
|
|
free(tmp);
|
|
if(mesh == 0) {
|
|
return zero;
|
|
}
|
|
|
|
Object object = create_renderable(mesh, simple_mesh_pipeline);
|
|
if(object.attributes.buckets == 0) {
|
|
return zero;
|
|
}
|
|
|
|
Position* position = malloc(sizeof(Position));
|
|
if(position == 0) {
|
|
return zero;
|
|
}
|
|
glm_quat_identity(position->rotation);
|
|
position->scale[0] = 1.f;
|
|
position->scale[1] = 1.f;
|
|
position->scale[2] = 1.f;
|
|
position->position[0] = 0.0f;
|
|
position->position[1] = 0.0f;
|
|
position->position[2] = 1.1f;
|
|
bool map_result = map_add(&object.attributes, ATTRIBUTE_ID_POSITION, position);
|
|
if(map_result == 0) {
|
|
return zero;
|
|
}
|
|
|
|
return object;
|
|
}
|
|
|
|
Object create_texture_mesh_object(GraphicsPipeline* texture_mesh_pipeline, VkPhysicalDeviceMemoryProperties memories, VkDevice device, VkCommandPool transfer_pool, Queue transfer_queue) {
|
|
Object zero = {};
|
|
|
|
GPUPage* mesh_memory = NULL;
|
|
VkResult result = gpu_page_allocate(device, memories, 100000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT, 0, &mesh_memory);
|
|
if(result != VK_SUCCESS) {
|
|
return zero;
|
|
}
|
|
|
|
GPUPage* transfer_memory = NULL;
|
|
result = gpu_page_allocate(device, memories, 100000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, 0, &transfer_memory);
|
|
if(result != VK_SUCCESS) {
|
|
return zero;
|
|
}
|
|
|
|
GPUBuffer transfer_buffer = {0};
|
|
result = gpu_buffer_malloc(device, transfer_memory, 10000, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, &transfer_buffer);
|
|
if(result != VK_SUCCESS) {
|
|
return zero;
|
|
}
|
|
|
|
Mesh* mesh = load_mesh_to_buffer(device, mesh_memory, transfer_buffer, 4, sizeof(struct TextureVertex), (void*)texture_vertices, 6, sizeof(uint16_t), (void*)indices, transfer_pool, transfer_queue);
|
|
if(mesh == 0) {
|
|
return zero;
|
|
}
|
|
|
|
Object object = create_renderable(mesh, texture_mesh_pipeline);
|
|
if(object.attributes.buckets == 0) {
|
|
return zero;
|
|
}
|
|
|
|
Position* position = malloc(sizeof(Position));
|
|
if(position == 0) {
|
|
return zero;
|
|
}
|
|
glm_quat_identity(position->rotation);
|
|
position->scale[0] = 0.5f;
|
|
position->scale[1] = 0.5f;
|
|
position->scale[2] = 0.5f;
|
|
position->position[0] = 0.0f;
|
|
position->position[1] = 0.0f;
|
|
position->position[2] = 1.0f;
|
|
bool map_result = map_add(&object.attributes, ATTRIBUTE_ID_POSITION, position);
|
|
if(map_result == 0) {
|
|
return zero;
|
|
}
|
|
|
|
return object;
|
|
}
|
|
|
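// Sets up the scene context, the two pipelines and their test objects, and a device-addressable
// buffer of per-object model matrices, then runs the draw loop until the window closes,
// recreating the swapchain when presentation reports it is out of date or suboptimal.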
|
void main_loop(PlyMesh ply_mesh, GLFWwindow* window, VulkanContext* context) {
|
|
SceneContext scene = create_scene_context(context->device, context->memories, context->max_frames_in_flight);
|
|
if(scene.pool == VK_NULL_HANDLE) {
|
|
return;
|
|
}
|
|
|
|
GraphicsPipeline simple_mesh_pipeline = {0};
|
|
GraphicsPipeline texture_mesh_pipeline = {0};
|
|
VkResult result = create_simple_mesh_pipeline(context->device, context->swapchain_extent, context->render_pass, context->g_renderpass, scene.descriptor_layout, context->max_frames_in_flight, &simple_mesh_pipeline);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "failed to create simple mesh material: %s\n", string_VkResult(result));
|
|
return;
|
|
}
|
|
|
|
Object triangle_object = create_simple_mesh_object(ply_mesh, &simple_mesh_pipeline, context->memories, context->device, context->transfer_command_pool, context->transfer_queue);
|
|
if(triangle_object.attributes.buckets == 0) {
|
|
fprintf(stderr, "failed to create simple mesh object\n");
|
|
return;
|
|
}
|
|
|
|
result = create_texture_mesh_pipeline(context->device, context->memories, context->swapchain_extent, context->render_pass, context->g_renderpass, scene.descriptor_layout, context->max_frames_in_flight, context->transfer_command_pool, context->transfer_queue, context->graphics_queue, context->extra_graphics_pool, &texture_mesh_pipeline);
|
|
if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "failed to create texture mesh material\n");
|
|
return;
|
|
}
|
|
|
|
Object triangle_object_textured = create_texture_mesh_object(&texture_mesh_pipeline, context->memories, context->device, context->transfer_command_pool, context->transfer_queue);
|
|
if(triangle_object_textured.attributes.buckets == 0) {
|
|
fprintf(stderr, "failed to create texture mesh object\n");
|
|
return;
|
|
}
|
|
|
|
Object* objects[] = {&triangle_object, &triangle_object_textured};
|
|
GraphicsPipeline pipelines[] = {simple_mesh_pipeline, texture_mesh_pipeline};
|
|
uint32_t objects_counts[] = {1, 1};
|
|
|
|
GPUPage* memory = NULL;
|
|
result = gpu_page_allocate(context->device, context->memories, 100000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, VK_MEMORY_ALLOCATE_DEVICE_ADDRESS_BIT, &memory);
|
|
if(result != VK_SUCCESS) {
|
|
return;
|
|
}
|
|
|
|
GPUBuffer buffer = {0};
|
|
result = gpu_buffer_malloc(context->device, memory, sizeof(mat4)*100, VK_BUFFER_USAGE_SHADER_DEVICE_ADDRESS_BIT | VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, &buffer);
|
|
if(result != VK_SUCCESS) {
|
|
return;
|
|
}
|
|
|
|
mat4* model_1 = memory->ptr + buffer.memory->offset;
|
|
mat4* model_2 = memory->ptr + buffer.memory->offset + sizeof(mat4);
|
|
glm_mat4_identity(*model_1);
|
|
glm_translate_x(*model_1, 1.0f);
|
|
glm_mat4_identity(*model_2);
|
|
glm_translate_x(*model_2, -1.0f);
|
|
|
|
VkBufferDeviceAddressInfo addr_info = {
|
|
.sType = VK_STRUCTURE_TYPE_BUFFER_DEVICE_ADDRESS_INFO,
|
|
.buffer = buffer.handle,
|
|
};
|
|
VkDeviceAddress object_address = vkGetBufferDeviceAddress(context->device, &addr_info);
|
|
|
|
context->current_frame = 0;
|
|
while(!glfwWindowShouldClose(window)) {
|
|
glfwPollEvents();
|
|
|
|
VkResult result = draw_frame(context, &scene, sizeof(pipelines)/sizeof(GraphicsPipeline), pipelines, objects_counts, objects, object_address);
|
|
if(result == VK_ERROR_OUT_OF_DATE_KHR || result == VK_SUBOPTIMAL_KHR) {
|
|
vkDeviceWaitIdle(context->device);
|
|
recreate_swapchain(context);
|
|
} else if(result != VK_SUCCESS) {
|
|
fprintf(stderr, "draw_frame error %d\n", result);
|
|
return;
|
|
}
|
|
|
|
context->current_frame += 1;
|
|
if(context->current_frame >= context->max_frames_in_flight) {
|
|
context->current_frame = 0;
|
|
}
|
|
}
|
|
|
|
vkDeviceWaitIdle(context->device);
|
|
}
|
|
|
|
void cleanup(GLFWwindow* window, VulkanContext* context) {
|
|
if(context != 0) {
|
|
if(context->instance != VK_NULL_HANDLE) {
|
|
if(context->extra_graphics_pool != VK_NULL_HANDLE) {
|
|
vkDestroyCommandPool(context->device, context->extra_graphics_pool, 0);
|
|
}
|
|
|
|
if(context->render_pass != VK_NULL_HANDLE) {
|
|
vkDestroyRenderPass(context->device, context->render_pass, 0);
|
|
}
|
|
|
|
if(context->depth_image != VK_NULL_HANDLE) {
|
|
vkDestroyImage(context->device, context->depth_image, 0);
|
|
}
|
|
|
|
for(uint32_t i = 0; i < context->swapchain_image_count; i++) {
|
|
vkDestroyFramebuffer(context->device, context->swapchain_framebuffers[i], 0);
|
|
vkDestroyImageView(context->device, context->swapchain_image_views[i], 0);
|
|
}
|
|
|
|
for(uint32_t i = 0; i < context->max_frames_in_flight; i++) {
|
|
vkDestroySemaphore(context->device, context->image_available_semaphores[i], 0);
|
|
vkDestroySemaphore(context->device, context->render_finished_semaphores[i], 0);
|
|
vkDestroyFence(context->device, context->in_flight_fences[i], 0);
|
|
}
|
|
|
|
if(context->depth_image_view != VK_NULL_HANDLE) {
|
|
vkDestroyImageView(context->device, context->depth_image_view, 0);
|
|
}
|
|
|
|
if(context->depth_image_memory != VK_NULL_HANDLE) {
|
|
vkFreeMemory(context->device, context->depth_image_memory, 0);
|
|
}
|
|
|
|
if(context->graphics_command_pool != VK_NULL_HANDLE) {
|
|
vkDestroyCommandPool(context->device, context->graphics_command_pool, 0);
|
|
}
|
|
|
|
if(context->transfer_command_pool != VK_NULL_HANDLE) {
|
|
vkDestroyCommandPool(context->device, context->transfer_command_pool, 0);
|
|
}
|
|
|
|
if(context->swapchain_command_buffers != NULL) {
|
|
free(context->swapchain_command_buffers);
|
|
}
|
|
|
|
if(context->swapchain != VK_NULL_HANDLE) {
|
|
vkDestroySwapchainKHR(context->device, context->swapchain, 0);
|
|
}
|
|
|
|
if(context->surface != VK_NULL_HANDLE) {
|
|
vkDestroySurfaceKHR(context->instance, context->surface, 0);
|
|
}
|
|
|
|
if(context->device != VK_NULL_HANDLE) {
|
|
vkDestroyDevice(context->device, 0);
|
|
}
|
|
|
|
if(context->debug_messenger != VK_NULL_HANDLE) {
|
|
PFN_vkDestroyDebugUtilsMessengerEXT destroy_messenger = (PFN_vkDestroyDebugUtilsMessengerEXT)vkGetInstanceProcAddr(context->instance, "vkDestroyDebugUtilsMessengerEXT");
|
|
destroy_messenger(context->instance, context->debug_messenger, 0);
|
|
}
|
|
|
|
vkDestroyInstance(context->instance, 0);
|
|
}
|
|
free(context);
|
|
}
|
|
|
|
if(window != 0) {
|
|
glfwDestroyWindow(window);
|
|
glfwTerminate();
|
|
}
|
|
}
|
|
|
|
int main() {
|
|
PlyMesh monkey = ply_load_mesh("monkey.ply", default_ply_mappings);
|
|
    if(monkey.position == 0) {
        fprintf(stderr, "failed to load %s\n", "monkey.ply");
        return 1;
    }
|
|
|
|
GLFWwindow* window = init_window(800, 600);
|
|
if(window == 0) {
|
|
fprintf(stderr, "failed to initialize glfw window\n");
|
|
return 1;
|
|
}
|
|
|
|
VulkanContext* context = init_vulkan(window, 2);
|
|
if (context == 0) {
|
|
fprintf(stderr, "failed to initialize vulkan context\n");
|
|
return 2;
|
|
}
|
|
|
|
glfwSetKeyCallback(window, key_callback);
|
|
main_loop(monkey, window, context);
|
|
|
|
cleanup(window, context);
|
|
|
|
return 0;
|
|
}
|