|
|
|
@@ -50,12 +50,28 @@ typedef struct AllocatedBufferStruct {
|
|
|
|
|
VkDeviceMemory memory;
|
|
|
|
|
} AllocatedBuffer;
|
|
|
|
|
|
|
|
|
|
/* Identifies a graphics pipeline by index; load_mesh uses 0xFFFFFFFF as an "invalid" sentinel. */
typedef uint32_t PipelineIdx;
|
|
|
|
|
/*
 * Everything needed to draw with one graphics pipeline: the pipeline and
 * its layout inputs, plus the per-frame descriptor resources (uniform
 * buffers, persistently mapped CPU pointers, and the descriptor sets
 * that reference them). Built by create_simple_mesh_material().
 */
typedef struct MaterialStruct {
    /* Number of entries in descriptor_set_layouts. */
    uint32_t descriptor_set_layouts_count;
    /* Set layouts used to build pipeline_layout and allocate descriptor sets. */
    VkDescriptorSetLayout* descriptor_set_layouts;
    /* Number of entries in push_constant_ranges. */
    uint32_t push_constant_ranges_count;
    VkPushConstantRange* push_constant_ranges;
    /* Pipeline layout built from the set layouts and push-constant ranges above. */
    VkPipelineLayout pipeline_layout;
    VkPipeline pipeline;
    /* Pool the per-frame descriptor sets below are allocated from. */
    VkDescriptorPool descriptor_pool;
    /* Number of frames in flight; the length of the three per-frame arrays below. */
    uint32_t frame_count;
    /* One descriptor set per frame in flight; bound by record_command_buffer_mesh. */
    VkDescriptorSet* descriptor_sets;
    /* One host-visible uniform buffer per frame in flight. */
    AllocatedBuffer* uniform_buffers;
    /* Persistently mapped CPU pointers into uniform_buffers (one per frame),
     * written each frame by update_ubo(). */
    void** uniform_buffer_ptrs;
} Material;
|
|
|
|
|
|
|
|
|
|
/*
 * GPU-resident geometry: a vertex buffer plus a 16-bit index buffer
 * (drawn with VK_INDEX_TYPE_UINT16 in record_command_buffer_mesh),
 * and the index of the pipeline used to draw it.
 */
typedef struct MeshStruct {
    /* Number of vertices in vertex_buffer. */
    uint32_t vertex_count;
    VkDeviceMemory memory;
    AllocatedBuffer vertex_buffer;
    /* Number of 16-bit indices in index_buffer; used as the draw count. */
    uint32_t index_count;
    AllocatedBuffer index_buffer;
    /* Pipeline to draw with; load_mesh initializes it to the 0xFFFFFFFF sentinel. */
    PipelineIdx pipeline;
} Mesh;
|
|
|
|
|
|
|
|
|
|
typedef struct VulkanContextStruct {
|
|
|
|
@@ -94,15 +110,8 @@ typedef struct VulkanContextStruct {
|
|
|
|
|
VkCommandPool graphics_command_pool;
|
|
|
|
|
VkCommandPool transfer_command_pool;
|
|
|
|
|
|
|
|
|
|
VkDescriptorSetLayout mesh_descriptor_set_layout;
|
|
|
|
|
VkPipelineLayout mesh_pipeline_layout;
|
|
|
|
|
VkPipeline default_mesh_pipeline;
|
|
|
|
|
AllocatedBuffer* mesh_uniform_buffers;
|
|
|
|
|
void** mesh_uniform_buffer_ptrs;
|
|
|
|
|
VkDescriptorPool mesh_descriptor_pool;
|
|
|
|
|
VkDescriptorSet* mesh_descriptor_sets;
|
|
|
|
|
|
|
|
|
|
Mesh triangle_mesh;
|
|
|
|
|
Material simple_mesh_material;
|
|
|
|
|
|
|
|
|
|
uint32_t current_frame;
|
|
|
|
|
} VulkanContext;
|
|
|
|
@@ -236,7 +245,7 @@ static VKAPI_ATTR VkBool32 VKAPI_CALL debug_callback(
|
|
|
|
|
return VK_FALSE;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
VkDescriptorSet* create_descriptor_sets(VkDevice device, VkDescriptorSetLayout layout, VkDescriptorPool pool, AllocatedBuffer* uniform_buffers, uint32_t count) {
|
|
|
|
|
VkDescriptorSet* create_descriptor_sets(VkDevice device, VkDescriptorSetLayout layout, VkDescriptorPool pool, uint32_t count) {
|
|
|
|
|
VkDescriptorSetLayout* layouts = malloc(sizeof(VkDescriptorSetLayout)*count);
|
|
|
|
|
if(layouts == 0) {
|
|
|
|
|
return 0;
|
|
|
|
@@ -265,41 +274,15 @@ VkDescriptorSet* create_descriptor_sets(VkDevice device, VkDescriptorSetLayout l
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
for(uint32_t i = 0; i < count; i++) {
|
|
|
|
|
VkDescriptorBufferInfo buffer_info = {};
|
|
|
|
|
buffer_info.buffer = uniform_buffers[i].buffer;
|
|
|
|
|
buffer_info.offset = 0;
|
|
|
|
|
buffer_info.range = sizeof(struct ShaderUBO);
|
|
|
|
|
|
|
|
|
|
VkWriteDescriptorSet descriptor_write = {};
|
|
|
|
|
descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
|
|
|
|
|
descriptor_write.dstSet = sets[i];
|
|
|
|
|
descriptor_write.dstBinding = 0;
|
|
|
|
|
descriptor_write.dstArrayElement = 0;
|
|
|
|
|
descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
|
|
|
|
|
descriptor_write.descriptorCount = 1;
|
|
|
|
|
descriptor_write.pBufferInfo = &buffer_info;
|
|
|
|
|
descriptor_write.pImageInfo = 0;
|
|
|
|
|
descriptor_write.pTexelBufferView = 0;
|
|
|
|
|
|
|
|
|
|
vkUpdateDescriptorSets(device, 1, &descriptor_write, 0, 0);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return sets;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
VkDescriptorSetLayout create_descriptor_set_layout(VkDevice device) {
|
|
|
|
|
VkDescriptorSetLayoutBinding layout_binding = {};
|
|
|
|
|
layout_binding.binding = 0;
|
|
|
|
|
layout_binding.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
|
|
|
|
|
layout_binding.descriptorCount = 1;
|
|
|
|
|
layout_binding.stageFlags = VK_SHADER_STAGE_VERTEX_BIT;
|
|
|
|
|
layout_binding.pImmutableSamplers = 0;
|
|
|
|
|
|
|
|
|
|
VkDescriptorSetLayout create_descriptor_set_layout(VkDevice device, VkDescriptorSetLayoutBinding* bindings, uint32_t bindings_count) {
|
|
|
|
|
VkDescriptorSetLayoutCreateInfo layout_info = {};
|
|
|
|
|
layout_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
|
|
|
|
|
layout_info.bindingCount = 1;
|
|
|
|
|
layout_info.pBindings = &layout_binding;
|
|
|
|
|
layout_info.bindingCount = bindings_count;
|
|
|
|
|
layout_info.pBindings = bindings;
|
|
|
|
|
|
|
|
|
|
VkDescriptorSetLayout layout;
|
|
|
|
|
VkResult result = vkCreateDescriptorSetLayout(device, &layout_info, 0, &layout);
|
|
|
|
@@ -1222,7 +1205,7 @@ VkCommandPool create_command_pool(VkDevice device, uint32_t queue_family) {
|
|
|
|
|
return command_pool;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
VkResult record_command_buffer_triangle(VkCommandBuffer command_buffer, uint32_t image_index, VkRenderPass render_pass, VkFramebuffer* framebuffers, VkExtent2D extent, VkPipeline graphics_pipeline, VkPipelineLayout pipeline_layout, VkDescriptorSet descriptor_set, VkBuffer vertex_buffer, VkBuffer index_buffer, uint32_t num_vertices) {
|
|
|
|
|
VkResult record_command_buffer_mesh(VkCommandBuffer command_buffer, VkRenderPass render_pass, VkFramebuffer framebuffer, VkExtent2D extent, Material material, Mesh mesh, uint32_t current_frame) {
|
|
|
|
|
VkCommandBufferBeginInfo begin_info = {};
|
|
|
|
|
begin_info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
|
|
|
|
|
begin_info.flags = 0;
|
|
|
|
@@ -1236,7 +1219,7 @@ VkResult record_command_buffer_triangle(VkCommandBuffer command_buffer, uint32_t
|
|
|
|
|
VkRenderPassBeginInfo render_pass_info = {};
|
|
|
|
|
render_pass_info.sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO;
|
|
|
|
|
render_pass_info.renderPass = render_pass;
|
|
|
|
|
render_pass_info.framebuffer = framebuffers[image_index];
|
|
|
|
|
render_pass_info.framebuffer = framebuffer;
|
|
|
|
|
VkOffset2D render_offset = {.x = 0, .y = 0};
|
|
|
|
|
render_pass_info.renderArea.offset = render_offset;
|
|
|
|
|
render_pass_info.renderArea.extent = extent;
|
|
|
|
@@ -1245,12 +1228,12 @@ VkResult record_command_buffer_triangle(VkCommandBuffer command_buffer, uint32_t
|
|
|
|
|
render_pass_info.pClearValues = &clear_color;
|
|
|
|
|
|
|
|
|
|
vkCmdBeginRenderPass(command_buffer, &render_pass_info, VK_SUBPASS_CONTENTS_INLINE);
|
|
|
|
|
vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, graphics_pipeline);
|
|
|
|
|
vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, material.pipeline);
|
|
|
|
|
|
|
|
|
|
VkBuffer vertex_buffers[] = {vertex_buffer};
|
|
|
|
|
VkBuffer vertex_buffers[] = {mesh.vertex_buffer.buffer};
|
|
|
|
|
VkDeviceSize offsets[] = {0};
|
|
|
|
|
vkCmdBindVertexBuffers(command_buffer, 0, 1, vertex_buffers, offsets);
|
|
|
|
|
vkCmdBindIndexBuffer(command_buffer, index_buffer, 0, VK_INDEX_TYPE_UINT16);
|
|
|
|
|
vkCmdBindIndexBuffer(command_buffer, mesh.index_buffer.buffer, 0, VK_INDEX_TYPE_UINT16);
|
|
|
|
|
|
|
|
|
|
VkViewport viewport = {};
|
|
|
|
|
viewport.x = 0.0f;
|
|
|
|
@@ -1267,8 +1250,8 @@ VkResult record_command_buffer_triangle(VkCommandBuffer command_buffer, uint32_t
|
|
|
|
|
scissor.extent = extent;
|
|
|
|
|
vkCmdSetScissor(command_buffer, 0, 1, &scissor);
|
|
|
|
|
|
|
|
|
|
vkCmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline_layout, 0, 1, &descriptor_set, 0, 0);
|
|
|
|
|
vkCmdDrawIndexed(command_buffer, num_vertices, 1, 0, 0, 0);
|
|
|
|
|
vkCmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, material.pipeline_layout, 0, 1, &material.descriptor_sets[current_frame], 0, 0);
|
|
|
|
|
vkCmdDrawIndexed(command_buffer, mesh.index_count, 1, 0, 0, 0);
|
|
|
|
|
vkCmdEndRenderPass(command_buffer);
|
|
|
|
|
|
|
|
|
|
return vkEndCommandBuffer(command_buffer);
|
|
|
|
@@ -1334,34 +1317,140 @@ VkFence* create_fences(VkDevice device, VkFenceCreateFlags flags, uint32_t count
|
|
|
|
|
return fences;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Mesh load_mesh(VkPhysicalDevice physical_device, VkDevice device, struct Vertex* vertices, uint32_t vertex_count, uint16_t* indices, uint32_t index_count, VkCommandPool transfer_pool, VkQueue transfer_queue, PipelineIdx pipeline_index) {
|
|
|
|
|
Mesh load_mesh(VkPhysicalDevice physical_device, VkDevice device, struct Vertex* vertices, uint32_t vertex_count, uint16_t* indices, uint32_t index_count, VkCommandPool transfer_pool, VkQueue transfer_queue) {
|
|
|
|
|
Mesh mesh = {};
|
|
|
|
|
mesh.vertex_buffer.buffer = VK_NULL_HANDLE;
|
|
|
|
|
mesh.vertex_buffer.memory = VK_NULL_HANDLE;
|
|
|
|
|
mesh.index_buffer.buffer = VK_NULL_HANDLE;
|
|
|
|
|
mesh.index_buffer.memory = VK_NULL_HANDLE;
|
|
|
|
|
mesh.pipeline = 0xFFFFFFFF;
|
|
|
|
|
|
|
|
|
|
AllocatedBuffer vertex_buffer = create_populated_buffer(physical_device, device, (void*)vertices, sizeof(struct Vertex) * vertex_count, transfer_pool, transfer_queue, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
|
|
|
|
|
if(vertex_buffer.memory == VK_NULL_HANDLE) {
|
|
|
|
|
return mesh;
|
|
|
|
|
} else {
|
|
|
|
|
mesh.vertex_buffer = vertex_buffer;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
mesh.vertex_buffer = vertex_buffer;
|
|
|
|
|
mesh.vertex_count = vertex_count;
|
|
|
|
|
|
|
|
|
|
AllocatedBuffer index_buffer = create_populated_buffer(physical_device, device, (void*)indices, sizeof(uint16_t) * index_count, transfer_pool, transfer_queue, VK_BUFFER_USAGE_INDEX_BUFFER_BIT);
|
|
|
|
|
if(index_buffer.memory == VK_NULL_HANDLE) {
|
|
|
|
|
deallocate_buffer(device, vertex_buffer);
|
|
|
|
|
AllocatedBuffer tmp = { .memory = VK_NULL_HANDLE, .buffer = VK_NULL_HANDLE};
|
|
|
|
|
mesh.vertex_buffer = tmp;
|
|
|
|
|
return mesh;
|
|
|
|
|
} else {
|
|
|
|
|
mesh.index_buffer = index_buffer;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
mesh.pipeline = pipeline_index;
|
|
|
|
|
mesh.index_buffer = index_buffer;
|
|
|
|
|
mesh.index_count = index_count;
|
|
|
|
|
|
|
|
|
|
return mesh;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/*
 * Builds the Material used to draw simple meshes: one uniform buffer, one
 * descriptor set and one persistently-mapped CPU pointer per frame in
 * flight, plus the descriptor set layout, pipeline layout and graphics
 * pipeline they feed.
 *
 * Returns a zeroed Material (pipeline == VK_NULL_HANDLE) on any failure;
 * all host-side allocations made up to that point are released via the
 * single cleanup path at the bottom.
 *
 * Fixes over the previous version:
 *  - descriptor_set_layouts previously pointed at a stack local, leaving a
 *    dangling pointer in the returned Material; the layout handle array is
 *    now heap-allocated and owned by the Material.
 *  - failure paths after pool creation no longer leak uniform_buffer_ptrs /
 *    uniform_buffers or leave the buffers mapped.
 *
 * NOTE(review): Vulkan handles created before a later failure (descriptor
 * pool, set layout, the buffers' device memory) are still not destroyed on
 * the error paths; releasing them needs deallocate_buffer/vkDestroy* calls
 * in the cleanup path — confirm against the module's teardown conventions.
 */
Material create_simple_mesh_material(VkPhysicalDevice physical_device, VkDevice device, VkExtent2D extent, VkRenderPass render_pass, uint32_t num_frames) {
    Material zero_material = {
        .pipeline = VK_NULL_HANDLE,
    };

    /* Per-frame host-visible, host-coherent uniform buffers for the shader UBO. */
    AllocatedBuffer* uniform_buffers = allocate_buffers(physical_device, device, sizeof(struct ShaderUBO), VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, num_frames);
    if(uniform_buffers == 0) {
        return zero_material;
    }

    void** uniform_buffer_ptrs = malloc(sizeof(void*)*num_frames);
    /* The returned Material keeps descriptor_set_layouts alive after this
     * function returns, so the array must live on the heap — never point it
     * at a stack local. */
    VkDescriptorSetLayout* set_layouts = malloc(sizeof(VkDescriptorSetLayout));
    VkDescriptorSet* descriptor_sets = 0;
    uint32_t mapped_count = 0;
    if(uniform_buffer_ptrs == 0 || set_layouts == 0) {
        goto fail;
    }

    /* Map each uniform buffer once; the pointers stay mapped for the
     * material's lifetime so per-frame UBO updates are a plain memcpy. */
    for(uint32_t i = 0; i < num_frames; i++) {
        VkResult result = vkMapMemory(device, uniform_buffers[i].memory, 0, sizeof(struct ShaderUBO), 0, &uniform_buffer_ptrs[i]);
        if(result != VK_SUCCESS) {
            goto fail;
        }
        mapped_count = i + 1;
    }

    VkDescriptorPool descriptor_pool = create_descriptor_pool(device, num_frames);
    if(descriptor_pool == VK_NULL_HANDLE) {
        goto fail;
    }

    /* Single UBO binding at binding 0, visible to the vertex stage. */
    VkDescriptorSetLayoutBinding ubo_layout_binding = {
        .binding = 0,
        .stageFlags = VK_SHADER_STAGE_VERTEX_BIT,
        .descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
        .descriptorCount = 1,
        .pImmutableSamplers = 0,
    };

    set_layouts[0] = create_descriptor_set_layout(device, &ubo_layout_binding, 1);
    if(set_layouts[0] == VK_NULL_HANDLE) {
        goto fail;
    }

    descriptor_sets = create_descriptor_sets(device, set_layouts[0], descriptor_pool, num_frames);
    if(descriptor_sets == 0) {
        goto fail;
    }

    /* Point each per-frame descriptor set at its own uniform buffer. */
    for(uint32_t i = 0; i < num_frames; i++) {
        VkDescriptorBufferInfo buffer_info = {};
        buffer_info.buffer = uniform_buffers[i].buffer;
        buffer_info.offset = 0;
        buffer_info.range = sizeof(struct ShaderUBO);

        VkWriteDescriptorSet descriptor_write = {};
        descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET;
        descriptor_write.dstSet = descriptor_sets[i];
        descriptor_write.dstBinding = 0;
        descriptor_write.dstArrayElement = 0;
        descriptor_write.descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
        descriptor_write.descriptorCount = 1;
        descriptor_write.pBufferInfo = &buffer_info;
        descriptor_write.pImageInfo = 0;
        descriptor_write.pTexelBufferView = 0;

        vkUpdateDescriptorSets(device, 1, &descriptor_write, 0, 0);
    }

    VkPipelineLayout pipeline_layout = create_pipeline_layout(device, 1, set_layouts, 0, 0);
    if(pipeline_layout == VK_NULL_HANDLE) {
        goto fail;
    }

    VkPipeline pipeline = create_graphics_pipeline(device, extent, pipeline_layout, render_pass);
    if(pipeline == VK_NULL_HANDLE) {
        goto fail;
    }

    Material material = {
        .descriptor_set_layouts_count = 1,
        .descriptor_set_layouts = set_layouts,
        .push_constant_ranges_count = 0,
        .push_constant_ranges = 0,
        .pipeline_layout = pipeline_layout,
        .pipeline = pipeline,
        .descriptor_pool = descriptor_pool,
        .frame_count = num_frames,
        .descriptor_sets = descriptor_sets,
        .uniform_buffers = uniform_buffers,
        .uniform_buffer_ptrs = uniform_buffer_ptrs,
    };

    return material;

fail:
    /* Unwind host-side state: unmap only the buffers that were successfully
     * mapped, then release every array (free(NULL) is a no-op). */
    for(uint32_t i = 0; i < mapped_count; i++) {
        vkUnmapMemory(device, uniform_buffers[i].memory);
    }
    free(descriptor_sets);
    free(set_layouts);
    free(uniform_buffer_ptrs);
    free(uniform_buffers);
    return zero_material;
}
|
|
|
|
|
|
|
|
|
|
VulkanContext* init_vulkan(GLFWwindow* window, uint32_t max_frames_in_flight) {
|
|
|
|
|
VulkanContext* context = (VulkanContext*)malloc(sizeof(VulkanContext));
|
|
|
|
|
|
|
|
|
@@ -1519,67 +1608,15 @@ VulkanContext* init_vulkan(GLFWwindow* window, uint32_t max_frames_in_flight) {
|
|
|
|
|
context->in_flight_fences = if_fences;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
AllocatedBuffer* mesh_uniform_buffers = allocate_buffers(context->physical_device, context->device, sizeof(struct ShaderUBO), VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, max_frames_in_flight);
|
|
|
|
|
if(mesh_uniform_buffers == 0) {
|
|
|
|
|
fprintf(stderr, "failed to create vulkan uniform buffers\n");
|
|
|
|
|
return 0;
|
|
|
|
|
} else {
|
|
|
|
|
context->mesh_uniform_buffers = mesh_uniform_buffers;
|
|
|
|
|
context->mesh_uniform_buffer_ptrs = malloc(sizeof(void*)*max_frames_in_flight);
|
|
|
|
|
if(context->mesh_uniform_buffer_ptrs == 0) {
|
|
|
|
|
fprintf(stderr, "failed to allocate cpu pointers for uniform buffers\n");
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
for(uint32_t i = 0; i < max_frames_in_flight; i++) {
|
|
|
|
|
VkResult result = vkMapMemory(context->device, context->mesh_uniform_buffers[i].memory, 0, sizeof(struct ShaderUBO), 0, &context->mesh_uniform_buffer_ptrs[i]);
|
|
|
|
|
if(result != VK_SUCCESS) {
|
|
|
|
|
fprintf(stderr, "failed to map cpu pointer for uniform buffer\n");
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
VkDescriptorPool mesh_descriptor_pool = create_descriptor_pool(context->device, max_frames_in_flight);
|
|
|
|
|
if(mesh_descriptor_pool == VK_NULL_HANDLE) {
|
|
|
|
|
fprintf(stderr, "failed to create vulkan descriptor pool\n");
|
|
|
|
|
return 0;
|
|
|
|
|
} else {
|
|
|
|
|
context->mesh_descriptor_pool = mesh_descriptor_pool;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
VkDescriptorSetLayout mesh_descriptor_set_layout = create_descriptor_set_layout(device);
|
|
|
|
|
if(mesh_descriptor_set_layout == VK_NULL_HANDLE) {
|
|
|
|
|
fprintf(stderr, "failed to create vulkan descriptor set layout\n");
|
|
|
|
|
return 0;
|
|
|
|
|
} else {
|
|
|
|
|
context->mesh_descriptor_set_layout = mesh_descriptor_set_layout;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
VkDescriptorSet* mesh_descriptor_sets = create_descriptor_sets(context->device, context->mesh_descriptor_set_layout, context->mesh_descriptor_pool, context->mesh_uniform_buffers, max_frames_in_flight);
|
|
|
|
|
if(mesh_descriptor_sets == 0) {
|
|
|
|
|
fprintf(stderr, "failed to create vulkan descriptor sets\n");
|
|
|
|
|
return 0;
|
|
|
|
|
} else {
|
|
|
|
|
context->mesh_descriptor_sets = mesh_descriptor_sets;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
VkPipelineLayout mesh_pipeline_layout = create_pipeline_layout(device, 1, &context->mesh_descriptor_set_layout, 0, 0);
|
|
|
|
|
if(mesh_pipeline_layout == VK_NULL_HANDLE) {
|
|
|
|
|
fprintf(stderr, "failed to create vulkan pipeline layout\n");
|
|
|
|
|
return 0;
|
|
|
|
|
} else {
|
|
|
|
|
context->mesh_pipeline_layout = mesh_pipeline_layout;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
VkPipeline default_mesh_pipeline = create_graphics_pipeline(context->device, context->swapchain_extent, context->mesh_pipeline_layout, context->render_pass);
|
|
|
|
|
if(default_mesh_pipeline == VK_NULL_HANDLE) {
|
|
|
|
|
fprintf(stderr, "failed to create vulkan graphics pipeline\n");
|
|
|
|
|
Material simple_mesh_material = create_simple_mesh_material(context->physical_device, context->device, context->swapchain_extent, context->render_pass, max_frames_in_flight);
|
|
|
|
|
if(simple_mesh_material.pipeline == VK_NULL_HANDLE) {
|
|
|
|
|
fprintf(stderr, "failed to create simple mesh material\n");
|
|
|
|
|
return 0;
|
|
|
|
|
} else {
|
|
|
|
|
context->default_mesh_pipeline = default_mesh_pipeline;
|
|
|
|
|
context->simple_mesh_material = simple_mesh_material;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Mesh triangle_mesh = load_mesh(context->physical_device, context->device, (struct Vertex*)vertices, 4, (uint16_t*)indices, 6, context->transfer_command_pool, context->queues.transfer, 0x00000000);
|
|
|
|
|
Mesh triangle_mesh = load_mesh(context->physical_device, context->device, (struct Vertex*)vertices, 4, (uint16_t*)indices, 6, context->transfer_command_pool, context->queues.transfer);
|
|
|
|
|
if(triangle_mesh.vertex_buffer.buffer == VK_NULL_HANDLE) {
|
|
|
|
|
fprintf(stderr, "failed to load triangle mesh\n");
|
|
|
|
|
} else {
|
|
|
|
@@ -1605,7 +1642,7 @@ VkResult update_ubo(void** buffers, uint32_t frame_index) {
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
VkResult draw_frame(VulkanContext* context) {
|
|
|
|
|
update_ubo(context->mesh_uniform_buffer_ptrs, context->current_frame);
|
|
|
|
|
update_ubo(context->simple_mesh_material.uniform_buffer_ptrs, context->current_frame);
|
|
|
|
|
|
|
|
|
|
VkResult result;
|
|
|
|
|
result = vkWaitForFences(context->device, 1, &context->in_flight_fences[context->current_frame], VK_TRUE, UINT64_MAX);
|
|
|
|
@@ -1629,7 +1666,7 @@ VkResult draw_frame(VulkanContext* context) {
|
|
|
|
|
return result;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
result = record_command_buffer_triangle(context->swapchain_command_buffers[context->current_frame], image_index, context->render_pass, context->swapchain_framebuffers, context->swapchain_extent, context->default_mesh_pipeline, context->mesh_pipeline_layout, context->mesh_descriptor_sets[context->current_frame], context->triangle_mesh.vertex_buffer.buffer, context->triangle_mesh.index_buffer.buffer, 6);
|
|
|
|
|
result = record_command_buffer_mesh(context->swapchain_command_buffers[context->current_frame], context->render_pass, context->swapchain_framebuffers[image_index], context->swapchain_extent, context->simple_mesh_material, context->triangle_mesh, context->current_frame);
|
|
|
|
|
if(result != VK_SUCCESS) {
|
|
|
|
|
return result;
|
|
|
|
|
}
|
|
|
|
|