@@ -127,10 +127,10 @@ typedef struct PipelineLayoutStruct {
 
 typedef struct MeshStruct {
     uint32_t vertex_count;
-    VkBuffer vertex_buffer;
+    GPUBuffer vertex_buffer;
 
     uint32_t index_count;
-    VkBuffer index_buffer;
+    GPUBuffer index_buffer;
 } Mesh;
 
 typedef struct MaterialStruct {
@@ -1733,11 +1733,11 @@ void command_draw_object(Material material, Object object, uint32_t frame_num, V
 
     Mesh* mesh = maybe_mesh.value;
 
-    VkBuffer vertex_buffers[] = {mesh->vertex_buffer};
+    VkBuffer vertex_buffers[] = {mesh->vertex_buffer.handle};
     VkDeviceSize offsets[] = {0};
 
     vkCmdBindVertexBuffers(command_buffer, 0, 1, vertex_buffers, offsets);
-    vkCmdBindIndexBuffer(command_buffer, mesh->index_buffer, 0, VK_INDEX_TYPE_UINT16);
+    vkCmdBindIndexBuffer(command_buffer, mesh->index_buffer.handle, 0, VK_INDEX_TYPE_UINT16);
 
     if(material.object_set_layout != VK_NULL_HANDLE) {
         MaybeValue maybe_descriptors = map_lookup(object.attributes, ATTRIBUTE_ID_DESCRIPTOR_SETS);
@@ -2479,21 +2479,11 @@ VkResult command_copy_to_image(VkDevice device, VkBuffer staging_buffer, VkDevic
     return result;
 }
 
-VkResult command_copy_to_buffer(VkDevice device, VkBuffer staging_buffer, VkDeviceMemory staging_memory, VkBuffer destination, void* data, VkDeviceSize size, VkDeviceSize offset, VkCommandPool pool, VkQueue queue) {
-    void* mapped_ptr = NULL;
-    VkResult result = vkMapMemory(device, staging_memory, 0, size, 0, &mapped_ptr);
-    if(result != VK_SUCCESS) {
-        vkDestroyBuffer(device, staging_buffer, 0);
-        return result;
-    }
-
-    memcpy(mapped_ptr, data, size);
-
-    vkUnmapMemory(device, staging_memory);
+VkResult command_copy_to_buffer(VkDevice device, GPUBuffer staging, VkBuffer destination, void* data, VkDeviceSize size, VkDeviceSize offset, VkCommandPool pool, VkQueue queue) {
+    memcpy(staging.page->ptr + staging.memory->offset, data, size);
 
     VkCommandBuffer command_buffer = command_begin_single(device, pool);
     if(command_buffer == VK_NULL_HANDLE) {
         vkDestroyBuffer(device, staging_buffer, 0);
         return VK_ERROR_UNKNOWN;
     }
@@ -2503,35 +2493,34 @@ VkResult command_copy_to_buffer(VkDevice device, VkBuffer staging_buffer, VkDevi
         .size = size,
     };
 
-    vkCmdCopyBuffer(command_buffer, staging_buffer, destination, 1, &region);
+    vkCmdCopyBuffer(command_buffer, staging.handle, destination, 1, &region);
 
-    result = command_end_single(device, command_buffer, pool, queue);
-    return result;
+    return command_end_single(device, command_buffer, pool, queue);
 }
 
-Mesh* load_mesh_to_buffer(VkDevice device, MemoryChunk* memory, VkBuffer staging_buffer, VkDeviceMemory staging_memory, uint32_t vertex_count, uint32_t vertex_stride, void* vertex_data, uint32_t index_count, uint32_t index_stride, void* index_data, VkCommandPool pool, VkQueue queue) {
-    VkBuffer vertex_buffer = VK_NULL_HANDLE;
-    VkBuffer index_buffer = VK_NULL_HANDLE;
+Mesh* load_mesh_to_buffer(VkDevice device, GPUPage* page, GPUBuffer staging, uint32_t vertex_count, uint32_t vertex_stride, void* vertex_data, uint32_t index_count, uint32_t index_stride, void* index_data, VkCommandPool pool, VkQueue queue) {
+    GPUBuffer vertex_buffer = {0};
+    GPUBuffer index_buffer = {0};
 
-    VkResult result = create_buffer(memory, device, vertex_count*vertex_stride, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, &vertex_buffer);
+    VkResult result = gpu_buffer_malloc(device, page, vertex_count*vertex_stride, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, &vertex_buffer);
     if(result != VK_SUCCESS) {
         fprintf(stderr, "Failed to create vertex buffer\n");
         return NULL;
     }
 
-    result = create_buffer(memory, device, sizeof(uint16_t)*index_count, VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, &index_buffer);
+    result = gpu_buffer_malloc(device, page, sizeof(uint16_t)*index_count, VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT, &index_buffer);
     if(result != VK_SUCCESS) {
         fprintf(stderr, "Failed to create index buffer\n");
         return NULL;
     }
 
-    result = command_copy_to_buffer(device, staging_buffer, staging_memory, vertex_buffer, vertex_data, vertex_count*vertex_stride, 0, pool, queue);
+    result = command_copy_to_buffer(device, staging, vertex_buffer.handle, vertex_data, vertex_count*vertex_stride, 0, pool, queue);
     if(result != VK_SUCCESS) {
         fprintf(stderr, "Failed to copy to vertex buffer\n");
         return NULL;
     }
 
-    result = command_copy_to_buffer(device, staging_buffer, staging_memory, index_buffer, index_data, index_stride*index_count, 0, pool, queue);
+    result = command_copy_to_buffer(device, staging, index_buffer.handle, index_data, index_stride*index_count, 0, pool, queue);
     if(result != VK_SUCCESS) {
         fprintf(stderr, "Failed to copy to index buffer\n");
         return NULL;
@@ -3193,25 +3182,25 @@ VkResult draw_frame(VulkanContext* context, SceneContext* scene, uint32_t materi
 Object create_simple_mesh_object(Material* simple_mesh_material, VkPhysicalDeviceMemoryProperties memories, VkDevice device, VkCommandPool transfer_pool, VkQueue transfer_queue, uint32_t max_frames_in_flight, VkDescriptorPool pool) {
     Object zero = {};
 
-    MemoryChunk mesh_memory = {0};
-    VkResult result = allocate_memory_chunk(0, device, 10000, &mesh_memory);
+    GPUPage* mesh_memory = NULL;
+    VkResult result = gpu_page_allocate(device, memories, 10000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT, &mesh_memory);
     if(result != VK_SUCCESS) {
         return zero;
     }
 
-    MemoryChunk transfer_memory = {0};
-    result = allocate_memory_chunk(2, device, 10000, &transfer_memory);
+    GPUPage* transfer_memory = NULL;
+    result = gpu_page_allocate(device, memories, 10000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &transfer_memory);
     if(result != VK_SUCCESS) {
         return zero;
     }
 
-    VkBuffer transfer_buffer = VK_NULL_HANDLE;
-    result = create_buffer(&transfer_memory, device, 10000, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, &transfer_buffer);
+    GPUBuffer transfer_buffer = {0};
+    result = gpu_buffer_malloc(device, transfer_memory, 10000, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, &transfer_buffer);
     if(result != VK_SUCCESS) {
         return zero;
     }
 
-    Mesh* mesh = load_mesh_to_buffer(device, &mesh_memory, transfer_buffer, transfer_memory.memory, 4, sizeof(struct Vertex), (void*)vertices, 6, sizeof(uint16_t), (void*)indices, transfer_pool, transfer_queue);
+    Mesh* mesh = load_mesh_to_buffer(device, mesh_memory, transfer_buffer, 4, sizeof(struct Vertex), (void*)vertices, 6, sizeof(uint16_t), (void*)indices, transfer_pool, transfer_queue);
     if(mesh == 0) {
         return zero;
     }
@@ -3305,25 +3294,25 @@ Object create_simple_mesh_object(Material* simple_mesh_material, VkPhysicalDevic
 Object create_texture_mesh_object(Material* texture_mesh_material, VkPhysicalDeviceMemoryProperties memories, VkDevice device, VkCommandPool transfer_pool, VkQueue transfer_queue, VkCommandPool graphics_pool, VkQueue graphics_queue, uint32_t max_frames_in_flight, VkDescriptorPool pool, uint32_t transfer_family, uint32_t graphics_family) {
     Object zero = {};
 
-    MemoryChunk mesh_memory = {0};
-    VkResult result = allocate_memory_chunk(0, device, 10000, &mesh_memory);
+    GPUPage* mesh_memory = NULL;
+    VkResult result = gpu_page_allocate(device, memories, 10000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT, &mesh_memory);
    if(result != VK_SUCCESS) {
        return zero;
    }
 
-    MemoryChunk transfer_memory = {0};
-    result = allocate_memory_chunk(2, device, 10000, &transfer_memory);
+    GPUPage* transfer_memory = NULL;
+    result = gpu_page_allocate(device, memories, 10000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &transfer_memory);
     if(result != VK_SUCCESS) {
         return zero;
     }
 
-    VkBuffer transfer_buffer = VK_NULL_HANDLE;
-    result = create_buffer(&transfer_memory, device, 10000, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, &transfer_buffer);
+    GPUBuffer transfer_buffer = {0};
+    result = gpu_buffer_malloc(device, transfer_memory, 10000, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, &transfer_buffer);
     if(result != VK_SUCCESS) {
         return zero;
     }
 
-    Mesh* mesh = load_mesh_to_buffer(device, &mesh_memory, transfer_buffer, transfer_memory.memory, 4, sizeof(struct TextureVertex), (void*)texture_vertices, 6, sizeof(uint16_t), (void*)indices, transfer_pool, transfer_queue);
+    Mesh* mesh = load_mesh_to_buffer(device, mesh_memory, transfer_buffer, 4, sizeof(struct TextureVertex), (void*)texture_vertices, 6, sizeof(uint16_t), (void*)indices, transfer_pool, transfer_queue);
     if(mesh == 0) {
         return zero;
     }
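Note on the new types: the hunks above use GPUPage and GPUBuffer without showing their definitions. The sketch below is only an inference from how they are used in this diff (the .handle field, staging.page->ptr, staging.memory->offset, and the gpu_page_allocate / gpu_buffer_malloc call sites); any field or parameter name not visible in the hunks is an assumption, not the project's actual declaration.

/* Sketch only -- inferred from usage in this diff, not the project's real headers. */
#include <vulkan/vulkan.h>

typedef struct GPUAllocationStruct {   /* name assumed; reached through buffer.memory->offset above */
    VkDeviceSize offset;               /* offset of the suballocation within its page */
    VkDeviceSize size;                 /* assumed field */
} GPUAllocation;

typedef struct GPUPageStruct {         /* one vkAllocateMemory block shared by many buffers */
    VkDeviceMemory memory;
    void* ptr;                         /* persistent mapping for host-visible pages (page->ptr above) */
    VkDeviceSize size;                 /* assumed field */
} GPUPage;

typedef struct GPUBufferStruct {
    VkBuffer handle;                   /* the VkBuffer bound into the page (vertex_buffer.handle above) */
    GPUPage* page;                     /* page the buffer was suballocated from */
    GPUAllocation* memory;             /* where in the page the buffer lives */
} GPUBuffer;

/* Prototypes matching the call sites in the hunks above; parameter names are guesses. */
VkResult gpu_page_allocate(VkDevice device, VkPhysicalDeviceMemoryProperties memories,
                           VkDeviceSize size, uint32_t allowed_type_mask,
                           VkMemoryPropertyFlags required, VkMemoryPropertyFlags preferred,
                           GPUPage** page);
VkResult gpu_buffer_malloc(VkDevice device, GPUPage* page, VkDeviceSize size,
                           VkBufferUsageFlags usage, GPUBuffer* buffer);

The change to command_copy_to_buffer relies on the transfer page staying persistently mapped (page->ptr remains valid), which is why the vkMapMemory/vkUnmapMemory pair disappears from the new version of that function.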