Cleanup unused functions

main
noah metz 2024-01-12 20:17:42 -07:00
parent d18e918deb
commit ed034b39ab
1 changed file with 23 additions and 409 deletions

@@ -22,16 +22,6 @@
#include <map.h>
#include <gpu_mem.h>
typedef struct AllocatedBufferStruct {
VkDeviceMemory memory;
VkBuffer buffer;
} AllocatedBuffer;
typedef struct AllocatedImageStruct {
VkDeviceMemory memory;
VkImage image;
} AllocatedImage;
typedef struct QueueIndicesStruct {
uint32_t graphics_family;
uint32_t graphics_index;
@@ -198,7 +188,7 @@ typedef struct SceneContextStruct {
VkDescriptorPool pool;
VkDescriptorSetLayout descriptor_layout;
VkDescriptorSet* descriptors;
AllocatedBuffer* ubos;
GPUBuffer* ubos;
void** ubo_ptrs;
} SceneContext;
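The GPUBuffer and GPUPage types referenced here come from gpu_mem.h, which is not part of this diff. Judging only from how they are used further down (gpu_page_allocate, gpu_buffer_malloc, ubos[i].handle, ubos[i].page->ptr, ubos[i].memory->offset), they presumably look something like the sketch below; the struct layouts, tag names, and any member not visible in this diff are assumptions, not the real header.
/* Hypothetical sketch only -- the real definitions live in gpu_mem.h and may differ. */
typedef struct GPUPageStruct {
VkDeviceMemory memory; /* assumed: one vkAllocateMemory block shared by many sub-allocations */
void* ptr; /* persistent host mapping, inferred from ubos[i].page->ptr */
} GPUPage;
typedef struct GPUMemoryStruct {
VkDeviceSize offset; /* sub-allocation offset within its page, inferred from ubos[i].memory->offset */
} GPUMemory;
typedef struct GPUBufferStruct {
VkBuffer handle; /* inferred from ubos[i].handle */
GPUPage* page;
GPUMemory* memory;
} GPUBuffer;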
@@ -995,139 +985,6 @@ VkRenderPass create_render_pass(VkDevice device, VkSurfaceFormatKHR format, VkFo
return render_pass;
}
AllocatedImage allocate_image(VkPhysicalDeviceMemoryProperties memories, VkDevice device, VkImageType type, VkFormat format, VkExtent3D size, VkImageUsageFlags usage, VkMemoryPropertyFlags include, VkMemoryPropertyFlags exclude) {
VkImageCreateInfo image_info = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.imageType = type,
.extent = size,
.mipLevels = 1,
.arrayLayers = 1,
.format = format,
.tiling = VK_IMAGE_TILING_OPTIMAL,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.usage = usage,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.samples = VK_SAMPLE_COUNT_1_BIT,
.flags = 0,
};
AllocatedImage allocated = {
.memory = VK_NULL_HANDLE,
.image = VK_NULL_HANDLE,
};
VkResult result = vkCreateImage(device, &image_info, 0, &allocated.image);
if(result != VK_SUCCESS) {
return allocated;
}
VkMemoryRequirements memory_requirements;
vkGetImageMemoryRequirements(device, allocated.image, &memory_requirements);
VkMemoryAllocateInfo memory_info = {
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.allocationSize = memory_requirements.size,
.memoryTypeIndex = pick_memory(memories, memory_requirements.memoryTypeBits, include, exclude).index,
};
result = vkAllocateMemory(device, &memory_info, 0, &allocated.memory);
if(result != VK_SUCCESS) {
vkDestroyImage(device, allocated.image, 0);
allocated.image = VK_NULL_HANDLE;
return allocated;
}
result = vkBindImageMemory(device, allocated.image, allocated.memory, 0);
if(result != VK_SUCCESS) {
vkFreeMemory(device, allocated.memory, 0);
vkDestroyImage(device, allocated.image, 0);
allocated.memory = VK_NULL_HANDLE;
allocated.image = VK_NULL_HANDLE;
return allocated;
}
return allocated;
}
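As a usage illustration only (this call does not appear in the repository), the removed allocate_image helper bundled image creation, a dedicated memory allocation picked by pick_memory, and the bind; a hypothetical depth-attachment call would look roughly like this, with the format and extent invented for the example.
/* Hypothetical call site, shown for illustration; not part of the original file. */
VkExtent3D depth_extent = { .width = 1920, .height = 1080, .depth = 1 };
AllocatedImage depth = allocate_image(memories, device, VK_IMAGE_TYPE_2D, VK_FORMAT_D32_SFLOAT, depth_extent, VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, 0);
if(depth.image == VK_NULL_HANDLE) {
/* failure is signalled by null handles, mirroring the cleanup paths above */
}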
AllocatedBuffer allocate_buffer(VkPhysicalDeviceMemoryProperties memories, VkDevice device, VkDeviceSize size, VkBufferUsageFlags usage, VkMemoryPropertyFlags include, VkMemoryPropertyFlags exclude) {
AllocatedBuffer ret = {
.memory = VK_NULL_HANDLE,
.buffer = VK_NULL_HANDLE,
};
VkBufferCreateInfo buffer_info = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.size = size,
.usage = usage,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
};
VkResult result = vkCreateBuffer(device, &buffer_info, 0, &ret.buffer);
if(result != VK_SUCCESS) {
ret.buffer = VK_NULL_HANDLE;
ret.memory = VK_NULL_HANDLE;
return ret;
}
VkMemoryRequirements memory_requirements;
vkGetBufferMemoryRequirements(device, ret.buffer, &memory_requirements);
VkMemoryAllocateInfo alloc_info = {
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.allocationSize = memory_requirements.size,
.memoryTypeIndex = pick_memory(memories, memory_requirements.memoryTypeBits, include, exclude).index,
};
result = vkAllocateMemory(device, &alloc_info, 0, &ret.memory);
if(result != VK_SUCCESS) {
vkDestroyBuffer(device, ret.buffer, 0);
ret.buffer = VK_NULL_HANDLE;
ret.memory = VK_NULL_HANDLE;
return ret;
}
result = vkBindBufferMemory(device, ret.buffer, ret.memory, 0);
if(result != VK_SUCCESS) {
vkDestroyBuffer(device, ret.buffer, 0);
ret.buffer = VK_NULL_HANDLE;
ret.memory = VK_NULL_HANDLE;
return ret;
}
return ret;
}
void deallocate_buffer(VkDevice device, AllocatedBuffer buffer) {
vkDestroyBuffer(device, buffer.buffer, 0);
vkFreeMemory(device, buffer.memory, 0);
};
void deallocate_image(VkDevice device, AllocatedImage image) {
vkDestroyImage(device, image.image, 0);
vkFreeMemory(device, image.memory, 0);
};
AllocatedBuffer* allocate_buffers(VkPhysicalDeviceMemoryProperties memories, VkDevice device, VkDeviceSize size, VkBufferUsageFlags usage, uint32_t count, VkMemoryPropertyFlags include, VkMemoryPropertyFlags exclude) {
AllocatedBuffer* buffers = malloc(sizeof(AllocatedBuffer)*count);
if(buffers == 0) {
return 0;
}
for(uint32_t i = 0; i < count; i++) {
buffers[i] = allocate_buffer(memories, device, size, usage, include, exclude);
if(buffers[i].memory == VK_NULL_HANDLE) {
for(uint32_t j = 0; j < i; j++) {
deallocate_buffer(device, buffers[j]);
}
free(buffers);
return 0;
}
}
return buffers;
}
VkCommandBuffer command_begin_single(VkDevice device, VkCommandPool transfer_pool) {
VkCommandBufferAllocateInfo command_info = {
.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
@@ -1180,20 +1037,6 @@ VkResult command_end_single(VkDevice device, VkCommandBuffer command_buffer, VkC
return result;
}
VkResult command_copy_buffers(VkDevice device, VkCommandPool transfer_pool, VkQueue transfer_queue, VkBuffer source, VkBuffer dest, VkDeviceSize size) {
VkCommandBuffer command_buffer = command_begin_single(device, transfer_pool);
VkBufferCopy copy_region = {
.srcOffset = 0,
.dstOffset = 0,
.size = size,
};
vkCmdCopyBuffer(command_buffer, source, dest, 1, &copy_region);
return command_end_single(device, command_buffer, transfer_pool, transfer_queue);
}
VkResult command_transition_image_layout(VkDevice device, VkCommandPool transfer_pool, VkQueue transfer_queue, VkImageLayout old_layout, VkImageLayout new_layout, VkImage image, VkAccessFlags src_mask, VkAccessFlags dst_mask, VkPipelineStageFlags source, VkPipelineStageFlags dest, uint32_t source_family, uint32_t dest_family, VkImageAspectFlags aspect_flags) {
VkCommandBuffer command_buffer = command_begin_single(device, transfer_pool);
@@ -1245,44 +1088,6 @@ VkResult command_copy_buffer_to_image(VkDevice device, VkCommandPool transfer_po
return command_end_single(device, command_buffer, transfer_pool, transfer_queue);
}
AllocatedBuffer create_populated_buffer(VkPhysicalDeviceMemoryProperties memories, VkDevice device, void* data, VkDeviceSize size, VkCommandPool transfer_pool, VkQueue transfer_queue, VkBufferUsageFlags usage) {
AllocatedBuffer staging_buffer = {};
AllocatedBuffer vertex_buffer = {};
staging_buffer = allocate_buffer(memories, device, size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
if(staging_buffer.memory == VK_NULL_HANDLE) {
return vertex_buffer;
}
void* buffer_data;
VkResult result = vkMapMemory(device, staging_buffer.memory, 0, size, 0, &buffer_data);
if(result != VK_SUCCESS) {
deallocate_buffer(device, staging_buffer);
return vertex_buffer;
}
memcpy(buffer_data, data, size);
vkUnmapMemory(device, staging_buffer.memory);
vertex_buffer = allocate_buffer(memories, device, size, VK_BUFFER_USAGE_TRANSFER_DST_BIT | usage, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT);
if(vertex_buffer.memory == VK_NULL_HANDLE) {
deallocate_buffer(device, staging_buffer);
return vertex_buffer;
}
result = command_copy_buffers(device, transfer_pool, transfer_queue, staging_buffer.buffer, vertex_buffer.buffer, size);
if(result != VK_SUCCESS) {
deallocate_buffer(device, staging_buffer);
deallocate_buffer(device, vertex_buffer);
vertex_buffer.buffer = VK_NULL_HANDLE;
vertex_buffer.memory = VK_NULL_HANDLE;
return vertex_buffer;
}
deallocate_buffer(device, staging_buffer);
return vertex_buffer;
}
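For illustration, the removed create_populated_buffer wrapped the standard staging upload: allocate a host-visible staging buffer, map it and memcpy the data in, then copy into a device-local buffer with the requested usage. A hypothetical vertex-buffer upload through it might have read as follows; the triangle data and the surrounding variables are placeholders, not code from this repository.
/* Hypothetical example only; the vertex data and surrounding variables are placeholders. */
float vertices[9] = { 0.0f, -0.5f, 0.0f, 0.5f, 0.5f, 0.0f, -0.5f, 0.5f, 0.0f };
AllocatedBuffer vbo = create_populated_buffer(memories, device, vertices, sizeof(vertices), transfer_pool, transfer_queue, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT);
if(vbo.memory == VK_NULL_HANDLE) {
/* allocation, mapping, or the copy command failed; the staging buffer was already released */
}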
Texture load_texture(VkDevice device, GPUPage* page, GPUBuffer staging, VkCommandPool transfer_pool, VkQueue transfer_queue, VkCommandPool graphics_pool, VkQueue graphics_queue, VkExtent2D size, VkFormat format, void* image_data, uint32_t transfer_family, uint32_t graphics_family){
Texture ret = {
.image.page = NULL,
@@ -2288,192 +2093,6 @@ Material create_texture_mesh_material(VkDevice device, VkExtent2D extent, VkRend
return create_material(device, extent, render_pass, 2, shader_stages, scene_ubo_layout, texture_layout, textured_mesh_type, max_frames_in_flight, object_descriptor_mappings);
}
typedef struct MemoryChunkStruct {
VkDeviceMemory memory;
VkDeviceSize used;
VkDeviceSize allocated;
} MemoryChunk;
VkResult allocate_memory_chunk(uint32_t memory_type, VkDevice device, VkDeviceSize size, MemoryChunk* allocated) {
if(allocated == NULL) {
return VK_ERROR_UNKNOWN;
}
VkMemoryAllocateInfo allocate_info = {
.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
.memoryTypeIndex = memory_type,
.allocationSize = size,
};
VkResult result = vkAllocateMemory(device, &allocate_info, 0, &allocated->memory);
if(result != VK_SUCCESS) {
return result;
}
allocated->used = 0;
allocated->allocated = size;
return VK_SUCCESS;
}
VkResult create_image(MemoryChunk* memory, VkDevice device, VkDeviceSize offset, VkImageType type, VkFormat format, VkExtent3D extent, VkImageUsageFlags usage, VkImage* image) {
if(image == NULL) {
return VK_ERROR_UNKNOWN;
} else if (*image != VK_NULL_HANDLE) {
return VK_ERROR_UNKNOWN;
} else if (memory == NULL) {
return VK_ERROR_UNKNOWN;
}
VkImageCreateInfo image_info = {
.sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
.imageType = type,
.extent = extent,
.mipLevels = 1,
.arrayLayers = 1,
.format = format,
.tiling = VK_IMAGE_TILING_OPTIMAL,
.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
.usage = usage,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
.samples = VK_SAMPLE_COUNT_1_BIT,
.flags = 0,
};
VkResult result = vkCreateImage(device, &image_info, 0, image);
if(result != VK_SUCCESS) {
return result;
}
result = vkBindImageMemory(device, *image, memory->memory, offset);
if(result != VK_SUCCESS) {
vkDestroyImage(device, *image, 0);
*image = VK_NULL_HANDLE;
return result;
}
VkMemoryRequirements memory_requirements;
vkGetImageMemoryRequirements(device, *image, &memory_requirements);
memory->used += memory_requirements.size;
return VK_SUCCESS;
}
VkResult create_buffer(MemoryChunk* memory, VkDevice device, VkDeviceSize size, VkBufferUsageFlags usage, VkBuffer* buffer) {
if(buffer == NULL) {
fprintf(stderr, "buffer is null\n");
return VK_ERROR_UNKNOWN;
} else if (*buffer != VK_NULL_HANDLE) {
fprintf(stderr, "buffer has value\n");
return VK_ERROR_UNKNOWN;
} else if (memory == NULL) {
fprintf(stderr, "memory is null\n");
return VK_ERROR_UNKNOWN;
} else if ((memory->allocated - memory->used) < size) {
fprintf(stderr, "memory has not enough space\n");
return VK_ERROR_UNKNOWN;
}
VkBufferCreateInfo buffer_info = {
.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
.size = size,
.usage = usage,
.sharingMode = VK_SHARING_MODE_EXCLUSIVE,
};
VkResult result = vkCreateBuffer(device, &buffer_info, 0, buffer);
if(result != VK_SUCCESS) {
fprintf(stderr, "vkCreateBuffer returned %d\n", result);
return result;
}
VkMemoryRequirements memory_requirements;
vkGetBufferMemoryRequirements(device, *buffer, &memory_requirements);
result = vkBindBufferMemory(device, *buffer, memory->memory, memory->used);
if(result != VK_SUCCESS) {
vkDestroyBuffer(device, *buffer, 0);
*buffer = VK_NULL_HANDLE;
fprintf(stderr, "vkBindBufferMemory returned %d\n", result);
return result;
}
memory->used += size;
return VK_SUCCESS;
}
VkResult create_buffers(MemoryChunk* memory, VkDevice device, VkDeviceSize size, VkBufferUsageFlags usage, VkBuffer** buffers, uint32_t count) {
if(buffers == NULL) {
return VK_ERROR_UNKNOWN;
} else if(*buffers != NULL) {
return VK_ERROR_UNKNOWN;
}
*buffers = malloc(sizeof(VkBuffer)*count);
if(*buffers == NULL) {
return VK_ERROR_OUT_OF_HOST_MEMORY;
}
for(uint32_t i = 0; i < count; i++) {
VkResult result = create_buffer(memory, device, size, usage, &(*buffers)[i]);
if(result != VK_SUCCESS) {
for(uint32_t j = 0; j < i; j++) {
vkDestroyBuffer(device, (*buffers)[j], 0);
}
free(*buffers);
*buffers = NULL;
return result;
}
}
return VK_SUCCESS;
}
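For illustration, the MemoryChunk helpers above implement a simple bump allocator over one VkDeviceMemory block: allocate_memory_chunk grabs the block, and each create_buffer call binds at chunk.used and then advances it. A hypothetical call sequence might look like this; host_visible_type and the sizes are invented for the example.
/* Hypothetical usage sketch; host_visible_type and the sizes are made up. */
MemoryChunk chunk = {0};
VkBuffer scene_ubo = VK_NULL_HANDLE;
VkResult res = allocate_memory_chunk(host_visible_type, device, 64 * 1024, &chunk);
if(res == VK_SUCCESS) {
res = create_buffer(&chunk, device, sizeof(struct SceneUBO), VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, &scene_ubo);
}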
VkResult command_copy_to_image(VkDevice device, VkBuffer staging_buffer, VkDeviceMemory staging_memory, VkImage destination, void* data, VkExtent3D size, VkDeviceSize stride, VkCommandPool pool, VkQueue queue) {
VkDeviceSize data_size = size.height * size.width * stride;
void* mapped_ptr = NULL;
VkResult result = vkMapMemory(device, staging_memory, 0, data_size, 0, &mapped_ptr);
if(result != VK_SUCCESS) {
vkDestroyBuffer(device, staging_buffer, 0);
return result;
}
memcpy(mapped_ptr, data, data_size);
vkUnmapMemory(device, staging_memory);
VkCommandBuffer command_buffer = command_begin_single(device, pool);
if(command_buffer == VK_NULL_HANDLE) {
vkDestroyBuffer(device, staging_buffer, 0);
return VK_ERROR_UNKNOWN;
}
VkBufferImageCopy region = {
.bufferOffset = 0,
.bufferRowLength = 0,
.bufferImageHeight = 0,
.imageSubresource = {
.baseArrayLayer = 0,
.layerCount = 1,
.mipLevel = 0,
.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
},
.imageOffset = {
.x = 0,
.y = 0,
.z = 0,
},
.imageExtent = size,
};
vkCmdCopyBufferToImage(command_buffer, staging_buffer, destination, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &region);
result = command_end_single(device, command_buffer, pool, queue);
vkDestroyBuffer(device, staging_buffer, 0);
return result;
}
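For illustration, command_copy_to_image expects the caller to supply an already-created staging buffer plus its memory, writes the pixel data into that memory, and records a single-use buffer-to-image copy; the destination must already be in VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, and the helper destroys the staging buffer itself before returning. A hypothetical call for a 256x256 RGBA8 texture, with every variable here a placeholder, would be roughly:
/* Hypothetical call; pixels, the staging pair, and the pools are placeholders. */
VkExtent3D tex_size = { .width = 256, .height = 256, .depth = 1 };
VkResult copy_result = command_copy_to_image(device, staging_buffer, staging_memory, texture_image, pixels, tex_size, 4 /* bytes per RGBA8 texel */, transfer_pool, transfer_queue);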
VkResult command_copy_to_buffer(VkDevice device, GPUBuffer staging, VkBuffer destination, void* data, VkDeviceSize size, VkDeviceSize offset, VkCommandPool pool, VkQueue queue) {
memcpy(staging.page->ptr + staging.memory->offset, data, size);
@@ -2823,49 +2442,44 @@ SceneContext create_scene_context(VkDevice device, VkPhysicalDeviceMemoryPropert
return ret;
}
AllocatedBuffer* ubos = allocate_buffers(memories, device, sizeof(struct SceneUBO), VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, max_frames_in_flight, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
if(ubos == 0) {
free(layouts);
free(sets);
vkFreeDescriptorSets(device, pool, max_frames_in_flight, sets);
vkDestroyDescriptorPool(device, pool, 0);
GPUPage* scene_ubo_memory = NULL;
result = gpu_page_allocate(device, memories, 1000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &scene_ubo_memory);
if(result != VK_SUCCESS) {
return ret;
}
void** ubo_ptrs = malloc(sizeof(void*)*max_frames_in_flight);
if(ubo_ptrs == 0) {
free(layouts);
free(sets);
vkFreeDescriptorSets(device, pool, max_frames_in_flight, sets);
vkDestroyDescriptorPool(device, pool, 0);
for(uint32_t i = 0; i < max_frames_in_flight; i++) {
deallocate_buffer(device, ubos[i]);
}
free(ubos);
GPUBuffer* ubos = malloc(sizeof(GPUBuffer)*max_frames_in_flight);
if(ubos == NULL) {
return ret;
}
for(uint32_t i = 0; i < max_frames_in_flight; i++) {
VkResult result = vkMapMemory(device, ubos[i].memory, 0, sizeof(struct SceneUBO), 0, &ubo_ptrs[i]);
result = gpu_buffer_malloc(device, scene_ubo_memory, sizeof(struct SceneUBO), VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, &ubos[i]);
if(result != VK_SUCCESS) {
for(uint32_t j = 0; j < i; j++) {
vkUnmapMemory(device, ubos[j].memory);
}
free(layouts);
free(sets);
free(ubos);
vkFreeDescriptorSets(device, pool, max_frames_in_flight, sets);
vkDestroyDescriptorPool(device, pool, 0);
for(uint32_t i = 0; i < max_frames_in_flight; i++) {
deallocate_buffer(device, ubos[i]);
}
free(ubos);
return ret;
}
}
void** ubo_ptrs = malloc(sizeof(void*)*max_frames_in_flight);
if(ubo_ptrs == 0) {
free(layouts);
free(sets);
vkFreeDescriptorSets(device, pool, max_frames_in_flight, sets);
vkDestroyDescriptorPool(device, pool, 0);
free(ubos);
return ret;
}
for(uint32_t i = 0; i < max_frames_in_flight; i++) {
ubo_ptrs[i] = ubos[i].page->ptr + ubos[i].memory->offset;
VkDescriptorBufferInfo buffer_info = {
.buffer = ubos[i].buffer,
.buffer = ubos[i].handle,
.offset = 0,
.range = sizeof(struct SceneUBO),
};