#include "vulkan/vulkan_core.h"
|
|
#include <string.h>
|
|
#include <stdlib.h>
|
|
#include <stdio.h>
|
|
#include <stdbool.h>
|
|
#include <gpu_mem.h>
|
|
|
|
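/*
 * Searches the device's memory types for the first one whose index passes
 * `filter` (a type bitmask, e.g. VkMemoryRequirements::memoryTypeBits),
 * contains every flag in `include`, and contains none of the flags in
 * `exclude`. Returns an index of 0xFFFFFFFF when no type matches.
 */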
GPUMemoryType pick_memory(VkPhysicalDeviceMemoryProperties memories, uint32_t filter, VkMemoryPropertyFlags include, VkMemoryPropertyFlags exclude) {
    for(uint32_t i = 0; i < memories.memoryTypeCount; i++){
        if((filter & (1 << i))
            && ((include & memories.memoryTypes[i].propertyFlags) == include)
            && ((exclude & memories.memoryTypes[i].propertyFlags) == 0)) {
            GPUMemoryType ret = {
                .flags = memories.memoryTypes[i].propertyFlags,
                .index = i,
            };
            return ret;
        }
    }

    GPUMemoryType err = {
        .flags = 0,
        .index = 0xFFFFFFFF,
    };
    return err;
}

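/*
 * Allocates one VkDeviceMemory block ("page") of `size` bytes from a memory
 * type chosen by pick_memory(), seeds its free list with a single chunk
 * covering the whole page, and maps it persistently when a host-visible
 * type was requested. On success *handle owns the new page.
 */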
VkResult gpu_page_allocate(VkDevice device, VkPhysicalDeviceMemoryProperties memories, VkDeviceSize size, uint32_t filter, VkMemoryPropertyFlags include, VkMemoryPropertyFlags exclude, VkMemoryAllocateFlags allocate_flags, GPUPage** handle) {
    if(handle == NULL) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    GPUPage* output = malloc(sizeof(GPUPage));
    if(output == NULL) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    GPUMemoryChunk* initial_chunk = malloc(sizeof(GPUMemoryChunk));
    if(initial_chunk == NULL) {
        free(output);
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    initial_chunk->size = size;
    initial_chunk->offset = 0;
    initial_chunk->next = NULL;

    GPUMemoryType memory_type = pick_memory(memories, filter, include, exclude);
    if(memory_type.index == 0xFFFFFFFF) {
        free(initial_chunk);
        free(output);
        return VK_ERROR_UNKNOWN;
    }

    VkMemoryAllocateFlagsInfo allocate_flags_info = {
        .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO,
        .flags = allocate_flags,
    };

    VkMemoryAllocateInfo allocate_info = {
        .sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
        .allocationSize = size,
        .memoryTypeIndex = memory_type.index,
        .pNext = &allocate_flags_info,
    };

    VkDeviceMemory memory = VK_NULL_HANDLE;
    VkResult result = vkAllocateMemory(device, &allocate_info, 0, &memory);
    if(result != VK_SUCCESS) {
        free(initial_chunk);
        free(output);
        return result;
    }

    if(include & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) {
        result = vkMapMemory(device, memory, 0, size, 0, &output->ptr);
        if(result != VK_SUCCESS) {
            free(initial_chunk);
            free(output);
            vkFreeMemory(device, memory, 0);
            return result;
        }
    } else {
        output->ptr = NULL;
    }

    output->memory = memory;
    output->size = size;
    output->type = memory_type;
    output->free = initial_chunk;
    output->allocated = NULL;

    *handle = output;

    return VK_SUCCESS;
}

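/*
 * Destroys a page: releases every chunk record still held by the page and
 * frees the underlying VkDeviceMemory (which also unmaps it). Any GPUBuffer
 * or GPUImage still sub-allocated from the page is invalidated.
 */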
void gpu_page_free(VkDevice device, GPUPage* page) {
    if(page == NULL) {
        return;
    }

    GPUMemoryChunk* cur = page->free;
    while(cur != NULL) {
        GPUMemoryChunk* last = cur;
        cur = cur->next;
        free(last);
    }

    // Also release any chunk records still on the allocated list so they
    // are not leaked when the page is torn down with live sub-allocations.
    cur = page->allocated;
    while(cur != NULL) {
        GPUMemoryChunk* last = cur;
        cur = cur->next;
        free(last);
    }

    vkFreeMemory(device, page->memory, 0);
    free(page);
}

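/*
 * Bookkeeping helper for a completed sub-allocation: appends `allocation`
 * to the page's allocated list and shrinks or removes the free chunk `cur`
 * (whose predecessor in the free list is `prev`, NULL when `cur` is the
 * head) that the allocation was carved from.
 */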
void gpu_add_allocation(GPUPage* page, GPUMemoryChunk* allocation, VkDeviceSize size, GPUMemoryChunk* prev, GPUMemoryChunk* cur) {
    if(page->allocated == NULL) {
        page->allocated = allocation;
    } else {
        GPUMemoryChunk* alloc_cur = page->allocated;
        while(alloc_cur->next != NULL) {
            alloc_cur = alloc_cur->next;
        }
        alloc_cur->next = allocation;
    }

    if(cur->size == size && prev == NULL) {
        // The free chunk was consumed entirely and was the list head;
        // keep the remainder of the free list instead of dropping it.
        page->free = cur->next;
        free(cur);
    } else if(cur->size == size && prev != NULL) {
        prev->next = cur->next;
        free(cur);
    } else if(cur->size > size) {
        cur->offset += size;
        cur->size -= size;
    }
}

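/*
 * First-fit search of the page's free list for a chunk of at least `size`
 * bytes. On success *cur is the chosen free chunk, *prev its predecessor
 * (NULL when *cur is the head), and *allocation is a freshly allocated
 * chunk record describing the sub-allocation at (*cur)->offset. The caller
 * is expected to bind at that offset and then call gpu_add_allocation()
 * to commit the bookkeeping.
 */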
VkResult gpu_new_allocation(GPUPage* page, GPUMemoryChunk** prev, GPUMemoryChunk** cur, GPUMemoryChunk** allocation, VkDeviceSize size) {
    if(prev == NULL || cur == NULL || allocation == NULL) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    *cur = page->free;
    *prev = NULL;

    // Find a chunk
    while(*cur != NULL) {
        if((*cur)->size >= size) {
            break;
        }
        *prev = *cur;
        *cur = (*cur)->next;
    }

    if(*cur == NULL) {
        return VK_ERROR_OUT_OF_DEVICE_MEMORY;
    }

    *allocation = malloc(sizeof(GPUMemoryChunk));
    if(*allocation == NULL) {
        return VK_ERROR_OUT_OF_HOST_MEMORY;
    }

    (*allocation)->next = NULL;
    (*allocation)->size = size;
    (*allocation)->offset = (*cur)->offset;

    return VK_SUCCESS;
}

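/*
 * Creates a VkImage from `info` and sub-allocates backing memory for it
 * from `page`. Note that the sub-allocator does not currently round the
 * offset up to VkMemoryRequirements::alignment, so images should only be
 * placed in pages whose chunk offsets already satisfy their alignment.
 */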
VkResult gpu_image_malloc(VkDevice device, GPUPage* page, VkImageCreateInfo* info, GPUImage* image) {
    if(image == NULL || info == NULL || page == NULL) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    VkResult result = vkCreateImage(device, info, 0, &image->handle);
    if(result != VK_SUCCESS) {
        return result;
    }

    VkMemoryRequirements requirements;
    vkGetImageMemoryRequirements(device, image->handle, &requirements);

    GPUMemoryChunk* cur;
    GPUMemoryChunk* prev;
    GPUMemoryChunk* allocation;

    result = gpu_new_allocation(page, &prev, &cur, &allocation, requirements.size);
    if(result != VK_SUCCESS) {
        // Don't leak the image handle when no chunk could be found.
        vkDestroyImage(device, image->handle, 0);
        return result;
    }

    result = vkBindImageMemory(device, image->handle, page->memory, cur->offset);
    if(result != VK_SUCCESS) {
        free(allocation);
        vkDestroyImage(device, image->handle, 0);
        return result;
    }

    image->page = page;
    image->memory = allocation;

    gpu_add_allocation(page, allocation, requirements.size, prev, cur);

    return VK_SUCCESS;
}

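/*
 * Creates a VkBuffer of `size` bytes (rounded up to a multiple of 16) with
 * the given usage flags and binds it to a sub-allocation from `page`.
 */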
VkResult gpu_buffer_malloc(VkDevice device, GPUPage* page, VkDeviceSize size, VkBufferUsageFlags usage, GPUBuffer* buffer) {
    if(buffer == NULL || page == NULL) {
        return VK_ERROR_VALIDATION_FAILED_EXT;
    }

    //TODO: use real alignment size instead of hard-coded to 16
    // Round the size up to the next multiple of 16.
    size = (size + 15) & ~(VkDeviceSize)15;

    VkBufferCreateInfo buffer_info = {
        .sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
        .size = size,
        .usage = usage,
        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
    };

    VkResult result = vkCreateBuffer(device, &buffer_info, 0, &buffer->handle);
    if(result != VK_SUCCESS) {
        return result;
    }

    GPUMemoryChunk* cur;
    GPUMemoryChunk* prev;
    GPUMemoryChunk* allocation;

    result = gpu_new_allocation(page, &prev, &cur, &allocation, size);
    if(result != VK_SUCCESS) {
        // Don't leak the buffer handle when no chunk could be found.
        vkDestroyBuffer(device, buffer->handle, 0);
        return result;
    }

    result = vkBindBufferMemory(device, buffer->handle, page->memory, cur->offset);
    if(result != VK_SUCCESS) {
        free(allocation);
        vkDestroyBuffer(device, buffer->handle, 0);
        return result;
    }

    buffer->page = page;
    buffer->memory = allocation;

    gpu_add_allocation(page, allocation, size, prev, cur);

    return VK_SUCCESS;
}

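/*
 * Destroys the VkImage and returns its chunk to the page's free list.
 */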
void gpu_image_free(VkDevice device, GPUImage image) {
    vkDestroyImage(device, image.handle, 0);
    gpu_free(image.page, image.memory);
}

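/*
 * Destroys the VkBuffer and returns its chunk to the page's free list.
 */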
void gpu_buffer_free(VkDevice device, GPUBuffer buffer) {
    vkDestroyBuffer(device, buffer.handle, 0);
    gpu_free(buffer.page, buffer.memory);
}

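/*
 * Returns the chunk `memory` to `page`: unlinks it from the allocated list,
 * then inserts it into the offset-sorted free list, merging it with the
 * neighbouring free chunk on either side when they are contiguous.
 */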
void gpu_free(GPUPage* page, GPUMemoryChunk* memory) {
    // Unlink the chunk from the allocated list.
    if(memory == page->allocated) {
        page->allocated = memory->next;
    } else {
        GPUMemoryChunk* cur = page->allocated;
        if(cur == NULL) {
            return;
        }
        while(cur->next != NULL) {
            if(cur->next == memory) {
                cur->next = memory->next;
                break;
            }
            cur = cur->next;
        }
    }

    memory->next = NULL;

    // Walk the offset-sorted free list to find the insertion point.
    GPUMemoryChunk* free_cur = page->free;
    GPUMemoryChunk* free_prev = NULL;
    while(free_cur != NULL) {
        if(free_cur->offset > memory->offset) {
            break;
        }
        free_prev = free_cur;
        free_cur = free_cur->next;
    }

    if(free_cur == NULL && free_prev == NULL) {
        page->free = memory;
    } else {
        bool left_cont = false;
        if (free_prev != NULL) {
            left_cont = ((free_prev->offset + free_prev->size) == memory->offset);
        }
        bool right_cont = false;
        if (free_cur != NULL) {
            right_cont = ((memory->offset + memory->size) == free_cur->offset);
        }

        if(left_cont && right_cont) {
            // Bridges two free chunks: merge all three into free_prev.
            free_prev->next = free_cur->next;
            free_prev->size += free_cur->size;
            free_prev->size += memory->size;

            free(free_cur);
            free(memory);
        } else if(!left_cont && right_cont) {
            // Adjacent to the right neighbour: grow it downwards.
            free_cur->offset -= memory->size;
            free_cur->size += memory->size;
            free(memory);
        } else if(left_cont && !right_cont) {
            // Adjacent to the left neighbour: grow it upwards.
            free_prev->size += memory->size;
            free(memory);
        } else {
            // Not adjacent to either neighbour: link the chunk in between
            // free_prev and free_cur (free_cur may be NULL at the tail).
            memory->next = free_cur;
            if(free_prev == NULL) {
                page->free = memory;
            } else {
                free_prev->next = memory;
            }
        }
    }
}

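/*
 * Debug helper: prints a chunk list to `out` as "Chunks: {size@offset, ...}".
 */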
void fprintchunks(FILE* out, GPUMemoryChunk* start) {
    if(start == NULL) {
        fprintf(out, "Chunks: {}\n");
        return;
    }

    fprintf(out, "Chunks: {");
    GPUMemoryChunk* cur = start;
    while(cur != NULL) {
        // VkDeviceSize is a uint64_t; cast so %llu is portable.
        if(cur->next == NULL) {
            fprintf(out, "%llu@%llu}", (unsigned long long)cur->size, (unsigned long long)cur->offset);
        } else {
            fprintf(out, "%llu@%llu, ", (unsigned long long)cur->size, (unsigned long long)cur->offset);
        }
        cur = cur->next;
    }
    fprintf(out, "\n");
}
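
/*
 * Usage sketch (illustration only, not part of the allocator): guarded by a
 * hypothetical GPU_MEM_EXAMPLE macro so it does not affect normal builds.
 * It assumes `device` and `memories` come from ordinary Vulkan instance and
 * device setup, and shows a page being created, a small vertex buffer
 * sub-allocated from it, and both being released again.
 */
#ifdef GPU_MEM_EXAMPLE
static VkResult example_vertex_buffer(VkDevice device, VkPhysicalDeviceMemoryProperties memories) {
    // One 4 MiB host-visible, host-coherent page; accept every memory type
    // (filter = all bits set), exclude nothing, no extra allocate flags.
    GPUPage* page = NULL;
    VkResult result = gpu_page_allocate(device, memories, 4ull * 1024 * 1024,
                                        0xFFFFFFFF,
                                        VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                        0, 0, &page);
    if(result != VK_SUCCESS) {
        return result;
    }

    // Carve a 4 KiB vertex buffer out of the page.
    GPUBuffer vertices;
    result = gpu_buffer_malloc(device, page, 4096, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, &vertices);
    if(result != VK_SUCCESS) {
        gpu_page_free(device, page);
        return result;
    }

    // The page was mapped at creation because it is host visible, so vertex
    // data could be written through page->ptr at vertices.memory->offset.

    gpu_buffer_free(device, vertices);
    gpu_page_free(device, page);
    return VK_SUCCESS;
}
#endif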