@@ -1,6 +1,5 @@
 #define VK_USE_PLATFORM_MACOS_MVK
 #include "vulkan/vulkan_core.h"
-#include "vulkan/vk_enum_string_helper.h"
 
 #define GLFW_INCLUDE_VULKAN
 #include <GLFW/glfw3.h>
@@ -66,7 +65,7 @@ typedef struct SwapchainImagesStruct {
 } SwapchainImages;
 
 typedef struct TextureStruct {
-    AllocatedImage image;
+    GPUImage image;
     VkImageView view;
     VkSampler sampler;
 } Texture;
@@ -1284,74 +1283,71 @@ AllocatedBuffer create_populated_buffer(VkPhysicalDeviceMemoryProperties memorie
     return vertex_buffer;
 }
 
-Texture load_texture(VkPhysicalDeviceMemoryProperties memories, VkDevice device, VkCommandPool transfer_pool, VkQueue transfer_queue, VkCommandPool graphics_pool, VkQueue graphics_queue, VkExtent2D size, uint32_t stride, VkFormat format, void* image_data, uint32_t transfer_family, uint32_t graphics_family){
+Texture load_texture(VkDevice device, GPUPage* page, GPUBuffer staging, VkCommandPool transfer_pool, VkQueue transfer_queue, VkCommandPool graphics_pool, VkQueue graphics_queue, VkExtent2D size, VkFormat format, void* image_data, uint32_t transfer_family, uint32_t graphics_family){
     Texture ret = {
-        .image.image = VK_NULL_HANDLE,
-        .image.memory = VK_NULL_HANDLE,
+        .image.page = NULL,
+        .image.memory = NULL,
+        .image.handle = VK_NULL_HANDLE,
         .view = VK_NULL_HANDLE,
     };
 
-    uint32_t image_size = size.width * size.height * stride;
-    AllocatedBuffer staging = allocate_buffer(memories, device, image_size, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
-    if(staging.memory == VK_NULL_HANDLE) {
-        return ret;
-    }
-
-    void* staging_ptr;
-    VkResult result = vkMapMemory(device, staging.memory, 0, image_size, 0, &staging_ptr);
-    if(result != VK_SUCCESS) {
-        deallocate_buffer(device, staging);
-        return ret;
-    }
-
-    memcpy(staging_ptr, image_data, image_size);
-
-    vkUnmapMemory(device, staging.memory);
 
     VkExtent3D full_extent = {
         .width = size.width,
         .height = size.height,
         .depth = 1,
     };
-    AllocatedImage image = allocate_image(memories, device, VK_IMAGE_TYPE_2D, format, full_extent, VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_SAMPLED_BIT, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, 0);
-    if(image.memory == VK_NULL_HANDLE) {
-        deallocate_buffer(device, staging);
-        deallocate_image(device, image);
+    VkImageCreateInfo info = {
+        .sType = VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,
+        .imageType = VK_IMAGE_TYPE_2D,
+        .extent = full_extent,
+        .mipLevels = 1,
+        .arrayLayers = 1,
+        .format = format,
+        .tiling = VK_IMAGE_TILING_OPTIMAL,
+        .initialLayout = VK_IMAGE_LAYOUT_UNDEFINED,
+        .usage = VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,
+        .sharingMode = VK_SHARING_MODE_EXCLUSIVE,
+        .samples = VK_SAMPLE_COUNT_1_BIT,
+        .flags = 0,
+    };
+
+    GPUImage image = {0};
+    VkResult result = gpu_image_malloc(device, page, &info, &image);
+    if(result != VK_SUCCESS) {
         return ret;
     }
 
-    result = command_transition_image_layout(device, transfer_pool, transfer_queue, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, image.image, 0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, transfer_family, transfer_family, VK_IMAGE_ASPECT_COLOR_BIT);
+    memcpy(staging.page->ptr + staging.memory->offset, image_data, image.memory->size);
+
+    result = command_transition_image_layout(device, transfer_pool, transfer_queue, VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, image.handle, 0, VK_ACCESS_TRANSFER_WRITE_BIT, VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, transfer_family, transfer_family, VK_IMAGE_ASPECT_COLOR_BIT);
     if(result != VK_SUCCESS) {
-        deallocate_buffer(device, staging);
-        deallocate_image(device, image);
+        gpu_image_free(device, image);
         return ret;
     }
 
-    result = command_copy_buffer_to_image(device, transfer_pool, transfer_queue, full_extent, staging.buffer, image.image);
+    result = command_copy_buffer_to_image(device, transfer_pool, transfer_queue, full_extent, staging.handle, image.handle);
     if(result != VK_SUCCESS) {
-        deallocate_buffer(device, staging);
-        deallocate_image(device, image);
+        gpu_image_free(device, image);
         return ret;
     }
 
-    result = command_transition_image_layout(device, transfer_pool, transfer_queue, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, image.image, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, transfer_family, graphics_family, VK_IMAGE_ASPECT_COLOR_BIT);
+    result = command_transition_image_layout(device, transfer_pool, transfer_queue, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, image.handle, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, transfer_family, graphics_family, VK_IMAGE_ASPECT_COLOR_BIT);
     if(result != VK_SUCCESS) {
-        deallocate_buffer(device, staging);
-        deallocate_image(device, image);
+        gpu_image_free(device, image);
         return ret;
     }
 
-    result = command_transition_image_layout(device, graphics_pool, graphics_queue, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, image.image, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, transfer_family, graphics_family, VK_IMAGE_ASPECT_COLOR_BIT);
+    result = command_transition_image_layout(device, graphics_pool, graphics_queue, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL, image.handle, VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, transfer_family, graphics_family, VK_IMAGE_ASPECT_COLOR_BIT);
     if(result != VK_SUCCESS) {
-        deallocate_buffer(device, staging);
-        deallocate_image(device, image);
+        gpu_image_free(device, image);
         return ret;
     }
 
     VkImageView view;
     VkImageViewCreateInfo view_info = {
         .sType = VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
-        .image = image.image,
+        .image = image.handle,
         .viewType = VK_IMAGE_VIEW_TYPE_2D,
         .components = {
             .a = VK_COMPONENT_SWIZZLE_IDENTITY,
@@ -1371,8 +1367,7 @@ Texture load_texture(VkPhysicalDeviceMemoryProperties memories, VkDevice device,
 
     result = vkCreateImageView(device, &view_info, 0, &view);
     if(result != VK_SUCCESS) {
-        deallocate_buffer(device, staging);
-        deallocate_image(device, image);
+        gpu_image_free(device, image);
         return ret;
     }
 
@@ -1397,9 +1392,9 @@ Texture load_texture(VkPhysicalDeviceMemoryProperties memories, VkDevice device,
     };
 
     result = vkCreateSampler(device, &sampler_info, 0, &sampler);
-    deallocate_buffer(device, staging);
     if(result != VK_SUCCESS) {
-        deallocate_image(device, image);
+        gpu_image_free(device, image);
+        vkDestroyImageView(device, view, 0);
         return ret;
     }
 
@@ -3189,7 +3184,7 @@ Object create_simple_mesh_object(Material* simple_mesh_material, VkPhysicalDevic
     }
 
     GPUPage* transfer_memory = NULL;
-    result = gpu_page_allocate(device, memories, 10000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &transfer_memory);
+    result = gpu_page_allocate(device, memories, 100000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &transfer_memory);
     if(result != VK_SUCCESS) {
         return zero;
     }
@@ -3252,11 +3247,18 @@ Object create_simple_mesh_object(Material* simple_mesh_material, VkPhysicalDevic
         return zero;
     }
 
-    AllocatedBuffer* position_buffers = allocate_buffers(memories, device, sizeof(struct ModelUBO), VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, max_frames_in_flight, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
-    if(position_buffers == 0) {
+    GPUBuffer* position_buffers = malloc(sizeof(GPUBuffer)*max_frames_in_flight);
+    if(position_buffers == NULL) {
         return zero;
     }
 
+    for(uint32_t i = 0; i < max_frames_in_flight; i++) {
+        result = gpu_buffer_malloc(device, transfer_memory, sizeof(struct ModelUBO), VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, &position_buffers[i]);
+        if(result != VK_SUCCESS) {
+            return zero;
+        }
+    }
+
     MaybeValue maybe_ptrs = map_lookup(object.attributes, ATTRIBUTE_ID_DESCRIPTORS);
     if(maybe_ptrs.has_value == false) {
         return zero;
@@ -3264,13 +3266,9 @@ Object create_simple_mesh_object(Material* simple_mesh_material, VkPhysicalDevic
 
     void*** ptrs = maybe_ptrs.value;
     for(uint32_t i = 0; i < max_frames_in_flight; i++) {
-        VkResult result = vkMapMemory(device, position_buffers[i].memory, 0, sizeof(struct ModelUBO), 0, &ptrs[i][0]);
-        if(result != VK_SUCCESS) {
-            return zero;
-        }
-
+        *ptrs[i] = position_buffers[i].page->ptr + position_buffers[i].memory->offset;
         VkDescriptorBufferInfo buffer_info = {
-            .buffer = position_buffers[i].buffer,
+            .buffer = position_buffers[i].handle,
             .offset = 0,
             .range = sizeof(struct ModelUBO),
         };
@@ -3295,13 +3293,13 @@ Object create_texture_mesh_object(Material* texture_mesh_material, VkPhysicalDev
     Object zero = {};
 
     GPUPage* mesh_memory = NULL;
-    VkResult result = gpu_page_allocate(device, memories, 10000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT, &mesh_memory);
+    VkResult result = gpu_page_allocate(device, memories, 100000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT, &mesh_memory);
     if(result != VK_SUCCESS) {
         return zero;
     }
 
     GPUPage* transfer_memory = NULL;
-    result = gpu_page_allocate(device, memories, 10000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &transfer_memory);
+    result = gpu_page_allocate(device, memories, 100000, 0xFFFFFFFF, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT, &transfer_memory);
     if(result != VK_SUCCESS) {
         return zero;
     }
@@ -3364,11 +3362,18 @@ Object create_texture_mesh_object(Material* texture_mesh_material, VkPhysicalDev
         return zero;
     }
 
-    AllocatedBuffer* ubos = allocate_buffers(memories, device, sizeof(struct ModelUBO), VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, max_frames_in_flight, VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT, VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
-    if(ubos == 0) {
+    GPUBuffer* position_buffers = malloc(sizeof(GPUBuffer)*max_frames_in_flight);
+    if(position_buffers == NULL) {
         return zero;
     }
 
+    for(uint32_t i = 0; i < max_frames_in_flight; i++) {
+        result = gpu_buffer_malloc(device, transfer_memory, sizeof(struct ModelUBO), VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT, &position_buffers[i]);
+        if(result != VK_SUCCESS) {
+            return zero;
+        }
+    }
+
     MaybeValue maybe_ptrs = map_lookup(object.attributes, ATTRIBUTE_ID_DESCRIPTORS);
     if(maybe_ptrs.has_value == false) {
         return zero;
@@ -3377,13 +3382,10 @@ Object create_texture_mesh_object(Material* texture_mesh_material, VkPhysicalDev
     void*** ptrs = maybe_ptrs.value;
 
     for(uint32_t i = 0; i < max_frames_in_flight; i++) {
-        VkResult result = vkMapMemory(device, ubos[i].memory, 0, sizeof(struct ModelUBO), 0, &ptrs[i][0]);
-        if(result != VK_SUCCESS) {
-            return zero;
-        }
+        *ptrs[i] = position_buffers[i].page->ptr + position_buffers[i].memory->offset;
 
         VkDescriptorBufferInfo buffer_info = {
-            .buffer = ubos[i].buffer,
+            .buffer = position_buffers[i].handle,
             .offset = 0,
             .range = sizeof(struct ModelUBO),
         };
@@ -3431,7 +3433,7 @@ Object create_texture_mesh_object(Material* texture_mesh_material, VkPhysicalDev
         RED, WHT, GRN, WHT, BLU, WHT, RED, WHT, GRN, BLK,
     };
 
-    Texture test_texture = load_texture(memories, device, transfer_pool, transfer_queue, graphics_pool, graphics_queue, texture_size, 4, VK_FORMAT_R8G8B8A8_SRGB, texture_data, transfer_family, graphics_family);
+    Texture test_texture = load_texture(device, mesh_memory, transfer_buffer, transfer_pool, transfer_queue, graphics_pool, graphics_queue, texture_size, VK_FORMAT_R8G8B8A8_SRGB, texture_data, transfer_family, graphics_family);
 
     for(uint32_t i = 0; i < max_frames_in_flight; i++) {
         VkDescriptorImageInfo image_info = {
@@ -3609,45 +3611,6 @@ int main() {
         return 2;
     }
 
-    GPUPage* page = NULL;
-    VkResult result = gpu_page_allocate(context->device, context->memories, 500, 0xFFFFFFFF, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT, VK_MEMORY_PROPERTY_LAZILY_ALLOCATED_BIT, &page);
-    if(result != VK_SUCCESS) {
-        return -1;
-    }
-
-    GPUBuffer buffers[10] = {0};
-    for(int i = 0; i < 10; i++) {
-        result = gpu_buffer_malloc(context->device, page, 100, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, &buffers[i]);
-        if(result != VK_SUCCESS) {
-            fprintf(stderr, "gpu_malloc error: %s\n", string_VkResult(result));
-        } else {
-            fprintf(stderr, "gpu_malloc: %p@%llu\n", buffers[i].handle, buffers[i].memory->offset);
-            fprintchunks(stderr, page->allocated);
-            fprintchunks(stderr, page->free);
-        }
-    }
-
-    int test[] = {3, 0, 2, 4, 1};
-    for(size_t i = 0; i < (sizeof(test)/sizeof(int)); i++) {
-        int idx = test[i];
-        fprintf(stderr, "freeing %llu@%llu\n", buffers[idx].memory->size, buffers[idx].memory->offset);
-        gpu_buffer_free(context->device, buffers[idx]);
-        fprintchunks(stderr, page->free);
-    }
-
-    for(int i = 0; i < 10; i++) {
-        result = gpu_buffer_malloc(context->device, page, 100, VK_BUFFER_USAGE_VERTEX_BUFFER_BIT, &buffers[i]);
-        if(result != VK_SUCCESS) {
-            fprintf(stderr, "gpu_malloc error: %s\n", string_VkResult(result));
-        } else {
-            fprintf(stderr, "gpu_malloc: %p@%llu\n", buffers[i].handle, buffers[i].memory->offset);
-            fprintchunks(stderr, page->allocated);
-            fprintchunks(stderr, page->free);
-        }
-    }
-
-    gpu_page_free(context->device, page);
-
     glfwSetKeyCallback(window, key_callback);
     main_loop(window, context);
 