@@ -1,12 +1,12 @@
 #include "draw.h"
 #include "gpu.h"
-#include "stdatomic.h"
 #include "vulkan/vulkan_core.h"
 
 void record_ui_draw(VkCommandBuffer command_buffer, UIContext* ui_context, double time, uint32_t frame) {
     UIPushConstant push = {
         .time = (float)time,
         .layer = 0,
+        .pad = frame,
     };
 
     vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, ui_context->pipeline.pipeline);
@@ -15,9 +15,7 @@ void record_ui_draw(VkCommandBuffer command_buffer, UIContext* ui_context, doubl
     vkCmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, ui_context->pipeline.layout, 2, 1, &ui_context->samplers, 0, NULL);
     vkCmdBindDescriptorSets(command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, ui_context->pipeline.layout, 3, 1, &ui_context->textures, 0, NULL);
     for(uint32_t i = 0; i < ui_context->max_containers; i++) {
-        uint32_t layer_count = atomic_load(&ui_context->containers[i].layer_count);
-        __sync_synchronize();
-        for(uint32_t j = 0; j < layer_count; j++) {
+        for(uint32_t j = 0; j < ui_context->containers[i].layer_count; j++) {
             push.layer = ui_context->containers[i].layers[j].address[frame];
             vkCmdPushConstants(command_buffer, ui_context->pipeline.layout, VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT, 0, 16, &push);
             vkCmdDrawIndirect(command_buffer, ui_context->containers[i].layers[j].layer[frame], offsetof(GPULayer, draw), 1, 0);
@@ -29,19 +27,28 @@ void record_ui_compute(VkCommandBuffer command_buffer, UIContext* ui, uint32_t f
     UIPushConstant push = {
         .time = 0.0,
         .layer = 0,
+        .pad = frame,
     };
 
     vkCmdBindPipeline(command_buffer, VK_PIPELINE_BIND_POINT_COMPUTE, ui->string_pipeline.pipeline);
     for(uint32_t i = 0; i < ui->max_containers; i++) {
         if(ui->containers[i].id != 0x00000000) {
             for(uint32_t j = 0; j < ui->containers[i].layer_count; j++) {
-                push.layer = ui->containers[i].layers[j].address[frame];
                 command_copy_buffer(command_buffer, ui->containers[i].layers[j].layer[frame], ui->containers[i].layers[j].layer[frame], offsetof(GPULayer, num_drawables), offsetof(GPULayer, draw) + offsetof(DrawCommand, instance_count), sizeof(uint32_t));
+            }
+        }
+    }
+
+    for(uint32_t i = 0; i < ui->max_containers; i++) {
+        if(ui->containers[i].id != 0x00000000) {
+            for(uint32_t j = 0; j < ui->containers[i].layer_count; j++) {
+                push.layer = ui->containers[i].layers[j].address[frame];
                 vkCmdPushConstants(command_buffer, ui->string_pipeline.layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, 16, &push);
                 vkCmdDispatchIndirect(command_buffer, ui->containers[i].layers[j].layer[frame], offsetof(GPULayer, dispatch_strings));
             }
         }
     }
+
 }
 
 VkResult draw_frame(
@@ -59,54 +66,50 @@ VkResult draw_frame(
     };
 
     if(context->frame[context->current_frame].transfer_count > 0) {
-        VkFence fences[] = {context->frame[context->current_frame].transfer_ready[0], context->frame[context->current_frame].transfer_ready[1]};
-        VK_RESULT(vkWaitForFences(context->device, 2, fences, VK_TRUE, UINT64_MAX));
-        VK_RESULT(vkResetFences(context->device, 2, fences));
+        VkCommandBuffer transfer_commands = context->frame[context->current_frame].transfer_commands;
+        VK_RESULT(vkResetCommandBuffer(transfer_commands, 0));
+        VK_RESULT(vkBeginCommandBuffer(transfer_commands, &begin_info));
 
-        for(uint32_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) {
-            VkCommandBuffer transfer_commands = context->frame[context->current_frame].transfer_commands[i];
-            VK_RESULT(vkResetCommandBuffer(transfer_commands, 0));
-            VK_RESULT(vkBeginCommandBuffer(transfer_commands, &begin_info));
-
-            VkDeviceSize src_offset = 0;
-            for(uint32_t j = 0; j < context->frame[context->current_frame].transfer_count; j++) {
-                command_copy_buffer(
-                    transfer_commands,
-                    context->frame[context->current_frame].transfer_buffer,
-                    context->frame[context->current_frame].transfer_infos[j].buffers[i],
-                    src_offset,
-                    context->frame[context->current_frame].transfer_infos[j].offset,
-                    context->frame[context->current_frame].transfer_infos[j].size);
-                src_offset += context->frame[context->current_frame].transfer_infos[j].size;
-            }
-            record_ui_compute(transfer_commands, ui, i);
-            VK_RESULT(vkEndCommandBuffer(transfer_commands));
-            VkPipelineStageFlags wait_stages[] = {VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT};
-            context->frame[i].transfer_index += 1;
-            VkSemaphore transfer_signals[] = {context->frame[i].transfer};
-            uint64_t transfer_signal_values[] = {context->frame[i].transfer_index};
-            VkSemaphore transfer_waits[] = {context->frame[i].transfer, context->frame[i].frame};
-            uint64_t transfer_wait_values[] = {context->frame[i].transfer_index-1, context->frame[i].frame_index};
-            VkTimelineSemaphoreSubmitInfo timeline_info = {
-                .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,
-                .signalSemaphoreValueCount = sizeof(transfer_signal_values)/sizeof(uint64_t),
-                .pSignalSemaphoreValues = transfer_signal_values,
-                .waitSemaphoreValueCount = sizeof(transfer_wait_values)/sizeof(uint64_t),
-                .pWaitSemaphoreValues = transfer_wait_values,
-            };
-            VkSubmitInfo submit_info = {
-                .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
-                .commandBufferCount = 1,
-                .pCommandBuffers = &transfer_commands,
-                .pSignalSemaphores = transfer_signals,
-                .signalSemaphoreCount = sizeof(transfer_signals)/sizeof(VkSemaphore),
-                .pWaitSemaphores = transfer_waits,
-                .pWaitDstStageMask = wait_stages,
-                .waitSemaphoreCount = sizeof(transfer_waits)/sizeof(VkSemaphore),
-                .pNext = &timeline_info,
-            };
-            VK_RESULT(vkQueueSubmit(context->transfer_queue.handle, 1, &submit_info, context->frame[context->current_frame].transfer_ready[i]));
+        VkDeviceSize src_offset = 0;
+        for(uint32_t transfer_index = 0; transfer_index < context->frame[context->current_frame].transfer_count; transfer_index++) {
+            command_copy_buffer(
+                transfer_commands,
+                context->frame[context->current_frame].transfer_buffer,
+                context->frame[context->current_frame].transfer_infos[transfer_index].buffer,
+                src_offset,
+                context->frame[context->current_frame].transfer_infos[transfer_index].offset,
+                context->frame[context->current_frame].transfer_infos[transfer_index].size);
+            src_offset += context->frame[context->current_frame].transfer_infos[transfer_index].size;
         }
+
+
+        record_ui_compute(transfer_commands, ui, context->current_frame);
+        VK_RESULT(vkEndCommandBuffer(transfer_commands));
+        VkPipelineStageFlags wait_stages[] = {VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT};
+        context->frame[context->current_frame].transfer_index += 1;
+        VkSemaphore transfer_signals[] = {context->frame[context->current_frame].transfer};
+        uint64_t transfer_signal_values[] = {context->frame[context->current_frame].transfer_index};
+        VkSemaphore transfer_waits[] = {context->frame[context->current_frame].frame};
+        uint64_t transfer_wait_values[] = {context->frame[context->current_frame].frame_index};
+        VkTimelineSemaphoreSubmitInfo timeline_info = {
+            .sType = VK_STRUCTURE_TYPE_TIMELINE_SEMAPHORE_SUBMIT_INFO,
+            .signalSemaphoreValueCount = sizeof(transfer_signal_values)/sizeof(uint64_t),
+            .pSignalSemaphoreValues = transfer_signal_values,
+            .waitSemaphoreValueCount = sizeof(transfer_wait_values)/sizeof(uint64_t),
+            .pWaitSemaphoreValues = transfer_wait_values,
+        };
+        VkSubmitInfo submit_info = {
+            .sType = VK_STRUCTURE_TYPE_SUBMIT_INFO,
+            .commandBufferCount = 1,
+            .pCommandBuffers = &transfer_commands,
+            .pSignalSemaphores = transfer_signals,
+            .signalSemaphoreCount = sizeof(transfer_signals)/sizeof(VkSemaphore),
+            .pWaitSemaphores = transfer_waits,
+            .pWaitDstStageMask = wait_stages,
+            .waitSemaphoreCount = sizeof(transfer_waits)/sizeof(VkSemaphore),
+            .pNext = &timeline_info,
+        };
+        VK_RESULT(vkQueueSubmit(context->transfer_queue.handle, 1, &submit_info, VK_NULL_HANDLE));
         context->frame[context->current_frame].transfer_count = 0;
         context->frame[context->current_frame].transfer_written = 0;
     }