vk_scheduler: Allow command submission on worker thread
This changes how Scheduler::Flush works: it now only queues the current command buffer for submission to the GPU instead of submitting it immediately; the Vulkan worker thread performs the actual submission. Callers that need the previous behavior have to pair Scheduler::Flush with Scheduler::WaitWorker. Scheduler::Finish is unchanged. To avoid waiting on work that was never queued, Scheduler::Wait submits the current command buffer when the tick the caller wants to wait on has not been submitted yet.
parent c5425b38c1
commit 53acdda772
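The sketch below is a minimal, self-contained illustration of the submission model this commit moves to: Flush() only hands the recorded work to a worker thread, and callers pair it with WaitWorker() when they need the old synchronous guarantee. The class and member names are illustrative only and are not yuzu's actual VKScheduler implementation.

#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <thread>
#include <utility>

class WorkerScheduler {
public:
    WorkerScheduler() : worker{[this] { Run(); }} {}

    ~WorkerScheduler() {
        {
            std::scoped_lock lk{mutex};
            quit = true;
        }
        cv.notify_all();
        worker.join();
    }

    // New Flush semantics: queue the "submission" and return immediately.
    void Flush(std::function<void()> submit) {
        {
            std::scoped_lock lk{mutex};
            queue.push(std::move(submit));
        }
        cv.notify_all();
    }

    // Block until the worker has executed everything queued so far; pairing
    // Flush + WaitWorker recovers the previous synchronous behavior.
    void WaitWorker() {
        std::unique_lock lk{mutex};
        drained.wait(lk, [this] { return queue.empty() && !busy; });
    }

private:
    void Run() {
        std::unique_lock lk{mutex};
        while (true) {
            cv.wait(lk, [this] { return quit || !queue.empty(); });
            if (quit) {
                return;
            }
            while (!queue.empty()) {
                auto work = std::move(queue.front());
                queue.pop();
                busy = true;
                lk.unlock();
                work(); // e.g. end the command buffer and call vkQueueSubmit here
                lk.lock();
                busy = false;
            }
            drained.notify_all();
        }
    }

    std::mutex mutex;
    std::condition_variable cv;
    std::condition_variable drained;
    std::queue<std::function<void()>> queue;
    bool busy = false;
    bool quit = false;
    std::thread worker; // declared last so every other member is initialized before the thread starts
};

In the real scheduler the queued unit is a CommandChunk: SubmitExecution records the queue submission into the chunk, flags it with MarkSubmit(), and the worker thread executes it and then calls AllocateWorkerCommandBuffer() to begin a fresh command buffer, as the VKScheduler hunks below show.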
@@ -97,19 +97,14 @@ RendererVulkan::RendererVulkan(Core::TelemetrySession& telemetry_session_,
                                Core::Frontend::EmuWindow& emu_window,
                                Core::Memory::Memory& cpu_memory_, Tegra::GPU& gpu_,
                                std::unique_ptr<Core::Frontend::GraphicsContext> context_) try
-    : RendererBase(emu_window, std::move(context_)),
-      telemetry_session(telemetry_session_),
-      cpu_memory(cpu_memory_),
-      gpu(gpu_),
-      library(OpenLibrary()),
+    : RendererBase(emu_window, std::move(context_)), telemetry_session(telemetry_session_),
+      cpu_memory(cpu_memory_), gpu(gpu_), library(OpenLibrary()),
       instance(CreateInstance(library, dld, VK_API_VERSION_1_1, render_window.GetWindowInfo().type,
                               true, Settings::values.renderer_debug.GetValue())),
       debug_callback(Settings::values.renderer_debug ? CreateDebugCallback(instance) : nullptr),
       surface(CreateSurface(instance, render_window)),
-      device(CreateDevice(instance, dld, *surface)),
-      memory_allocator(device, false),
-      state_tracker(gpu),
-      scheduler(device, state_tracker),
+      device(CreateDevice(instance, dld, *surface)), memory_allocator(device, false),
+      state_tracker(gpu), scheduler(device, state_tracker),
       swapchain(*surface, device, scheduler, render_window.GetFramebufferLayout().width,
                 render_window.GetFramebufferLayout().height, false),
       blit_screen(cpu_memory, render_window, device, memory_allocator, swapchain, scheduler,
@@ -130,37 +125,49 @@ void RendererVulkan::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
     if (!framebuffer) {
         return;
     }
-    const auto& layout = render_window.GetFramebufferLayout();
-    if (layout.width > 0 && layout.height > 0 && render_window.IsShown()) {
-        const VAddr framebuffer_addr = framebuffer->address + framebuffer->offset;
-        const bool use_accelerated =
-            rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride);
-        const bool is_srgb = use_accelerated && screen_info.is_srgb;
-        if (swapchain.HasFramebufferChanged(layout) || swapchain.GetSrgbState() != is_srgb) {
-            swapchain.Create(layout.width, layout.height, is_srgb);
-            blit_screen.Recreate();
-        }
-
-        scheduler.WaitWorker();
-        while (!swapchain.AcquireNextImage()) {
-            swapchain.Create(layout.width, layout.height, is_srgb);
-            blit_screen.Recreate();
-        }
-        const VkSemaphore render_semaphore = blit_screen.Draw(*framebuffer, use_accelerated);
-
-        scheduler.Flush(render_semaphore);
-
-        if (swapchain.Present(render_semaphore)) {
-            blit_screen.Recreate();
-        }
-        gpu.RendererFrameEndNotify();
-        rasterizer.TickFrame();
-    }
-
-    render_window.OnFrameDisplayed();
-}
+    SCOPE_EXIT({ render_window.OnFrameDisplayed(); });
+    if (!render_window.IsShown()) {
+        return;
+    }
+    const VAddr framebuffer_addr = framebuffer->address + framebuffer->offset;
+    const bool use_accelerated =
+        rasterizer.AccelerateDisplay(*framebuffer, framebuffer_addr, framebuffer->stride);
+    const bool is_srgb = use_accelerated && screen_info.is_srgb;
+
+    const Layout::FramebufferLayout layout = render_window.GetFramebufferLayout();
+    bool has_been_recreated = false;
+    const auto recreate_swapchain = [&] {
+        if (!has_been_recreated) {
+            has_been_recreated = true;
+            scheduler.WaitWorker();
+        }
+        swapchain.Create(layout.width, layout.height, is_srgb);
+    };
+    if (swapchain.NeedsRecreate() ||
+        swapchain.HasDifferentLayout(layout.width, layout.height, is_srgb)) {
+        recreate_swapchain();
+    }
+    bool needs_recreate;
+    do {
+        needs_recreate = false;
+        swapchain.AcquireNextImage();
+        if (swapchain.NeedsRecreate()) {
+            recreate_swapchain();
+            needs_recreate = true;
+        }
+    } while (needs_recreate);
+    if (has_been_recreated) {
+        blit_screen.Recreate();
+    }
+    const VkSemaphore render_semaphore = blit_screen.Draw(*framebuffer, use_accelerated);
+
+    scheduler.Flush(render_semaphore);
+    scheduler.WaitWorker();
+    swapchain.Present(render_semaphore);
+    gpu.RendererFrameEndNotify();
+    rasterizer.TickFrame();
+}
 
 void RendererVulkan::Report() const {
     const std::string vendor_name{device.GetVendorName()};
     const std::string model_name{device.GetModelName()};
@@ -184,8 +184,8 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
             .depth = 1,
         },
     };
-    scheduler.Record(
-        [buffer = *buffer, image = *raw_images[image_index], copy](vk::CommandBuffer cmdbuf) {
+    scheduler.Record([this, copy, image_index](vk::CommandBuffer cmdbuf) {
+        const VkImage image = *raw_images[image_index];
         const VkImageMemoryBarrier base_barrier{
             .sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
             .pNext = nullptr,
@@ -196,8 +196,7 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
             .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
             .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
             .image = image,
-            .subresourceRange =
-                {
+            .subresourceRange{
                 .aspectMask = VK_IMAGE_ASPECT_COLOR_BIT,
                 .baseMipLevel = 0,
                 .levelCount = 1,
@@ -214,17 +213,14 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
         write_barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
         write_barrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT;
 
-        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
-                               0, read_barrier);
-        cmdbuf.CopyBufferToImage(buffer, image, VK_IMAGE_LAYOUT_GENERAL, copy);
+        cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, 0,
+                               read_barrier);
+        cmdbuf.CopyBufferToImage(*buffer, image, VK_IMAGE_LAYOUT_GENERAL, copy);
         cmdbuf.PipelineBarrier(VK_PIPELINE_STAGE_TRANSFER_BIT,
                                VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0, write_barrier);
     });
 }
-    scheduler.Record([renderpass = *renderpass, framebuffer = *framebuffers[image_index],
-                      descriptor_set = descriptor_sets[image_index], buffer = *buffer,
-                      size = swapchain.GetSize(), pipeline = *pipeline,
-                      layout = *pipeline_layout](vk::CommandBuffer cmdbuf) {
+    scheduler.Record([this, image_index, size = swapchain.GetSize()](vk::CommandBuffer cmdbuf) {
         const f32 bg_red = Settings::values.bg_red.GetValue() / 255.0f;
         const f32 bg_green = Settings::values.bg_green.GetValue() / 255.0f;
         const f32 bg_blue = Settings::values.bg_blue.GetValue() / 255.0f;
@@ -234,8 +230,8 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
         const VkRenderPassBeginInfo renderpass_bi{
             .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
             .pNext = nullptr,
-            .renderPass = renderpass,
-            .framebuffer = framebuffer,
+            .renderPass = *renderpass,
+            .framebuffer = *framebuffers[image_index],
             .renderArea =
                 {
                     .offset = {0, 0},
@@ -257,12 +253,13 @@ VkSemaphore VKBlitScreen::Draw(const Tegra::FramebufferConfig& framebuffer, bool
             .extent = size,
         };
         cmdbuf.BeginRenderPass(renderpass_bi, VK_SUBPASS_CONTENTS_INLINE);
-        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, pipeline);
+        cmdbuf.BindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
         cmdbuf.SetViewport(0, viewport);
         cmdbuf.SetScissor(0, scissor);
 
-        cmdbuf.BindVertexBuffer(0, buffer, offsetof(BufferData, vertices));
-        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, layout, 0, descriptor_set, {});
+        cmdbuf.BindVertexBuffer(0, *buffer, offsetof(BufferData, vertices));
+        cmdbuf.BindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline_layout, 0,
+                                  descriptor_sets[image_index], {});
         cmdbuf.Draw(4, 1, 0, 0);
         cmdbuf.EndRenderPass();
     });
@@ -304,8 +301,7 @@ void VKBlitScreen::CreateShaders() {
 
 void VKBlitScreen::CreateSemaphores() {
     semaphores.resize(image_count);
-    std::generate(semaphores.begin(), semaphores.end(),
-                  [this] { return device.GetLogical().CreateSemaphore(); });
+    std::ranges::generate(semaphores, [this] { return device.GetLogical().CreateSemaphore(); });
 }
 
 void VKBlitScreen::CreateDescriptorPool() {
@@ -633,8 +629,8 @@ void VKBlitScreen::CreateFramebuffers() {
 }
 
 void VKBlitScreen::ReleaseRawImages() {
-    for (std::size_t i = 0; i < raw_images.size(); ++i) {
-        scheduler.Wait(resource_ticks.at(i));
+    for (const u64 tick : resource_ticks) {
+        scheduler.Wait(tick);
     }
     raw_images.clear();
     raw_buffer_commits.clear();
@@ -114,10 +114,13 @@ void HostCounter::EndQuery() {
 }
 
 u64 HostCounter::BlockingQuery() const {
-    if (tick >= cache.GetScheduler().CurrentTick()) {
-        cache.GetScheduler().Flush();
+    auto& scheduler{cache.GetScheduler()};
+    if (tick >= scheduler.CurrentTick()) {
+        scheduler.Flush();
+        // This may not be necessary, but it's better to play it safe and assume drivers don't
+        // support wait before signal on vkGetQueryPoolResults
+        scheduler.WaitWorker();
     }
 
     u64 data;
     const VkResult query_result = cache.GetDevice().GetLogical().GetQueryResults(
         query.first, query.second, 1, sizeof(data), &data, sizeof(data),
@@ -452,11 +452,12 @@ void RasterizerVulkan::TiledCacheBarrier() {
 }
 
 void RasterizerVulkan::FlushCommands() {
-    if (draw_counter > 0) {
-        draw_counter = 0;
-        scheduler.Flush();
-    }
+    if (draw_counter == 0) {
+        return;
+    }
+    draw_counter = 0;
+    scheduler.Flush();
 }
 
 void RasterizerVulkan::TickFrame() {
     draw_counter = 0;
@@ -31,7 +31,7 @@ void VKScheduler::CommandChunk::ExecuteAll(vk::CommandBuffer cmdbuf) {
         command->~Command();
         command = next;
     }
-
+    submit = false;
     command_offset = 0;
     first = nullptr;
     last = nullptr;
@@ -42,7 +42,7 @@ VKScheduler::VKScheduler(const Device& device_, StateTracker& state_tracker_)
       master_semaphore{std::make_unique<MasterSemaphore>(device)},
       command_pool{std::make_unique<CommandPool>(*master_semaphore, device)} {
     AcquireNewChunk();
-    AllocateNewContext();
+    AllocateWorkerCommandBuffer();
    worker_thread = std::thread(&VKScheduler::WorkerThread, this);
 }
 
@@ -60,6 +60,7 @@ void VKScheduler::Flush(VkSemaphore semaphore) {
 void VKScheduler::Finish(VkSemaphore semaphore) {
     const u64 presubmit_tick = CurrentTick();
     SubmitExecution(semaphore);
+    WaitWorker();
     Wait(presubmit_tick);
     AllocateNewContext();
 }
@@ -140,31 +141,45 @@ void VKScheduler::WorkerThread() {
         if (quit) {
             continue;
         }
+        while (!chunk_queue.Empty()) {
             auto extracted_chunk = std::move(chunk_queue.Front());
             chunk_queue.Pop();
+            const bool has_submit = extracted_chunk->HasSubmit();
             extracted_chunk->ExecuteAll(current_cmdbuf);
+            if (has_submit) {
+                AllocateWorkerCommandBuffer();
+            }
             chunk_reserve.Push(std::move(extracted_chunk));
+        }
     } while (!quit);
 }
 
+void VKScheduler::AllocateWorkerCommandBuffer() {
+    current_cmdbuf = vk::CommandBuffer(command_pool->Commit(), device.GetDispatchLoader());
+    current_cmdbuf.Begin({
+        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+        .pNext = nullptr,
+        .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
+        .pInheritanceInfo = nullptr,
+    });
+}
+
 void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
     EndPendingOperations();
     InvalidateState();
-    WaitWorker();
-
-    std::unique_lock lock{mutex};
-
-    current_cmdbuf.End();
-
-    const VkSemaphore timeline_semaphore = master_semaphore->Handle();
-    const u32 num_signal_semaphores = semaphore ? 2U : 1U;
 
     const u64 signal_value = master_semaphore->CurrentTick();
-    const u64 wait_value = signal_value - 1;
-    const VkPipelineStageFlags wait_stage_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
-
-    master_semaphore->NextTick();
-
-    const std::array signal_values{signal_value, u64(0)};
-    const std::array signal_semaphores{timeline_semaphore, semaphore};
+    master_semaphore->NextTick();
+
+    Record([semaphore, signal_value, this](vk::CommandBuffer cmdbuf) {
+        cmdbuf.End();
+
+        const u32 num_signal_semaphores = semaphore ? 2U : 1U;
+        const u64 wait_value = signal_value - 1;
+        const VkPipelineStageFlags wait_stage_mask = VK_PIPELINE_STAGE_ALL_COMMANDS_BIT;
+        const VkSemaphore timeline_semaphore = master_semaphore->Handle();
+
+        const std::array signal_values{signal_value, u64(0)};
+        const std::array signal_semaphores{timeline_semaphore, semaphore};
@@ -183,7 +198,7 @@ void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
         .pWaitSemaphores = &timeline_semaphore,
         .pWaitDstStageMask = &wait_stage_mask,
         .commandBufferCount = 1,
-        .pCommandBuffers = current_cmdbuf.address(),
+        .pCommandBuffers = cmdbuf.address(),
         .signalSemaphoreCount = num_signal_semaphores,
         .pSignalSemaphores = signal_semaphores.data(),
     };
@@ -196,19 +211,12 @@ void VKScheduler::SubmitExecution(VkSemaphore semaphore) {
         default:
             vk::Check(result);
         }
+    });
+    chunk->MarkSubmit();
+    DispatchWork();
 }
 
 void VKScheduler::AllocateNewContext() {
-    std::unique_lock lock{mutex};
-
-    current_cmdbuf = vk::CommandBuffer(command_pool->Commit(), device.GetDispatchLoader());
-    current_cmdbuf.Begin({
-        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
-        .pNext = nullptr,
-        .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
-        .pInheritanceInfo = nullptr,
-    });
-
     // Enable counters once again. These are disabled when a command buffer is finished.
     if (query_cache) {
         query_cache->UpdateCounters();
@@ -86,6 +86,10 @@ public:
 
     /// Waits for the given tick to trigger on the GPU.
     void Wait(u64 tick) {
+        if (tick >= master_semaphore->CurrentTick()) {
+            // Make sure we are not waiting for the current tick without signalling
+            Flush();
+        }
         master_semaphore->Wait(tick);
     }
 
@@ -155,15 +159,24 @@ private:
             return true;
         }
 
+        void MarkSubmit() {
+            submit = true;
+        }
+
         bool Empty() const {
            return command_offset == 0;
        }
 
+        bool HasSubmit() const {
+            return submit;
+        }
+
    private:
        Command* first = nullptr;
        Command* last = nullptr;
 
        size_t command_offset = 0;
+        bool submit = false;
        alignas(std::max_align_t) std::array<u8, 0x8000> data{};
    };
 
@@ -176,6 +189,8 @@ private:
 
    void WorkerThread();
 
+    void AllocateWorkerCommandBuffer();
+
    void SubmitExecution(VkSemaphore semaphore);
 
    void AllocateNewContext();
@@ -65,6 +65,8 @@ VKSwapchain::VKSwapchain(VkSurfaceKHR surface_, const Device& device_, VKSchedul
 VKSwapchain::~VKSwapchain() = default;
 
 void VKSwapchain::Create(u32 width, u32 height, bool srgb) {
+    needs_recreate = false;
+
     const auto physical_device = device.GetPhysical();
     const auto capabilities{physical_device.GetSurfaceCapabilitiesKHR(surface)};
     if (capabilities.maxImageExtent.width == 0 || capabilities.maxImageExtent.height == 0) {
@@ -82,21 +84,20 @@ void VKSwapchain::Create(u32 width, u32 height, bool srgb) {
         resource_ticks.resize(image_count);
     }
 
-bool VKSwapchain::AcquireNextImage() {
+void VKSwapchain::AcquireNextImage() {
     const VkResult result =
         device.GetLogical().AcquireNextImageKHR(*swapchain, std::numeric_limits<u64>::max(),
                                                 *present_semaphores[frame_index], {}, &image_index);
+    needs_recreate |= result != VK_SUCCESS && result != VK_SUBOPTIMAL_KHR;
 
     scheduler.Wait(resource_ticks[image_index]);
-    return result == VK_SUCCESS || result == VK_SUBOPTIMAL_KHR;
+    resource_ticks[image_index] = scheduler.CurrentTick();
 }
 
-bool VKSwapchain::Present(VkSemaphore render_semaphore) {
+void VKSwapchain::Present(VkSemaphore render_semaphore) {
     const VkSemaphore present_semaphore{*present_semaphores[frame_index]};
     const std::array<VkSemaphore, 2> semaphores{present_semaphore, render_semaphore};
     const auto present_queue{device.GetPresentQueue()};
-    bool recreated = false;
 
     const VkPresentInfoKHR present_info{
         .sType = VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
         .pNext = nullptr,
@@ -107,7 +108,6 @@ bool VKSwapchain::Present(VkSemaphore render_semaphore) {
         .pImageIndices = &image_index,
         .pResults = nullptr,
     };
-
     switch (const VkResult result = present_queue.Present(present_info)) {
     case VK_SUCCESS:
         break;
@@ -115,24 +115,16 @@ bool VKSwapchain::Present(VkSemaphore render_semaphore) {
         LOG_DEBUG(Render_Vulkan, "Suboptimal swapchain");
         break;
     case VK_ERROR_OUT_OF_DATE_KHR:
-        if (current_width > 0 && current_height > 0) {
-            Create(current_width, current_height, current_srgb);
-            recreated = true;
-        }
+        needs_recreate = true;
        break;
    default:
        LOG_CRITICAL(Render_Vulkan, "Failed to present with error {}", vk::ToString(result));
        break;
    }
-    resource_ticks[image_index] = scheduler.CurrentTick();
-    frame_index = (frame_index + 1) % static_cast<u32>(image_count);
-    return recreated;
-}
-
-bool VKSwapchain::HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const {
-    // TODO(Rodrigo): Handle framebuffer pixel format changes
-    return framebuffer.width != current_width || framebuffer.height != current_height;
+    ++frame_index;
+    if (frame_index >= image_count) {
+        frame_index = 0;
+    }
 }
 
 void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u32 width,
@@ -148,7 +140,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
     if (capabilities.maxImageCount > 0 && requested_image_count > capabilities.maxImageCount) {
         requested_image_count = capabilities.maxImageCount;
     }
-
     VkSwapchainCreateInfoKHR swapchain_ci{
         .sType = VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
         .pNext = nullptr,
@@ -169,7 +160,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
         .clipped = VK_FALSE,
         .oldSwapchain = nullptr,
     };
-
     const u32 graphics_family{device.GetGraphicsFamily()};
     const u32 present_family{device.GetPresentFamily()};
     const std::array<u32, 2> queue_indices{graphics_family, present_family};
@@ -178,7 +168,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
         swapchain_ci.queueFamilyIndexCount = static_cast<u32>(queue_indices.size());
         swapchain_ci.pQueueFamilyIndices = queue_indices.data();
     }
-
     // Request the size again to reduce the possibility of a TOCTOU race condition.
     const auto updated_capabilities = physical_device.GetSurfaceCapabilitiesKHR(surface);
     swapchain_ci.imageExtent = ChooseSwapExtent(updated_capabilities, width, height);
@@ -186,8 +175,6 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
     swapchain = device.GetLogical().CreateSwapchainKHR(swapchain_ci);
 
     extent = swapchain_ci.imageExtent;
-    current_width = extent.width;
-    current_height = extent.height;
     current_srgb = srgb;
 
     images = swapchain.GetImages();
@@ -197,7 +184,7 @@ void VKSwapchain::CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities,
 
 void VKSwapchain::CreateSemaphores() {
     present_semaphores.resize(image_count);
-    std::generate(present_semaphores.begin(), present_semaphores.end(),
+    std::ranges::generate(present_semaphores,
                   [this] { return device.GetLogical().CreateSemaphore(); });
 }
 
@@ -28,14 +28,20 @@ public:
     void Create(u32 width, u32 height, bool srgb);
 
     /// Acquires the next image in the swapchain, waits as needed.
-    bool AcquireNextImage();
+    void AcquireNextImage();
 
-    /// Presents the rendered image to the swapchain. Returns true when the swapchains had to be
-    /// recreated. Takes responsability for the ownership of fence.
-    bool Present(VkSemaphore render_semaphore);
+    /// Presents the rendered image to the swapchain.
+    void Present(VkSemaphore render_semaphore);
 
     /// Returns true when the framebuffer layout has changed.
-    bool HasFramebufferChanged(const Layout::FramebufferLayout& framebuffer) const;
+    bool HasDifferentLayout(u32 width, u32 height, bool is_srgb) const {
+        return extent.width != width || extent.height != height || current_srgb != is_srgb;
+    }
+
+    /// Returns true when the image has to be recreated.
+    bool NeedsRecreate() const {
+        return needs_recreate;
+    }
 
     VkExtent2D GetSize() const {
         return extent;
@@ -61,10 +67,6 @@ public:
         return image_format;
     }
 
-    bool GetSrgbState() const {
-        return current_srgb;
-    }
-
 private:
     void CreateSwapchain(const VkSurfaceCapabilitiesKHR& capabilities, u32 width, u32 height,
                          bool srgb);
@@ -92,9 +94,8 @@ private:
     VkFormat image_format{};
     VkExtent2D extent{};
 
-    u32 current_width{};
-    u32 current_height{};
     bool current_srgb{};
+    bool needs_recreate{};
 };
 
 } // namespace Vulkan