// Copyright 2019 yuzu Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#include "common/assert.h"
#include "common/microprofile.h"
#include "common/scope_exit.h"
#include "common/thread.h"
#include "core/core.h"
#include "core/frontend/emu_window.h"
#include "core/settings.h"
#include "video_core/dma_pusher.h"
#include "video_core/gpu.h"
#include "video_core/gpu_thread.h"
#include "video_core/renderer_base.h"

namespace VideoCommon::GPUThread {

/// Runs the GPU thread
static void RunThread(Core::System& system, VideoCore::RendererBase& renderer,
                      Core::Frontend::GraphicsContext& context, Tegra::DmaPusher& dma_pusher,
                      SynchState& state) {
    // Register this thread with the profiler and name it for OS thread listings.
    std::string name = "yuzu:GPU";
    MicroProfileOnThreadCreate(name.c_str());
    SCOPE_EXIT({ MicroProfileOnThreadExit(); });

    Common::SetCurrentThreadName(name.c_str());
    Common::SetCurrentThreadPriority(Common::ThreadPriority::High);
    system.RegisterHostThread();

    // Wait for first GPU command before acquiring the window context
    state.queue.Wait();

    // If emulation was stopped during disk shader loading, abort before trying to acquire context
    if (!state.is_running) {
        return;
    }

    // Bind the graphics context to this thread for the lifetime of the loop.
    auto current_context = context.Acquire();
    VideoCore::RasterizerInterface* const rasterizer = renderer.ReadRasterizer();

    // Main loop: pop one command container at a time and dispatch on the variant's held type.
    CommandDataContainer next;
    while (state.is_running) {
        next = state.queue.PopWait();
        if (auto* submit_list = std::get_if<SubmitListCommand>(&next.data)) {
            // GPU command list: feed it to the DMA pusher and execute it.
            dma_pusher.Push(std::move(submit_list->entries));
            dma_pusher.DispatchCalls();
        } else if (const auto* data = std::get_if<SwapBuffersCommand>(&next.data)) {
            // Present; forwards nullptr when no framebuffer config was supplied.
            renderer.SwapBuffers(data->framebuffer ? &*data->framebuffer : nullptr);
        } else if (std::holds_alternative<OnCommandListEndCommand>(next.data)) {
            rasterizer->ReleaseFences();
        } else if (std::holds_alternative<GPUTickCommand>(next.data)) {
            system.GPU().TickWork();
        } else if (const auto* flush = std::get_if<FlushRegionCommand>(&next.data)) {
            rasterizer->FlushRegion(flush->addr, flush->size);
        } else if (const auto* invalidate = std::get_if<InvalidateRegionCommand>(&next.data)) {
            rasterizer->OnCPUWrite(invalidate->addr, invalidate->size);
        } else if (std::holds_alternative<EndProcessingCommand>(next.data)) {
            // ShutDown() pushes this command only after clearing is_running,
            // so the while condition terminates the loop on the next iteration.
            ASSERT(state.is_running == false);
        } else {
            UNREACHABLE();
        }
        // Publish the fence of the command just processed, then wake any producer
        // blocked in PushCommand() waiting on it.
        state.signaled_fence.store(next.fence);
        if (next.block) {
            // We have to lock the write_lock to ensure that the condition_variable wait not get a
            // race between the check and the lock itself.
            std::lock_guard lk(state.write_lock);
            state.cv.notify_all();
        }
    }
}
/// Constructs the manager; the worker thread itself is started later via StartThread().
ThreadManager::ThreadManager(Core::System& system_, bool is_async_)
    : system{system_}, is_async{is_async_} {}
ThreadManager::~ThreadManager() {
    // Stop and join the worker thread (if running) before members are destroyed.
    ShutDown();
}
void ThreadManager::StartThread(VideoCore::RendererBase& renderer,
|
|
|
|
Core::Frontend::GraphicsContext& context,
|
2021-03-01 03:03:00 +00:00
|
|
|
Tegra::DmaPusher& dma_pusher) {
|
2021-01-05 07:09:39 +00:00
|
|
|
rasterizer = renderer.ReadRasterizer();
|
2020-10-27 03:07:36 +00:00
|
|
|
thread = std::thread(RunThread, std::ref(system), std::ref(renderer), std::ref(context),
|
2021-03-01 03:03:00 +00:00
|
|
|
std::ref(dma_pusher), std::ref(state));
|
2019-04-09 18:02:00 +00:00
|
|
|
}
/// Queues a command list for the GPU thread to process.
void ThreadManager::SubmitList(Tegra::CommandList&& entries) {
    SubmitListCommand command{std::move(entries)};
    PushCommand(std::move(command));
}
/// Queues a buffer swap; framebuffer may be null, in which case no config is forwarded.
void ThreadManager::SwapBuffers(const Tegra::FramebufferConfig* framebuffer) {
    std::optional<Tegra::FramebufferConfig> config;
    if (framebuffer != nullptr) {
        config = *framebuffer;
    }
    PushCommand(SwapBuffersCommand(std::move(config)));
}
/// Flushes a guest memory region; how (and whether) it blocks depends on
/// synchronous vs. asynchronous GPU mode and the configured accuracy level.
void ThreadManager::FlushRegion(VAddr addr, u64 size) {
    if (!is_async) {
        // Synchronous GPU mode: always queue the flush (PushCommand blocks in this mode).
        PushCommand(FlushRegionCommand(addr, size));
        return;
    }

    // Asynchronous GPU mode
    const auto accuracy = Settings::values.gpu_accuracy.GetValue();
    if (accuracy == Settings::GPUAccuracy::Normal) {
        PushCommand(FlushRegionCommand(addr, size));
    } else if (accuracy == Settings::GPUAccuracy::High) {
        // TODO(bunnei): Is this right? Preserving existing behavior for now
    } else if (accuracy == Settings::GPUAccuracy::Extreme) {
        // Request a flush from the GPU, then block until the GPU thread has caught up.
        auto& gpu = system.GPU();
        u64 fence = gpu.RequestFlush(addr, size);
        PushCommand(GPUTickCommand(), true);
        ASSERT(fence <= gpu.CurrentFlushRequestFence());
    } else {
        UNIMPLEMENTED_MSG("Unsupported gpu_accuracy {}", accuracy);
    }
}
void ThreadManager::InvalidateRegion(VAddr addr, u64 size) {
    // Invalidation is forwarded directly on the calling thread rather than being
    // queued as a command (contrast FlushRegion, which goes through PushCommand).
    rasterizer->OnCPUWrite(addr, size);
}
void ThreadManager::FlushAndInvalidateRegion(VAddr addr, u64 size) {
    // Skip flush on async mode, as FlushAndInvalidateRegion is not used for anything too important
    // (only the invalidate half is performed, same as InvalidateRegion).
    rasterizer->OnCPUWrite(addr, size);
}
void ThreadManager::ShutDown() {
    // Already stopped (or never started): nothing to do.
    if (!state.is_running) {
        return;
    }

    {
        // Clear the running flag under write_lock and wake anyone blocked in
        // PushCommand(); its wait predicate also checks !state.is_running.
        std::lock_guard lk(state.write_lock);
        state.is_running = false;
        state.cv.notify_all();
    }

    if (!thread.joinable()) {
        return;
    }

    // Notify GPU thread that a shutdown is pending
    PushCommand(EndProcessingCommand());
    thread.join();
}
void ThreadManager::OnCommandListEnd() {
    // Queue a command so the GPU thread releases its fences (see RunThread dispatch).
    PushCommand(OnCommandListEndCommand());
}
/// Pushes a command onto the GPU thread queue and returns its fence value.
/// When block is true (always in synchronous mode), waits until the GPU thread
/// has signaled this fence or emulation is shutting down.
u64 ThreadManager::PushCommand(CommandData&& command_data, bool block) {
    if (!is_async) {
        // In synchronous GPU mode, block the caller until the command has executed
        block = true;
    }

    // write_lock also guards the cv wait below; RunThread notifies under the same
    // lock, so a signal cannot slip in between the predicate check and the wait.
    std::unique_lock lk(state.write_lock);
    const u64 fence{++state.last_fence};
    state.queue.Push(CommandDataContainer(std::move(command_data), fence, block));

    if (block) {
        state.cv.wait(lk, [this, fence] {
            return fence <= state.signaled_fence.load(std::memory_order_relaxed) ||
                   !state.is_running;
        });
    }

    return fence;
}

} // namespace VideoCommon::GPUThread