mirror of https://github.com/citra-emu/citra.git (synced 2024-11-30 09:10:05 +00:00)
threaded vertex rendering
This commit is contained in:
parent 5620327e03
commit 07ff3527a0
src/common/CMakeLists.txt
@@ -75,6 +75,7 @@ set(HEADERS
     synchronized_wrapper.h
     telemetry.h
     thread.h
+    thread_pool.h
     thread_queue_list.h
     timer.h
     vector_math.h
src/common/thread_pool.h (new file, 118 additions)
@@ -0,0 +1,118 @@
+// Copyright 2016 Citra Emulator Project / PPSSPP Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <atomic>
+#include <condition_variable>
+#include <functional>
+#include <future>
+#include <mutex>
+#include <thread>
+#include <vector>
+#include <boost/lockfree/spsc_queue.hpp>
+
+#include "common/assert.h"
+
+namespace Common {
+
+class ThreadPool {
+private:
+    explicit ThreadPool(unsigned int num_threads)
+        : num_threads(num_threads), workers(num_threads) {
+        ASSERT(num_threads);
+    }
+
+public:
+    static ThreadPool& GetPool() {
+        static ThreadPool thread_pool(std::thread::hardware_concurrency());
+        return thread_pool;
+    }
+
+    void set_spinlocking(bool enable) {
+        for (auto& worker : workers) {
+            worker.spinlock_enabled = enable;
+            if (enable) {
+                std::unique_lock<std::mutex> lock(worker.mutex);
+                lock.unlock();
+                worker.cv.notify_one();
+            }
+        }
+    }
+
+    template <typename F, typename... Args>
+    auto push(F&& f, Args&&... args) {
+        auto ret = workers[next_worker].push(std::forward<F>(f), std::forward<Args>(args)...);
+        next_worker = (next_worker + 1) % num_threads;
+        return ret;
+    }
+
+    unsigned int total_threads() {
+        return num_threads;
+    }
+
+private:
+    class Worker {
+    public:
+        Worker() : exit_loop(false), spinlock_enabled(false), thread([this] { loop(); }) {}
+
+        ~Worker() {
+            exit_loop = true;
+            std::unique_lock<std::mutex> lock(mutex);
+            lock.unlock();
+            cv.notify_one();
+            thread.join();
+        }
+
+        void loop() {
+            for (;;) {
+                // Drain the queue, then either keep spinning or sleep on the
+                // condition variable until more work (or shutdown) arrives.
+                while (queue.consume_all([](const auto& f) { f(); }))
+                    ;
+                if (spinlock_enabled)
+                    continue;
+
+                std::unique_lock<std::mutex> lock(mutex);
+                if (queue.read_available())
+                    continue;
+                if (exit_loop)
+                    break;
+                cv.wait(lock);
+            }
+        }
+
+        template <typename F, typename... Args>
+        auto push(F&& f, Args&&... args) {
+            auto task = std::make_shared<std::packaged_task<decltype(f(args...))()>>(
+                std::bind(std::forward<F>(f), std::forward<Args>(args)...));
+
+            while (!queue.push([task]() { (*task)(); }))
+                std::this_thread::yield();
+
+            if (!spinlock_enabled.load(std::memory_order_relaxed)) {
+                std::unique_lock<std::mutex> lock(mutex);
+                lock.unlock();
+                cv.notify_one();
+            }
+
+            return task->get_future();
+        }
+
+        bool exit_loop;
+        std::atomic<bool> spinlock_enabled;
+        std::mutex mutex;
+        std::condition_variable cv;
+        boost::lockfree::spsc_queue<std::function<void()>, boost::lockfree::capacity<100>> queue;
+        std::thread thread;
+    };
+
+    const unsigned int num_threads;
+    int next_worker = 0;
+    std::vector<Worker> workers;
+};
+
+} // namespace Common
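For orientation, a minimal usage sketch of this pool (illustrative only, not part of the commit; SquareArea and Example are hypothetical names): push wraps the callable in a std::packaged_task, hands it to a worker's lock-free queue, and returns the task's std::future, so the caller can overlap work and block on the result later.

// Usage sketch under the assumptions above.
#include <future>
#include "common/thread_pool.h"

static int SquareArea(int side) {
    return side * side;
}

void Example() {
    // The constructor is private, so the process-wide singleton is the only instance.
    auto& pool = Common::ThreadPool::GetPool();
    std::future<int> area = pool.push(SquareArea, 7);
    // ... overlap other work here ...
    int result = area.get(); // blocks until a worker has executed the task
}

Note that tasks are distributed round-robin across per-worker single-producer queues, so push is expected to be called from one thread at a time (the GPU command thread in this commit).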
src/video_core/command_processor.cpp
@@ -9,6 +9,7 @@
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "common/microprofile.h"
+#include "common/thread_pool.h"
 #include "common/vector_math.h"
 #include "core/hle/service/gsp_gpu.h"
 #include "core/hw/gpu.h"
@@ -298,6 +299,36 @@ static void WritePicaReg(u32 id, u32 value, u32 mask) {
 const u16* index_address_16 = reinterpret_cast<const u16*>(index_address_8);
 bool index_u16 = index_info.format != 0;
+
+struct CacheEntry {
+    Shader::AttributeBuffer output_attr;
+    Shader::OutputVertex output_vertex;
+    std::atomic<u32> id;
+    std::atomic_flag writing{ATOMIC_FLAG_INIT}; // Set when a thread is writing into this entry
+};
+static std::array<CacheEntry, 0x10000> cache;
+
+// Used as a means to invalidate data from the previous batch without clearing it
+static u32 cache_batch_id = std::numeric_limits<u32>::max();
+
+++cache_batch_id;
+if (cache_batch_id == 0) { // Reset the cache if the emulator ever runs long enough to overflow the id
+    ++cache_batch_id;
+    for (auto& entry : cache)
+        entry.id = 0;
+}
+
+struct VsOutput {
+    explicit VsOutput() = default;
+    VsOutput(VsOutput&& other) { batch_id = 0; }
+
+    Pica::Shader::OutputVertex vertex;
+    std::atomic<u32> batch_id;
+};
+static std::vector<VsOutput> vs_output;
+while (vs_output.size() < regs.pipeline.num_vertices) {
+    vs_output.emplace_back();
+}
+
 PrimitiveAssembler<Shader::OutputVertex>& primitive_assembler = g_state.primitive_assembler;

 if (g_debug_context && g_debug_context->recorder) {
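Here cache_batch_id acts as a generation counter: rather than clearing all 0x10000 entries between draw calls, an entry counts as valid only while its stored id matches the current batch id, making invalidation O(1). A standalone sketch of that idea, with hypothetical names (Entry, NewBatch, Lookup, Store are not the commit's code):

// Generation-counter cache invalidation, minimal sketch.
#include <array>
#include <cstddef>
#include <cstdint>

struct Entry {
    std::uint32_t value = 0;
    std::uint32_t generation = 0; // entry is valid only while this matches current_gen
};

static std::array<Entry, 16> table;
static std::uint32_t current_gen = 0;

void NewBatch() {
    ++current_gen; // O(1) "clear": every existing entry becomes stale at once
}

bool Lookup(std::size_t slot, std::uint32_t& out) {
    if (table[slot].generation != current_gen)
        return false; // stale entry left over from an earlier batch
    out = table[slot].value;
    return true;
}

void Store(std::size_t slot, std::uint32_t v) {
    table[slot] = {v, current_gen};
}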
@@ -314,20 +345,7 @@ static void WritePicaReg(u32 id, u32 value, u32 mask) {
 }
 }

-DebugUtils::MemoryAccessTracker memory_accesses;
-
-// Simple circular-replacement vertex cache
-// The size has been tuned for optimal balance between hit-rate and the cost of lookup
-const size_t VERTEX_CACHE_SIZE = 32;
-std::array<u16, VERTEX_CACHE_SIZE> vertex_cache_ids;
-std::array<Shader::AttributeBuffer, VERTEX_CACHE_SIZE> vertex_cache;
-Shader::AttributeBuffer vs_output;
-
-unsigned int vertex_cache_pos = 0;
-vertex_cache_ids.fill(-1);
-
 auto* shader_engine = Shader::GetEngine();
-Shader::UnitState shader_unit;
-
 shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset);
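For reference, the scheme removed above is a fixed-size cache with circular (round-robin) replacement: lookups scan every slot, and each insertion overwrites the oldest one. A minimal sketch with hypothetical names (Slot, Find, Insert are illustrative, not the removed code):

// Circular-replacement cache, minimal sketch.
#include <array>
#include <cstddef>
#include <cstdint>

constexpr std::size_t CACHE_SIZE = 32;
constexpr std::uint16_t INVALID_ID = 0xFFFF; // sentinel, mirrors vertex_cache_ids.fill(-1)

struct Slot {
    std::uint16_t id = INVALID_ID;
    int value = 0;
};
static std::array<Slot, CACHE_SIZE> slots;
static std::size_t next_slot = 0;

bool Find(std::uint16_t id, int& out) {
    for (const Slot& s : slots) { // linear probe over all slots
        if (s.id == id) {
            out = s.value;
            return true;
        }
    }
    return false;
}

void Insert(std::uint16_t id, int value) {
    slots[next_slot] = {id, value};
    next_slot = (next_slot + 1) % CACHE_SIZE; // the oldest slot is evicted next
}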
@@ -336,66 +354,134 @@ static void WritePicaReg(u32 id, u32 value, u32 mask) {
 if (g_state.geometry_pipeline.NeedIndexInput())
     ASSERT(is_indexed);

-for (unsigned int index = 0; index < regs.pipeline.num_vertices; ++index) {
-    // Indexed rendering doesn't use the start offset
-    unsigned int vertex =
-        is_indexed ? (index_u16 ? index_address_16[index] : index_address_8[index])
-                   : (index + regs.pipeline.vertex_offset);
-
-    // -1 is a common special value used for primitive restart. Since it's unknown if
-    // the PICA supports it, and it would mess up the caching, guard against it here.
-    ASSERT(vertex != -1);
-
-    bool vertex_cache_hit = false;
-
-    if (is_indexed) {
-        if (g_state.geometry_pipeline.NeedIndexInput()) {
-            g_state.geometry_pipeline.SubmitIndex(vertex);
-            continue;
-        }
-
-        if (g_debug_context && Pica::g_debug_context->recorder) {
-            int size = index_u16 ? 2 : 1;
-            memory_accesses.AddAccess(base_address + index_info.offset + size * index,
-                                      size);
-        }
-
-        for (unsigned int i = 0; i < VERTEX_CACHE_SIZE; ++i) {
-            if (vertex == vertex_cache_ids[i]) {
-                vs_output = vertex_cache[i];
-                vertex_cache_hit = true;
-                break;
-            }
-        }
-    }
-
-    if (!vertex_cache_hit) {
-        // Initialize data for the current vertex
-        Shader::AttributeBuffer input;
-        loader.LoadVertex(base_address, index, vertex, input, memory_accesses);
-
-        // Send to vertex shader
-        if (g_debug_context)
-            g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation,
-                                     (void*)&input);
-        shader_unit.LoadInput(regs.vs, input);
-        shader_engine->Run(g_state.vs, shader_unit);
-        shader_unit.WriteOutput(regs.vs, vs_output);
-
-        if (is_indexed) {
-            vertex_cache[vertex_cache_pos] = vs_output;
-            vertex_cache_ids[vertex_cache_pos] = vertex;
-            vertex_cache_pos = (vertex_cache_pos + 1) % VERTEX_CACHE_SIZE;
-        }
-    }
-
-    // Send to geometry pipeline
-    g_state.geometry_pipeline.SubmitVertex(vs_output);
-}
-
-for (auto& range : memory_accesses.ranges) {
-    g_debug_context->recorder->MemoryAccessed(Memory::GetPhysicalPointer(range.first),
-                                              range.second, range.first);
-}
+auto UnitLoop = [&](bool single_thread,
+                    u32 index_start,
+                    u32 index_end) {
+    DebugUtils::MemoryAccessTracker memory_accesses;
+    Shader::UnitState shader_unit;
+
+    for (unsigned int index = index_start; index < index_end; ++index) {
+        // Indexed rendering doesn't use the start offset
+        unsigned int vertex =
+            is_indexed ? (index_u16 ? index_address_16[index] : index_address_8[index])
+                       : (index + regs.pipeline.vertex_offset);
+
+        // -1 is a common special value used for primitive restart. Since it's unknown if
+        // the PICA supports it, and it would mess up the caching, guard against it here.
+        ASSERT(vertex != -1);
+
+        bool vertex_cache_hit = false;
+
+        Shader::AttributeBuffer output_attr_tmp;
+        Shader::AttributeBuffer& output_attr =
+            is_indexed ? cache[vertex].output_attr : output_attr_tmp;
+        Pica::Shader::OutputVertex output_vertex_tmp;
+        Pica::Shader::OutputVertex& output_vertex =
+            is_indexed ? cache[vertex].output_vertex : output_vertex_tmp;
+
+        if (is_indexed) {
+            if (single_thread && g_state.geometry_pipeline.NeedIndexInput()) {
+                g_state.geometry_pipeline.SubmitIndex(vertex);
+                continue;
+            }
+
+            if (g_debug_context && Pica::g_debug_context->recorder) {
+                int size = index_u16 ? 2 : 1;
+                memory_accesses.AddAccess(base_address + index_info.offset + size * index,
+                                          size);
+            }
+
+            if (single_thread) {
+                if (cache[vertex].id.load(std::memory_order_relaxed) == cache_batch_id) {
+                    vertex_cache_hit = true;
+                }
+            } else if (cache[vertex].id.load(std::memory_order_acquire) == cache_batch_id) {
+                vertex_cache_hit = true;
+            }
+            // Set the "writing" flag and check its previous status
+            else if (cache[vertex].writing.test_and_set(std::memory_order_acquire)) {
+                // Another thread is writing into this entry; spin until it's done
+                while (cache[vertex].writing.test_and_set(std::memory_order_acquire))
+                    ;
+                cache[vertex].writing.clear(std::memory_order_release);
+                vertex_cache_hit = true;
+            }
+        }
+
+        if (!vertex_cache_hit) {
+            // Initialize data for the current vertex
+            Shader::AttributeBuffer input;
+            loader.LoadVertex(base_address, index, vertex, input, memory_accesses);
+
+            // Send to vertex shader
+            if (g_debug_context)
+                g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation, &input);
+            shader_unit.LoadInput(regs.vs, input);
+            shader_engine->Run(g_state.vs, shader_unit);
+
+            shader_unit.WriteOutput(regs.vs, output_attr);
+            if (!single_thread)
+                output_vertex =
+                    Shader::OutputVertex::FromAttributeBuffer(regs.rasterizer, output_attr);
+
+            if (is_indexed) {
+                if (single_thread) {
+                    cache[vertex].id.store(cache_batch_id, std::memory_order_relaxed);
+                } else {
+                    cache[vertex].id.store(cache_batch_id, std::memory_order_release);
+                    cache[vertex].writing.clear(std::memory_order_release);
+                }
+            }
+        }
+
+        if (single_thread) {
+            // Send to geometry pipeline
+            g_state.geometry_pipeline.SubmitVertex(output_attr);
+        } else {
+            vs_output[index].vertex = output_vertex;
+            vs_output[index].batch_id.store(cache_batch_id, std::memory_order_release);
+        }
+    }
+
+    static std::mutex dbg_mtx;
+    if (!memory_accesses.ranges.empty()) {
+        std::lock_guard<std::mutex> lock(dbg_mtx);
+        for (auto& range : memory_accesses.ranges) {
+            g_debug_context->recorder->MemoryAccessed(Memory::GetPhysicalPointer(range.first),
+                                                      range.second, range.first);
+        }
+    }
+};
+
+constexpr unsigned int VS_UNITS = 3;
+const bool use_gs = regs.pipeline.use_gs == PipelineRegs::UseGS::Yes;
+
+auto& thread_pool = Common::ThreadPool::GetPool();
+unsigned int num_threads = use_gs ? 1 : thread_pool.total_threads(); // VS_UNITS;
+
+if (num_threads == 1) {
+    UnitLoop(true, 0, regs.pipeline.num_vertices);
+} else {
+    const u32 range = std::max(regs.pipeline.num_vertices / num_threads + 1, 50u);
+    for (unsigned int thread_id = 0; thread_id < num_threads; ++thread_id) {
+        const u32 loop_start = range * thread_id;
+        const u32 loop_end = loop_start + range;
+        if (loop_end >= regs.pipeline.num_vertices) {
+            thread_pool.push(UnitLoop, false, loop_start, regs.pipeline.num_vertices);
+            break;
+        }
+        thread_pool.push(UnitLoop, false, loop_start, loop_end);
+    }
+    for (unsigned int index = 0; index < regs.pipeline.num_vertices; ++index) {
+        // Spin until the worker that owns this vertex has published its result
+        while (vs_output[index].batch_id.load(std::memory_order_acquire) != cache_batch_id)
+            ;
+        using Pica::Shader::OutputVertex;
+        primitive_assembler.SubmitVertex(
+            vs_output[index].vertex,
+            [](const OutputVertex& v0, const OutputVertex& v1, const OutputVertex& v2) {
+                VideoCore::g_renderer->Rasterizer()->AddTriangle(v0, v1, v2);
+            });
+    }
+}

 VideoCore::g_renderer->Rasterizer()->DrawTriangles();
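Two details of the threaded path are easy to miss. First, the work split: with, say, 1000 vertices and 4 worker threads, range = max(1000 / 4 + 1, 50) = 251, so the workers receive [0, 251), [251, 502), [502, 753) and [753, 1000); the 50-vertex floor keeps tiny batches from being sharded across threads for no gain. Second, the per-entry writing flag acts as a tiny spinlock: test_and_set returning true means another worker owns the entry, so the thread spins until the owner clears the flag, then treats the entry as a cache hit. A standalone sketch of that flag-as-spinlock idiom (hypothetical names, not the commit's code):

// atomic_flag as a per-entry spinlock, minimal sketch.
#include <atomic>

static std::atomic_flag entry_lock = ATOMIC_FLAG_INIT;

// Returns true if this thread acquired the entry for writing, false if it
// had to wait for another owner (the "treat as hit" path in the commit).
bool AcquireOrWait() {
    if (!entry_lock.test_and_set(std::memory_order_acquire))
        return true; // flag was clear: we own the entry now
    // Someone else owns it: spin until they release...
    while (entry_lock.test_and_set(std::memory_order_acquire))
        ;
    // ...then restore the flag, since we only borrowed it to observe completion.
    entry_lock.clear(std::memory_order_release);
    return false;
}

void Release() {
    entry_lock.clear(std::memory_order_release); // pairs with the acquire above
}

The release store on cache[vertex].id together with the acquire load in other workers is what makes the cached output_attr and output_vertex safely visible across threads; the relaxed orderings on the single-threaded path avoid that cost when no other thread can observe the entry.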