threaded vertex rendering

Phantom 2017-09-27 07:23:55 +02:00
parent 5620327e03
commit 07ff3527a0
3 changed files with 267 additions and 62 deletions

src/common/CMakeLists.txt

@@ -75,6 +75,7 @@ set(HEADERS
synchronized_wrapper.h
telemetry.h
thread.h
thread_pool.h
thread_queue_list.h
timer.h
vector_math.h

src/common/thread_pool.h (new file, +118)

@@ -0,0 +1,118 @@
// Copyright 2016 Citra Emulator Project / PPSSPP Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <atomic>
#include <condition_variable>
#include <functional>
#include <future>
#include <mutex>
#include <thread>
#include <vector>
#include <boost/lockfree/spsc_queue.hpp>
#include "common/assert.h"

namespace Common {
class ThreadPool {
private:
explicit ThreadPool(unsigned int num_threads) :
num_threads(num_threads),
workers(num_threads) {
ASSERT(num_threads);
}
public:
// Singleton: one pool shared by the whole process, sized to the hardware thread count
static ThreadPool& GetPool() {
static ThreadPool thread_pool(std::thread::hardware_concurrency());
return thread_pool;
}
// When enabled, idle workers busy-wait instead of sleeping on the condition
// variable, trading CPU time for lower task-dispatch latency
void set_spinlocking(bool enable) {
for (auto& worker : workers) {
worker.spinlock_enabled = enable;
if (enable) {
// Lock and unlock the mutex once so a worker that has passed its queue
// check but not yet entered cv.wait() cannot miss this notification
std::unique_lock<std::mutex> lock(worker.mutex);
lock.unlock();
worker.cv.notify_one();
}
}
}
}
// Round-robin tasks across workers; returns a std::future for the task's result
template <typename F, typename... Args>
auto push(F&& f, Args&&... args) {
auto ret = workers[next_worker].push(std::forward<F>(f), std::forward<Args>(args)...);
next_worker = (next_worker + 1) % num_threads;
return ret;
}
unsigned int total_threads() const {
return num_threads;
}
private:
class Worker {
public:
Worker() :
exit_loop(false),
spinlock_enabled(false),
thread([this] { loop(); }) {
}
~Worker() {
{
// Set exit_loop while holding the mutex so the worker cannot miss it
// between checking the flag and entering cv.wait()
std::unique_lock<std::mutex> lock(mutex);
exit_loop = true;
}
cv.notify_one();
thread.join();
}
void loop() {
for (;;) {
// Drain every queued task before considering whether to sleep
while (queue.consume_all([](const auto& f) { f(); }))
;
if (spinlock_enabled)
continue; // busy-wait for more work instead of sleeping
std::unique_lock<std::mutex> lock(mutex);
// Re-check under the lock: a push may have raced with the drain above
if (queue.read_available())
continue;
if (exit_loop)
break;
cv.wait(lock);
}
}
template <typename F, typename... Args>
auto push(F&& f, Args&&... args) {
auto task = std::make_shared<std::packaged_task<decltype(f(args...))()>>(
std::bind(std::forward<F>(f), std::forward<Args>(args)...));
// The lock-free queue has a fixed capacity; yield until a slot frees up
while (!queue.push([task] { (*task)(); }))
std::this_thread::yield();
if (!spinlock_enabled.load(std::memory_order_relaxed)) {
// See set_spinlocking for why the mutex is taken before notifying
std::unique_lock<std::mutex> lock(mutex);
lock.unlock();
cv.notify_one();
}
return task->get_future();
}
bool exit_loop; // Protected by mutex
std::atomic<bool> spinlock_enabled;
std::mutex mutex;
std::condition_variable cv;
boost::lockfree::spsc_queue<std::function<void()>, boost::lockfree::capacity<100>> queue;
std::thread thread;
};
const unsigned int num_threads;
unsigned int next_worker = 0;
std::vector<Worker> workers;
};
} // namespace Common
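
As an aside, a minimal sketch of how a call site would use this pool (illustrative only, not part of the commit):

    #include "common/thread_pool.h"

    int add(int a, int b) {
        return a + b;
    }

    void example() {
        auto& pool = Common::ThreadPool::GetPool();
        // push() forwards the callable and its arguments to a worker thread and
        // returns a std::future for the result
        std::future<int> result = pool.push(add, 2, 3);
        ASSERT(result.get() == 5); // get() blocks until the worker has run the task
    }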

src/video_core/command_processor.cpp

@@ -9,6 +9,7 @@
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/thread_pool.h"
#include "common/vector_math.h"
#include "core/hle/service/gsp_gpu.h"
#include "core/hw/gpu.h"
@@ -298,6 +299,36 @@ static void WritePicaReg(u32 id, u32 value, u32 mask) {
const u16* index_address_16 = reinterpret_cast<const u16*>(index_address_8);
bool index_u16 = index_info.format != 0;
struct CacheEntry {
Shader::AttributeBuffer output_attr;
Shader::OutputVertex output_vertex;
std::atomic<u32> id;
std::atomic_flag writing = ATOMIC_FLAG_INIT; // Set while a thread is writing into this entry
};
// One entry per possible u16 vertex index
static std::array<CacheEntry, 0x10000> cache;
// Used as a means to invalidate entries from the previous batch without clearing the cache
static u32 cache_batch_id = std::numeric_limits<u32>::max();
++cache_batch_id;
if (cache_batch_id == 0) { // Reset the cache if the emulator runs long enough for the id to overflow
++cache_batch_id;
for (auto& entry : cache)
entry.id = 0;
}
struct VsOutput {
explicit VsOutput() = default;
// std::atomic is not movable, so moving an entry (e.g. on vector growth) just
// resets its batch id, marking the slot as not yet written for any batch
VsOutput(VsOutput&& other) { batch_id = 0; }
Pica::Shader::OutputVertex vertex;
std::atomic<u32> batch_id;
};
static std::vector<VsOutput> vs_output;
while (vs_output.size() < regs.pipeline.num_vertices) {
vs_output.emplace_back();
}
PrimitiveAssembler<Shader::OutputVertex>& primitive_assembler = g_state.primitive_assembler;
if (g_debug_context && g_debug_context->recorder) {
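
The batch id above works as a generation counter: bumping it invalidates all 0x10000 cache entries at once, because an entry only counts as valid while its stored id matches the current batch. A standalone sketch of the pattern (hypothetical names, not from this commit):

    #include <array>
    #include <cstdint>

    struct Entry {
        uint32_t value;
        uint32_t generation; // which batch last wrote this slot
    };
    static std::array<Entry, 256> cache{};
    static uint32_t current_generation = 1;

    // Retires every entry in O(1); no per-entry clearing needed
    void new_batch() {
        ++current_generation;
    }

    bool lookup(uint8_t key, uint32_t& out) {
        if (cache[key].generation != current_generation)
            return false; // written in an earlier batch, treat as empty
        out = cache[key].value;
        return true;
    }

    void store(uint8_t key, uint32_t value) {
        cache[key] = {value, current_generation};
    }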
@@ -314,20 +345,7 @@ static void WritePicaReg(u32 id, u32 value, u32 mask) {
}
}
DebugUtils::MemoryAccessTracker memory_accesses;
// Simple circular-replacement vertex cache
// The size has been tuned for optimal balance between hit-rate and the cost of lookup
const size_t VERTEX_CACHE_SIZE = 32;
std::array<u16, VERTEX_CACHE_SIZE> vertex_cache_ids;
std::array<Shader::AttributeBuffer, VERTEX_CACHE_SIZE> vertex_cache;
Shader::AttributeBuffer vs_output;
unsigned int vertex_cache_pos = 0;
vertex_cache_ids.fill(-1);
auto* shader_engine = Shader::GetEngine();
Shader::UnitState shader_unit;
shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset);
@@ -336,7 +354,13 @@ static void WritePicaReg(u32 id, u32 value, u32 mask) {
if (g_state.geometry_pipeline.NeedIndexInput())
ASSERT(is_indexed);
for (unsigned int index = 0; index < regs.pipeline.num_vertices; ++index) {
auto UnitLoop = [&](bool single_thread, u32 index_start, u32 index_end) {
// Each invocation keeps its own tracker and shader unit so workers share no state
DebugUtils::MemoryAccessTracker memory_accesses;
Shader::UnitState shader_unit;
for (unsigned int index = index_start; index < index_end; ++index) {
// Indexed rendering doesn't use the start offset
unsigned int vertex =
is_indexed ? (index_u16 ? index_address_16[index] : index_address_8[index])
@@ -348,8 +372,14 @@ static void WritePicaReg(u32 id, u32 value, u32 mask) {
bool vertex_cache_hit = false;
Shader::AttributeBuffer output_attr_tmp;
Shader::AttributeBuffer& output_attr = is_indexed ? cache[vertex].output_attr : output_attr_tmp;
Pica::Shader::OutputVertex output_vertex_tmp;
Pica::Shader::OutputVertex& output_vertex = is_indexed ? cache[vertex].output_vertex : output_vertex_tmp;
if (is_indexed) {
if (g_state.geometry_pipeline.NeedIndexInput()) {
if (single_thread && g_state.geometry_pipeline.NeedIndexInput()) {
g_state.geometry_pipeline.SubmitIndex(vertex);
continue;
}
@@ -360,13 +390,21 @@ static void WritePicaReg(u32 id, u32 value, u32 mask) {
size);
}
for (unsigned int i = 0; i < VERTEX_CACHE_SIZE; ++i) {
if (vertex == vertex_cache_ids[i]) {
vs_output = vertex_cache[i];
if (single_thread) {
if (cache[vertex].id.load(std::memory_order_relaxed) == cache_batch_id) {
vertex_cache_hit = true;
break;
}
} else if (cache[vertex].id.load(std::memory_order_acquire) == cache_batch_id) {
vertex_cache_hit = true;
} else if (cache[vertex].writing.test_and_set(std::memory_order_acquire)) {
// The "writing" flag was already set: another thread is filling this entry,
// so spin until it finishes, then use the cached result
while (cache[vertex].writing.test_and_set(std::memory_order_acquire))
;
cache[vertex].writing.clear(std::memory_order_release);
vertex_cache_hit = true;
}
}
if (!vertex_cache_hit) {
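
The writing flag gives each cache entry a tiny spinlock: test_and_set returns the previous value, so getting back true means another thread already owns the slot. The same pattern in isolation (a sketch, using nothing beyond <atomic>):

    #include <atomic>

    std::atomic_flag flag = ATOMIC_FLAG_INIT;

    void lock() {
        // Spin while the previous value was already set, i.e. someone else holds the lock
        while (flag.test_and_set(std::memory_order_acquire))
            ;
    }

    void unlock() {
        flag.clear(std::memory_order_release); // publishes writes made while holding the lock
    }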
@@ -376,27 +414,75 @@ static void WritePicaReg(u32 id, u32 value, u32 mask) {
// Send to vertex shader
if (g_debug_context)
g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation,
(void*)&input);
g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation, &input);
shader_unit.LoadInput(regs.vs, input);
shader_engine->Run(g_state.vs, shader_unit);
shader_unit.WriteOutput(regs.vs, vs_output);
shader_unit.WriteOutput(regs.vs, output_attr);
if (!single_thread)
output_vertex = Shader::OutputVertex::FromAttributeBuffer(regs.rasterizer, output_attr);
if (is_indexed) {
vertex_cache[vertex_cache_pos] = vs_output;
vertex_cache_ids[vertex_cache_pos] = vertex;
vertex_cache_pos = (vertex_cache_pos + 1) % VERTEX_CACHE_SIZE;
if (single_thread) {
cache[vertex].id.store(cache_batch_id, std::memory_order_relaxed);
} else {
cache[vertex].id.store(cache_batch_id, std::memory_order_release);
cache[vertex].writing.clear(std::memory_order_release);
}
}
}
if (single_thread) {
// Send to geometry pipeline
g_state.geometry_pipeline.SubmitVertex(vs_output);
g_state.geometry_pipeline.SubmitVertex(output_attr);
} else {
vs_output[index].vertex = output_vertex;
vs_output[index].batch_id.store(cache_batch_id, std::memory_order_release);
}
}
// Serializes access to the shared debug recorder across worker threads
static std::mutex dbg_mtx;
if (!memory_accesses.ranges.empty()) {
std::lock_guard<std::mutex> lock(dbg_mtx);
for (auto& range : memory_accesses.ranges) {
g_debug_context->recorder->MemoryAccessed(Memory::GetPhysicalPointer(range.first),
range.second, range.first);
}
}
};
const bool use_gs = regs.pipeline.use_gs == PipelineRegs::UseGS::Yes;
auto& thread_pool = Common::ThreadPool::GetPool();
// The geometry shader pipeline is stateful, so it must run on a single thread
const unsigned int num_threads = use_gs ? 1 : thread_pool.total_threads();
if (num_threads == 1) {
UnitLoop(true, 0, regs.pipeline.num_vertices);
} else {
// Give each worker a contiguous chunk of at least 50 vertices so small draws
// don't pay per-task dispatch overhead
const u32 range = std::max(regs.pipeline.num_vertices / num_threads + 1, 50u);
for (unsigned int thread_id = 0; thread_id < num_threads; ++thread_id) {
const u32 loop_start = range * thread_id;
const u32 loop_end = loop_start + range;
if (loop_end >= regs.pipeline.num_vertices) {
thread_pool.push(UnitLoop, false, loop_start, regs.pipeline.num_vertices);
break;
}
thread_pool.push(UnitLoop, false, loop_start, loop_end);
}
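
To make the chunking above concrete (numbers are illustrative): 1000 vertices on 8 threads gives range = max(1000 / 8 + 1, 50) = 126, so the workers receive [0, 126), [126, 252), ... with the final chunk clamped to num_vertices. A standalone sketch of the same split:

    #include <algorithm>
    #include <cstdio>

    void split(unsigned num_vertices, unsigned num_threads) {
        // At least 50 vertices per chunk, so small draws don't pay per-task overhead
        const unsigned range = std::max(num_vertices / num_threads + 1, 50u);
        for (unsigned t = 0; t < num_threads; ++t) {
            const unsigned start = range * t;
            if (start + range >= num_vertices) {
                std::printf("chunk %u: [%u, %u)\n", t, start, num_vertices);
                break; // the last chunk takes the remainder; later threads get nothing
            }
            std::printf("chunk %u: [%u, %u)\n", t, start, start + range);
        }
    }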
for (unsigned int index = 0; index < regs.pipeline.num_vertices; ++index) {
// Spin until the worker that owns this index publishes its output vertex
while (vs_output[index].batch_id.load(std::memory_order_acquire) != cache_batch_id)
;
using Pica::Shader::OutputVertex;
primitive_assembler.SubmitVertex(vs_output[index].vertex,
[] (const OutputVertex& v0,
const OutputVertex& v1,
const OutputVertex& v2) {
VideoCore::g_renderer->Rasterizer()->AddTriangle(v0, v1, v2);
});
}
}
VideoCore::g_renderer->Rasterizer()->DrawTriangles();
if (g_debug_context) {