mirror of https://github.com/citra-emu/citra.git
synced 2024-11-26 19:50:05 +00:00

Merge branch 'threads_vtx' of https://github.com/Phanto-m/citra

# Conflicts:
#	src/video_core/command_processor.cpp
#	src/video_core/renderer_opengl/gl_rasterizer.cpp

This commit is contained in:
commit adcbdb2b5f
@@ -76,6 +76,7 @@ set(HEADERS
    synchronized_wrapper.h
    telemetry.h
    thread.h
    thread_pool.h
    thread_queue_list.h
    timer.h
    vector_math.h
120  src/common/thread_pool.h  Normal file
@@ -0,0 +1,120 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <condition_variable>
#include <functional>
#include <future>
#include <mutex>
#include <thread>
#include <vector>

#include "common/assert.h"

namespace Common {

// Fixed-size pool of worker threads; tasks are handed out round-robin, and
// each worker consumes from its own bounded queue.
class ThreadPool {
private:
    explicit ThreadPool(size_t num_threads) : num_threads(num_threads), workers(num_threads) {
        ASSERT(num_threads);
    }

public:
    static ThreadPool& GetPool() {
        static ThreadPool thread_pool(std::thread::hardware_concurrency());
        return thread_pool;
    }

    template <typename F, typename... Args>
    auto push(F&& f, Args&&... args) {
        auto ret = workers[next_worker].push(std::forward<F>(f), std::forward<Args>(args)...);
        next_worker = (next_worker + 1) % num_threads;
        return ret;
    }

    size_t total_threads() const {
        return num_threads;
    }

private:
    // Bounded container guarded by a mutex/condition variable. Note that
    // elements are pushed to and popped from the back of the vector, so
    // retrieval order is LIFO.
    template <typename T>
    class ThreadsafeQueue {
    private:
        const size_t capacity;
        std::vector<T> queue_storage;
        std::mutex mutex;
        std::condition_variable queue_changed;

    public:
        explicit ThreadsafeQueue(const size_t capacity) : capacity(capacity) {
            queue_storage.reserve(capacity);
        }

        void push(const T& element) {
            std::unique_lock<std::mutex> lock(mutex);
            while (queue_storage.size() >= capacity) {
                queue_changed.wait(lock);
            }
            queue_storage.push_back(element);
            queue_changed.notify_one();
        }

        T Pop() {
            std::unique_lock<std::mutex> lock(mutex);
            while (queue_storage.empty()) {
                queue_changed.wait(lock);
            }
            T element(std::move(queue_storage.back()));
            queue_storage.pop_back();
            queue_changed.notify_one();
            return element;
        }

        void push(T&& element) {
            std::unique_lock<std::mutex> lock(mutex);
            while (queue_storage.size() >= capacity) {
                queue_changed.wait(lock);
            }
            queue_storage.emplace_back(std::move(element));
            queue_changed.notify_one();
        }
    };

    class Worker {
    private:
        ThreadsafeQueue<std::function<void()>> queue;
        std::thread thread;
        static constexpr size_t MAX_QUEUE_CAPACITY = 50;

    public:
        Worker() : queue(MAX_QUEUE_CAPACITY), thread([this] { Loop(); }) {}

        ~Worker() {
            queue.push(nullptr); // Exit the loop
            thread.join();
        }

        void Loop() {
            while (true) {
                std::function<void()> fn(queue.Pop());
                if (!fn) // a nullptr function is the signal to exit the loop
                    break;
                fn();
            }
        }

        // Wraps the callable in a std::packaged_task so callers can block on
        // the returned std::future until the task has run.
        template <typename F, typename... Args>
        auto push(F&& f, Args&&... args) {
            auto task = std::make_shared<std::packaged_task<decltype(f(args...))()>>(
                std::bind(std::forward<F>(f), std::forward<Args>(args)...));
            queue.push([task] { (*task)(); });
            return task->get_future();
        }
    };

    const size_t num_threads;
    size_t next_worker = 0;
    std::vector<Worker> workers;
};

} // namespace Common
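For reference, a minimal usage sketch of the pool above — illustrative only, not part of the commit; the task lambda and values are made up:

// Illustrative sketch: submit work to the singleton pool and wait on the future.
#include <cstdio>
#include "common/thread_pool.h"

int main() {
    auto& pool = Common::ThreadPool::GetPool();

    // push() wraps the callable in a std::packaged_task and returns its future.
    auto sum = pool.push([](int a, int b) { return a + b; }, 2, 3);

    // get() blocks until a worker thread has run the task.
    std::printf("2 + 3 = %d\n", sum.get());
    return 0;
}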
@@ -4,11 +4,13 @@

#include <array>
#include <cstddef>
#include <future>
#include <memory>
#include <utility>
#include "common/assert.h"
#include "common/logging/log.h"
#include "common/microprofile.h"
#include "common/thread_pool.h"
#include "common/vector_math.h"
#include "core/hle/service/gsp_gpu.h"
#include "core/hw/gpu.h"

@@ -119,228 +121,6 @@ static void WriteUniformFloatReg(ShaderRegs& config, Shader::ShaderSetup& setup,
    }
}

static void LoadDefaultVertexAttributes(u32 register_value) {
    auto& regs = g_state.regs;

    // TODO: Does actual hardware indeed keep an intermediate buffer or does
    // it directly write the values?
    default_attr_write_buffer[default_attr_counter++] = register_value;

    // Default attributes are written in a packed format such that four float24 values are encoded
    // in three 32-bit numbers.
    // We write to internal memory once a full such vector is written.
    if (default_attr_counter >= 3) {
        default_attr_counter = 0;

        auto& setup = regs.pipeline.vs_default_attributes_setup;

        if (setup.index >= 16) {
            LOG_ERROR(HW_GPU, "Invalid VS default attribute index %d", (int)setup.index);
            return;
        }

        Math::Vec4<float24> attribute;

        // NOTE: The destination component order indeed is "backwards"
        attribute.w = float24::FromRaw(default_attr_write_buffer[0] >> 8);
        attribute.z = float24::FromRaw(((default_attr_write_buffer[0] & 0xFF) << 16) |
                                       ((default_attr_write_buffer[1] >> 16) & 0xFFFF));
        attribute.y = float24::FromRaw(((default_attr_write_buffer[1] & 0xFFFF) << 8) |
                                       ((default_attr_write_buffer[2] >> 24) & 0xFF));
        attribute.x = float24::FromRaw(default_attr_write_buffer[2] & 0xFFFFFF);

        LOG_TRACE(HW_GPU, "Set default VS attribute %x to (%f %f %f %f)", (int)setup.index,
                  attribute.x.ToFloat32(), attribute.y.ToFloat32(), attribute.z.ToFloat32(),
                  attribute.w.ToFloat32());

        // TODO: Verify that this actually modifies the register!
        if (setup.index < 15) {
            g_state.input_default_attributes.attr[setup.index] = attribute;
            setup.index++;
        } else {
            // Put each attribute into an immediate input buffer. When all specified immediate
            // attributes are present, the Vertex Shader is invoked and everything is sent to
            // the primitive assembler.

            auto& immediate_input = g_state.immediate.input_vertex;
            auto& immediate_attribute_id = g_state.immediate.current_attribute;

            immediate_input.attr[immediate_attribute_id] = attribute;

            if (immediate_attribute_id < regs.pipeline.max_input_attrib_index) {
                immediate_attribute_id += 1;
            } else {
                MICROPROFILE_SCOPE(GPU_Drawing);
                immediate_attribute_id = 0;

                auto* shader_engine = Shader::GetEngine();
                shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset);

                // Send to vertex shader
                if (g_debug_context)
                    g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation,
                                             static_cast<void*>(&immediate_input));
                Shader::UnitState shader_unit;
                Shader::AttributeBuffer output{};

                shader_unit.LoadInput(regs.vs, immediate_input);
                shader_engine->Run(g_state.vs, shader_unit);
                shader_unit.WriteOutput(regs.vs, output);

                // Send to geometry pipeline
                if (g_state.immediate.reset_geometry_pipeline) {
                    g_state.geometry_pipeline.Reconfigure();
                    g_state.immediate.reset_geometry_pipeline = false;
                }
                ASSERT(!g_state.geometry_pipeline.NeedIndexInput());
                g_state.geometry_pipeline.Setup(shader_engine);
                g_state.geometry_pipeline.SubmitVertex(output);

                // TODO: If drawing after every immediate mode triangle kills performance,
                // change it to flush triangles whenever a drawing config register changes
                // See: https://github.com/citra-emu/citra/pull/2866#issuecomment-327011550
                VideoCore::g_renderer->Rasterizer()->DrawTriangles();
                if (g_debug_context) {
                    g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch, nullptr);
                }
            }
        }
    }
}

static void Draw(u32 command_id) {
    MICROPROFILE_SCOPE(GPU_Drawing);
    auto& regs = g_state.regs;

#if PICA_LOG_TEV
    DebugUtils::DumpTevStageConfig(regs.GetTevStages());
#endif
    if (g_debug_context)
        g_debug_context->OnEvent(DebugContext::Event::IncomingPrimitiveBatch, nullptr);

    // Processes information about internal vertex attributes to figure out how a vertex is
    // loaded.
    // Later, these can be compiled and cached.
    const u32 base_address = regs.pipeline.vertex_attributes.GetPhysicalBaseAddress();
    VertexLoader loader(regs.pipeline);

    // Load vertices
    bool is_indexed = (command_id == PICA_REG_INDEX(pipeline.trigger_draw_indexed));

    const auto& index_info = regs.pipeline.index_array;
    const u8* index_address_8 = Memory::GetPhysicalPointer(base_address + index_info.offset);
    if (!index_address_8) {
        LOG_CRITICAL(HW_GPU, "Invalid index_address_8 %08x", index_address_8);
        return;
    }
    const u16* index_address_16 = reinterpret_cast<const u16*>(index_address_8);
    bool index_u16 = index_info.format != 0;

    PrimitiveAssembler<Shader::OutputVertex>& primitive_assembler = g_state.primitive_assembler;

    if (g_debug_context && g_debug_context->recorder) {
        for (int i = 0; i < 3; ++i) {
            const auto texture = regs.texturing.GetTextures()[i];
            if (!texture.enabled)
                continue;

            u8* texture_data = Memory::GetPhysicalPointer(texture.config.GetPhysicalAddress());
            g_debug_context->recorder->MemoryAccessed(
                texture_data, Pica::TexturingRegs::NibblesPerPixel(texture.format) *
                                  texture.config.width / 2 * texture.config.height,
                texture.config.GetPhysicalAddress());
        }
    }

    DebugUtils::MemoryAccessTracker memory_accesses;

    // Simple circular-replacement vertex cache
    // The size has been tuned for optimal balance between hit-rate and the cost of lookup
    const size_t VERTEX_CACHE_SIZE = 32;
    std::array<u16, VERTEX_CACHE_SIZE> vertex_cache_ids;
    std::array<Shader::AttributeBuffer, VERTEX_CACHE_SIZE> vertex_cache;
    Shader::AttributeBuffer vs_output;

    unsigned int vertex_cache_pos = 0;
    vertex_cache_ids.fill(-1);

    auto* shader_engine = Shader::GetEngine();
    Shader::UnitState shader_unit;

    shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset);

    g_state.geometry_pipeline.Reconfigure();
    g_state.geometry_pipeline.Setup(shader_engine);
    if (g_state.geometry_pipeline.NeedIndexInput())
        ASSERT(is_indexed);

    for (unsigned int index = 0; index < regs.pipeline.num_vertices; ++index) {
        // Indexed rendering doesn't use the start offset
        unsigned int vertex = is_indexed
                                  ? (index_u16 ? index_address_16[index] : index_address_8[index])
                                  : (index + regs.pipeline.vertex_offset);

        // -1 is a common special value used for primitive restart. Since it's unknown if
        // the PICA supports it, and it would mess up the caching, guard against it here.
        ASSERT(vertex != -1);

        bool vertex_cache_hit = false;

        if (is_indexed) {
            if (g_state.geometry_pipeline.NeedIndexInput()) {
                g_state.geometry_pipeline.SubmitIndex(vertex);
                continue;
            }

            if (g_debug_context && Pica::g_debug_context->recorder) {
                int size = index_u16 ? 2 : 1;
                memory_accesses.AddAccess(base_address + index_info.offset + size * index, size);
            }

            for (unsigned int i = 0; i < VERTEX_CACHE_SIZE; ++i) {
                if (vertex == vertex_cache_ids[i]) {
                    vs_output = vertex_cache[i];
                    vertex_cache_hit = true;
                    break;
                }
            }
        }

        if (!vertex_cache_hit) {
            // Initialize data for the current vertex
            Shader::AttributeBuffer input;
            loader.LoadVertex(base_address, index, vertex, input, memory_accesses);

            // Send to vertex shader
            if (g_debug_context)
                g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation,
                                         (void*)&input);
            shader_unit.LoadInput(regs.vs, input);
            shader_engine->Run(g_state.vs, shader_unit);
            shader_unit.WriteOutput(regs.vs, vs_output);

            if (is_indexed) {
                vertex_cache[vertex_cache_pos] = vs_output;
                vertex_cache_ids[vertex_cache_pos] = vertex;
                vertex_cache_pos = (vertex_cache_pos + 1) % VERTEX_CACHE_SIZE;
            }
        }

        // Send to geometry pipeline
        g_state.geometry_pipeline.SubmitVertex(vs_output);
    }

    for (auto& range : memory_accesses.ranges) {
        g_debug_context->recorder->MemoryAccessed(Memory::GetPhysicalPointer(range.first),
                                                  range.second, range.first);
    }

    VideoCore::g_renderer->Rasterizer()->DrawTriangles();
    if (g_debug_context) {
        g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch, nullptr);
    }
}

static void WritePicaReg(u32 id, u32 value, u32 mask) {
    auto& regs = g_state.regs;

@@ -390,9 +170,95 @@ static void WritePicaReg(u32 id, u32 value, u32 mask) {

    // Load default vertex input attributes
    case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[0], 0x233):
    case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[1], 0x234):
    case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[2], 0x235):
        LoadDefaultVertexAttributes(value);
    case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[2], 0x235): {
        // TODO: Does actual hardware indeed keep an intermediate buffer or does
        // it directly write the values?
        default_attr_write_buffer[default_attr_counter++] = value;

        // Default attributes are written in a packed format such that four float24 values are
        // encoded in three 32-bit numbers. We write to internal memory once a full such vector
        // is written.
        if (default_attr_counter >= 3) {
            default_attr_counter = 0;

            auto& setup = regs.pipeline.vs_default_attributes_setup;

            if (setup.index >= 16) {
                LOG_ERROR(HW_GPU, "Invalid VS default attribute index %d", (int)setup.index);
                break;
            }

            Math::Vec4<float24> attribute;

            // NOTE: The destination component order indeed is "backwards"
            attribute.w = float24::FromRaw(default_attr_write_buffer[0] >> 8);
            attribute.z = float24::FromRaw(((default_attr_write_buffer[0] & 0xFF) << 16) |
                                           ((default_attr_write_buffer[1] >> 16) & 0xFFFF));
            attribute.y = float24::FromRaw(((default_attr_write_buffer[1] & 0xFFFF) << 8) |
                                           ((default_attr_write_buffer[2] >> 24) & 0xFF));
            attribute.x = float24::FromRaw(default_attr_write_buffer[2] & 0xFFFFFF);

            LOG_TRACE(HW_GPU, "Set default VS attribute %x to (%f %f %f %f)", (int)setup.index,
                      attribute.x.ToFloat32(), attribute.y.ToFloat32(), attribute.z.ToFloat32(),
                      attribute.w.ToFloat32());

            // TODO: Verify that this actually modifies the register!
            if (setup.index < 15) {
                g_state.input_default_attributes.attr[setup.index] = attribute;
                setup.index++;
            } else {
                // Put each attribute into an immediate input buffer. When all specified immediate
                // attributes are present, the Vertex Shader is invoked and everything is sent to
                // the primitive assembler.

                auto& immediate_input = g_state.immediate.input_vertex;
                auto& immediate_attribute_id = g_state.immediate.current_attribute;

                immediate_input.attr[immediate_attribute_id] = attribute;

                if (immediate_attribute_id < regs.pipeline.max_input_attrib_index) {
                    immediate_attribute_id += 1;
                } else {
                    MICROPROFILE_SCOPE(GPU_Drawing);
                    immediate_attribute_id = 0;

                    auto* shader_engine = Shader::GetEngine();
                    shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset);

                    // Send to vertex shader
                    if (g_debug_context)
                        g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation,
                                                 static_cast<void*>(&immediate_input));
                    Shader::UnitState shader_unit;
                    Shader::AttributeBuffer output{};

                    shader_unit.LoadInput(regs.vs, immediate_input);
                    shader_engine->Run(g_state.vs, shader_unit);
                    shader_unit.WriteOutput(regs.vs, output);

                    // Send to geometry pipeline
                    if (g_state.immediate.reset_geometry_pipeline) {
                        g_state.geometry_pipeline.Reconfigure();
                        g_state.immediate.reset_geometry_pipeline = false;
                    }
                    ASSERT(!g_state.geometry_pipeline.NeedIndexInput());
                    g_state.geometry_pipeline.Setup(shader_engine);
                    g_state.geometry_pipeline.SubmitVertex(output);

                    // TODO: If drawing after every immediate mode triangle kills performance,
                    // change it to flush triangles whenever a drawing config register changes
                    // See: https://github.com/citra-emu/citra/pull/2866#issuecomment-327011550
                    VideoCore::g_renderer->Rasterizer()->DrawTriangles();
                    if (g_debug_context) {
                        g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch,
                                                 nullptr);
                    }
                }
            }
        }
        break;
    }

    case PICA_REG_INDEX(pipeline.gpu_mode):
        // This register likely just enables vertex processing and doesn't need any special handling
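The packed default-attribute format in the case above is easier to follow as a standalone decode of the three words. This sketch is illustrative only; it mirrors the shifts in the code, and the raw values are made up:

// Illustrative decode of the PICA default-attribute packing: four 24-bit
// raw float24 values (w, z, y, x) packed across three 32-bit words.
#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t buf[3] = {0xAAAAAABB, 0xBBBBCCCC, 0xCCDDDDDD}; // made-up raw words
    uint32_t w = buf[0] >> 8;                                         // -> 0xAAAAAA
    uint32_t z = ((buf[0] & 0xFF) << 16) | ((buf[1] >> 16) & 0xFFFF); // -> 0xBBBBBB
    uint32_t y = ((buf[1] & 0xFFFF) << 8) | ((buf[2] >> 24) & 0xFF);  // -> 0xCCCCCC
    uint32_t x = buf[2] & 0xFFFFFF;                                   // -> 0xDDDDDD
    std::printf("x=%06X y=%06X z=%06X w=%06X\n", x, y, z, w);
    return 0;
}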
@@ -411,9 +277,213 @@ static void WritePicaReg(u32 id, u32 value, u32 mask) {

    // It seems like these trigger vertex rendering
    case PICA_REG_INDEX(pipeline.trigger_draw):
    case PICA_REG_INDEX(pipeline.trigger_draw_indexed):
        Draw(id);
    case PICA_REG_INDEX(pipeline.trigger_draw_indexed): {
        MICROPROFILE_SCOPE(GPU_Drawing);
        const bool is_indexed = (id == PICA_REG_INDEX(pipeline.trigger_draw_indexed));

#if PICA_LOG_TEV
        DebugUtils::DumpTevStageConfig(regs.GetTevStages());
#endif
        if (g_debug_context)
            g_debug_context->OnEvent(DebugContext::Event::IncomingPrimitiveBatch, nullptr);

        struct CachedVertex {
            explicit CachedVertex() : batch(0), lock{ATOMIC_FLAG_INIT} {}
            // The copy constructor only exists so std::vector can resize; it
            // deliberately resets the entry instead of copying atomic state.
            CachedVertex(const CachedVertex& other) : CachedVertex() {}
            union {
                Shader::AttributeBuffer output_attr; // GS used
                Shader::OutputVertex output_vertex;  // No GS
            };
            std::atomic<u32> batch;
            std::atomic_flag lock;
        };
        static std::vector<CachedVertex> vs_output(0x10000);

        if (!is_indexed && vs_output.size() < regs.pipeline.num_vertices)
            vs_output.resize(regs.pipeline.num_vertices);

        // Used as a means to invalidate data from the previous batch without clearing it
        static u32 batch_id = std::numeric_limits<u32>::max();

        ++batch_id;
        if (batch_id == 0) { // reset cache when id overflows for safety
            ++batch_id;
            for (auto& entry : vs_output)
                entry.batch = 0;
        }

        // Processes information about internal vertex attributes to figure out how a vertex is
        // loaded.
        // Later, these can be compiled and cached.
        const u32 base_address = regs.pipeline.vertex_attributes.GetPhysicalBaseAddress();
        VertexLoader loader(regs.pipeline);

        const auto& index_info = regs.pipeline.index_array;
        const u8* index_address_8 = Memory::GetPhysicalPointer(base_address + index_info.offset);
        if (!index_address_8) {
            LOG_CRITICAL(HW_GPU, "Invalid index_address_8 %08x", index_address_8);
            return;
        }
        const u16* index_address_16 = reinterpret_cast<const u16*>(index_address_8);
        bool index_u16 = index_info.format != 0;

        auto VertexIndex = [&](unsigned int index) {
            // Indexed rendering doesn't use the start offset
            return is_indexed ? (index_u16 ? index_address_16[index] : index_address_8[index])
                              : (index + regs.pipeline.vertex_offset);
        };

        PrimitiveAssembler<Shader::OutputVertex>& primitive_assembler = g_state.primitive_assembler;

        if (g_debug_context && g_debug_context->recorder) {
            for (int i = 0; i < 3; ++i) {
                const auto texture = regs.texturing.GetTextures()[i];
                if (!texture.enabled)
                    continue;

                u8* texture_data = Memory::GetPhysicalPointer(texture.config.GetPhysicalAddress());
                g_debug_context->recorder->MemoryAccessed(
                    texture_data, Pica::TexturingRegs::NibblesPerPixel(texture.format) *
                                      texture.config.width / 2 * texture.config.height,
                    texture.config.GetPhysicalAddress());
            }
        }

        DebugUtils::MemoryAccessTracker memory_accesses;

        auto* shader_engine = Shader::GetEngine();

        shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset);

        const bool use_gs = regs.pipeline.use_gs == PipelineRegs::UseGS::Yes;

        auto VSUnitLoop = [&](u32 thread_id, auto num_threads) {
            // When num_threads is std::integral_constant<u32, 1>, single_thread is a
            // compile-time constant and the locking below is optimized out entirely.
            constexpr bool single_thread =
                std::is_same_v<std::integral_constant<u32, 1>, decltype(num_threads)>;
            Shader::UnitState shader_unit;

            for (unsigned int index = thread_id; index < regs.pipeline.num_vertices;
                 index += num_threads) {
                unsigned int vertex = VertexIndex(index);
                auto& cached_vertex = vs_output[is_indexed ? vertex : index];

                // -1 is a common special value used for primitive restart. Since it's unknown if
                // the PICA supports it, and it would mess up the caching, guard against it here.
                ASSERT(vertex != -1);

                if (is_indexed) {
                    if (g_debug_context && Pica::g_debug_context->recorder) {
                        int size = index_u16 ? 2 : 1;
                        memory_accesses.AddAccess(base_address + index_info.offset + size * index,
                                                  size);
                    }

                    if (!single_thread) {
                        // Try locking this vertex
                        if (cached_vertex.lock.test_and_set(std::memory_order_acquire)) {
                            // Another thread is processing this vertex
                            continue;
                        }
                        // Vertex is not being processed and is from the correct batch
                        else if (cached_vertex.batch.load(std::memory_order_acquire) == batch_id) {
                            // Unlock
                            cached_vertex.lock.clear(std::memory_order_release);
                            continue;
                        }
                    } else if (cached_vertex.batch.load(std::memory_order_relaxed) == batch_id) {
                        continue;
                    }
                }

                Shader::AttributeBuffer attribute_buffer;
                Shader::AttributeBuffer& output_attr =
                    use_gs ? cached_vertex.output_attr : attribute_buffer;

                // Initialize data for the current vertex
                loader.LoadVertex(base_address, index, vertex, attribute_buffer, memory_accesses);

                // Send to vertex shader
                if (g_debug_context)
                    g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation,
                                             &attribute_buffer);
                shader_unit.LoadInput(regs.vs, attribute_buffer);
                shader_engine->Run(g_state.vs, shader_unit);

                shader_unit.WriteOutput(regs.vs, output_attr);
                if (!use_gs)
                    cached_vertex.output_vertex =
                        Shader::OutputVertex::FromAttributeBuffer(regs.rasterizer, output_attr);

                if (!single_thread) {
                    cached_vertex.batch.store(batch_id, std::memory_order_release);
                    if (is_indexed) {
                        cached_vertex.lock.clear(std::memory_order_release);
                    }
                } else if (is_indexed) {
                    cached_vertex.batch.store(batch_id, std::memory_order_relaxed);
                }
            }
        };

        auto& thread_pool = Common::ThreadPool::GetPool();
        std::vector<std::future<void>> futures;

        constexpr unsigned int MIN_VERTICES_PER_THREAD = 10;
        unsigned int vs_threads = regs.pipeline.num_vertices / MIN_VERTICES_PER_THREAD;
        // Leave one hardware thread for the submission loop below
        vs_threads = std::min(vs_threads, std::thread::hardware_concurrency() - 1);

        if (!vs_threads) {
            VSUnitLoop(0, std::integral_constant<u32, 1>{});
        } else {
            for (unsigned int thread_id = 0; thread_id < vs_threads; ++thread_id) {
                futures.emplace_back(thread_pool.push(VSUnitLoop, thread_id, vs_threads));
            }
        }

        g_state.geometry_pipeline.Reconfigure();
        g_state.geometry_pipeline.Setup(shader_engine);
        if (g_state.geometry_pipeline.NeedIndexInput())
            ASSERT(is_indexed);

        for (unsigned int index = 0; index < regs.pipeline.num_vertices; ++index) {
            unsigned int vertex = VertexIndex(index);
            auto& cached_vertex = vs_output[is_indexed ? vertex : index];

            if (use_gs && is_indexed && g_state.geometry_pipeline.NeedIndexInput()) {
                g_state.geometry_pipeline.SubmitIndex(vertex);
                continue;
            }

            // Synchronize threads
            if (vs_threads) {
                while (cached_vertex.batch.load(std::memory_order_acquire) != batch_id) {
                    std::this_thread::yield();
                }
            }

            if (use_gs) {
                // Send to geometry pipeline
                g_state.geometry_pipeline.SubmitVertex(cached_vertex.output_attr);
            } else {
                primitive_assembler.SubmitVertex(
                    cached_vertex.output_vertex,
                    std::bind(
                        &std::decay_t<decltype(*VideoCore::g_renderer->Rasterizer())>::AddTriangle,
                        VideoCore::g_renderer->Rasterizer(), std::placeholders::_1,
                        std::placeholders::_2, std::placeholders::_3));
            }
        }

        for (auto& future : futures)
            future.get();

        for (auto& range : memory_accesses.ranges) {
            g_debug_context->recorder->MemoryAccessed(Memory::GetPhysicalPointer(range.first),
                                                      range.second, range.first);
        }

        VideoCore::g_renderer->Rasterizer()->DrawTriangles();

        if (g_debug_context) {
            g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch, nullptr);
        }

        break;
    }

    case PICA_REG_INDEX(gs.bool_uniforms):
        WriteUniformBoolReg(g_state.gs, g_state.regs.gs.bool_uniforms.Value());
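The `num_threads` parameter of `VSUnitLoop` above is either a runtime `u32` or a `std::integral_constant<u32, 1>`; in the latter case `single_thread` is true at compile time and the atomic/locking branches drop out of the single-threaded instantiation. A standalone sketch of that trick, with illustrative names not taken from the commit:

// Illustrative: one generic lambda, stamped out twice — once with a runtime
// thread count and once with a compile-time constant of 1.
#include <cstdint>
#include <iostream>
#include <type_traits>

int main() {
    auto loop = [](uint32_t thread_id, auto num_threads) {
        constexpr bool single_thread =
            std::is_same_v<std::integral_constant<uint32_t, 1>, decltype(num_threads)>;
        if (!single_thread) {
            // Only meaningful in the multi-threaded instantiation.
            std::cout << "worker " << thread_id << " striding by " << num_threads << '\n';
        } else {
            std::cout << "single-threaded fast path\n";
        }
    };

    loop(0, std::integral_constant<uint32_t, 1>{}); // single-thread instantiation
    loop(2, 4u);                                    // multi-thread instantiation
    return 0;
}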
@@ -235,6 +235,8 @@ class MemoryAccessTracker {

public:
    /// Record a particular memory access in the list
    void AddAccess(u32 paddr, u32 size) {
        std::lock_guard<std::mutex> lock(mutex);

        // Create new range or extend existing one
        ranges[paddr] = std::max(ranges[paddr], size);

@@ -242,6 +244,8 @@ public:
        SimplifyRanges();
    }

    std::mutex mutex;

    /// Map of accessed ranges (mapping start address to range size)
    std::map<u32, u32> ranges;
};
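`AddAccess` can now be called concurrently from the vertex-shader workers, which is presumably why the mutex was added. A minimal sketch of the pattern under that assumption (simplified types, not the real tracker):

// Minimal sketch: several threads extend a shared address->size map;
// the lock_guard serializes the map updates.
#include <algorithm>
#include <cstdint>
#include <map>
#include <mutex>
#include <thread>
#include <vector>

struct Tracker {
    std::mutex mutex;
    std::map<uint32_t, uint32_t> ranges;

    void AddAccess(uint32_t paddr, uint32_t size) {
        std::lock_guard<std::mutex> lock(mutex);
        auto& entry = ranges[paddr];   // default-constructs 0 if absent
        entry = std::max(entry, size); // create new range or extend existing one
    }
};

int main() {
    Tracker tracker;
    std::vector<std::thread> threads;
    for (int t = 0; t < 4; ++t)
        threads.emplace_back([&tracker, t] { tracker.AddAccess(0x1000 * t, 16); });
    for (auto& th : threads)
        th.join();
    return 0;
}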
@@ -15,7 +15,7 @@ PrimitiveAssembler<VertexType>::PrimitiveAssembler(PipelineRegs::TriangleTopology

template <typename VertexType>
void PrimitiveAssembler<VertexType>::SubmitVertex(const VertexType& vtx,
                                                  TriangleHandler triangle_handler) {
                                                  const TriangleHandler& triangle_handler) {
    switch (topology) {
    case PipelineRegs::TriangleTopology::List:
    case PipelineRegs::TriangleTopology::Shader:

@@ -27,7 +27,7 @@ struct PrimitiveAssembler {
     * NOTE: We could specify the triangle handler in the constructor, but this way we can
     * keep event and handler code next to each other.
     */
    void SubmitVertex(const VertexType& vtx, TriangleHandler triangle_handler);
    void SubmitVertex(const VertexType& vtx, const TriangleHandler& triangle_handler);

    /**
     * Invert the vertex order of the next triangle. Called by geometry shader emitter.
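Taking the `TriangleHandler` (a `std::function`) by const reference avoids re-copying the handler and its bound rasterizer pointer on every submitted vertex. A small illustrative demo of the copy cost, with made-up types:

// Illustrative: a by-value std::function parameter copies its target on every
// call; a const& parameter does not. Counting copies makes this visible.
#include <functional>
#include <iostream>

static int copies = 0;
struct Counted {
    Counted() = default;
    Counted(const Counted&) { ++copies; }
    void operator()() const {}
};

void by_value(std::function<void()> f) { f(); }
void by_ref(const std::function<void()>& f) { f(); }

int main() {
    std::function<void()> handler = Counted{};
    copies = 0;
    for (int i = 0; i < 1000; ++i) by_value(handler);
    std::cout << "by value:  " << copies << " copies\n"; // 1000
    copies = 0;
    for (int i = 0; i < 1000; ++i) by_ref(handler);
    std::cout << "by const&: " << copies << " copies\n"; // 0
    return 0;
}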
@@ -29,7 +29,7 @@ MICROPROFILE_DEFINE(OpenGL_Drawing, "OpenGL", "Drawing", MP_RGB(128, 128, 192));
MICROPROFILE_DEFINE(OpenGL_Blits, "OpenGL", "Blits", MP_RGB(100, 100, 255));
MICROPROFILE_DEFINE(OpenGL_CacheManagement, "OpenGL", "Cache Mgmt", MP_RGB(100, 255, 100));

RasterizerOpenGL::RasterizerOpenGL() : shader_dirty(true) {
RasterizerOpenGL::RasterizerOpenGL() : shader_dirty(true), vertex_buffer_size(0) {
    // Clipping plane 0 is always enabled for PICA fixed clip plane z <= 0
    state.clip_distance[0] = true;

@@ -277,23 +277,23 @@ void RasterizerOpenGL::DrawTriangles() {
    state.Apply();

    glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D,
                           color_surface != nullptr ? color_surface->texture.handle : 0, 0);
    if (depth_surface != nullptr) {
        if (has_stencil) {
            // attach both depth and stencil
            glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D,
                                   depth_surface->texture.handle, 0);
        } else {
            // attach depth
            glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D,
                                   depth_surface->texture.handle, 0);
            // clear stencil attachment
            glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0, 0);
        }
    } else {
        // clear both depth and stencil attachment
        glFramebufferTexture2D(GL_DRAW_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_TEXTURE_2D, 0,
                               0);
    }

    // Sync the viewport

@@ -405,7 +405,7 @@ void RasterizerOpenGL::DrawTriangles() {
    // Sync the uniform data
    if (uniform_block_data.dirty) {
        glBufferData(GL_UNIFORM_BUFFER, sizeof(UniformData), &uniform_block_data.data,
                     GL_STATIC_DRAW);
        uniform_block_data.dirty = false;
    }

@@ -421,15 +421,18 @@ void RasterizerOpenGL::DrawTriangles() {
    state.Apply();

    // Draw the vertex batch
    glBufferData(GL_ARRAY_BUFFER, vertex_batch.size() * sizeof(HardwareVertex), vertex_batch.data(),
                 GL_STREAM_DRAW);
    glDrawArrays(GL_TRIANGLES, 0, (GLsizei)vertex_batch.size());
    GLsizeiptr target_size = vertex_batch.size() * sizeof(HardwareVertex);
    if (vertex_buffer_size < target_size) {
        vertex_buffer_size = target_size * 2;
        glBufferData(GL_ARRAY_BUFFER, vertex_buffer_size, nullptr, GL_STREAM_DRAW);
    }
    glBufferSubData(GL_ARRAY_BUFFER, 0, target_size, vertex_batch.data());
    glDrawArrays(GL_TRIANGLES, 0, static_cast<GLsizei>(vertex_batch.size()));
    vertex_batch.clear();

    // Disable scissor test
    state.scissor.enabled = false;

    vertex_batch.clear();

    // Unbind textures for potential future use as framebuffer attachments
    for (unsigned texture_index = 0; texture_index < pica_textures.size(); ++texture_index) {
        state.texture_units[texture_index].texture_2d = 0;

@@ -284,6 +284,7 @@ private:
    std::array<SamplerInfo, 3> texture_samplers;
    OGLVertexArray vertex_array;
    OGLBuffer vertex_buffer;
    GLsizeiptr vertex_buffer_size;
    OGLBuffer uniform_buffer;
    OGLFramebuffer framebuffer;
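The new upload path reallocates the GL vertex buffer only when a batch outgrows it (doubling the capacity), then streams the data with glBufferSubData instead of calling glBufferData every draw. The policy in isolation, as a hedged sketch — an illustrative wrapper, not the rasterizer's actual class, and it assumes a GL loader such as glad:

// Illustrative: grow-only streaming buffer. Reallocate with glBufferData only
// when the batch outgrows the current store; otherwise update in place.
#include <glad/glad.h> // assumed loader; any loader exposing GL 3.3 works

struct StreamBuffer {
    GLuint handle = 0;       // assumed to be a valid buffer object
    GLsizeiptr capacity = 0; // current allocation size in bytes

    void Upload(const void* data, GLsizeiptr size) {
        glBindBuffer(GL_ARRAY_BUFFER, handle);
        if (capacity < size) {
            // Over-allocate (2x) so steadily growing batches settle quickly.
            capacity = size * 2;
            glBufferData(GL_ARRAY_BUFFER, capacity, nullptr, GL_STREAM_DRAW);
        }
        glBufferSubData(GL_ARRAY_BUFFER, 0, size, data);
    }
};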