# Conflicts:
#	src/video_core/command_processor.cpp
#	src/video_core/renderer_opengl/gl_rasterizer.cpp
This commit is contained in:
citra 2017-10-19 19:04:31 +01:00
commit adcbdb2b5f
8 changed files with 438 additions and 239 deletions

src/common/CMakeLists.txt
@@ -76,6 +76,7 @@ set(HEADERS
     synchronized_wrapper.h
     telemetry.h
     thread.h
+    thread_pool.h
     thread_queue_list.h
     timer.h
     vector_math.h

src/common/thread_pool.h (new file, 120 lines)
@@ -0,0 +1,120 @@
// Copyright 2017 Citra Emulator Project
// Licensed under GPLv2 or any later version
// Refer to the license.txt file included.

#pragma once

#include <condition_variable>
#include <functional>
#include <future>
#include <mutex>
#include <thread>
#include <vector>
#include "common/assert.h"

namespace Common {

class ThreadPool {
private:
    explicit ThreadPool(size_t num_threads) : num_threads(num_threads), workers(num_threads) {
        ASSERT(num_threads);
    }

public:
    /// Returns the lazily-created, process-wide pool, sized to the hardware thread count.
    static ThreadPool& GetPool() {
        static ThreadPool thread_pool(std::thread::hardware_concurrency());
        return thread_pool;
    }

    /// Schedules a callable on a worker picked round-robin and returns a future for its result.
    template <typename F, typename... Args>
    auto push(F&& f, Args&&... args) {
        auto ret = workers[next_worker].push(std::forward<F>(f), std::forward<Args>(args)...);
        next_worker = (next_worker + 1) % num_threads;
        return ret;
    }

    size_t total_threads() const {
        return num_threads;
    }

private:
    /// Bounded, mutex-guarded container. Note that Pop() takes the most recently pushed
    /// element, so the storage behaves as a stack (LIFO) rather than a FIFO queue.
    template <typename T>
    class ThreadsafeQueue {
    private:
        const size_t capacity;
        std::vector<T> queue_storage;
        std::mutex mutex;
        std::condition_variable queue_changed;

    public:
        explicit ThreadsafeQueue(const size_t capacity) : capacity(capacity) {
            queue_storage.reserve(capacity);
        }

        void push(const T& element) {
            std::unique_lock<std::mutex> lock(mutex);
            while (queue_storage.size() >= capacity) {
                queue_changed.wait(lock);
            }
            queue_storage.push_back(element);
            queue_changed.notify_one();
        }

        void push(T&& element) {
            std::unique_lock<std::mutex> lock(mutex);
            while (queue_storage.size() >= capacity) {
                queue_changed.wait(lock);
            }
            queue_storage.emplace_back(std::move(element));
            queue_changed.notify_one();
        }

        T Pop() {
            std::unique_lock<std::mutex> lock(mutex);
            while (queue_storage.empty()) {
                queue_changed.wait(lock);
            }
            T element(std::move(queue_storage.back()));
            queue_storage.pop_back();
            queue_changed.notify_one();
            return element;
        }
    };

    class Worker {
    private:
        ThreadsafeQueue<std::function<void()>> queue;
        std::thread thread;
        static constexpr size_t MAX_QUEUE_CAPACITY = 50;

    public:
        Worker() : queue(MAX_QUEUE_CAPACITY), thread([this] { Loop(); }) {}

        ~Worker() {
            queue.push(nullptr); // A null task signals Loop() to exit
            thread.join();
        }

        void Loop() {
            while (true) {
                std::function<void()> fn(queue.Pop());
                if (!fn) // a nullptr function is the signal to exit the loop
                    break;
                fn();
            }
        }

        /// Wraps the callable in a packaged_task, queues it, and returns its future.
        template <typename F, typename... Args>
        auto push(F&& f, Args&&... args) {
            auto task = std::make_shared<std::packaged_task<decltype(f(args...))()>>(
                std::bind(std::forward<F>(f), std::forward<Args>(args)...));
            queue.push([task] { (*task)(); });
            return task->get_future();
        }
    };

    const size_t num_threads;
    size_t next_worker = 0;
    std::vector<Worker> workers;
};

} // namespace Common
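For context, a minimal sketch of how callers are expected to use this pool. Only GetPool() and push() come from the header above; the Accumulate task, the loop bounds, and the file name are invented for illustration:

// usage_sketch.cpp - hypothetical standalone example, not part of the commit
#include <cstdio>
#include <future>
#include <vector>
#include "common/thread_pool.h"

static int Accumulate(int begin, int end) {
    int sum = 0;
    for (int i = begin; i < end; ++i)
        sum += i;
    return sum;
}

int main() {
    auto& pool = Common::ThreadPool::GetPool();

    // push() forwards the callable and its arguments to a worker (round-robin)
    // and returns a std::future for the result.
    std::vector<std::future<int>> results;
    for (int i = 0; i < 4; ++i)
        results.push_back(pool.push(Accumulate, i * 100, (i + 1) * 100));

    int total = 0;
    for (auto& f : results)
        total += f.get(); // blocks until the worker has run the task
    std::printf("sum = %d\n", total);
}

Because dispatch is round-robin rather than work-stealing, tasks should be of roughly equal cost; the vertex-shader loop below satisfies that by striding the vertex range evenly across workers.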

src/video_core/command_processor.cpp
@@ -4,11 +4,13 @@
 #include <array>
 #include <cstddef>
+#include <future>
 #include <memory>
 #include <utility>
 #include "common/assert.h"
 #include "common/logging/log.h"
 #include "common/microprofile.h"
+#include "common/thread_pool.h"
 #include "common/vector_math.h"
 #include "core/hle/service/gsp_gpu.h"
 #include "core/hw/gpu.h"
@@ -119,16 +121,64 @@ static void WriteUniformFloatReg(ShaderRegs& config, Shader::ShaderSetup& setup,
     }
 }
 
-static void LoadDefaultVertexAttributes(u32 register_value) {
+static void WritePicaReg(u32 id, u32 value, u32 mask) {
     auto& regs = g_state.regs;
 
+    if (id >= Regs::NUM_REGS) {
+        LOG_ERROR(HW_GPU,
+                  "Commandlist tried to write to invalid register 0x%03X (value: %08X, mask: %X)",
+                  id, value, mask);
+        return;
+    }
+
+    // TODO: Figure out how register masking acts on e.g. vs.uniform_setup.set_value
+    u32 old_value = regs.reg_array[id];
+
+    const u32 write_mask = expand_bits_to_bytes[mask];
+    regs.reg_array[id] = (old_value & ~write_mask) | (value & write_mask);
+
+    // Double check for is_pica_tracing to avoid call overhead
+    if (DebugUtils::IsPicaTracing()) {
+        DebugUtils::OnPicaRegWrite({(u16)id, (u16)mask, regs.reg_array[id]});
+    }
+
+    if (g_debug_context)
+        g_debug_context->OnEvent(DebugContext::Event::PicaCommandLoaded,
+                                 reinterpret_cast<void*>(&id));
+
+    switch (id) {
+    // Trigger IRQ
+    case PICA_REG_INDEX(trigger_irq):
+        Service::GSP::SignalInterrupt(Service::GSP::InterruptId::P3D);
+        break;
+
+    case PICA_REG_INDEX(pipeline.triangle_topology):
+        g_state.primitive_assembler.Reconfigure(regs.pipeline.triangle_topology);
+        break;
+
+    case PICA_REG_INDEX(pipeline.restart_primitive):
+        g_state.primitive_assembler.Reset();
+        break;
+
+    case PICA_REG_INDEX(pipeline.vs_default_attributes_setup.index):
+        g_state.immediate.current_attribute = 0;
+        g_state.immediate.reset_geometry_pipeline = true;
+        default_attr_counter = 0;
+        break;
+
+    // Load default vertex input attributes
+    case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[0], 0x233):
+    case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[1], 0x234):
+    case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[2], 0x235): {
         // TODO: Does actual hardware indeed keep an intermediate buffer or does
         // it directly write the values?
-        default_attr_write_buffer[default_attr_counter++] = register_value;
+        default_attr_write_buffer[default_attr_counter++] = value;
 
-        // Default attributes are written in a packed format such that four float24 values are encoded
-        // in three 32-bit numbers.
-        // We write to internal memory once a full such vector is written.
+        // Default attributes are written in a packed format such that four float24 values are
+        // encoded in three 32-bit numbers. We write to internal memory once a full such vector
+        // is written.
         if (default_attr_counter >= 3) {
             default_attr_counter = 0;

@@ -136,7 +186,7 @@ static void LoadDefaultVertexAttributes(u32 register_value) {
             if (setup.index >= 16) {
                 LOG_ERROR(HW_GPU, "Invalid VS default attribute index %d", (int)setup.index);
-                return;
+                break;
             }
 
             Math::Vec4<float24> attribute;
@@ -201,16 +251,35 @@ static void LoadDefaultVertexAttributes(u32 register_value) {
                     // See: https://github.com/citra-emu/citra/pull/2866#issuecomment-327011550
                     VideoCore::g_renderer->Rasterizer()->DrawTriangles();
                     if (g_debug_context) {
-                        g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch, nullptr);
+                        g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch,
+                                                 nullptr);
                     }
                 }
             }
         }
+        break;
     }
 
-static void Draw(u32 command_id) {
+    case PICA_REG_INDEX(pipeline.gpu_mode):
+        // This register likely just enables vertex processing and doesn't need any special handling
+        break;
+
+    case PICA_REG_INDEX_WORKAROUND(pipeline.command_buffer.trigger[0], 0x23c):
+    case PICA_REG_INDEX_WORKAROUND(pipeline.command_buffer.trigger[1], 0x23d): {
+        unsigned index =
+            static_cast<unsigned>(id - PICA_REG_INDEX(pipeline.command_buffer.trigger[0]));
+        u32* head_ptr = (u32*)Memory::GetPhysicalPointer(
+            regs.pipeline.command_buffer.GetPhysicalAddress(index));
+        g_state.cmd_list.head_ptr = g_state.cmd_list.current_ptr = head_ptr;
+        g_state.cmd_list.length = regs.pipeline.command_buffer.GetSize(index) / sizeof(u32);
+        break;
+    }
+
+    // It seems like these trigger vertex rendering
+    case PICA_REG_INDEX(pipeline.trigger_draw):
+    case PICA_REG_INDEX(pipeline.trigger_draw_indexed): {
         MICROPROFILE_SCOPE(GPU_Drawing);
-    auto& regs = g_state.regs;
+        const bool is_indexed = (id == PICA_REG_INDEX(pipeline.trigger_draw_indexed));
 
 #if PICA_LOG_TEV
         DebugUtils::DumpTevStageConfig(regs.GetTevStages());
@@ -218,15 +287,37 @@ static void Draw(u32 command_id) {
         if (g_debug_context)
             g_debug_context->OnEvent(DebugContext::Event::IncomingPrimitiveBatch, nullptr);
 
+        struct CachedVertex {
+            explicit CachedVertex() : batch(0), lock{ATOMIC_FLAG_INIT} {}
+            CachedVertex(const CachedVertex& other) : CachedVertex() {}
+            union {
+                Shader::AttributeBuffer output_attr; // GS used
+                Shader::OutputVertex output_vertex;  // No GS
+            };
+            std::atomic<u32> batch;
+            std::atomic_flag lock;
+        };
+        static std::vector<CachedVertex> vs_output(0x10000);
+
+        if (!is_indexed && vs_output.size() < regs.pipeline.num_vertices)
+            vs_output.resize(regs.pipeline.num_vertices);
+
+        // Used as a means to invalidate data from the previous batch without clearing it
+        static u32 batch_id = std::numeric_limits<u32>::max();
+
+        ++batch_id;
+        if (batch_id == 0) { // Reset the cache when the id overflows, for safety
+            ++batch_id;
+            for (auto& entry : vs_output)
+                entry.batch = 0;
+        }
+
         // Processes information about internal vertex attributes to figure out how a vertex is
         // loaded.
         // Later, these can be compiled and cached.
         const u32 base_address = regs.pipeline.vertex_attributes.GetPhysicalBaseAddress();
         VertexLoader loader(regs.pipeline);
 
-        // Load vertices
-        bool is_indexed = (command_id == PICA_REG_INDEX(pipeline.trigger_draw_indexed));
-
         const auto& index_info = regs.pipeline.index_array;
         const u8* index_address_8 = Memory::GetPhysicalPointer(base_address + index_info.offset);
         if (!index_address_8) {
@@ -236,6 +327,12 @@ static void Draw(u32 command_id) {
         const u16* index_address_16 = reinterpret_cast<const u16*>(index_address_8);
         bool index_u16 = index_info.format != 0;
 
+        auto VertexIndex = [&](unsigned int index) {
+            // Indexed rendering doesn't use the start offset
+            return is_indexed ? (index_u16 ? index_address_16[index] : index_address_8[index])
+                              : (index + regs.pipeline.vertex_offset);
+        };
+
         PrimitiveAssembler<Shader::OutputVertex>& primitive_assembler = g_state.primitive_assembler;
 
         if (g_debug_context && g_debug_context->recorder) {
@ -254,81 +351,125 @@ static void Draw(u32 command_id) {
DebugUtils::MemoryAccessTracker memory_accesses; DebugUtils::MemoryAccessTracker memory_accesses;
// Simple circular-replacement vertex cache
// The size has been tuned for optimal balance between hit-rate and the cost of lookup
const size_t VERTEX_CACHE_SIZE = 32;
std::array<u16, VERTEX_CACHE_SIZE> vertex_cache_ids;
std::array<Shader::AttributeBuffer, VERTEX_CACHE_SIZE> vertex_cache;
Shader::AttributeBuffer vs_output;
unsigned int vertex_cache_pos = 0;
vertex_cache_ids.fill(-1);
auto* shader_engine = Shader::GetEngine(); auto* shader_engine = Shader::GetEngine();
Shader::UnitState shader_unit;
shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset); shader_engine->SetupBatch(g_state.vs, regs.vs.main_offset);
const bool use_gs = regs.pipeline.use_gs == PipelineRegs::UseGS::Yes;
auto VSUnitLoop = [&](u32 thread_id, auto num_threads) {
constexpr bool single_thread = std::is_same_v<std::integral_constant<u32, 1>, decltype(num_threads)>;
Shader::UnitState shader_unit;
for (unsigned int index = thread_id; index < regs.pipeline.num_vertices; index += num_threads) {
unsigned int vertex = VertexIndex(index);
auto& cached_vertex = vs_output[is_indexed ? vertex : index];
// -1 is a common special value used for primitive restart. Since it's unknown if
// the PICA supports it, and it would mess up the caching, guard against it here.
ASSERT(vertex != -1);
if (is_indexed) {
if (g_debug_context && Pica::g_debug_context->recorder) {
int size = index_u16 ? 2 : 1;
memory_accesses.AddAccess(base_address + index_info.offset + size * index,
size);
}
if (!single_thread) {
// Try locking this vertex
if (cached_vertex.lock.test_and_set(std::memory_order_acquire)) {
// Another thread is processing this vertex
continue;
}
// Vertex is not being processed and is from the correct batch
else if (cached_vertex.batch.load(std::memory_order_acquire) == batch_id) {
// Unlock
cached_vertex.lock.clear(std::memory_order_release);
continue;
}
}
else if (cached_vertex.batch.load(std::memory_order_relaxed) == batch_id) {
continue;
}
}
Shader::AttributeBuffer attribute_buffer;
Shader::AttributeBuffer& output_attr = use_gs ? cached_vertex.output_attr : attribute_buffer;
// Initialize data for the current vertex
loader.LoadVertex(base_address, index, vertex, attribute_buffer, memory_accesses);
// Send to vertex shader
if (g_debug_context)
g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation, &attribute_buffer);
shader_unit.LoadInput(regs.vs, attribute_buffer);
shader_engine->Run(g_state.vs, shader_unit);
shader_unit.WriteOutput(regs.vs, output_attr);
if (!use_gs)
cached_vertex.output_vertex = Shader::OutputVertex::FromAttributeBuffer(regs.rasterizer, output_attr);
if (!single_thread) {
cached_vertex.batch.store(batch_id, std::memory_order_release);
if (is_indexed) {
cached_vertex.lock.clear(std::memory_order_release);
}
}
else if (is_indexed) {
cached_vertex.batch.store(batch_id, std::memory_order_relaxed);
}
}
};
auto& thread_pool = Common::ThreadPool::GetPool();
std::vector<std::future<void>> futures;
constexpr unsigned int MIN_VERTICES_PER_THREAD = 10;
unsigned int vs_threads = regs.pipeline.num_vertices / MIN_VERTICES_PER_THREAD;
vs_threads = std::min(vs_threads, std::thread::hardware_concurrency() - 1);
if (!vs_threads) {
VSUnitLoop(0, std::integral_constant<u32, 1>{});
} else {
for (unsigned int thread_id = 0; thread_id < vs_threads; ++thread_id) {
futures.emplace_back(thread_pool.push(VSUnitLoop, thread_id, vs_threads));
}
}
g_state.geometry_pipeline.Reconfigure(); g_state.geometry_pipeline.Reconfigure();
g_state.geometry_pipeline.Setup(shader_engine); g_state.geometry_pipeline.Setup(shader_engine);
if (g_state.geometry_pipeline.NeedIndexInput()) if (g_state.geometry_pipeline.NeedIndexInput())
ASSERT(is_indexed); ASSERT(is_indexed);
for (unsigned int index = 0; index < regs.pipeline.num_vertices; ++index) { for (unsigned int index = 0; index < regs.pipeline.num_vertices; ++index) {
// Indexed rendering doesn't use the start offset unsigned int vertex = VertexIndex(index);
unsigned int vertex = is_indexed auto& cached_vertex = vs_output[is_indexed ? vertex : index];
? (index_u16 ? index_address_16[index] : index_address_8[index])
: (index + regs.pipeline.vertex_offset);
// -1 is a common special value used for primitive restart. Since it's unknown if if (use_gs && is_indexed && g_state.geometry_pipeline.NeedIndexInput()) {
// the PICA supports it, and it would mess up the caching, guard against it here.
ASSERT(vertex != -1);
bool vertex_cache_hit = false;
if (is_indexed) {
if (g_state.geometry_pipeline.NeedIndexInput()) {
g_state.geometry_pipeline.SubmitIndex(vertex); g_state.geometry_pipeline.SubmitIndex(vertex);
continue; continue;
} }
if (g_debug_context && Pica::g_debug_context->recorder) { // Synchronize threads
int size = index_u16 ? 2 : 1; if (vs_threads) {
memory_accesses.AddAccess(base_address + index_info.offset + size * index, size); while (cached_vertex.batch.load(std::memory_order_acquire) != batch_id) {
} std::this_thread::yield();
for (unsigned int i = 0; i < VERTEX_CACHE_SIZE; ++i) {
if (vertex == vertex_cache_ids[i]) {
vs_output = vertex_cache[i];
vertex_cache_hit = true;
break;
}
}
}
if (!vertex_cache_hit) {
// Initialize data for the current vertex
Shader::AttributeBuffer input;
loader.LoadVertex(base_address, index, vertex, input, memory_accesses);
// Send to vertex shader
if (g_debug_context)
g_debug_context->OnEvent(DebugContext::Event::VertexShaderInvocation,
(void*)&input);
shader_unit.LoadInput(regs.vs, input);
shader_engine->Run(g_state.vs, shader_unit);
shader_unit.WriteOutput(regs.vs, vs_output);
if (is_indexed) {
vertex_cache[vertex_cache_pos] = vs_output;
vertex_cache_ids[vertex_cache_pos] = vertex;
vertex_cache_pos = (vertex_cache_pos + 1) % VERTEX_CACHE_SIZE;
} }
} }
if (use_gs) {
// Send to geometry pipeline // Send to geometry pipeline
g_state.geometry_pipeline.SubmitVertex(vs_output); g_state.geometry_pipeline.SubmitVertex(cached_vertex.output_attr);
} else {
primitive_assembler.SubmitVertex(cached_vertex.output_vertex,
std::bind(&std::decay_t<decltype(*VideoCore::g_renderer->Rasterizer())>::AddTriangle,
VideoCore::g_renderer->Rasterizer(),
std::placeholders::_1, std::placeholders::_2, std::placeholders::_3));
} }
}
for (auto& future : futures)
future.get();
for (auto& range : memory_accesses.ranges) { for (auto& range : memory_accesses.ranges) {
g_debug_context->recorder->MemoryAccessed(Memory::GetPhysicalPointer(range.first), g_debug_context->recorder->MemoryAccessed(Memory::GetPhysicalPointer(range.first),
@@ -336,85 +477,14 @@ static void Draw(u32 command_id) {
         }
 
         VideoCore::g_renderer->Rasterizer()->DrawTriangles();
 
         if (g_debug_context) {
             g_debug_context->OnEvent(DebugContext::Event::FinishedPrimitiveBatch, nullptr);
         }
-}
-
-static void WritePicaReg(u32 id, u32 value, u32 mask) {
-    auto& regs = g_state.regs;
-
-    if (id >= Regs::NUM_REGS) {
-        LOG_ERROR(HW_GPU,
-                  "Commandlist tried to write to invalid register 0x%03X (value: %08X, mask: %X)",
-                  id, value, mask);
-        return;
-    }
-
-    // TODO: Figure out how register masking acts on e.g. vs.uniform_setup.set_value
-    u32 old_value = regs.reg_array[id];
-
-    const u32 write_mask = expand_bits_to_bytes[mask];
-    regs.reg_array[id] = (old_value & ~write_mask) | (value & write_mask);
-
-    // Double check for is_pica_tracing to avoid call overhead
-    if (DebugUtils::IsPicaTracing()) {
-        DebugUtils::OnPicaRegWrite({(u16)id, (u16)mask, regs.reg_array[id]});
-    }
-
-    if (g_debug_context)
-        g_debug_context->OnEvent(DebugContext::Event::PicaCommandLoaded,
-                                 reinterpret_cast<void*>(&id));
-
-    switch (id) {
-    // Trigger IRQ
-    case PICA_REG_INDEX(trigger_irq):
-        Service::GSP::SignalInterrupt(Service::GSP::InterruptId::P3D);
-        break;
-
-    case PICA_REG_INDEX(pipeline.triangle_topology):
-        g_state.primitive_assembler.Reconfigure(regs.pipeline.triangle_topology);
-        break;
-
-    case PICA_REG_INDEX(pipeline.restart_primitive):
-        g_state.primitive_assembler.Reset();
-        break;
-
-    case PICA_REG_INDEX(pipeline.vs_default_attributes_setup.index):
-        g_state.immediate.current_attribute = 0;
-        g_state.immediate.reset_geometry_pipeline = true;
-        default_attr_counter = 0;
-        break;
-
-    // Load default vertex input attributes
-    case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[0], 0x233):
-    case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[1], 0x234):
-    case PICA_REG_INDEX_WORKAROUND(pipeline.vs_default_attributes_setup.set_value[2], 0x235):
-        LoadDefaultVertexAttributes(value);
-        break;
-
-    case PICA_REG_INDEX(pipeline.gpu_mode):
-        // This register likely just enables vertex processing and doesn't need any special handling
-        break;
-
-    case PICA_REG_INDEX_WORKAROUND(pipeline.command_buffer.trigger[0], 0x23c):
-    case PICA_REG_INDEX_WORKAROUND(pipeline.command_buffer.trigger[1], 0x23d): {
-        unsigned index =
-            static_cast<unsigned>(id - PICA_REG_INDEX(pipeline.command_buffer.trigger[0]));
-        u32* head_ptr = (u32*)Memory::GetPhysicalPointer(
-            regs.pipeline.command_buffer.GetPhysicalAddress(index));
-        g_state.cmd_list.head_ptr = g_state.cmd_list.current_ptr = head_ptr;
-        g_state.cmd_list.length = regs.pipeline.command_buffer.GetSize(index) / sizeof(u32);
         break;
     }
 
-    // It seems like these trigger vertex rendering
-    case PICA_REG_INDEX(pipeline.trigger_draw):
-    case PICA_REG_INDEX(pipeline.trigger_draw_indexed):
-        Draw(id);
-        break;
-
     case PICA_REG_INDEX(gs.bool_uniforms):
         WriteUniformBoolReg(g_state.gs, g_state.regs.gs.bool_uniforms.Value());
         break;
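Taken together, the command_processor changes replace the old 32-entry circular vertex cache with a shared, batch-tagged cache that several VS worker threads fill concurrently: a worker claims an entry with atomic_flag::test_and_set, publishes its result by storing the current batch_id with release semantics, and the assembling thread spin-waits with acquire loads until the entry carries the current batch id. The following self-contained sketch models just that claim/publish protocol; the Entry type, the fake "shading" step, and the thread counts are invented for illustration and are not Citra code:

// claim_publish_sketch.cpp - illustrative model of the CachedVertex protocol
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

struct Entry {
    std::atomic<unsigned> batch{0}; // last batch this entry was computed for (0 = never)
    std::atomic_flag lock = ATOMIC_FLAG_INIT;
    int value = 0; // stands in for the shaded vertex
};

static std::vector<Entry> cache(1024);
static unsigned batch_id = 1;

static void Worker(unsigned tid, unsigned num_threads) {
    for (unsigned i = tid; i < cache.size(); i += num_threads) {
        Entry& e = cache[i];
        if (e.lock.test_and_set(std::memory_order_acquire))
            continue; // another thread is already shading this entry
        if (e.batch.load(std::memory_order_acquire) == batch_id) {
            e.lock.clear(std::memory_order_release); // already up to date
            continue;
        }
        e.value = static_cast<int>(i) * 2;                  // "run the vertex shader"
        e.batch.store(batch_id, std::memory_order_release); // publish the result
        e.lock.clear(std::memory_order_release);
    }
}

int main() {
    std::thread t0(Worker, 0, 2), t1(Worker, 1, 2);

    // Consumer: spin until each entry is published for the current batch,
    // exactly like the primitive-assembly loop above.
    long sum = 0;
    for (Entry& e : cache) {
        while (e.batch.load(std::memory_order_acquire) != batch_id)
            std::this_thread::yield();
        sum += e.value;
    }
    t0.join();
    t1.join();
    std::printf("sum = %ld\n", sum);
}

The release store to batch after writing value, paired with the consumer's acquire load, is what guarantees the consumer sees the finished vertex rather than a torn or stale one.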

src/video_core/debug_utils/debug_utils.h
@@ -235,6 +235,8 @@ class MemoryAccessTracker {
 public:
     /// Record a particular memory access in the list
     void AddAccess(u32 paddr, u32 size) {
+        std::lock_guard<std::mutex> lock(mutex);
+
         // Create new range or extend existing one
         ranges[paddr] = std::max(ranges[paddr], size);
@@ -242,6 +244,8 @@ public:
         SimplifyRanges();
     }
 
+    std::mutex mutex;
+
     /// Map of accessed ranges (mapping start address to range size)
     std::map<u32, u32> ranges;
 };
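The mutex is needed because the VS worker threads introduced in command_processor.cpp now call AddAccess concurrently while recording, and std::map gives no thread-safety guarantees for concurrent mutation. A reduced illustration of the pattern, with a hypothetical Tracker type rather than the actual Citra class:

// tracker_sketch.cpp - why AddAccess takes the lock (illustrative)
#include <algorithm>
#include <cstdio>
#include <map>
#include <mutex>
#include <thread>

struct Tracker {
    void AddAccess(unsigned paddr, unsigned size) {
        std::lock_guard<std::mutex> lock(mutex); // serialize all map mutation
        ranges[paddr] = std::max(ranges[paddr], size);
    }
    std::mutex mutex;
    std::map<unsigned, unsigned> ranges;
};

int main() {
    Tracker tracker;
    auto hammer = [&](unsigned base) {
        for (unsigned i = 0; i < 10000; ++i)
            tracker.AddAccess(base + (i % 64) * 4, 4); // two threads, same addresses
    };
    std::thread a(hammer, 0x1000), b(hammer, 0x1000);
    a.join();
    b.join();
    std::printf("%zu distinct ranges\n", tracker.ranges.size());
}

Without the lock_guard, the two threads would race on the map's rebalancing and the program would exhibit undefined behavior.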

src/video_core/primitive_assembly.cpp
@@ -15,7 +15,7 @@ PrimitiveAssembler<VertexType>::PrimitiveAssembler(PipelineRegs::TriangleTopology
 template <typename VertexType>
 void PrimitiveAssembler<VertexType>::SubmitVertex(const VertexType& vtx,
-                                                  TriangleHandler triangle_handler) {
+                                                  const TriangleHandler& triangle_handler) {
     switch (topology) {
     case PipelineRegs::TriangleTopology::List:
     case PipelineRegs::TriangleTopology::Shader:

src/video_core/primitive_assembly.h
@@ -27,7 +27,7 @@ struct PrimitiveAssembler {
      * NOTE: We could specify the triangle handler in the constructor, but this way we can
      * keep event and handler code next to each other.
      */
-    void SubmitVertex(const VertexType& vtx, TriangleHandler triangle_handler);
+    void SubmitVertex(const VertexType& vtx, const TriangleHandler& triangle_handler);
 
     /**
      * Invert the vertex order of the next triangle. Called by geometry shader emitter.
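Passing the handler as const TriangleHandler& matters because SubmitVertex is called once per vertex from the hot draw loop: a by-value std::function parameter copies the callable, including any state captured through std::bind, on every call. A small stand-alone demonstration of the difference; CountingCallable and both Submit* functions are invented for illustration:

// handler_copy_sketch.cpp - cost of passing a std::function by value (illustrative)
#include <cstdio>
#include <functional>

static int copies = 0;

struct CountingCallable {
    CountingCallable() = default;
    CountingCallable(const CountingCallable&) { ++copies; }
    void operator()(int) const {}
};

// By-value parameter: the std::function (and its captured state) is copied per call.
static void SubmitByValue(int vtx, std::function<void(int)> handler) {
    handler(vtx);
}

// const& parameter: no copy, identical call syntax for the caller.
static void SubmitByRef(int vtx, const std::function<void(int)>& handler) {
    handler(vtx);
}

int main() {
    std::function<void(int)> handler = CountingCallable{};

    copies = 0;
    for (int i = 0; i < 1000; ++i)
        SubmitByValue(i, handler);
    std::printf("by value: %d copies of the callable\n", copies);

    copies = 0;
    for (int i = 0; i < 1000; ++i)
        SubmitByRef(i, handler);
    std::printf("by ref:   %d copies\n", copies);
}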

src/video_core/renderer_opengl/gl_rasterizer.cpp
@@ -29,7 +29,7 @@ MICROPROFILE_DEFINE(OpenGL_Drawing, "OpenGL", "Drawing", MP_RGB(128, 128, 192));
 MICROPROFILE_DEFINE(OpenGL_Blits, "OpenGL", "Blits", MP_RGB(100, 100, 255));
 MICROPROFILE_DEFINE(OpenGL_CacheManagement, "OpenGL", "Cache Mgmt", MP_RGB(100, 255, 100));
 
-RasterizerOpenGL::RasterizerOpenGL() : shader_dirty(true) {
+RasterizerOpenGL::RasterizerOpenGL() : shader_dirty(true), vertex_buffer_size(0) {
     // Clipping plane 0 is always enabled for PICA fixed clip plane z <= 0
     state.clip_distance[0] = true;
 
@@ -421,15 +421,18 @@ void RasterizerOpenGL::DrawTriangles() {
     state.Apply();
 
     // Draw the vertex batch
-    glBufferData(GL_ARRAY_BUFFER, vertex_batch.size() * sizeof(HardwareVertex),
-                 vertex_batch.data(), GL_STREAM_DRAW);
-    glDrawArrays(GL_TRIANGLES, 0, (GLsizei)vertex_batch.size());
+    GLsizeiptr target_size = vertex_batch.size() * sizeof(HardwareVertex);
+    if (vertex_buffer_size < target_size) {
+        vertex_buffer_size = target_size * 2;
+        glBufferData(GL_ARRAY_BUFFER, vertex_buffer_size, nullptr, GL_STREAM_DRAW);
+    }
+    glBufferSubData(GL_ARRAY_BUFFER, 0, target_size, vertex_batch.data());
+    glDrawArrays(GL_TRIANGLES, 0, static_cast<GLsizei>(vertex_batch.size()));
+    vertex_batch.clear();
 
     // Disable scissor test
     state.scissor.enabled = false;
 
-    vertex_batch.clear();
-
     // Unbind textures for potential future use as framebuffer attachments
     for (unsigned texture_index = 0; texture_index < pica_textures.size(); ++texture_index) {
         state.texture_units[texture_index].texture_2d = 0;
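The DrawTriangles change stops re-specifying the vertex buffer on every batch: the VBO is reallocated only when a batch outgrows it, with 2x geometric growth so the allocation size converges quickly, and the data itself is uploaded with glBufferSubData. A minimal sketch of the same upload strategy in isolation; it assumes a current GL context with a buffer already bound to GL_ARRAY_BUFFER, and the glad include and Vertex type are placeholders:

// vbo_growth_sketch.cpp - grow-on-demand vertex buffer upload (illustrative)
#include <vector>
#include <glad/glad.h> // any GL loader; assumed available

struct Vertex {
    float pos[4];
};

static GLsizeiptr vertex_buffer_size = 0;

// Uploads a batch into the bound GL_ARRAY_BUFFER, reallocating only on growth.
static void UploadBatch(const std::vector<Vertex>& batch) {
    const GLsizeiptr target_size = batch.size() * sizeof(Vertex);
    if (vertex_buffer_size < target_size) {
        // Over-allocate (2x) so steadily growing batches settle quickly, and pass
        // nullptr so the driver can orphan the old storage instead of stalling.
        vertex_buffer_size = target_size * 2;
        glBufferData(GL_ARRAY_BUFFER, vertex_buffer_size, nullptr, GL_STREAM_DRAW);
    }
    glBufferSubData(GL_ARRAY_BUFFER, 0, target_size, batch.data());
    glDrawArrays(GL_TRIANGLES, 0, static_cast<GLsizei>(batch.size()));
}

Re-specifying a fresh data store every frame forces the driver to allocate and copy each time; with this pattern a steady-state frame performs only the glBufferSubData upload.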

src/video_core/renderer_opengl/gl_rasterizer.h
@@ -284,6 +284,7 @@ private:
     std::array<SamplerInfo, 3> texture_samplers;
     OGLVertexArray vertex_array;
     OGLBuffer vertex_buffer;
+    GLsizeiptr vertex_buffer_size;
     OGLBuffer uniform_buffer;
     OGLFramebuffer framebuffer;